From bb447af1432e1ab8679b9e5ad459ccafd81129ff Mon Sep 17 00:00:00 2001
From: Stephen Williams
Date: Sun, 2 Aug 2009 17:04:00 -0700
Subject: [PATCH] Threads load from signals / force propagates without refiltering.

Two small fixes: Threads should load signal values from signal_value
objects, not signal functors, and the force method should not run its
value through the filter.
---
 vvp/vthread.cc     |  6 +++---
 vvp/vvp_net.h      |  8 ++++++++
 vvp/vvp_net_sig.cc | 13 ++++++++++---
 3 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/vvp/vthread.cc b/vvp/vthread.cc
index 31d246539..da5fb5eeb 100644
--- a/vvp/vthread.cc
+++ b/vvp/vthread.cc
@@ -2953,10 +2953,10 @@ static vvp_vector4_t load_base(vthread_t thr, vvp_code_t cp)
 
 	/* For the %load to work, the functor must actually be a
 	   signal functor. Only signals save their vector value. */
-      vvp_fun_signal_vec*sig = dynamic_cast<vvp_fun_signal_vec*> (net->fun);
+      vvp_signal_value*sig = dynamic_cast<vvp_signal_value*> (net->fil);
       if (sig == 0) {
-	    cerr << "%%load/v error: Net arg not a vector signal? "
-		 << typeid(*net->fun).name() << endl;
+	    cerr << "%%load/v error: Net arg not a signal? "
+		 << typeid(*net->fil).name() << endl;
 	    assert(sig);
       }
 
diff --git a/vvp/vvp_net.h b/vvp/vvp_net.h
index e3943bf78..8bad4e378 100644
--- a/vvp/vvp_net.h
+++ b/vvp/vvp_net.h
@@ -1146,7 +1146,15 @@ class vvp_net_fil_t : public vvp_vpi_callback {
       bool test_force_mask(unsigned bit) const;
       bool test_force_mask_is_zero() const;
 
+	// This template method is used by derived classes to process
+	// the val through the force mask. The force value is the
+	// currently forced value, and the buf is a value that this
+	// method will use to hold a filtered value, if needed. This
+	// method returns a pointer to val or buf.
       template <class T> const T*filter_mask_(const T&val, const T&force, T&buf);
+	// This template method is a scalar version of the above. It
+	// leaves the val, or it replaces it with a forced value.
+	// (Not really implemented, yet.)
       template <class T> bool filter_mask_(T&val);
 
     private:
diff --git a/vvp/vvp_net_sig.cc b/vvp/vvp_net_sig.cc
index ce993e194..b3bd59b60 100644
--- a/vvp/vvp_net_sig.cc
+++ b/vvp/vvp_net_sig.cc
@@ -124,7 +124,7 @@ void vvp_net_t::force_vec4(const vvp_vector4_t&val, vvp_vector2_t mask)
 {
       assert(fil);
       fil->force_fil_vec4(val, mask);
-      send_vec4(val, 0);
+      vvp_send_vec4(out_, val, 0);
 }
 
 void vvp_fun_signal8::force_fil_vec4(const vvp_vector4_t&val, vvp_vector2_t mask)
@@ -844,7 +844,11 @@ vvp_wire_vec4::vvp_wire_vec4(unsigned wid, vvp_bit4_t init)
 
 const vvp_vector4_t* vvp_wire_vec4::filter_vec4(const vvp_vector4_t&bit)
 {
-      return filter_mask_(bit, force4_, filter4_);
+	// Keep track of the value being driven from this net, even if
+	// it is not ultimately what survives the force filter.
+      bits4_ = bit;
+      const vvp_vector4_t*tmp = filter_mask_(bit, force4_, filter4_);
+      return tmp;
 }
 
 const vvp_vector8_t* vvp_wire_vec4::filter_vec8(const vvp_vector8_t&bit)
@@ -933,7 +937,10 @@ vvp_scalar_t vvp_wire_vec4::scalar_value(unsigned idx) const
 
 vvp_vector4_t vvp_wire_vec4::vec4_value() const
 {
-      assert(0);
+      vvp_vector4_t tmp = bits4_;
+      for (unsigned idx = 0 ; idx < bits4_.size() ; idx += 1)
+	    tmp.set_bit(idx, filtered_value_(bits4_, idx));
+      return tmp;
 }
 
 vvp_wire_vec8::vvp_wire_vec8(unsigned wid)
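
For reference, below is a minimal standalone sketch of the force-mask
filtering that the new filter_mask_() comments describe. SimpleFilter and
SimpleVec are hypothetical stand-ins, not the real vvp_net_fil_t /
vvp_vector4_t classes; the point is only to show a filter passing the
driven value straight through when nothing is forced, and otherwise
overlaying the forced bits into a scratch buffer and returning that
buffer instead.

// Standalone illustration of the force-mask filter idea. The real code
// works on vvp_vector4_t/vvp_vector2_t; here plain vectors stand in.
#include <cstddef>
#include <iostream>
#include <vector>

struct SimpleVec {
      std::vector<int> bits;
      size_t size() const { return bits.size(); }
};

class SimpleFilter {
    public:
      explicit SimpleFilter(size_t wid)
      : force_mask_(wid, false), force_{ std::vector<int>(wid, 0) } { }

	// Mark a bit as forced and remember its forced value.
      void force_bit(size_t idx, int val)
      { force_mask_[idx] = true; force_.bits[idx] = val; }

	// Analogue of filter_mask_(val, force, buf): if nothing is forced,
	// pass val straight through; otherwise overlay the forced bits
	// into buf and return a pointer to buf instead of val.
      const SimpleVec* filter(const SimpleVec&val, SimpleVec&buf) const
      {
	    bool any_forced = false;
	    for (size_t idx = 0 ; idx < force_mask_.size() ; idx += 1)
		  any_forced |= force_mask_[idx];
	    if (! any_forced) return &val;

	    buf = val;
	    for (size_t idx = 0 ; idx < buf.size() ; idx += 1)
		  if (force_mask_[idx]) buf.bits[idx] = force_.bits[idx];
	    return &buf;
      }

    private:
      std::vector<bool> force_mask_;  // which bits are currently forced
      SimpleVec force_;               // the forced value for those bits
};

int main()
{
      SimpleFilter fil (4);
      fil.force_bit(1, 1);

      SimpleVec driven { std::vector<int>(4, 0) };
      SimpleVec buf;
      const SimpleVec*out = fil.filter(driven, buf);

	// Bit 1 comes from the force, the rest from the driven value.
      for (size_t idx = 0 ; idx < out->size() ; idx += 1)
	    std::cout << out->bits[idx];
      std::cout << std::endl;  // prints 0100
      return 0;
}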