diff --git a/compiler.h b/compiler.h index 2b38802be..ba0d371f6 100644 --- a/compiler.h +++ b/compiler.h @@ -87,6 +87,7 @@ extern bool debug_eval_tree; extern bool debug_elaborate; extern bool debug_synth2; extern bool debug_optimizer; +extern bool debug_automatic; /* Path to a directory useful for finding subcomponents. */ extern const char*basedir; diff --git a/elab_anet.cc b/elab_anet.cc index 35bc2cf68..7323c7ee3 100644 --- a/elab_anet.cc +++ b/elab_anet.cc @@ -115,7 +115,7 @@ NetNet* PEIdent::elaborate_anet(Design*des, NetScope*scope) const const NetExpr*par = 0; NetEvent* eve = 0; - symbol_search(des, scope, path_, sig, mem, par, eve); + symbol_search(this, des, scope, path_, sig, mem, par, eve); if (mem != 0) { @@ -211,4 +211,3 @@ NetNet* PEIdent::elaborate_anet(Design*des, NetScope*scope) const * Check lvalue of procedural continuous assign (PR#29) * */ - diff --git a/elab_expr.cc b/elab_expr.cc index 3ea8ce605..413e5317a 100644 --- a/elab_expr.cc +++ b/elab_expr.cc @@ -1641,7 +1641,7 @@ unsigned PEIdent::test_width(Design*des, NetScope*scope, const NetExpr*ex1, *ex2; - symbol_search(des, scope, path_, net, par, eve, ex1, ex2); + symbol_search(0, des, scope, path_, net, par, eve, ex1, ex2); // If there is a part/bit select expression, then process it // here. This constrains the results no matter what kind the @@ -1754,7 +1754,7 @@ NetExpr* PEIdent::elaborate_expr(Design*des, NetScope*scope, const NetExpr*ex1, *ex2; - NetScope*found_in = symbol_search(des, scope, path_, + NetScope*found_in = symbol_search(this, des, scope, path_, net, par, eve, ex1, ex2); diff --git a/elab_lval.cc b/elab_lval.cc index 174af3260..8cdc13611 100644 --- a/elab_lval.cc +++ b/elab_lval.cc @@ -152,7 +152,7 @@ NetAssign_* PEIdent::elaborate_lval(Design*des, const NetExpr*par = 0; NetEvent* eve = 0; - symbol_search(des, scope, path_, reg, par, eve); + symbol_search(this, des, scope, path_, reg, par, eve); if (reg == 0) { cerr << get_fileline() << ": error: Could not find variable ``" << path_ << "'' in ``" << scope_path(scope) << diff --git a/elab_net.cc b/elab_net.cc index 4e0983c30..fa1d16f4a 100644 --- a/elab_net.cc +++ b/elab_net.cc @@ -374,7 +374,7 @@ NetNet* PEIdent::elaborate_lnet_common_(Design*des, NetScope*scope, const NetExpr*par = 0; NetEvent* eve = 0; - symbol_search(des, scope, path_, sig, par, eve); + symbol_search(this, des, scope, path_, sig, par, eve); if (eve != 0) { cerr << get_fileline() << ": error: named events (" << path_ @@ -631,4 +631,3 @@ NetNet* PEIdent::elaborate_port(Design*des, NetScope*scope) const return sig; } - diff --git a/elab_sig.cc b/elab_sig.cc index cf323c783..359985b51 100644 --- a/elab_sig.cc +++ b/elab_sig.cc @@ -266,7 +266,7 @@ bool PEIdent::elaborate_sig(Design*des, NetScope*scope) const if (error_implicit) return true; - symbol_search(des, scope, path_, sig, par, eve); + symbol_search(this, des, scope, path_, sig, par, eve); if (eve != 0) return false; diff --git a/elaborate.cc b/elaborate.cc index 503b04b54..084b62556 100644 --- a/elaborate.cc +++ b/elaborate.cc @@ -2861,7 +2861,8 @@ NetProc* PEventStatement::elaborate_st(Design*des, NetScope*scope, const NetExpr*par = 0; NetEvent* eve = 0; - NetScope*found_in = symbol_search(des, scope, id->path(), + NetScope*found_in = symbol_search(this, des, scope, + id->path(), sig, par, eve); if (found_in && eve) { @@ -3463,7 +3464,7 @@ NetProc* PTrigger::elaborate(Design*des, NetScope*scope) const const NetExpr*par = 0; NetEvent* eve = 0; - NetScope*found_in = symbol_search(des, scope, event_, + NetScope*found_in = 
symbol_search(this, des, scope, event_, sig, par, eve); if (found_in == 0) { diff --git a/eval.cc b/eval.cc index 41cb60a5b..1bf506552 100644 --- a/eval.cc +++ b/eval.cc @@ -185,7 +185,7 @@ verinum* PEIdent::eval_const(Design*des, NetScope*scope) const return new verinum(scope->genvar_tmp_val); } - symbol_search(des, scope, path_, net, expr, eve); + symbol_search(this, des, scope, path_, net, expr, eve); if (expr == 0) return 0; diff --git a/main.cc b/main.cc index 99bfa4a2c..fb4ec97ef 100644 --- a/main.cc +++ b/main.cc @@ -124,6 +124,7 @@ bool debug_eval_tree = false; bool debug_elaborate = false; bool debug_synth2 = false; bool debug_optimizer = false; +bool debug_automatic = false; /* * Verbose messages enabled. @@ -392,6 +393,8 @@ static void read_iconfig_file(const char*ipath) } else if (strcmp(cp,"optimizer") == 0) { debug_optimizer = true; cerr << "debug: Enable optimizer debug" << endl; + } else if (strcmp(cp,"automatic") == 0) { + debug_automatic = true; } else { } diff --git a/net_event.cc b/net_event.cc index 3804ff8fb..ba9c5d3cc 100644 --- a/net_event.cc +++ b/net_event.cc @@ -173,6 +173,12 @@ void NetEvent::find_similar_event(list&event_list) if (tmp == this) continue; + /* For automatic tasks, the VVP runtime holds state for events + in the automatically allocated context. This means we can't + merge similar events in different automatic tasks. */ + if (scope()->is_auto() && (tmp->scope() != scope())) + continue; + if ((*idx).second != probe_count) continue; @@ -553,4 +559,3 @@ NetProc* NetEvWait::statement() * Simulate named event trigger and waits. * */ - diff --git a/netmisc.h b/netmisc.h index 97792d8f7..29333b063 100644 --- a/netmisc.h +++ b/netmisc.h @@ -36,21 +36,25 @@ * ex2 is the lsb expression for the range. If there is no range, then * these values are set to 0. 
*/ -extern NetScope* symbol_search(Design*des, - NetScope*start, pform_name_t path, +extern NetScope* symbol_search(const LineInfo*li, + Design*des, + NetScope*start, + pform_name_t path, NetNet*&net, /* net/reg */ const NetExpr*&par,/* parameter */ NetEvent*&eve, /* named event */ const NetExpr*&ex1, const NetExpr*&ex2); -inline NetScope* symbol_search(Design*des, - NetScope*start, const pform_name_t&path, +inline NetScope* symbol_search(const LineInfo*li, + Design*des, + NetScope*start, + const pform_name_t&path, NetNet*&net, /* net/reg */ const NetExpr*&par,/* parameter */ NetEvent*&eve /* named event */) { const NetExpr*ex1, *ex2; - return symbol_search(des, start, path, net, par, eve, ex1, ex2); + return symbol_search(li, des, start, path, net, par, eve, ex1, ex2); } /* diff --git a/pform.cc b/pform.cc index 7c0427631..15a52d8ae 100644 --- a/pform.cc +++ b/pform.cc @@ -110,11 +110,12 @@ PTask* pform_push_task_scope(char*name, bool is_auto) PTask*task; if (pform_cur_generate) { task = new PTask(task_name, pform_cur_generate->lexical_scope, - is_auto); + is_auto || debug_automatic); pform_cur_generate->tasks[task->pscope_name()] = task; pform_cur_generate->lexical_scope = task; } else { - task = new PTask(task_name, lexical_scope, is_auto); + task = new PTask(task_name, lexical_scope, + is_auto || debug_automatic); pform_cur_module->tasks[task->pscope_name()] = task; lexical_scope = task; } @@ -129,11 +130,12 @@ PFunction* pform_push_function_scope(char*name, bool is_auto) PFunction*func; if (pform_cur_generate) { func = new PFunction(func_name, pform_cur_generate->lexical_scope, - is_auto); + is_auto || debug_automatic); pform_cur_generate->funcs[func->pscope_name()] = func; pform_cur_generate->lexical_scope = func; } else { - func = new PFunction(func_name, lexical_scope, is_auto); + func = new PFunction(func_name, lexical_scope, + is_auto || debug_automatic); pform_cur_module->funcs[func->pscope_name()] = func; lexical_scope = func; } @@ -181,6 +183,20 @@ static LexicalScope*pform_get_cur_scope() return lexical_scope; } +static bool pform_at_module_level() +{ + if (pform_cur_generate) + if (pform_cur_generate->lexical_scope) + return false; + else + return true; + else + if (lexical_scope->pscope_parent()) + return false; + else + return true; +} + PWire*pform_get_wire_in_scope(perm_string name) { /* Note that if we are processing a generate, then the @@ -1293,6 +1309,13 @@ void pform_make_pgassign_list(svector*alist, void pform_make_reginit(const struct vlltype&li, perm_string name, PExpr*expr) { + if (! pform_at_module_level()) { + VLerror(li, "variable declaration assignments are only " + "allowed at the module level."); + delete expr; + return; + } + PWire*cur = pform_get_wire_in_scope(name); if (cur == 0) { VLerror(li, "internal error: reginit to non-register?"); diff --git a/symbol_search.cc b/symbol_search.cc index bc723d81d..0355fb6bb 100644 --- a/symbol_search.cc +++ b/symbol_search.cc @@ -28,7 +28,8 @@ /* * Search for the hierarchical name. 
*/ -NetScope*symbol_search(Design*des, NetScope*scope, pform_name_t path, +NetScope*symbol_search(const LineInfo*li, Design*des, NetScope*scope, + pform_name_t path, NetNet*&net, const NetExpr*&par, NetEvent*&eve, @@ -57,6 +58,13 @@ NetScope*symbol_search(Design*des, NetScope*scope, pform_name_t path, return 0; scope = des->find_scope(scope, path_list); + + if (scope->is_auto() && li) { + cerr << li->get_fileline() << ": error: Hierarchical " + "reference to automatically allocated item " + "`" << key << "' in path `" << path << "'" << endl; + des->errors += 1; + } } while (scope) { diff --git a/vpi/sys_lxt.c b/vpi/sys_lxt.c index 6521d749b..b3bdb73f7 100644 --- a/vpi/sys_lxt.c +++ b/vpi/sys_lxt.c @@ -546,7 +546,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) case vpiTimeVar: case vpiReg: type = "reg"; } - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); nexus_id = vpi_get(_vpiNexusId, item); @@ -593,7 +593,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) case vpiRealVar: - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); { char*tmp = create_full_name(name); diff --git a/vpi/sys_lxt2.c b/vpi/sys_lxt2.c index 797078502..73135db04 100644 --- a/vpi/sys_lxt2.c +++ b/vpi/sys_lxt2.c @@ -552,7 +552,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) case vpiTimeVar: case vpiReg: type = "reg"; } - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); nexus_id = vpi_get(_vpiNexusId, item); @@ -603,7 +603,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) case vpiRealVar: - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); { char*tmp = create_full_name(name); diff --git a/vpi/sys_vcd.c b/vpi/sys_vcd.c index 222745167..c77044ed4 100644 --- a/vpi/sys_vcd.c +++ b/vpi/sys_vcd.c @@ -513,7 +513,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) break; } - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); prefix = is_escaped_id(name) ? "\\" : ""; @@ -578,7 +578,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) break; } - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; /* Declare the variable in the VCD file. 
*/ name = vpi_get_str(vpiName, item); diff --git a/vpi_user.h b/vpi_user.h index d5660b453..f1157e1ac 100644 --- a/vpi_user.h +++ b/vpi_user.h @@ -348,6 +348,7 @@ typedef struct t_vpi_delay { # define vpiSysFuncReal vpiRealFunc # define vpiSysFuncTime vpiTimeFunc # define vpiSysFuncSized vpiSizedFunc +#define vpiAutomatic 50 #define vpiConstantSelect 53 #define vpiSigned 65 /* IVL private properties */ diff --git a/vvp/arith.cc b/vvp/arith.cc index 126cfdaa1..3945ea076 100644 --- a/vvp/arith.cc +++ b/vvp/arith.cc @@ -63,7 +63,8 @@ vvp_arith_abs::~vvp_arith_abs() { } -void vvp_arith_abs::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_abs::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { vvp_vector4_t out (bit.size(), BIT4_0);; @@ -81,13 +82,14 @@ void vvp_arith_abs::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) break; } - vvp_send_vec4(ptr.ptr()->out, out); + vvp_send_vec4(ptr.ptr()->out, out, 0); } -void vvp_arith_abs::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_abs::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { double out = fabs(bit); - vvp_send_real(ptr.ptr()->out, out); + vvp_send_real(ptr.ptr()->out, out, 0); } vvp_arith_cast_int::vvp_arith_cast_int(unsigned wid) @@ -99,9 +101,10 @@ vvp_arith_cast_int::~vvp_arith_cast_int() { } -void vvp_arith_cast_int::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_cast_int::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { - vvp_send_vec4(ptr.ptr()->out, vvp_vector4_t(wid_, bit)); + vvp_send_vec4(ptr.ptr()->out, vvp_vector4_t(wid_, bit), 0); } vvp_arith_cast_real::vvp_arith_cast_real(bool signed_flag) @@ -113,11 +116,12 @@ vvp_arith_cast_real::~vvp_arith_cast_real() { } -void vvp_arith_cast_real::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_cast_real::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { double val; vector4_to_value(bit, val, signed_); - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } // Division @@ -135,21 +139,22 @@ void vvp_arith_div::wide4_(vvp_net_ptr_t ptr) { vvp_vector2_t a2 (op_a_); if (a2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t b2 (op_b_); if (b2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t res2 = a2 / b2; - vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res2, wid_)); + vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res2, wid_), 0); } -void vvp_arith_div::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_div::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -160,13 +165,13 @@ void vvp_arith_div::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) unsigned long a; if (! vector4_to_value(op_a_, a)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } unsigned long b; if (! 
vector4_to_value(op_b_, b)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -202,7 +207,7 @@ void vvp_arith_div::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) val >>= 1; } - vvp_send_vec4(ptr.ptr()->out, vval); + vvp_send_vec4(ptr.ptr()->out, vval, 0); } @@ -219,21 +224,22 @@ void vvp_arith_mod::wide_(vvp_net_ptr_t ptr) { vvp_vector2_t a2 (op_a_); if (a2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t b2 (op_b_); if (b2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t res = a2 % b2; - vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res, res.size())); + vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res, res.size()), 0); } -void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -244,13 +250,13 @@ void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) unsigned long a; if (! vector4_to_value(op_a_, a)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } unsigned long b; if (! vector4_to_value(op_b_, b)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -275,7 +281,7 @@ void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) for (unsigned idx = 0 ; idx < wid_ ; idx += 1) xval.set_bit(idx, BIT4_X); - vvp_send_vec4(ptr.ptr()->out, xval); + vvp_send_vec4(ptr.ptr()->out, xval, 0); return; } @@ -295,7 +301,7 @@ void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) val >>= 1; } - vvp_send_vec4(ptr.ptr()->out, vval); + vvp_send_vec4(ptr.ptr()->out, vval, 0); } @@ -316,17 +322,18 @@ void vvp_arith_mult::wide_(vvp_net_ptr_t ptr) vvp_vector2_t b2 (op_b_); if (a2.is_NaN() || b2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t result = a2 * b2; vvp_vector4_t res4 = vector2_to_vector4(result, wid_); - vvp_send_vec4(ptr.ptr()->out, res4); + vvp_send_vec4(ptr.ptr()->out, res4, 0); } -void vvp_arith_mult::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_mult::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -337,13 +344,13 @@ void vvp_arith_mult::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) long a; if (! vector4_to_value(op_a_, a, false, true)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } long b; if (! 
vector4_to_value(op_b_, b, false, true)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -360,7 +367,7 @@ void vvp_arith_mult::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) val >>= 1; } - vvp_send_vec4(ptr.ptr()->out, vval); + vvp_send_vec4(ptr.ptr()->out, vval, 0); } @@ -375,14 +382,15 @@ vvp_arith_pow::~vvp_arith_pow() { } -void vvp_arith_pow::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_pow::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); vvp_vector4_t res4; if (signed_flag_) { if (op_a_.has_xz() || op_b_.has_xz()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -396,7 +404,7 @@ void vvp_arith_pow::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_vector2_t b2 (op_b_); if (a2.is_NaN() || b2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -404,7 +412,7 @@ void vvp_arith_pow::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) res4 = vector2_to_vector4(result, wid_); } - vvp_send_vec4(ptr.ptr()->out, res4); + vvp_send_vec4(ptr.ptr()->out, res4, 0); } @@ -419,7 +427,8 @@ vvp_arith_sum::~vvp_arith_sum() { } -void vvp_arith_sum::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_sum::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -438,14 +447,14 @@ void vvp_arith_sum::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_bit4_t cur = add_with_carry(a, b, carry); if (cur == BIT4_X) { - vvp_send_vec4(net->out, x_val_); + vvp_send_vec4(net->out, x_val_, 0); return; } value.set_bit(idx, cur); } - vvp_send_vec4(net->out, value); + vvp_send_vec4(net->out, value, 0); } vvp_arith_sub::vvp_arith_sub(unsigned wid) @@ -463,7 +472,8 @@ vvp_arith_sub::~vvp_arith_sub() * further reduce the operation to adding in the inverted value and * adding a correction. 
*/ -void vvp_arith_sub::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_sub::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -482,14 +492,14 @@ void vvp_arith_sub::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_bit4_t cur = add_with_carry(a, b, carry); if (cur == BIT4_X) { - vvp_send_vec4(net->out, x_val_); + vvp_send_vec4(net->out, x_val_, 0); return; } value.set_bit(idx, cur); } - vvp_send_vec4(net->out, value); + vvp_send_vec4(net->out, value, 0); } vvp_cmp_eeq::vvp_cmp_eeq(unsigned wid) @@ -497,7 +507,8 @@ vvp_cmp_eeq::vvp_cmp_eeq(unsigned wid) { } -void vvp_cmp_eeq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_eeq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -513,7 +524,7 @@ void vvp_cmp_eeq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_net_t*net = ptr.ptr(); - vvp_send_vec4(net->out, eeq); + vvp_send_vec4(net->out, eeq, 0); } vvp_cmp_nee::vvp_cmp_nee(unsigned wid) @@ -521,7 +532,8 @@ vvp_cmp_nee::vvp_cmp_nee(unsigned wid) { } -void vvp_cmp_nee::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_nee::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -537,7 +549,7 @@ void vvp_cmp_nee::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_net_t*net = ptr.ptr(); - vvp_send_vec4(net->out, eeq); + vvp_send_vec4(net->out, eeq, 0); } vvp_cmp_eq::vvp_cmp_eq(unsigned wid) @@ -551,7 +563,8 @@ vvp_cmp_eq::vvp_cmp_eq(unsigned wid) * there are X/Z bits anywhere in A or B, the result is X. Finally, * the result is 1. */ -void vvp_cmp_eq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_eq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -583,7 +596,7 @@ void vvp_cmp_eq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) } vvp_net_t*net = ptr.ptr(); - vvp_send_vec4(net->out, res); + vvp_send_vec4(net->out, res, 0); } @@ -598,7 +611,8 @@ vvp_cmp_ne::vvp_cmp_ne(unsigned wid) * there are X/Z bits anywhere in A or B, the result is X. Finally, * the result is 0. 
*/ -void vvp_cmp_ne::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_ne::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -630,7 +644,7 @@ void vvp_cmp_ne::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) } vvp_net_t*net = ptr.ptr(); - vvp_send_vec4(net->out, res); + vvp_send_vec4(net->out, res, 0); } @@ -651,7 +665,7 @@ void vvp_cmp_gtge_base_::recv_vec4_base_(vvp_net_ptr_t ptr, : compare_gtge(op_a_, op_b_, out_if_equal); vvp_vector4_t val (1); val.set_bit(0, out); - vvp_send_vec4(ptr.ptr()->out, val); + vvp_send_vec4(ptr.ptr()->out, val, 0); return; } @@ -662,7 +676,8 @@ vvp_cmp_ge::vvp_cmp_ge(unsigned wid, bool flag) { } -void vvp_cmp_ge::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_ge::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { recv_vec4_base_(ptr, bit, BIT4_1); } @@ -672,7 +687,8 @@ vvp_cmp_gt::vvp_cmp_gt(unsigned wid, bool flag) { } -void vvp_cmp_gt::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_gt::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { recv_vec4_base_(ptr, bit, BIT4_0); } @@ -687,7 +703,8 @@ vvp_shiftl::~vvp_shiftl() { } -void vvp_shiftl::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_shiftl::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -695,7 +712,7 @@ void vvp_shiftl::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) unsigned long shift; if (! vector4_to_value(op_b_, shift)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -708,7 +725,7 @@ void vvp_shiftl::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) for (unsigned idx = shift ; idx < out.size() ; idx += 1) out.set_bit(idx, op_a_.value(idx-shift)); - vvp_send_vec4(ptr.ptr()->out, out); + vvp_send_vec4(ptr.ptr()->out, out, 0); } vvp_shiftr::vvp_shiftr(unsigned wid, bool signed_flag) @@ -720,7 +737,8 @@ vvp_shiftr::~vvp_shiftr() { } -void vvp_shiftr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_shiftr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -728,7 +746,7 @@ void vvp_shiftr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) unsigned long shift; if (! vector4_to_value(op_b_, shift)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -745,7 +763,7 @@ void vvp_shiftr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) for (unsigned idx = 0 ; idx < shift ; idx += 1) out.set_bit(idx+out.size()-shift, pad); - vvp_send_vec4(ptr.ptr()->out, out); + vvp_send_vec4(ptr.ptr()->out, out, 0); } @@ -780,12 +798,13 @@ vvp_arith_mult_real::~vvp_arith_mult_real() { } -void vvp_arith_mult_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_mult_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = op_a_ * op_b_; - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real power. */ @@ -797,12 +816,13 @@ vvp_arith_pow_real::~vvp_arith_pow_real() { } -void vvp_arith_pow_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_pow_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = pow(op_a_, op_b_); - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real division. 
*/ @@ -814,12 +834,13 @@ vvp_arith_div_real::~vvp_arith_div_real() { } -void vvp_arith_div_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_div_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = op_a_ / op_b_; - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real modulus. */ @@ -831,12 +852,13 @@ vvp_arith_mod_real::~vvp_arith_mod_real() { } -void vvp_arith_mod_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_mod_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = fmod(op_a_, op_b_); - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real summation. */ @@ -848,12 +870,13 @@ vvp_arith_sum_real::~vvp_arith_sum_real() { } -void vvp_arith_sum_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_sum_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = op_a_ + op_b_; - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real subtraction. */ @@ -865,12 +888,13 @@ vvp_arith_sub_real::~vvp_arith_sub_real() { } -void vvp_arith_sub_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_sub_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = op_a_ - op_b_; - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real compare equal. */ @@ -878,7 +902,8 @@ vvp_cmp_eq_real::vvp_cmp_eq_real() { } -void vvp_cmp_eq_real::recv_real(vvp_net_ptr_t ptr, const double bit) +void vvp_cmp_eq_real::recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -886,7 +911,7 @@ void vvp_cmp_eq_real::recv_real(vvp_net_ptr_t ptr, const double bit) if (op_a_ == op_b_) res.set_bit(0, BIT4_1); else res.set_bit(0, BIT4_0); - vvp_send_vec4(ptr.ptr()->out, res); + vvp_send_vec4(ptr.ptr()->out, res, 0); } /* Real compare not equal. */ @@ -894,7 +919,8 @@ vvp_cmp_ne_real::vvp_cmp_ne_real() { } -void vvp_cmp_ne_real::recv_real(vvp_net_ptr_t ptr, const double bit) +void vvp_cmp_ne_real::recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -902,7 +928,7 @@ void vvp_cmp_ne_real::recv_real(vvp_net_ptr_t ptr, const double bit) if (op_a_ != op_b_) res.set_bit(0, BIT4_1); else res.set_bit(0, BIT4_0); - vvp_send_vec4(ptr.ptr()->out, res); + vvp_send_vec4(ptr.ptr()->out, res, 0); } /* Real compare greater than or equal. */ @@ -910,7 +936,8 @@ vvp_cmp_ge_real::vvp_cmp_ge_real() { } -void vvp_cmp_ge_real::recv_real(vvp_net_ptr_t ptr, const double bit) +void vvp_cmp_ge_real::recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -918,7 +945,7 @@ void vvp_cmp_ge_real::recv_real(vvp_net_ptr_t ptr, const double bit) if (op_a_ >= op_b_) res.set_bit(0, BIT4_1); else res.set_bit(0, BIT4_0); - vvp_send_vec4(ptr.ptr()->out, res); + vvp_send_vec4(ptr.ptr()->out, res, 0); } /* Real compare greater than. 
*/ @@ -926,7 +953,8 @@ vvp_cmp_gt_real::vvp_cmp_gt_real() { } -void vvp_cmp_gt_real::recv_real(vvp_net_ptr_t ptr, const double bit) +void vvp_cmp_gt_real::recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -934,5 +962,5 @@ void vvp_cmp_gt_real::recv_real(vvp_net_ptr_t ptr, const double bit) if (op_a_ > op_b_) res.set_bit(0, BIT4_1); else res.set_bit(0, BIT4_0); - vvp_send_vec4(ptr.ptr()->out, res); + vvp_send_vec4(ptr.ptr()->out, res, 0); } diff --git a/vvp/arith.h b/vvp/arith.h index cd05355ce..4f5812448 100644 --- a/vvp/arith.h +++ b/vvp/arith.h @@ -54,8 +54,10 @@ class vvp_arith_abs : public vvp_net_fun_t { explicit vvp_arith_abs(); ~vvp_arith_abs(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); private: }; @@ -65,7 +67,8 @@ class vvp_arith_cast_int : public vvp_net_fun_t { explicit vvp_arith_cast_int(unsigned wid); ~vvp_arith_cast_int(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); private: unsigned wid_; @@ -76,7 +79,8 @@ class vvp_arith_cast_real : public vvp_net_fun_t { explicit vvp_arith_cast_real(bool signed_flag); ~vvp_arith_cast_real(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: bool signed_; @@ -87,7 +91,8 @@ class vvp_arith_div : public vvp_arith_ { public: explicit vvp_arith_div(unsigned wid, bool signed_flag); ~vvp_arith_div(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: void wide4_(vvp_net_ptr_t ptr); bool signed_flag_; @@ -98,7 +103,8 @@ class vvp_arith_mod : public vvp_arith_ { public: explicit vvp_arith_mod(unsigned wid, bool signed_flag); ~vvp_arith_mod(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: void wide_(vvp_net_ptr_t ptr); bool signed_flag_; @@ -114,7 +120,8 @@ class vvp_cmp_eeq : public vvp_arith_ { public: explicit vvp_cmp_eeq(unsigned wid); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -122,7 +129,8 @@ class vvp_cmp_nee : public vvp_arith_ { public: explicit vvp_cmp_nee(unsigned wid); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -130,7 +138,8 @@ class vvp_cmp_eq : public vvp_arith_ { public: explicit vvp_cmp_eq(unsigned wid); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -138,7 +147,8 @@ class vvp_cmp_ne : public vvp_arith_ { public: explicit vvp_cmp_ne(unsigned wid); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -165,7 +175,8 @@ class vvp_cmp_ge : public vvp_cmp_gtge_base_ { public: explicit vvp_cmp_ge(unsigned wid, bool signed_flag); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -174,7 +185,8 @@ class vvp_cmp_gt : public 
vvp_cmp_gtge_base_ { public: explicit vvp_cmp_gt(unsigned wid, bool signed_flag); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; /* @@ -187,7 +199,8 @@ class vvp_arith_mult : public vvp_arith_ { public: explicit vvp_arith_mult(unsigned wid); ~vvp_arith_mult(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: void wide_(vvp_net_ptr_t ptr); }; @@ -197,7 +210,8 @@ class vvp_arith_pow : public vvp_arith_ { public: explicit vvp_arith_pow(unsigned wid, bool signed_flag); ~vvp_arith_pow(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: bool signed_flag_; }; @@ -207,7 +221,8 @@ class vvp_arith_sub : public vvp_arith_ { public: explicit vvp_arith_sub(unsigned wid); ~vvp_arith_sub(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -216,7 +231,8 @@ class vvp_arith_sum : public vvp_arith_ { public: explicit vvp_arith_sum(unsigned wid); ~vvp_arith_sum(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -225,7 +241,8 @@ class vvp_shiftl : public vvp_arith_ { public: explicit vvp_shiftl(unsigned wid); ~vvp_shiftl(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); }; class vvp_shiftr : public vvp_arith_ { @@ -233,7 +250,8 @@ class vvp_shiftr : public vvp_arith_ { public: explicit vvp_shiftr(unsigned wid, bool signed_flag); ~vvp_shiftr(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); private: bool signed_flag_; @@ -263,7 +281,8 @@ class vvp_arith_sum_real : public vvp_arith_real_ { public: explicit vvp_arith_sum_real(); ~vvp_arith_sum_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_arith_div_real : public vvp_arith_real_ { @@ -271,7 +290,8 @@ class vvp_arith_div_real : public vvp_arith_real_ { public: explicit vvp_arith_div_real(); ~vvp_arith_div_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_arith_mod_real : public vvp_arith_real_ { @@ -279,7 +299,8 @@ class vvp_arith_mod_real : public vvp_arith_real_ { public: explicit vvp_arith_mod_real(); ~vvp_arith_mod_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_arith_mult_real : public vvp_arith_real_ { @@ -287,7 +308,8 @@ class vvp_arith_mult_real : public vvp_arith_real_ { public: explicit vvp_arith_mult_real(); ~vvp_arith_mult_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_arith_pow_real : public vvp_arith_real_ { @@ -295,7 +317,8 @@ class vvp_arith_pow_real : public vvp_arith_real_ { public: explicit vvp_arith_pow_real(); ~vvp_arith_pow_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class 
vvp_arith_sub_real : public vvp_arith_real_ { @@ -303,35 +326,40 @@ class vvp_arith_sub_real : public vvp_arith_real_ { public: explicit vvp_arith_sub_real(); ~vvp_arith_sub_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_cmp_eq_real : public vvp_arith_real_ { public: explicit vvp_cmp_eq_real(); - void recv_real(vvp_net_ptr_t ptr, const double bit); + void recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t); }; class vvp_cmp_ne_real : public vvp_arith_real_ { public: explicit vvp_cmp_ne_real(); - void recv_real(vvp_net_ptr_t ptr, const double bit); + void recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t); }; class vvp_cmp_ge_real : public vvp_arith_real_ { public: explicit vvp_cmp_ge_real(); - void recv_real(vvp_net_ptr_t ptr, const double bit); + void recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t); }; class vvp_cmp_gt_real : public vvp_arith_real_ { public: explicit vvp_cmp_gt_real(); - void recv_real(vvp_net_ptr_t ptr, const double bit); + void recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t); }; #endif diff --git a/vvp/array.cc b/vvp/array.cc index b480b6004..4994a3cd6 100644 --- a/vvp/array.cc +++ b/vvp/array.cc @@ -332,6 +332,9 @@ static int vpi_array_get(int code, vpiHandle ref) case vpiSize: return (int) obj->array_count; + case vpiAutomatic: + return (int) obj->scope->is_automatic; + default: return 0; } @@ -727,7 +730,7 @@ void array_set_word(vvp_array_t arr, assert(vsig); vvp_net_ptr_t ptr (vsig->node, 0); - vvp_send_vec4_pv(ptr, val, part_off, val.size(), vpip_size(vsig)); + vvp_send_vec4_pv(ptr, val, part_off, val.size(), vpip_size(vsig), 0); array_word_change(arr, address); } @@ -852,9 +855,13 @@ void compile_var_array(char*label, char*name, int last, int first, /* Make the words. 
*/ arr->vals_width = labs(msb-lsb) + 1; - arr->vals = new vvp_vector4array_t(arr->vals_width, arr->array_count, - vpip_peek_current_scope()->is_automatic); - vpip_add_item_to_current_scope(arr->vals); + if (vpip_peek_current_scope()->is_automatic) { + arr->vals = new vvp_vector4array_aa(arr->vals_width, + arr->array_count); + } else { + arr->vals = new vvp_vector4array_sa(arr->vals_width, + arr->array_count); + } vpip_make_dec_const(&arr->msb, msb); vpip_make_dec_const(&arr->lsb, lsb); @@ -908,11 +915,9 @@ class vvp_fun_arrayport : public vvp_net_fun_t { explicit vvp_fun_arrayport(vvp_array_t mem, vvp_net_t*net, long addr); ~vvp_fun_arrayport(); - void check_word_change(unsigned long addr); + virtual void check_word_change(unsigned long addr) = 0; - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); - - private: + protected: vvp_array_t arr_; vvp_net_t *net_; unsigned long addr_; @@ -938,7 +943,37 @@ vvp_fun_arrayport::~vvp_fun_arrayport() { } -void vvp_fun_arrayport::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +class vvp_fun_arrayport_sa : public vvp_fun_arrayport { + + public: + explicit vvp_fun_arrayport_sa(vvp_array_t mem, vvp_net_t*net); + explicit vvp_fun_arrayport_sa(vvp_array_t mem, vvp_net_t*net, long addr); + ~vvp_fun_arrayport_sa(); + + void check_word_change(unsigned long addr); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); + + private: +}; + +vvp_fun_arrayport_sa::vvp_fun_arrayport_sa(vvp_array_t mem, vvp_net_t*net) +: vvp_fun_arrayport(mem, net) +{ +} + +vvp_fun_arrayport_sa::vvp_fun_arrayport_sa(vvp_array_t mem, vvp_net_t*net, long addr) +: vvp_fun_arrayport(mem, net, addr) +{ +} + +vvp_fun_arrayport_sa::~vvp_fun_arrayport_sa() +{ +} + +void vvp_fun_arrayport_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { bool addr_valid_flag; @@ -948,7 +983,7 @@ void vvp_fun_arrayport::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) addr_valid_flag = vector4_to_value(bit, addr_); if (! 
addr_valid_flag) addr_ = arr_->array_count; - vvp_send_vec4(port.ptr()->out, array_get_word(arr_,addr_)); + vvp_send_vec4(port.ptr()->out, array_get_word(arr_,addr_), 0); break; default: @@ -957,13 +992,111 @@ void vvp_fun_arrayport::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) } } -void vvp_fun_arrayport::check_word_change(unsigned long addr) +void vvp_fun_arrayport_sa::check_word_change(unsigned long addr) { if (addr != addr_) return; vvp_vector4_t bit = array_get_word(arr_, addr_); - vvp_send_vec4(net_->out, bit); + vvp_send_vec4(net_->out, bit, 0); +} + +class vvp_fun_arrayport_aa : public vvp_fun_arrayport, public automatic_hooks_s { + + public: + explicit vvp_fun_arrayport_aa(vvp_array_t mem, vvp_net_t*net); + explicit vvp_fun_arrayport_aa(vvp_array_t mem, vvp_net_t*net, long addr); + ~vvp_fun_arrayport_aa(); + + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + void check_word_change(unsigned long addr); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); + + private: + struct __vpiScope*context_scope_; + unsigned context_idx_; +}; + +vvp_fun_arrayport_aa::vvp_fun_arrayport_aa(vvp_array_t mem, vvp_net_t*net) +: vvp_fun_arrayport(mem, net) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_arrayport_aa::vvp_fun_arrayport_aa(vvp_array_t mem, vvp_net_t*net, long addr) +: vvp_fun_arrayport(mem, net, addr) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_arrayport_aa::~vvp_fun_arrayport_aa() +{ +} + +void vvp_fun_arrayport_aa::alloc_instance(vvp_context_t context) +{ + unsigned long*addr = new unsigned long; + vvp_set_context_item(context, context_idx_, addr); + + *addr = addr_; +} + +void vvp_fun_arrayport_aa::reset_instance(vvp_context_t context) +{ + unsigned long*addr = static_cast + (vvp_get_context_item(context, context_idx_)); + + *addr = addr_; +} + +void vvp_fun_arrayport_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + unsigned long*addr = static_cast + (vvp_get_context_item(context, context_idx_)); + + bool addr_valid_flag; + + switch (port.port()) { + + case 0: // Address input + addr_valid_flag = vector4_to_value(bit, *addr); + if (! 
addr_valid_flag) + *addr = arr_->array_count; + vvp_send_vec4(port.ptr()->out, array_get_word(arr_,*addr), + context); + break; + + default: + fprintf(stdout, "XXXX write ports not implemented.\n"); + assert(0); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} + +void vvp_fun_arrayport_aa::check_word_change(unsigned long addr) +{ + unsigned long*port_addr = static_cast + (vthread_get_wt_context_item(context_idx_)); + + if (addr != *port_addr) + return; + + vvp_vector4_t bit = array_get_word(arr_, addr); + vvp_send_vec4(net_->out, bit, vthread_get_wt_context()); } static void array_attach_port(vvp_array_t array, vvp_fun_arrayport*fun) @@ -1046,9 +1179,15 @@ bool array_port_resolv_list_t::resolve(bool mes) vvp_fun_arrayport*fun; if (use_addr) - fun = new vvp_fun_arrayport(mem, ptr, addr); + if (vpip_peek_current_scope()->is_automatic) + fun = new vvp_fun_arrayport_aa(mem, ptr, addr); + else + fun = new vvp_fun_arrayport_sa(mem, ptr, addr); else - fun = new vvp_fun_arrayport(mem, ptr); + if (vpip_peek_current_scope()->is_automatic) + fun = new vvp_fun_arrayport_aa(mem, ptr); + else + fun = new vvp_fun_arrayport_sa(mem, ptr); ptr->fun = fun; array_attach_port(mem, fun); diff --git a/vvp/bufif.cc b/vvp/bufif.cc index df356f812..9b11e2ee3 100644 --- a/vvp/bufif.cc +++ b/vvp/bufif.cc @@ -35,7 +35,8 @@ vvp_fun_bufif::vvp_fun_bufif(bool en_invert, bool out_invert, count_functors_bufif += 1; } -void vvp_fun_bufif::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_bufif::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { switch (ptr.port()) { case 0: @@ -115,4 +116,3 @@ void vvp_fun_bufif::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) * Revision 1.8 2002/08/12 01:35:07 steve * conditional ident string using autoconfig. 
*/ - diff --git a/vvp/bufif.h b/vvp/bufif.h index 05f242ef5..6c55c21f8 100644 --- a/vvp/bufif.h +++ b/vvp/bufif.h @@ -40,7 +40,8 @@ class vvp_fun_bufif : public vvp_net_fun_t { vvp_fun_bufif(bool en_invert, bool out_invert, unsigned str0, unsigned str1); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); private: vvp_vector4_t bit_; diff --git a/vvp/compile.cc b/vvp/compile.cc index 28ce20ce4..023b23e2e 100644 --- a/vvp/compile.cc +++ b/vvp/compile.cc @@ -83,6 +83,7 @@ const static struct opcode_table_s opcode_table[] = { { "%add", of_ADD, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, { "%add/wr", of_ADD_WR, 2, {OA_BIT1, OA_BIT2, OA_NONE} }, { "%addi", of_ADDI, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, + { "%alloc", of_ALLOC, 1, {OA_VPI_PTR, OA_NONE, OA_NONE} }, { "%and", of_AND, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, { "%and/r", of_ANDR, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, { "%andi", of_ANDI, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, @@ -134,6 +135,7 @@ const static struct opcode_table_s opcode_table[] = { { "%force/v",of_FORCE_V,3, {OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, { "%force/wr",of_FORCE_WR,2, {OA_FUNC_PTR, OA_BIT1, OA_NONE} }, { "%force/x0",of_FORCE_X0,3,{OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, + { "%free", of_FREE, 1, {OA_VPI_PTR, OA_NONE, OA_NONE} }, { "%inv", of_INV, 2, {OA_BIT1, OA_BIT2, OA_NONE} }, { "%ix/add", of_IX_ADD, 2, {OA_BIT1, OA_NUMBER, OA_NONE} }, { "%ix/get", of_IX_GET, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, @@ -181,7 +183,7 @@ const static struct opcode_table_s opcode_table[] = { { "%release/wr",of_RELEASE_WR,2,{OA_FUNC_PTR,OA_BIT1,OA_NONE} }, { "%set/av", of_SET_AV, 3, {OA_ARR_PTR, OA_BIT1, OA_BIT2} }, { "%set/v", of_SET_VEC,3, {OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, - { "%set/wr", of_SET_WORDR,2,{OA_VPI_PTR, OA_BIT1, OA_NONE} }, + { "%set/wr", of_SET_WORDR,2,{OA_FUNC_PTR, OA_BIT1, OA_NONE} }, { "%set/x0", of_SET_X0, 3, {OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, // { "%set/x0/x",of_SET_X0_X,3,{OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, { "%shiftl/i0", of_SHIFTL_I0, 2, {OA_BIT1,OA_NUMBER, OA_NONE} }, @@ -1687,34 +1689,6 @@ void compile_fork(char*label, struct symb_s dest, struct symb_s scope) compile_vpi_lookup(&code->handle, scope.text); } -void compile_alloc(char*label, struct symb_s scope) -{ - if (label) - compile_codelabel(label); - - - /* Fill in the basics of the %alloc in the instruction. */ - vvp_code_t code = codespace_allocate(); - code->opcode = of_ALLOC; - - /* Figure out the target SCOPE. */ - compile_vpi_lookup(&code->handle, scope.text); -} - -void compile_free(char*label, struct symb_s scope) -{ - if (label) - compile_codelabel(label); - - - /* Fill in the basics of the %free in the instruction. */ - vvp_code_t code = codespace_allocate(); - code->opcode = of_FREE; - - /* Figure out the target SCOPE. */ - compile_vpi_lookup(&code->handle, scope.text); -} - void compile_vpi_call(char*label, char*name, long file_idx, long lineno, unsigned argc, vpiHandle*argv) diff --git a/vvp/compile.h b/vvp/compile.h index 1d2da755d..f6bb6a89e 100644 --- a/vvp/compile.h +++ b/vvp/compile.h @@ -356,8 +356,7 @@ extern void compile_ufunc(char*label, char*code, unsigned wid, * the threads. 
*/ extern void compile_event(char*label, char*type, - unsigned argc, struct symb_s*argv, - bool debug_flag); + unsigned argc, struct symb_s*argv); extern void compile_named_event(char*label, char*type); @@ -406,9 +405,6 @@ extern void compile_fork(char*label, struct symb_s targ_s, struct symb_s scope_s); extern void compile_codelabel(char*label); -extern void compile_alloc(char*label, struct symb_s scope_s); -extern void compile_free(char*label, struct symb_s scope_s); - /* * The parser uses these functions to compile .scope statements. * The implementations of these live in the vpi_scope.cc file. diff --git a/vvp/concat.cc b/vvp/concat.cc index 0a8c5716a..b17562459 100644 --- a/vvp/concat.cc +++ b/vvp/concat.cc @@ -45,7 +45,8 @@ vvp_fun_concat::~vvp_fun_concat() { } -void vvp_fun_concat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_concat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { unsigned pdx = port.port(); @@ -64,7 +65,7 @@ void vvp_fun_concat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) val_.set_bit(off+idx, bit.value(idx)); } - vvp_send_vec4(port.ptr()->out, val_); + vvp_send_vec4(port.ptr()->out, val_, 0); } void compile_concat(char*label, unsigned w0, unsigned w1, @@ -91,7 +92,8 @@ vvp_fun_repeat::~vvp_fun_repeat() { } -void vvp_fun_repeat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_repeat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { assert(bit.size() == wid_/rep_); @@ -105,7 +107,7 @@ void vvp_fun_repeat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) } - vvp_send_vec4(port.ptr()->out, val); + vvp_send_vec4(port.ptr()->out, val, 0); } void compile_repeat(char*label, long width, long repeat, struct symb_s arg) @@ -140,4 +142,3 @@ void compile_repeat(char*label, long width, long repeat, struct symb_s arg) * Add missing concat.cc to cvs * */ - diff --git a/vvp/delay.cc b/vvp/delay.cc index d68c11820..66f3a0ffe 100644 --- a/vvp/delay.cc +++ b/vvp/delay.cc @@ -183,7 +183,8 @@ void vvp_fun_delay::clean_pulse_events_(vvp_time64_t use_delay) * wrong. What should happen is that if there are multiple changes, * multiple vectors approaching the result should be scheduled. 
*/ -void vvp_fun_delay::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_delay::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { if (port.port() > 0) { // Get the integer value of the bit vector, or 0 if @@ -248,7 +249,7 @@ void vvp_fun_delay::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) if (use_delay == 0) { cur_vec4_ = bit; initial_ = false; - vvp_send_vec4(net_->out, cur_vec4_); + vvp_send_vec4(net_->out, cur_vec4_, 0); } else { struct event_*cur = new struct event_(use_simtime); cur->run_run_ptr = &vvp_fun_delay::run_run_vec4_; @@ -283,7 +284,8 @@ void vvp_fun_delay::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) } } -void vvp_fun_delay::recv_real(vvp_net_ptr_t port, double bit) +void vvp_fun_delay::recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t) { if (port.port() > 0) { /* If the port is not 0, then this is a delay value that @@ -328,7 +330,7 @@ void vvp_fun_delay::recv_real(vvp_net_ptr_t port, double bit) if (use_delay == 0) { cur_real_ = bit; initial_ = false; - vvp_send_real(net_->out, cur_real_); + vvp_send_real(net_->out, cur_real_, 0); } else { struct event_*cur = new struct event_(use_simtime); cur->run_run_ptr = &vvp_fun_delay::run_run_real_; @@ -357,7 +359,7 @@ void vvp_fun_delay::run_run() void vvp_fun_delay::run_run_vec4_(struct event_*cur) { cur_vec4_ = cur->ptr_vec4; - vvp_send_vec4(net_->out, cur_vec4_); + vvp_send_vec4(net_->out, cur_vec4_, 0); } void vvp_fun_delay::run_run_vec8_(struct vvp_fun_delay::event_*cur) @@ -369,7 +371,7 @@ void vvp_fun_delay::run_run_vec8_(struct vvp_fun_delay::event_*cur) void vvp_fun_delay::run_run_real_(struct vvp_fun_delay::event_*cur) { cur_real_ = cur->ptr_real; - vvp_send_real(net_->out, cur_real_); + vvp_send_real(net_->out, cur_real_, 0); } vvp_fun_modpath::vvp_fun_modpath(vvp_net_t*net) @@ -418,7 +420,8 @@ static vvp_time64_t delay_from_edge(vvp_bit4_t a, vvp_bit4_t b, return array[ edge_table[a][b] ]; } -void vvp_fun_modpath::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_modpath::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { /* Only the first port is used. */ if (port.port() > 0) @@ -532,7 +535,7 @@ void vvp_fun_modpath::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) void vvp_fun_modpath::run_run() { - vvp_send_vec4(net_->out, cur_vec4_); + vvp_send_vec4(net_->out, cur_vec4_, 0); } vvp_fun_modpath_src::vvp_fun_modpath_src(vvp_time64_t del[12]) @@ -561,7 +564,8 @@ void vvp_fun_modpath_src::put_delay12(const vvp_time64_t val[12]) delay_[idx] = val[idx]; } -void vvp_fun_modpath_src::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_modpath_src::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { if (port.port() == 0) { // The modpath input... 
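
The change repeated through arith.cc, bufif.cc, concat.cc, dff.cc, array.cc and delay.cc above is a calling-convention change: every recv_vec4()/recv_real() gains a trailing vvp_context_t, and every vvp_send_vec4()/vvp_send_real() call now names the context it propagates in (0 for statically allocated nets). The sketch below is not part of the patch; it only illustrates the two shapes a functor takes after this change, reusing the vvp types and helpers visible in the hunks above. The class names are hypothetical, and it assumes vvp_net_fun_t's virtuals were updated to the three-argument form, as the overrides in arith.h imply.

```cpp
# include  "vvp_net.h"  /* vvp_net_fun_t, vvp_send_vec4, vvp_context_t;
                           the context helpers below are assumed to be
                           declared by the vvp headers on the include path */

/* Statically allocated functor: no per-context state, so the context
   argument is ignored and a null context is forwarded downstream,
   exactly as the arithmetic functors above do. */
class example_fun_sa : public vvp_net_fun_t {
    public:
      void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit,
                     vvp_context_t)
      { vvp_send_vec4(port.ptr()->out, bit, 0); }
};

/* Automatically allocated functor: with a context it updates only that
   instance's state; without one (a structural update) it fans out over
   every live context of its scope, following the pattern of
   vvp_fun_arrayport_aa::recv_vec4 above. */
class example_fun_aa : public vvp_net_fun_t, public automatic_hooks_s {
    public:
      example_fun_aa()
      {
	    context_scope_ = vpip_peek_context_scope();
	    context_idx_ = vpip_add_item_to_context(this, context_scope_);
      }

      void alloc_instance(vvp_context_t context)
      { vvp_set_context_item(context, context_idx_, new vvp_vector4_t); }

      void reset_instance(vvp_context_t context)
      {
	    vvp_vector4_t*val = static_cast<vvp_vector4_t*>
		  (vvp_get_context_item(context, context_idx_));
	    val->set_to_x();
      }

      void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit,
		     vvp_context_t context)
      {
	    if (context) {
		  vvp_vector4_t*val = static_cast<vvp_vector4_t*>
			(vvp_get_context_item(context, context_idx_));
		  *val = bit;
		  vvp_send_vec4(port.ptr()->out, bit, context);
	    } else {
		  context = context_scope_->live_contexts;
		  while (context) {
			recv_vec4(port, bit, context);
			context = vvp_get_next_context(context);
		  }
	    }
      }

    private:
      struct __vpiScope*context_scope_;
      unsigned context_idx_;
};
```

The alloc_instance()/reset_instance() hooks are the points where a functor populates or reinitialises its slot when a per-invocation context is created or recycled; the %alloc and %free opcodes registered in compile.cc above are what drive that allocation for automatic scopes.
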
diff --git a/vvp/delay.h b/vvp/delay.h index d886f0c94..f7da4a907 100644 --- a/vvp/delay.h +++ b/vvp/delay.h @@ -85,9 +85,11 @@ class vvp_fun_delay : public vvp_net_fun_t, private vvp_gen_event_s { vvp_fun_delay(vvp_net_t*net, vvp_bit4_t init, const vvp_delay_t&d); ~vvp_fun_delay(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); - void recv_real(vvp_net_ptr_t port, double bit); + void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t); //void recv_long(vvp_net_ptr_t port, long bit); private: @@ -153,7 +155,8 @@ class vvp_fun_modpath : public vvp_net_fun_t, private vvp_gen_event_s { void add_modpath_src(vvp_fun_modpath_src*that, bool ifnone); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); private: virtual void run_run(); @@ -181,7 +184,8 @@ class vvp_fun_modpath_src : public vvp_net_fun_t { ~vvp_fun_modpath_src(); public: - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); virtual bool test_vec4(const vvp_vector4_t&bit); void get_delay12(vvp_time64_t out[12]) const; diff --git a/vvp/dff.cc b/vvp/dff.cc index a726933ba..0b6b58725 100644 --- a/vvp/dff.cc +++ b/vvp/dff.cc @@ -39,7 +39,8 @@ vvp_dff::~vvp_dff() { } -void vvp_dff::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_dff::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { vvp_bit4_t tmp; @@ -57,7 +58,7 @@ void vvp_dff::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) tmp = clk_cur_; clk_cur_ = bit.value(0); if (clk_cur_ == BIT4_1 && tmp != BIT4_1) - vvp_send_vec4(port.ptr()->out, d_); + vvp_send_vec4(port.ptr()->out, d_, 0); break; case 2: // CE @@ -67,7 +68,7 @@ void vvp_dff::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) case 3: // Asynch-D d_ = bit; - vvp_send_vec4(port.ptr()->out, d_); + vvp_send_vec4(port.ptr()->out, d_, 0); break; } } diff --git a/vvp/dff.h b/vvp/dff.h index 95593fbdb..884541334 100644 --- a/vvp/dff.h +++ b/vvp/dff.h @@ -40,7 +40,8 @@ class vvp_dff : public vvp_net_fun_t { explicit vvp_dff(bool invert_clk =false, bool invert_ce =false); ~vvp_dff(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); private: bool iclk_, ice_; diff --git a/vvp/event.cc b/vvp/event.cc index 91e2c306a..bff6fb18f 100644 --- a/vvp/event.cc +++ b/vvp/event.cc @@ -32,7 +32,7 @@ # include -void waitable_hooks_s::run_waiting_threads_(unsigned context_idx) +void waitable_hooks_s::run_waiting_threads_(vthread_t&threads) { // Run the non-blocking event controls. 
last = &event_ctls; @@ -48,17 +48,11 @@ void waitable_hooks_s::run_waiting_threads_(unsigned context_idx) } } - vthread_t tmp; - if (context_idx) { - waitable_state_s*state = static_cast - (vthread_get_wt_context_item(context_idx)); - tmp = state->threads; - state->threads = 0; - } else { - tmp = threads; - threads = 0; - } - if (tmp) vthread_schedule_list(tmp); + vthread_t tmp = threads; + if (tmp == 0) return; + threads = 0; + + vthread_schedule_list(tmp); } evctl::evctl(unsigned long ecount) @@ -117,9 +111,9 @@ evctl_vector::evctl_vector(vvp_net_ptr_t ptr, const vvp_vector4_t&value, void evctl_vector::run_run() { if (wid_ != 0) { - vvp_send_vec4_pv(ptr_, value_, off_, value_.size(), wid_); + vvp_send_vec4_pv(ptr_, value_, off_, value_.size(), wid_, 0); } else { - vvp_send_vec4(ptr_, value_); + vvp_send_vec4(ptr_, value_, 0); } } @@ -186,114 +180,156 @@ const vvp_fun_edge::edge_t vvp_edge_negedge const vvp_fun_edge::edge_t vvp_edge_none = 0; struct vvp_fun_edge_state_s : public waitable_state_s { - vvp_fun_edge_state_s() : bit(BIT4_X) {} + vvp_fun_edge_state_s() + { + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + bits[idx] = BIT4_X; + } - vvp_bit4_t bit; + vvp_bit4_t bits[4]; }; -vvp_fun_edge::vvp_fun_edge(edge_t e, bool debug_flag) -: edge_(e), debug_(debug_flag) +vvp_fun_edge::vvp_fun_edge(edge_t e) +: edge_(e) { - bits_[0] = BIT4_X; - bits_[1] = BIT4_X; - bits_[2] = BIT4_X; - bits_[3] = BIT4_X; } vvp_fun_edge::~vvp_fun_edge() { } -void vvp_fun_edge::alloc_instance(vvp_context_t context) +bool vvp_fun_edge::recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_bit4_t&old_bit, vthread_t&threads) { - vvp_set_context_item(context, context_idx, new vvp_fun_edge_state_s); -} - -void vvp_fun_edge::reset_instance(vvp_context_t context) -{ - vvp_fun_edge_state_s*state = static_cast - (vvp_get_context_item(context, context_idx)); - state->threads = 0; - state->bit = BIT4_X; -} - -void vvp_fun_edge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) -{ - vvp_bit4_t*old_bit; - if (context_idx) { - vvp_fun_edge_state_s*state = static_cast - (vthread_get_wt_context_item(context_idx)); - old_bit = &state->bit; - } else { - old_bit = &bits_[port.port()]; - } - /* See what kind of edge this represents. */ - edge_t mask = VVP_EDGE(*old_bit, bit.value(0)); + edge_t mask = VVP_EDGE(old_bit, bit.value(0)); /* Save the current input for the next time around. 
*/ - *old_bit = bit.value(0); + old_bit = bit.value(0); if ((edge_ == vvp_edge_none) || (edge_ & mask)) { - run_waiting_threads_(context_idx); + run_waiting_threads_(threads); + return true; + } + return false; +} +vvp_fun_edge_sa::vvp_fun_edge_sa(edge_t e) +: vvp_fun_edge(e), threads_(0) +{ + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + bits_[idx] = BIT4_X; +} + +vvp_fun_edge_sa::~vvp_fun_edge_sa() +{ +} + +vthread_t vvp_fun_edge_sa::add_waiting_thread(vthread_t thread) +{ + vthread_t tmp = threads_; + threads_ = thread; + + return tmp; +} + +void vvp_fun_edge_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + if (recv_vec4_(port, bit, bits_[port.port()], threads_)) { vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, bit); + vvp_send_vec4(net->out, bit, 0); } } +vvp_fun_edge_aa::vvp_fun_edge_aa(edge_t e) +: vvp_fun_edge(e) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_edge_aa::~vvp_fun_edge_aa() +{ +} + +void vvp_fun_edge_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new vvp_fun_edge_state_s); +} + +void vvp_fun_edge_aa::reset_instance(vvp_context_t context) +{ + vvp_fun_edge_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + state->threads = 0; + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + state->bits[idx] = BIT4_X; +} + + +vthread_t vvp_fun_edge_aa::add_waiting_thread(vthread_t thread) +{ + vvp_fun_edge_state_s*state = static_cast + (vthread_get_wt_context_item(context_idx_)); + + vthread_t tmp = state->threads; + state->threads = thread; + + return tmp; +} + +void vvp_fun_edge_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + vvp_fun_edge_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + if (recv_vec4_(port, bit, state->bits[port.port()], state->threads)) { + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} struct vvp_fun_anyedge_state_s : public waitable_state_s { - vvp_fun_anyedge_state_s() : bitsr(0.0) {} + vvp_fun_anyedge_state_s() + { + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + bitsr[idx] = 0.0; + } - vvp_vector4_t bits; - double bitsr; + vvp_vector4_t bits[4]; + double bitsr[4]; }; -vvp_fun_anyedge::vvp_fun_anyedge(bool debug_flag) -: debug_(debug_flag) +vvp_fun_anyedge::vvp_fun_anyedge() { - for (unsigned idx = 0 ; idx < 4 ; idx += 1) - bitsr_[idx] = 0.0; } vvp_fun_anyedge::~vvp_fun_anyedge() { } -void vvp_fun_anyedge::alloc_instance(vvp_context_t context) -{ - vvp_set_context_item(context, context_idx, new vvp_fun_anyedge_state_s); -} - -void vvp_fun_anyedge::reset_instance(vvp_context_t context) -{ - vvp_fun_anyedge_state_s*state = static_cast - (vvp_get_context_item(context, context_idx)); - state->threads = 0; - state->bits.set_to_x(); - state->bitsr = 0.0; -} - -void vvp_fun_anyedge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +bool vvp_fun_anyedge::recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_vector4_t&old_bits, vthread_t&threads) { bool flag = false; - vvp_vector4_t*old_bits; - if (context_idx) { - vvp_fun_anyedge_state_s*state = static_cast - (vthread_get_wt_context_item(context_idx)); - old_bits = &state->bits; - } else { - old_bits = &bits_[port.port()]; - } - - if 
(old_bits->size() != bit.size()) { + if (old_bits.size() != bit.size()) { flag = true; } else { for (unsigned idx = 0 ; idx < bit.size() ; idx += 1) { - if (old_bits->value(idx) != bit.value(idx)) { + if (old_bits.value(idx) != bit.value(idx)) { flag = true; break; } @@ -301,29 +337,136 @@ void vvp_fun_anyedge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) } if (flag) { - *old_bits = bit; - run_waiting_threads_(context_idx); + old_bits = bit; + run_waiting_threads_(threads); + } + + return flag; +} + +bool vvp_fun_anyedge::recv_real_(vvp_net_ptr_t port, double bit, + double&old_bits, vthread_t&threads) +{ + if (old_bits != bit) { + old_bits = bit; + run_waiting_threads_(threads); + return true; + } + return false; +} + +vvp_fun_anyedge_sa::vvp_fun_anyedge_sa() +: threads_(0) +{ + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + bitsr_[idx] = 0.0; +} + +vvp_fun_anyedge_sa::~vvp_fun_anyedge_sa() +{ +} + +vthread_t vvp_fun_anyedge_sa::add_waiting_thread(vthread_t thread) +{ + vthread_t tmp = threads_; + threads_ = thread; + + return tmp; +} + +void vvp_fun_anyedge_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + if (recv_vec4_(port, bit, bits_[port.port()], threads_)) { vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, bit); + vvp_send_vec4(net->out, bit, 0); } } -void vvp_fun_anyedge::recv_real(vvp_net_ptr_t port, double bit) +void vvp_fun_anyedge_sa::recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t) { - double*old_bits; - if (context_idx) { - vvp_fun_anyedge_state_s*state = static_cast - (vthread_get_wt_context_item(context_idx)); - old_bits = &state->bitsr; - } else { - old_bits = &bitsr_[port.port()]; - } - - if (*old_bits != bit) { - *old_bits = bit; - run_waiting_threads_(context_idx); + if (recv_real_(port, bit, bitsr_[port.port()], threads_)) { vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, vvp_vector4_t()); + vvp_send_vec4(net->out, vvp_vector4_t(), 0); + } +} + +vvp_fun_anyedge_aa::vvp_fun_anyedge_aa() +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_anyedge_aa::~vvp_fun_anyedge_aa() +{ +} + +void vvp_fun_anyedge_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new vvp_fun_anyedge_state_s); +} + +void vvp_fun_anyedge_aa::reset_instance(vvp_context_t context) +{ + vvp_fun_anyedge_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + state->threads = 0; + for (unsigned idx = 0 ; idx < 4 ; idx += 1) { + state->bits[idx].set_to_x(); + state->bitsr[idx] = 0.0; + } +} + +vthread_t vvp_fun_anyedge_aa::add_waiting_thread(vthread_t thread) +{ + vvp_fun_anyedge_state_s*state = static_cast + (vthread_get_wt_context_item(context_idx_)); + + vthread_t tmp = state->threads; + state->threads = thread; + + return tmp; +} + +void vvp_fun_anyedge_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + vvp_fun_anyedge_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + if (recv_vec4_(port, bit, state->bits[port.port()], state->threads)) { + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} + +void vvp_fun_anyedge_aa::recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t context) +{ + if (context) { + vvp_fun_anyedge_state_s*state = 
static_cast + (vvp_get_context_item(context, context_idx_)); + + if (recv_real_(port, bit, state->bitsr[port.port()], state->threads)) { + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, vvp_vector4_t(), context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_real(port, bit, context); + context = vvp_get_next_context(context); + } } } @@ -335,23 +478,82 @@ vvp_fun_event_or::~vvp_fun_event_or() { } -void vvp_fun_event_or::alloc_instance(vvp_context_t context) +vvp_fun_event_or_sa::vvp_fun_event_or_sa() +: threads_(0) { - vvp_set_context_item(context, context_idx, new waitable_state_s); } -void vvp_fun_event_or::reset_instance(vvp_context_t context) +vvp_fun_event_or_sa::~vvp_fun_event_or_sa() +{ +} + +vthread_t vvp_fun_event_or_sa::add_waiting_thread(vthread_t thread) +{ + vthread_t tmp = threads_; + threads_ = thread; + + return tmp; +} + +void vvp_fun_event_or_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + run_waiting_threads_(threads_); + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, 0); +} + +vvp_fun_event_or_aa::vvp_fun_event_or_aa() +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_event_or_aa::~vvp_fun_event_or_aa() +{ +} + +void vvp_fun_event_or_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new waitable_state_s); +} + +void vvp_fun_event_or_aa::reset_instance(vvp_context_t context) { waitable_state_s*state = static_cast - (vvp_get_context_item(context, context_idx)); + (vvp_get_context_item(context, context_idx_)); + state->threads = 0; } -void vvp_fun_event_or::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +vthread_t vvp_fun_event_or_aa::add_waiting_thread(vthread_t thread) { - run_waiting_threads_(context_idx); - vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, bit); + waitable_state_s*state = static_cast + (vthread_get_wt_context_item(context_idx_)); + + vthread_t tmp = state->threads; + state->threads = thread; + + return tmp; +} + +void vvp_fun_event_or_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + waitable_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + run_waiting_threads_(state->threads); + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, context); + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } } vvp_named_event::vvp_named_event(struct __vpiHandle*h) @@ -363,25 +565,78 @@ vvp_named_event::~vvp_named_event() { } -void vvp_named_event::alloc_instance(vvp_context_t context) +vvp_named_event_sa::vvp_named_event_sa(struct __vpiHandle*h) +: vvp_named_event(h), threads_(0) { - vvp_set_context_item(context, context_idx, new waitable_state_s); } -void vvp_named_event::reset_instance(vvp_context_t context) +vvp_named_event_sa::~vvp_named_event_sa() +{ +} + +vthread_t vvp_named_event_sa::add_waiting_thread(vthread_t thread) +{ + vthread_t tmp = threads_; + threads_ = thread; + + return tmp; +} + +void vvp_named_event_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + run_waiting_threads_(threads_); + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, 0); + + vpip_run_named_event_callbacks(handle_); +} + +vvp_named_event_aa::vvp_named_event_aa(struct __vpiHandle*h) +: vvp_named_event(h) +{ + context_idx_ = 
vpip_add_item_to_context(this, vpip_peek_context_scope()); +} + +vvp_named_event_aa::~vvp_named_event_aa() +{ +} + +void vvp_named_event_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new waitable_state_s); +} + +void vvp_named_event_aa::reset_instance(vvp_context_t context) { waitable_state_s*state = static_cast - (vvp_get_context_item(context, context_idx)); + (vvp_get_context_item(context, context_idx_)); + state->threads = 0; } -void vvp_named_event::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +vthread_t vvp_named_event_aa::add_waiting_thread(vthread_t thread) { - run_waiting_threads_(context_idx); - vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, bit); + waitable_state_s*state = static_cast + (vthread_get_wt_context_item(context_idx_)); - vpip_run_named_event_callbacks(handle_); + vthread_t tmp = state->threads; + state->threads = thread; + + return tmp; +} + +void vvp_named_event_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + assert(context); + + waitable_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + run_waiting_threads_(state->threads); + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, context); } /* @@ -394,9 +649,7 @@ void vvp_named_event::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) static void compile_event_or(char*label, unsigned argc, struct symb_s*argv); -void compile_event(char*label, char*type, - unsigned argc, struct symb_s*argv, - bool debug_flag) +void compile_event(char*label, char*type, unsigned argc, struct symb_s*argv) { vvp_net_fun_t*fun = 0; @@ -408,9 +661,12 @@ void compile_event(char*label, char*type, if (strcmp(type,"edge") == 0) { free(type); - vvp_fun_anyedge*event_fun = new vvp_fun_anyedge(debug_flag); - vpip_add_item_to_current_scope(event_fun); - fun = event_fun; + + if (vpip_peek_current_scope()->is_automatic) { + fun = new vvp_fun_anyedge_aa; + } else { + fun = new vvp_fun_anyedge_sa; + } } else { @@ -424,9 +680,12 @@ void compile_event(char*label, char*type, assert(argc <= 4); free(type); - vvp_fun_edge*event_fun = new vvp_fun_edge(edge, debug_flag); - vpip_add_item_to_current_scope(event_fun); - fun = event_fun; + if (vpip_peek_current_scope()->is_automatic) { + fun = new vvp_fun_edge_aa(edge); + } else { + fun = new vvp_fun_edge_sa(edge); + } + } vvp_net_t* ptr = new vvp_net_t; @@ -440,11 +699,12 @@ void compile_event(char*label, char*type, static void compile_event_or(char*label, unsigned argc, struct symb_s*argv) { - vvp_fun_event_or*fun = new vvp_fun_event_or; vvp_net_t* ptr = new vvp_net_t; - ptr->fun = fun; - - vpip_add_item_to_current_scope(fun); + if (vpip_peek_current_scope()->is_automatic) { + ptr->fun = new vvp_fun_event_or_aa; + } else { + ptr->fun = new vvp_fun_event_or_sa; + } define_functor_symbol(label, ptr); free(label); @@ -466,10 +726,12 @@ void compile_named_event(char*label, char*name) vvp_net_t*ptr = new vvp_net_t; vpiHandle obj = vpip_make_named_event(name, ptr); - vvp_named_event*fun = new vvp_named_event(obj); - ptr->fun = fun; - vpip_add_item_to_current_scope(fun); + if (vpip_peek_current_scope()->is_automatic) { + ptr->fun = new vvp_named_event_aa(obj); + } else { + ptr->fun = new vvp_named_event_sa(obj); + } define_functor_symbol(label, ptr); compile_vpi_symbol(label, obj); vpip_attach_to_current_scope(obj); diff --git a/vvp/event.h b/vvp/event.h index 2d66416e3..8a769d8af 100644 --- a/vvp/event.h +++ b/vvp/event.h @@ -102,13 +102,15 @@ extern void 
schedule_evctl(vvp_array_t memory, unsigned index, struct waitable_hooks_s { public: - waitable_hooks_s() : threads(0), event_ctls(0) { last = &event_ctls; } - vthread_t threads; + waitable_hooks_s() : event_ctls(0) { last = &event_ctls; } + + virtual vthread_t add_waiting_thread(vthread_t thread) = 0; + evctl*event_ctls; evctl**last; protected: - void run_waiting_threads_(unsigned context_idx); + void run_waiting_threads_(vthread_t&threads); }; /* @@ -118,6 +120,7 @@ struct waitable_hooks_s { */ struct waitable_state_s { waitable_state_s() : threads(0) { } + vthread_t threads; }; @@ -130,24 +133,62 @@ class vvp_fun_edge : public vvp_net_fun_t, public waitable_hooks_s { public: typedef unsigned short edge_t; - explicit vvp_fun_edge(edge_t e, bool debug_flag); + explicit vvp_fun_edge(edge_t e); virtual ~vvp_fun_edge(); - void alloc_instance(vvp_context_t context); - void reset_instance(vvp_context_t context); - - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + protected: + bool recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_bit4_t&old_bit, vthread_t&threads); private: - vvp_bit4_t bits_[4]; edge_t edge_; - bool debug_; }; extern const vvp_fun_edge::edge_t vvp_edge_posedge; extern const vvp_fun_edge::edge_t vvp_edge_negedge; extern const vvp_fun_edge::edge_t vvp_edge_none; +/* + * Statically allocated vvp_fun_edge. + */ +class vvp_fun_edge_sa : public vvp_fun_edge { + + public: + explicit vvp_fun_edge_sa(edge_t e); + virtual ~vvp_fun_edge_sa(); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + private: + vthread_t threads_; + vvp_bit4_t bits_[4]; +}; + +/* + * Automatically allocated vvp_fun_edge. + */ +class vvp_fun_edge_aa : public vvp_fun_edge, public automatic_hooks_s { + + public: + explicit vvp_fun_edge_aa(edge_t e); + virtual ~vvp_fun_edge_aa(); + + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + private: + struct __vpiScope*context_scope_; + unsigned context_idx_; +}; + /* * The vvp_fun_anyedge functor checks to see if any value in an input * vector changes. Unlike the vvp_fun_edge, which watches for the LSB @@ -161,20 +202,63 @@ extern const vvp_fun_edge::edge_t vvp_edge_none; class vvp_fun_anyedge : public vvp_net_fun_t, public waitable_hooks_s { public: - explicit vvp_fun_anyedge(bool debug_flag); + explicit vvp_fun_anyedge(); virtual ~vvp_fun_anyedge(); + protected: + bool recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_vector4_t&old_bits, vthread_t&threads); + bool recv_real_(vvp_net_ptr_t port, double bit, + double&old_bits, vthread_t&threads); +}; + +/* + * Statically allocated vvp_fun_anyedge. + */ +class vvp_fun_anyedge_sa : public vvp_fun_anyedge { + + public: + explicit vvp_fun_anyedge_sa(); + virtual ~vvp_fun_anyedge_sa(); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t context); + + private: + vthread_t threads_; + vvp_vector4_t bits_[4]; + // In case I'm a real-valued event. + double bitsr_[4]; +}; + +/* + * Automatically allocated vvp_fun_anyedge. 
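The statically allocated variants above keep their waiting threads as a plain member: add_waiting_thread() stores the new thread at the head of an intrusive list and returns the previous head (the caller records it as its wait_next link), and triggering detaches the whole list in one step. A minimal standalone sketch of that handshake; the Thread and StaticEvent types are illustrative stand-ins, not the real vthread_t or vvp functor machinery.

// Minimal model of the waiting-thread list used by the *_sa functors.
#include <cstdio>

struct Thread {
      const char*name;
      Thread*wait_next;      // next thread waiting on the same event
};

struct StaticEvent {
      Thread*threads;        // head of the intrusive wait list

      StaticEvent() : threads(0) {}

      // Corresponds to add_waiting_thread(): push the thread and hand back
      // the old head so the thread can remember it as its wait_next link.
      Thread* add_waiting_thread(Thread*thr)
      {
            Thread*tmp = threads;
            threads = thr;
            return tmp;
      }

      // Corresponds to run_waiting_threads_(): detach the whole list at
      // once and schedule it (here we just print the names).
      void trigger()
      {
            Thread*tmp = threads;
            if (tmp == 0) return;
            threads = 0;
            for ( ; tmp ; tmp = tmp->wait_next)
                  printf("waking %s\n", tmp->name);
      }
};

int main()
{
      StaticEvent ev;
      Thread a = { "a", 0 }, b = { "b", 0 };
      a.wait_next = ev.add_waiting_thread(&a);
      b.wait_next = ev.add_waiting_thread(&b);
      ev.trigger();          // wakes b, then a
      return 0;
}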
+ */ +class vvp_fun_anyedge_aa : public vvp_fun_anyedge, public automatic_hooks_s { + + public: + explicit vvp_fun_anyedge_aa(); + virtual ~vvp_fun_anyedge_aa(); + void alloc_instance(vvp_context_t context); void reset_instance(vvp_context_t context); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t port, double bit); + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t context); private: - bool debug_; - vvp_vector4_t bits_[4]; - // In case I'm a real-valued event. - double bitsr_[4]; + struct __vpiScope*context_scope_; + unsigned context_idx_; }; /* @@ -186,13 +270,46 @@ class vvp_fun_event_or : public vvp_net_fun_t, public waitable_hooks_s { public: explicit vvp_fun_event_or(); ~vvp_fun_event_or(); +}; + +/* + * Statically allocated vvp_fun_event_or. + */ +class vvp_fun_event_or_sa : public vvp_fun_event_or { + + public: + explicit vvp_fun_event_or_sa(); + ~vvp_fun_event_or_sa(); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + private: + vthread_t threads_; +}; + +/* + * Automatically allocated vvp_fun_event_or. + */ +class vvp_fun_event_or_aa : public vvp_fun_event_or, public automatic_hooks_s { + + public: + explicit vvp_fun_event_or_aa(); + ~vvp_fun_event_or_aa(); void alloc_instance(vvp_context_t context); void reset_instance(vvp_context_t context); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); private: + struct __vpiScope*context_scope_; + unsigned context_idx_; }; /* @@ -206,13 +323,47 @@ class vvp_named_event : public vvp_net_fun_t, public waitable_hooks_s { explicit vvp_named_event(struct __vpiHandle*eh); ~vvp_named_event(); - void alloc_instance(vvp_context_t context); - void reset_instance(vvp_context_t context); - - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); - - private: + protected: struct __vpiHandle*handle_; }; +/* + * Statically allocated vvp_named_event. + */ +class vvp_named_event_sa : public vvp_named_event { + + public: + explicit vvp_named_event_sa(struct __vpiHandle*eh); + ~vvp_named_event_sa(); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); + + private: + vthread_t threads_; +}; + +/* + * Automatically allocated vvp_named_event. 
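All of the automatically allocated functors share one dispatch rule: when a value arrives with an explicit context, only that context's state is touched, but when it arrives with no context (for example from a net outside the automatic scope), the functor walks the scope's live_contexts list and re-delivers the value to every live instance. A small self-contained sketch of that fan-out, using simplified Context and Scope types rather than the real vvp_context_t plumbing.

// Sketch of the "no context? fan out to every live context" pattern used
// by the *_aa functors.
#include <cstdio>

struct Context {
      Context*next_live;     // scope's list of live contexts
      int id;
};

struct Scope {
      Context*live_contexts;
};

static void deliver(Scope&scope, Context*context, int value)
{
      if (context) {
            // Normal case: update only the state belonging to this context.
            printf("context %d gets %d\n", context->id, value);
      } else {
            // No specific context: the value came from outside the automatic
            // scope, so every live instance must see it.
            for (Context*cur = scope.live_contexts ; cur ; cur = cur->next_live)
                  deliver(scope, cur, value);
      }
}

int main()
{
      Context c1 = { 0, 1 }, c2 = { &c1, 2 };
      Scope scope = { &c2 };
      deliver(scope, 0, 42);   // fans out to contexts 2 and 1
      return 0;
}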
+ */ +class vvp_named_event_aa : public vvp_named_event, public automatic_hooks_s { + + public: + explicit vvp_named_event_aa(struct __vpiHandle*eh); + ~vvp_named_event_aa(); + + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + private: + unsigned context_idx_; +}; + #endif // __event_H diff --git a/vvp/extend.cc b/vvp/extend.cc index 1e5b13f04..4927026fa 100644 --- a/vvp/extend.cc +++ b/vvp/extend.cc @@ -35,10 +35,11 @@ vvp_fun_extend_signed::~vvp_fun_extend_signed() { } -void vvp_fun_extend_signed::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_extend_signed::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { if (bit.size() >= width_) { - vvp_send_vec4(port.ptr()->out, bit); + vvp_send_vec4(port.ptr()->out, bit, 0); return; } @@ -51,5 +52,5 @@ void vvp_fun_extend_signed::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bi for (unsigned idx = bit.size() ; idx < res.size() ; idx += 1) res.set_bit(idx, pad); - vvp_send_vec4(port.ptr()->out, res); + vvp_send_vec4(port.ptr()->out, res, 0); } diff --git a/vvp/lexor.lex b/vvp/lexor.lex index 0cf3fc272..a2c19b7d5 100644 --- a/vvp/lexor.lex +++ b/vvp/lexor.lex @@ -183,8 +183,6 @@ "%vpi_func/r" { return K_vpi_func_r; } "%disable" { return K_disable; } "%fork" { return K_fork; } -"%alloc" { return K_alloc; } -"%free" { return K_free; } /* Handle the specialized variable access functions. */ diff --git a/vvp/logic.cc b/vvp/logic.cc index 3ba7c965b..6eb59db4b 100644 --- a/vvp/logic.cc +++ b/vvp/logic.cc @@ -42,7 +42,8 @@ vvp_fun_boolean_::~vvp_fun_boolean_() { } -void vvp_fun_boolean_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_boolean_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { unsigned port = ptr.port(); if (input_[port] .eeq( bit )) @@ -56,7 +57,8 @@ void vvp_fun_boolean_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) } void vvp_fun_boolean_::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { unsigned port = ptr.port(); @@ -106,7 +108,7 @@ void vvp_fun_and::run_run() result.set_bit(idx, bitbit); } - vvp_send_vec4(ptr->out, result); + vvp_send_vec4(ptr->out, result, 0); } vvp_fun_buf::vvp_fun_buf() @@ -123,7 +125,8 @@ vvp_fun_buf::~vvp_fun_buf() * The buf functor is very simple--change the z bits to x bits in the * vector it passes, and propagate the result. */ -void vvp_fun_buf::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_buf::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { if (ptr.port() != 0) return; @@ -146,7 +149,7 @@ void vvp_fun_buf::run_run() vvp_vector4_t tmp (input_); tmp.change_z2x(); - vvp_send_vec4(ptr->out, tmp); + vvp_send_vec4(ptr->out, tmp, 0); } vvp_fun_bufz::vvp_fun_bufz() @@ -162,20 +165,22 @@ vvp_fun_bufz::~vvp_fun_bufz() * The bufz is similar to the buf device, except that it does not * bother translating z bits to x. 
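That comment is the whole behavioural difference between the two devices: buf converts Z bits to X before propagating (the change_z2x() call above), while bufz forwards the vector untouched. A toy 4-state sketch of the conversion; the bit4 enum and its ordering are illustrative, not the real vvp_bit4_t definition.

// Toy 4-state vector showing the buf/bufz difference: buf maps Z to X,
// bufz passes the value through unchanged.
#include <cstdio>
#include <vector>

enum bit4 { B0, B1, BX, BZ };      // illustrative ordering only

static void change_z2x(std::vector<bit4>&vec)
{
      for (size_t idx = 0 ; idx < vec.size() ; idx += 1)
            if (vec[idx] == BZ) vec[idx] = BX;
}

int main()
{
      std::vector<bit4> in = { B1, BZ, B0, BX };

      std::vector<bit4> via_buf = in;
      change_z2x(via_buf);             // buf: Z becomes X

      std::vector<bit4> via_bufz = in; // bufz: unchanged

      for (size_t idx = 0 ; idx < in.size() ; idx += 1)
            printf("bit %zu: buf=%d bufz=%d\n", idx, via_buf[idx], via_bufz[idx]);
      return 0;
}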
*/ -void vvp_fun_bufz::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_bufz::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { if (ptr.port() != 0) return; - vvp_send_vec4(ptr.ptr()->out, bit); + vvp_send_vec4(ptr.ptr()->out, bit, 0); } -void vvp_fun_bufz::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_fun_bufz::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { if (ptr.port() != 0) return; - vvp_send_real(ptr.ptr()->out, bit); + vvp_send_real(ptr.ptr()->out, bit, 0); } vvp_fun_muxr::vvp_fun_muxr() @@ -190,7 +195,8 @@ vvp_fun_muxr::~vvp_fun_muxr() { } -void vvp_fun_muxr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_muxr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { /* The real valued mux can only take in the select as a vector4_t. The muxed data is real. */ @@ -219,7 +225,8 @@ void vvp_fun_muxr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) } } -void vvp_fun_muxr::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_fun_muxr::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { switch (ptr.port()) { case 0: @@ -252,16 +259,16 @@ void vvp_fun_muxr::run_run() switch (select_) { case SEL_PORT0: - vvp_send_real(ptr->out, a_); + vvp_send_real(ptr->out, a_, 0); break; case SEL_PORT1: - vvp_send_real(ptr->out, b_); + vvp_send_real(ptr->out, b_, 0); break; default: if (a_ == b_) { - vvp_send_real(ptr->out, a_); + vvp_send_real(ptr->out, a_, 0); } else { - vvp_send_real(ptr->out, 0.0); // Should this be NaN? + vvp_send_real(ptr->out, 0.0, 0); // Should this be NaN? } break; } @@ -284,7 +291,8 @@ vvp_fun_muxz::~vvp_fun_muxz() { } -void vvp_fun_muxz::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_muxz::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { switch (ptr.port()) { case 0: @@ -331,10 +339,10 @@ void vvp_fun_muxz::run_run() switch (select_) { case SEL_PORT0: - vvp_send_vec4(ptr->out, a_); + vvp_send_vec4(ptr->out, a_, 0); break; case SEL_PORT1: - vvp_send_vec4(ptr->out, b_); + vvp_send_vec4(ptr->out, b_, 0); break; default: { @@ -357,7 +365,7 @@ void vvp_fun_muxz::run_run() for (unsigned idx = min_size ; idx < max_size ; idx += 1) res.set_bit(idx, BIT4_X); - vvp_send_vec4(ptr->out, res); + vvp_send_vec4(ptr->out, res, 0); } break; } @@ -377,7 +385,8 @@ vvp_fun_not::~vvp_fun_not() * The buf functor is very simple--change the z bits to x bits in the * vector it passes, and propagate the result. 
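The muxz default branch above is worth spelling out: when the select is neither 0 nor 1, bits where the two data inputs agree pass through and every other bit (including width-mismatch padding) becomes X. A standalone sketch of that resolution under the same assumptions as the previous example (toy bit4 enum, plain std::vector instead of vvp_vector4_t).

// Sketch of the muxz resolution when the select is unknown: agreeing bits
// pass through, disagreeing (or missing) bits become X.
#include <cstdio>
#include <vector>

enum bit4 { B0, B1, BX, BZ };      // illustrative 4-state encoding

static std::vector<bit4> mux_unknown_select(const std::vector<bit4>&a,
                                            const std::vector<bit4>&b)
{
      size_t min_size = a.size() < b.size() ? a.size() : b.size();
      size_t max_size = a.size() > b.size() ? a.size() : b.size();

      std::vector<bit4> res(max_size, BX);
      for (size_t idx = 0 ; idx < min_size ; idx += 1)
            res[idx] = (a[idx] == b[idx]) ? a[idx] : BX;
      // bits from min_size up to max_size-1 stay X
      return res;
}

int main()
{
      std::vector<bit4> a = { B1, B0, B1 };
      std::vector<bit4> b = { B1, B1 };
      std::vector<bit4> r = mux_unknown_select(a, b);
      for (size_t idx = 0 ; idx < r.size() ; idx += 1)
            printf("res[%zu] = %d\n", idx, r[idx]);
      return 0;
}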
*/ -void vvp_fun_not::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_not::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { if (ptr.port() != 0) return; @@ -404,7 +413,7 @@ void vvp_fun_not::run_run() result.set_bit(idx, bitbit); } - vvp_send_vec4(ptr->out, result); + vvp_send_vec4(ptr->out, result, 0); } vvp_fun_or::vvp_fun_or(unsigned wid, bool invert) @@ -440,7 +449,7 @@ void vvp_fun_or::run_run() result.set_bit(idx, bitbit); } - vvp_send_vec4(ptr->out, result); + vvp_send_vec4(ptr->out, result, 0); } vvp_fun_xor::vvp_fun_xor(unsigned wid, bool invert) @@ -476,7 +485,7 @@ void vvp_fun_xor::run_run() result.set_bit(idx, bitbit); } - vvp_send_vec4(ptr->out, result); + vvp_send_vec4(ptr->out, result, 0); } /* diff --git a/vvp/logic.h b/vvp/logic.h index 841ce4aaa..ddbb66d48 100644 --- a/vvp/logic.h +++ b/vvp/logic.h @@ -32,9 +32,11 @@ class vvp_fun_boolean_ : public vvp_net_fun_t, protected vvp_gen_event_s { explicit vvp_fun_boolean_(unsigned wid); ~vvp_fun_boolean_(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec4_pv(vvp_net_ptr_t p, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t); protected: vvp_vector4_t input_[4]; @@ -64,7 +66,8 @@ class vvp_fun_buf: public vvp_net_fun_t, private vvp_gen_event_s { explicit vvp_fun_buf(); virtual ~vvp_fun_buf(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); private: void run_run(); @@ -84,8 +87,10 @@ class vvp_fun_bufz: public vvp_net_fun_t { explicit vvp_fun_bufz(); virtual ~vvp_fun_bufz(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t p, double bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); + void recv_real(vvp_net_ptr_t p, double bit, + vvp_context_t); private: }; @@ -109,7 +114,8 @@ class vvp_fun_muxz : public vvp_net_fun_t, private vvp_gen_event_s { explicit vvp_fun_muxz(unsigned width); virtual ~vvp_fun_muxz(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); private: void run_run(); @@ -128,8 +134,10 @@ class vvp_fun_muxr : public vvp_net_fun_t, private vvp_gen_event_s { explicit vvp_fun_muxr(); virtual ~vvp_fun_muxr(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t p, double bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); + void recv_real(vvp_net_ptr_t p, double bit, + vvp_context_t); private: void run_run(); @@ -147,7 +155,8 @@ class vvp_fun_not: public vvp_net_fun_t, private vvp_gen_event_s { explicit vvp_fun_not(); virtual ~vvp_fun_not(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); private: void run_run(); diff --git a/vvp/npmos.cc b/vvp/npmos.cc index 2a1243bde..1d8db67b5 100644 --- a/vvp/npmos.cc +++ b/vvp/npmos.cc @@ -28,7 +28,8 @@ vvp_fun_pmos_::vvp_fun_pmos_(bool enable_invert) } -void vvp_fun_pmos_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_pmos_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { /* Data input is processed through eh recv_vec8 method, because the strength must be preserved. 
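The MOS functors take their data through recv_vec8 so drive strength survives to the output, but the enable input only needs a logic value, so the reduce4() calls above collapse the strength-carrying vector to a plain 4-state one first. A toy model of that reduction; the sbit struct and its strength fields are illustrative, not the real vvp_vector8_t encoding.

// Toy model of reducing a strength-carrying bit (vec8-style) to a plain
// 4-state bit (vec4-style): the logic value is kept, the strengths dropped.
#include <cstdio>
#include <vector>

enum bit4 { B0, B1, BX, BZ };          // illustrative encoding

struct sbit {                          // stand-in for a strength-bearing scalar
      bit4 value;
      unsigned str0, str1;             // drive strengths toward 0 and 1
};

static std::vector<bit4> reduce4(const std::vector<sbit>&in)
{
      std::vector<bit4> out(in.size(), BX);
      for (size_t idx = 0 ; idx < in.size() ; idx += 1)
            out[idx] = in[idx].value;  // strength information is discarded
      return out;
}

int main()
{
      std::vector<sbit> strong = { { B1, 6, 6 }, { B0, 6, 6 }, { BZ, 0, 0 } };
      std::vector<bit4> plain = reduce4(strong);
      for (size_t idx = 0 ; idx < plain.size() ; idx += 1)
            printf("bit %zu -> %d\n", idx, plain[idx]);
      return 0;
}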
*/ @@ -89,7 +90,7 @@ vvp_fun_pmos::vvp_fun_pmos(bool enable_invert) void vvp_fun_pmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) { if (ptr.port() == 1) { - recv_vec4(ptr, reduce4(bit)); + recv_vec4(ptr, reduce4(bit), 0); return; } @@ -108,7 +109,7 @@ vvp_fun_rpmos::vvp_fun_rpmos(bool enable_invert) void vvp_fun_rpmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) { if (ptr.port() == 1) { - recv_vec4(ptr, reduce4(bit)); + recv_vec4(ptr, reduce4(bit), 0); return; } @@ -128,7 +129,8 @@ vvp_fun_cmos_::vvp_fun_cmos_() { } -void vvp_fun_cmos_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t &bit) +void vvp_fun_cmos_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t &bit, + vvp_context_t) { /* Data input is processed through the recv_vec8 method, because the strength must be preserved. */ @@ -190,7 +192,7 @@ vvp_fun_cmos::vvp_fun_cmos() void vvp_fun_cmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) { if (ptr.port() == 1 || ptr.port() == 2) { - recv_vec4(ptr, reduce4(bit)); + recv_vec4(ptr, reduce4(bit), 0); return; } @@ -209,7 +211,7 @@ vvp_fun_rcmos::vvp_fun_rcmos() void vvp_fun_rcmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) { if (ptr.port() == 1) { - recv_vec4(ptr, reduce4(bit)); + recv_vec4(ptr, reduce4(bit), 0); return; } @@ -219,4 +221,3 @@ void vvp_fun_rcmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) bit_ = resistive_reduction(bit); generate_output_(ptr); } - diff --git a/vvp/npmos.h b/vvp/npmos.h index dc97b7290..31d36400a 100644 --- a/vvp/npmos.h +++ b/vvp/npmos.h @@ -51,7 +51,8 @@ class vvp_fun_pmos_ : public vvp_net_fun_t { public: explicit vvp_fun_pmos_(bool enable_invert); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); protected: void generate_output_(vvp_net_ptr_t port); @@ -107,7 +108,8 @@ class vvp_fun_cmos_ : public vvp_net_fun_t { public: explicit vvp_fun_cmos_(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t &bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t &bit, + vvp_context_t); protected: void generate_output_(vvp_net_ptr_t port); diff --git a/vvp/parse.y b/vvp/parse.y index 3502dd2d9..cd4081bb5 100644 --- a/vvp/parse.y +++ b/vvp/parse.y @@ -85,7 +85,7 @@ static struct __vpiModPath*modpath_dst = 0; %token K_THREAD K_TIMESCALE K_TRAN K_TRANIF0 K_TRANIF1 K_TRANVP K_UFUNC %token K_UDP K_UDP_C K_UDP_S %token K_VAR K_VAR_S K_VAR_I K_VAR_R K_vpi_call K_vpi_func K_vpi_func_r -%token K_disable K_fork K_alloc K_free +%token K_disable K_fork %token K_vpi_module K_vpi_time_precision K_file_names %token T_INSTR @@ -483,16 +483,16 @@ statement named event instead. */ | T_LABEL K_EVENT T_SYMBOL ',' symbols ';' - { compile_event($1, $3, $5.cnt, $5.vect, false); } + { compile_event($1, $3, $5.cnt, $5.vect); } | T_LABEL K_EVENT K_DEBUG T_SYMBOL ',' symbols ';' - { compile_event($1, $4, $6.cnt, $6.vect, true); } + { compile_event($1, $4, $6.cnt, $6.vect); } | T_LABEL K_EVENT T_STRING ';' { compile_named_event($1, $3); } | T_LABEL K_EVENT_OR symbols ';' - { compile_event($1, 0, $3.cnt, $3.vect, false); } + { compile_event($1, 0, $3.cnt, $3.vect); } /* Instructions may have a label, and have zero or more @@ -533,12 +533,6 @@ statement | label_opt K_fork symbol ',' symbol ';' { compile_fork($1, $3, $5); } - | label_opt K_alloc symbol ';' - { compile_alloc($1, $3); } - - | label_opt K_free symbol ';' - { compile_free($1, $3); } - /* Scope statements come in two forms. There are the scope declaration and the scope recall. 
The declarations create the scope, with their association with a parent. The label of the diff --git a/vvp/part.cc b/vvp/part.cc index 3b7f6e500..0db98b927 100644 --- a/vvp/part.cc +++ b/vvp/part.cc @@ -27,17 +27,34 @@ # include # include +struct vvp_fun_part_state_s { + vvp_fun_part_state_s() : bitsr(0.0) {} + + vvp_vector4_t bits; + double bitsr; +}; + vvp_fun_part::vvp_fun_part(unsigned base, unsigned wid) : base_(base), wid_(wid) { - net_ = 0; } vvp_fun_part::~vvp_fun_part() { } -void vvp_fun_part::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +vvp_fun_part_sa::vvp_fun_part_sa(unsigned base, unsigned wid) +: vvp_fun_part(base, wid) +{ + net_ = 0; +} + +vvp_fun_part_sa::~vvp_fun_part_sa() +{ +} + +void vvp_fun_part_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { assert(port.port() == 0); @@ -55,11 +72,12 @@ void vvp_fun_part::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) /* * Handle the case that the part select node is actually fed by a part * select assignment. It's not exactly clear what might make this - * happen, but is does seem to happen and this should have sell + * happen, but is does seem to happen and this should have well * defined behavior. */ -void vvp_fun_part::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) +void vvp_fun_part_sa::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { assert(bit.size() == wid); @@ -69,10 +87,10 @@ void vvp_fun_part::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, assert(tmp.size() == vwid); tmp.set_vec(base, bit); - recv_vec4(port, tmp); + recv_vec4(port, tmp, 0); } -void vvp_fun_part::run_run() +void vvp_fun_part_sa::run_run() { vvp_net_t*ptr = net_; net_ = 0; @@ -82,7 +100,90 @@ void vvp_fun_part::run_run() if (idx + base_ < val_.size()) res.set_bit(idx, val_.value(base_+idx)); } - vvp_send_vec4(ptr->out, res); + vvp_send_vec4(ptr->out, res, 0); +} + +vvp_fun_part_aa::vvp_fun_part_aa(unsigned base, unsigned wid) +: vvp_fun_part(base, wid) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_part_aa::~vvp_fun_part_aa() +{ +} + +void vvp_fun_part_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new vvp_vector4_t); +} + +void vvp_fun_part_aa::reset_instance(vvp_context_t context) +{ + vvp_vector4_t*val = static_cast + (vvp_get_context_item(context, context_idx_)); + + val->set_to_x(); +} + +void vvp_fun_part_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + assert(port.port() == 0); + + vvp_vector4_t*val = static_cast + (vvp_get_context_item(context, context_idx_)); + + vvp_vector4_t tmp (wid_, BIT4_X); + for (unsigned idx = 0 ; idx < wid_ ; idx += 1) { + if (idx + base_ < bit.size()) + tmp.set_bit(idx, bit.value(base_+idx)); + } + if (!val->eeq( tmp )) { + *val = tmp; + vvp_send_vec4(port.ptr()->out, tmp, context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} + +/* + * Handle the case that the part select node is actually fed by a part + * select assignment. It's not exactly clear what might make this + * happen, but is does seem to happen and this should have well + * defined behavior. 
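Both the _sa and _aa part-select functors handle that case the same way: merge the incoming slice into the last full-width value seen (an X-filled vector if nothing has been seen yet) and then run the ordinary full-width recv_vec4 path. A self-contained sketch of that widen-and-redeliver step, with simplified types and printf standing in for the real output propagation.

// Sketch of the recv_vec4_pv handling: merge a narrow slice into the cached
// full-width vector, then hand the result to the normal full-width path.
#include <cassert>
#include <cstdio>
#include <vector>

enum bit4 { B0, B1, BX, BZ };          // illustrative encoding

struct PartSelect {
      unsigned base, wid;              // the slice this node extracts
      std::vector<bit4> cached;        // last full-width input seen

      void recv_vec4(const std::vector<bit4>&bit)
      {
            cached = bit;
            // Extract wid bits starting at base; out-of-range bits stay X.
            std::vector<bit4> res(wid, BX);
            for (unsigned idx = 0 ; idx < wid ; idx += 1)
                  if (base + idx < bit.size()) res[idx] = bit[base + idx];
            for (unsigned idx = 0 ; idx < wid ; idx += 1)
                  printf("out[%u] = %d\n", idx, res[idx]);
      }

      void recv_vec4_pv(const std::vector<bit4>&bit,
                        unsigned pbase, unsigned vwid)
      {
            std::vector<bit4> tmp = cached;
            if (tmp.empty()) tmp.assign(vwid, BX);   // nothing seen yet
            assert(tmp.size() == vwid);
            for (unsigned idx = 0 ; idx < bit.size() ; idx += 1)
                  tmp[pbase + idx] = bit[idx];       // widen the slice
            recv_vec4(tmp);                          // reuse the normal path
      }
};

int main()
{
      PartSelect ps = { 2, 2, {} };
      ps.recv_vec4_pv(std::vector<bit4>{ B1, B0 }, 2, 8);
      return 0;
}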
+ */ +void vvp_fun_part_aa::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context) +{ + if (context) { + assert(bit.size() == wid); + + vvp_vector4_t*val = static_cast + (vvp_get_context_item(context, context_idx_)); + + vvp_vector4_t tmp = *val; + if (tmp.size() == 0) + tmp = vvp_vector4_t(vwid); + + assert(tmp.size() == vwid); + tmp.set_vec(base, bit); + recv_vec4(port, tmp, context); + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4_pv(port, bit, base, wid, vwid, context); + context = vvp_get_next_context(context); + } + } } vvp_fun_part_pv::vvp_fun_part_pv(unsigned b, unsigned w, unsigned v) @@ -94,7 +195,8 @@ vvp_fun_part_pv::~vvp_fun_part_pv() { } -void vvp_fun_part_pv::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_part_pv::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) { assert(port.port() == 0); @@ -106,7 +208,7 @@ void vvp_fun_part_pv::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) } assert(bit.size() == wid_); - vvp_send_vec4_pv(port.ptr()->out, bit, base_, wid_, vwid_); + vvp_send_vec4_pv(port.ptr()->out, bit, base_, wid_, vwid_, context); } void vvp_fun_part_pv::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) @@ -125,7 +227,7 @@ void vvp_fun_part_pv::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) } vvp_fun_part_var::vvp_fun_part_var(unsigned w) -: base_(0), wid_(w) +: wid_(w) { } @@ -133,18 +235,20 @@ vvp_fun_part_var::~vvp_fun_part_var() { } -void vvp_fun_part_var::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +bool vvp_fun_part_var::recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned&base, vvp_vector4_t&source, + vvp_vector4_t&ref) { unsigned long tmp; switch (port.port()) { case 0: - source_ = bit; + source = bit; break; case 1: tmp = ULONG_MAX; vector4_to_value(bit, tmp); - if (tmp == base_) return; - base_ = tmp; + if (tmp == base) return false; + base = tmp; break; default: fprintf(stderr, "Unsupported port type %d.\n", port.port()); @@ -155,21 +259,40 @@ void vvp_fun_part_var::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) vvp_vector4_t res (wid_); for (unsigned idx = 0 ; idx < wid_ ; idx += 1) { - unsigned adr = base_+idx; - if (adr >= source_.size()) + unsigned adr = base+idx; + if (adr >= source.size()) break; - res.set_bit(idx, source_.value(adr)); + res.set_bit(idx, source.value(adr)); } - if (! ref_.eeq(res)) { - ref_ = res; - vvp_send_vec4(port.ptr()->out, res); + if (! 
ref.eeq(res)) { + ref = res; + return true; + } + return false; +} + +vvp_fun_part_var_sa::vvp_fun_part_var_sa(unsigned w) +: vvp_fun_part_var(w), base_(0) +{ +} + +vvp_fun_part_var_sa::~vvp_fun_part_var_sa() +{ +} + +void vvp_fun_part_var_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + if (recv_vec4_(port, bit, base_, source_, ref_)) { + vvp_send_vec4(port.ptr()->out, ref_, 0); } } -void vvp_fun_part_var::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) +void vvp_fun_part_var_sa::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { assert(bit.size() == wid); @@ -179,8 +302,86 @@ void vvp_fun_part_var::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, assert(tmp.size() == vwid); tmp.set_vec(base, bit); - recv_vec4(port, tmp); + recv_vec4(port, tmp, 0); +} +struct vvp_fun_part_var_state_s { + vvp_fun_part_var_state_s() : base(0) { } + + unsigned base; + vvp_vector4_t source; + vvp_vector4_t ref; +}; + +vvp_fun_part_var_aa::vvp_fun_part_var_aa(unsigned w) +: vvp_fun_part_var(w) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_part_var_aa::~vvp_fun_part_var_aa() +{ +} + +void vvp_fun_part_var_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new vvp_fun_part_var_state_s); +} + +void vvp_fun_part_var_aa::reset_instance(vvp_context_t context) +{ + vvp_fun_part_var_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + state->base = 0; + state->source.set_to_x(); + state->ref.set_to_x(); +} + +void vvp_fun_part_var_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + vvp_fun_part_var_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + if (recv_vec4_(port, bit, state->base, state->source, state->ref)) { + vvp_send_vec4(port.ptr()->out, state->ref, context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} + +void vvp_fun_part_var_aa::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context) +{ + if (context) { + vvp_fun_part_var_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + assert(bit.size() == wid); + + vvp_vector4_t tmp = state->source; + if (tmp.size() == 0) + tmp = vvp_vector4_t(vwid); + + assert(tmp.size() == vwid); + tmp.set_vec(base, bit); + recv_vec4(port, tmp, context); + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } } /* @@ -201,7 +402,12 @@ void link_node_1(char*label, char*source, vvp_net_fun_t*fun) void compile_part_select(char*label, char*source, unsigned base, unsigned wid) { - vvp_fun_part*fun = new vvp_fun_part(base, wid); + vvp_fun_part*fun = 0; + if (vpip_peek_current_scope()->is_automatic) { + fun = new vvp_fun_part_aa(base, wid); + } else { + fun = new vvp_fun_part_sa(base, wid); + } link_node_1(label, source, fun); } @@ -216,7 +422,12 @@ void compile_part_select_pv(char*label, char*source, void compile_part_select_var(char*label, char*source, char*var, unsigned wid) { - vvp_fun_part_var*fun = new vvp_fun_part_var(wid); + vvp_fun_part_var*fun = 0; + if 
(vpip_peek_current_scope()->is_automatic) { + fun = new vvp_fun_part_var_aa(wid); + } else { + fun = new vvp_fun_part_var_sa(wid); + } vvp_net_t*net = new vvp_net_t; net->fun = fun; @@ -226,4 +437,3 @@ void compile_part_select_var(char*label, char*source, char*var, input_connect(net, 0, source); input_connect(net, 1, var); } - diff --git a/vvp/part.h b/vvp/part.h index 3fdffd886..b6ff2bf10 100644 --- a/vvp/part.h +++ b/vvp/part.h @@ -27,32 +27,73 @@ * select starts. Input 2, which is typically constant, is the width * of the result. */ -class vvp_fun_part : public vvp_net_fun_t, private vvp_gen_event_s { +class vvp_fun_part : public vvp_net_fun_t { public: vvp_fun_part(unsigned base, unsigned wid); ~vvp_fun_part(); + protected: + unsigned base_; + unsigned wid_; +}; + +/* + * Statically allocated vvp_fun_part. + */ +class vvp_fun_part_sa : public vvp_fun_part, public vvp_gen_event_s { + public: - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + vvp_fun_part_sa(unsigned base, unsigned wid); + ~vvp_fun_part_sa(); + + public: + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned, unsigned, unsigned); + unsigned, unsigned, unsigned, + vvp_context_t); private: void run_run(); private: - unsigned base_; - unsigned wid_; vvp_vector4_t val_; vvp_net_t*net_; }; +/* + * Automatically allocated vvp_fun_part. + */ +class vvp_fun_part_aa : public vvp_fun_part, public automatic_hooks_s { + + public: + vvp_fun_part_aa(unsigned base, unsigned wid); + ~vvp_fun_part_aa(); + + public: + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned, unsigned, unsigned, + vvp_context_t context); + + private: + struct __vpiScope*context_scope_; + unsigned context_idx_; +}; + /* vvp_fun_part_pv * This node takes a vector input and turns it into the part select of * a wider output network. It used the recv_vec4_pv methods of the - * destination nodes to propagate the part select. + * destination nodes to propagate the part select. It can be used in + * both statically and automatically allocated scopes, as it has no + * dynamic state. */ class vvp_fun_part_pv : public vvp_net_fun_t { @@ -61,7 +102,9 @@ class vvp_fun_part_pv : public vvp_net_fun_t { ~vvp_fun_part_pv(); public: - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); private: @@ -81,18 +124,61 @@ class vvp_fun_part_var : public vvp_net_fun_t { explicit vvp_fun_part_var(unsigned wid); ~vvp_fun_part_var(); + protected: + bool recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned&base, vvp_vector4_t&source, + vvp_vector4_t&ref); + + unsigned wid_; +}; + +/* + * Statically allocated vvp_fun_part_var. 
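The shared recv_vec4_() helper defined back in part.cc is what both subclasses declared here delegate to: port 1 supplies the base index, port 0 the source vector, and a new slice is only propagated when it differs from the last one sent. A minimal sketch of that slice-on-change logic; taking the select as a plain integer is a simplification of the vector-to-value conversion the real code performs.

// Sketch of the variable part-select: port 1 supplies the base index,
// port 0 the source vector; output only changes when the slice changes.
#include <cstdio>
#include <vector>

enum bit4 { B0, B1, BX, BZ };          // illustrative encoding

struct VarPartSelect {
      unsigned wid;
      unsigned base;
      std::vector<bit4> source;        // last value seen on port 0
      std::vector<bit4> ref;           // last slice that was sent out

      // Returns true when a new slice needs to be propagated.
      bool recv(unsigned port, const std::vector<bit4>&bit, unsigned sel)
      {
            if (port == 0)
                  source = bit;
            else {
                  if (sel == base) return false;   // select unchanged
                  base = sel;
            }

            std::vector<bit4> res(wid, BX);
            for (unsigned idx = 0 ; idx < wid ; idx += 1) {
                  unsigned adr = base + idx;
                  if (adr >= source.size()) break; // past the end: stay X
                  res[idx] = source[adr];
            }

            if (res == ref) return false;          // no visible change
            ref = res;
            return true;
      }
};

int main()
{
      VarPartSelect ps = { 2, 0, {}, {} };
      printf("data: %d\n", ps.recv(0, { B1, B0, B1, B1 }, 0)); // new slice
      printf("sel:  %d\n", ps.recv(1, {}, 2));                 // base moves
      return 0;
}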
+ */ +class vvp_fun_part_var_sa : public vvp_fun_part_var { + public: - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + explicit vvp_fun_part_var_sa(unsigned wid); + ~vvp_fun_part_var_sa(); + + public: + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned, unsigned, unsigned); + unsigned, unsigned, unsigned, + vvp_context_t); private: unsigned base_; - unsigned wid_; vvp_vector4_t source_; // Save the last output, for detecting change. vvp_vector4_t ref_; }; +/* + * Automatically allocated vvp_fun_part_var. + */ +class vvp_fun_part_var_aa : public vvp_fun_part_var, public automatic_hooks_s { + + public: + explicit vvp_fun_part_var_aa(unsigned wid); + ~vvp_fun_part_var_aa(); + + public: + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned, unsigned, unsigned, + vvp_context_t context); + + private: + struct __vpiScope*context_scope_; + unsigned context_idx_; +}; + #endif diff --git a/vvp/reduce.cc b/vvp/reduce.cc index 238193845..0ed283daf 100644 --- a/vvp/reduce.cc +++ b/vvp/reduce.cc @@ -34,7 +34,8 @@ * All the reduction operations take a single vector input and produce * a scalar result. The vvp_reduce_base class codifies these general * characteristics, leaving only the calculation of the result for the - * base class. + * base class. This can be used in both statically and automatically + * allocated scopes, as bits_ is only used for temporary storage. */ class vvp_reduce_base : public vvp_net_fun_t { @@ -42,9 +43,11 @@ class vvp_reduce_base : public vvp_net_fun_t { vvp_reduce_base(); virtual ~vvp_reduce_base(); - void recv_vec4(vvp_net_ptr_t prt, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t prt, const vvp_vector4_t&bit, + vvp_context_t context); void recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context); virtual vvp_bit4_t calculate_result() const =0; @@ -60,16 +63,18 @@ vvp_reduce_base::~vvp_reduce_base() { } -void vvp_reduce_base::recv_vec4(vvp_net_ptr_t prt, const vvp_vector4_t&bit) +void vvp_reduce_base::recv_vec4(vvp_net_ptr_t prt, const vvp_vector4_t&bit, + vvp_context_t context) { bits_ = bit; vvp_bit4_t res = calculate_result(); vvp_vector4_t rv (1, res); - vvp_send_vec4(prt.ptr()->out, rv); + vvp_send_vec4(prt.ptr()->out, rv, context); } void vvp_reduce_base::recv_vec4_pv(vvp_net_ptr_t prt, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context) { if (bits_.size() == 0) { bits_ = vvp_vector4_t(vwid); @@ -80,7 +85,7 @@ void vvp_reduce_base::recv_vec4_pv(vvp_net_ptr_t prt, const vvp_vector4_t&bit, bits_.set_vec(base, bit); vvp_bit4_t res = calculate_result(); vvp_vector4_t rv (1, res); - vvp_send_vec4(prt.ptr()->out, rv); + vvp_send_vec4(prt.ptr()->out, rv, context); } class vvp_reduce_and : public vvp_reduce_base { diff --git a/vvp/resolv.cc b/vvp/resolv.cc index 3b779a093..4919a1223 100644 --- a/vvp/resolv.cc +++ b/vvp/resolv.cc @@ -35,13 +35,15 @@ resolv_functor::~resolv_functor() { } -void resolv_functor::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void resolv_functor::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + 
vvp_context_t) { recv_vec8(port, vvp_vector8_t(bit, 6,6 /* STRONG */)); } void resolv_functor::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { assert(bit.size() == wid); vvp_vector4_t res (vwid); @@ -55,7 +57,7 @@ void resolv_functor::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, for (unsigned idx = base+wid ; idx < vwid ; idx += 1) res.set_bit(idx, BIT4_Z); - recv_vec4(port, res); + recv_vec4(port, res, 0); } void resolv_functor::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) @@ -123,7 +125,8 @@ resolv_wired_logic::~resolv_wired_logic() { } -void resolv_wired_logic::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void resolv_wired_logic::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { unsigned pdx = port.port(); vvp_net_t*ptr = port.ptr(); @@ -143,7 +146,7 @@ void resolv_wired_logic::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) out = wired_logic_math_(out, val_[idx]); } - vvp_send_vec4(ptr->out, out); + vvp_send_vec4(ptr->out, out, 0); } vvp_vector4_t resolv_triand::wired_logic_math_(vvp_vector4_t&a, vvp_vector4_t&b) diff --git a/vvp/resolv.h b/vvp/resolv.h index 4f0e683c6..1766f6ea3 100644 --- a/vvp/resolv.h +++ b/vvp/resolv.h @@ -40,11 +40,13 @@ class resolv_functor : public vvp_net_fun_t { explicit resolv_functor(vvp_scalar_t hiz_value, const char* debug =0); ~resolv_functor(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t); void recv_vec8_pv(vvp_net_ptr_t port, const vvp_vector8_t&bit, unsigned base, unsigned wid, unsigned vwid); @@ -62,7 +64,8 @@ class resolv_wired_logic : public vvp_net_fun_t { explicit resolv_wired_logic(void); ~resolv_wired_logic(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); protected: virtual vvp_vector4_t wired_logic_math_(vvp_vector4_t&a, vvp_vector4_t&b) =0; diff --git a/vvp/schedule.cc b/vvp/schedule.cc index 50944c695..27bacc263 100644 --- a/vvp/schedule.cc +++ b/vvp/schedule.cc @@ -141,9 +141,9 @@ void assign_vector4_event_s::run_run(void) { count_assign_events += 1; if (vwid > 0) - vvp_send_vec4_pv(ptr, val, base, val.size(), vwid); + vvp_send_vec4_pv(ptr, val, base, val.size(), vwid, 0); else - vvp_send_vec4(ptr, val); + vvp_send_vec4(ptr, val, 0); } static const size_t ASSIGN4_CHUNK_COUNT = 524288 / sizeof(struct assign_vector4_event_s); @@ -205,7 +205,7 @@ struct assign_real_event_s : public event_s { void assign_real_event_s::run_run(void) { count_assign_events += 1; - vvp_send_real(ptr, val); + vvp_send_real(ptr, val, 0); } static const size_t ASSIGNR_CHUNK_COUNT = 8192 / sizeof(struct assign_real_event_s); diff --git a/vvp/ufunc.cc b/vvp/ufunc.cc index caa7d78de..f7b0b01f2 100644 --- a/vvp/ufunc.cc +++ b/vvp/ufunc.cc @@ -17,6 +17,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ +# include "vvp_net.h" # include "compile.h" # include "symbols.h" # include "codes.h" @@ -61,15 +62,15 @@ ufunc_core::~ufunc_core() * input variables of the function for execution. 
The method copies * the input values collected by the core to the variables. */ -void ufunc_core::assign_bits_to_ports(void) +void ufunc_core::assign_bits_to_ports(vvp_context_t context) { for (unsigned idx = 0 ; idx < port_count() ; idx += 1) { vvp_net_t*net = ports_[idx]; vvp_net_ptr_t pp (net, 0); if (vvp_fun_signal_real*tmp = dynamic_cast(net->fun)) - tmp->recv_real(pp, value_r(idx)); + tmp->recv_real(pp, value_r(idx), context); if (vvp_fun_signal_vec*tmp = dynamic_cast(net->fun)) - tmp->recv_vec4(pp, value(idx)); + tmp->recv_vec4(pp, value(idx), context); } } diff --git a/vvp/ufunc.h b/vvp/ufunc.h index 28b88e9cd..1d7feebfc 100644 --- a/vvp/ufunc.h +++ b/vvp/ufunc.h @@ -61,7 +61,7 @@ class ufunc_core : public vvp_wide_fun_core { struct __vpiScope*call_scope() { return call_scope_; } struct __vpiScope*func_scope() { return func_scope_; } - void assign_bits_to_ports(void); + void assign_bits_to_ports(vvp_context_t context); void finish_thread(vthread_t thr); private: diff --git a/vvp/vpi_callback.cc b/vvp/vpi_callback.cc index a422399fa..c99cf3b03 100644 --- a/vvp/vpi_callback.cc +++ b/vvp/vpi_callback.cc @@ -569,7 +569,7 @@ void vvp_vpi_callback_wordable::attach_as_word(vvp_array_t arr, unsigned long ad array_word_ = addr; } -void vvp_fun_signal::get_value(struct t_vpi_value*vp) +void vvp_fun_signal4::get_value(struct t_vpi_value*vp) { switch (vp->format) { case vpiScalarVal: diff --git a/vvp/vpi_event.cc b/vvp/vpi_event.cc index bd99a7397..bb0385603 100644 --- a/vvp/vpi_event.cc +++ b/vvp/vpi_event.cc @@ -27,6 +27,21 @@ # include # include +static int named_event_get(int code, vpiHandle ref) +{ + assert((ref->vpi_type->type_code==vpiNamedEvent)); + + struct __vpiNamedEvent*obj = (struct __vpiNamedEvent*)ref; + + switch (code) { + + case vpiAutomatic: + return (int) obj->scope->is_automatic; + } + + return 0; +} + static char* named_event_get_str(int code, vpiHandle ref) { assert((ref->vpi_type->type_code==vpiNamedEvent)); @@ -57,7 +72,7 @@ static vpiHandle named_event_get_handle(int code, vpiHandle ref) static const struct __vpirt vpip_named_event_rt = { vpiNamedEvent, - 0, + named_event_get, named_event_get_str, 0, 0, @@ -122,4 +137,3 @@ void vpip_run_named_event_callbacks(vpiHandle ref) } } } - diff --git a/vvp/vpi_priv.h b/vvp/vpi_priv.h index e2b0634a7..c88197bca 100644 --- a/vvp/vpi_priv.h +++ b/vvp/vpi_priv.h @@ -183,8 +183,10 @@ struct __vpiScope { /* Keep an array of items to be automatically allocated */ struct automatic_hooks_s**item; unsigned nitem; + /* Keep a list of live contexts. */ + vvp_context_t live_contexts; /* Keep a list of freed contexts. */ - vvp_context_t free_context; + vvp_context_t free_contexts; /* Keep a list of threads in the scope. */ vthread_t threads; signed int time_units :8; @@ -193,7 +195,9 @@ struct __vpiScope { extern struct __vpiScope* vpip_peek_current_scope(void); extern void vpip_attach_to_current_scope(vpiHandle obj); -extern void vpip_add_item_to_current_scope(automatic_hooks_s*item); +extern struct __vpiScope* vpip_peek_context_scope(void); +extern unsigned vpip_add_item_to_context(automatic_hooks_s*item, + struct __vpiScope*scope); extern vpiHandle vpip_make_root_iterator(void); extern void vpip_make_root_iterator(struct __vpiHandle**&table, unsigned&ntable); @@ -219,6 +223,7 @@ struct __vpiSignal { unsigned signed_flag : 1; unsigned isint_ : 1; // original type was integer unsigned is_netarray : 1; // This is word of a net array + unsigned is_automatic : 1; /* The represented value is here. 
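The new is_automatic flags are exposed to user code through the standard vpiAutomatic property (added to the named-event, real-variable and signal get methods in this patch). A sketch of a user VPI module that queries it; this illustrates the standard VPI calls only, is not code from the patch, and $show_auto is an invented task name.

// Sketch of user VPI code: $show_auto(<object>, ...) prints whether each
// argument lives in automatically allocated storage. Assumes a standard
// vpi_user.h.
# include "vpi_user.h"

static PLI_INT32 show_auto_calltf(PLI_BYTE8*)
{
      vpiHandle call = vpi_handle(vpiSysTfCall, 0);
      vpiHandle argv = vpi_iterate(vpiArgument, call);
      vpiHandle arg;

      while (argv && (arg = vpi_scan(argv)) != 0) {
            vpi_printf((char*)"%s: vpiAutomatic = %d\n",
                       vpi_get_str(vpiFullName, arg),
                       (int) vpi_get(vpiAutomatic, arg));
      }
      return 0;
}

static void show_auto_register(void)
{
      s_vpi_systf_data tf_data;

      tf_data.type      = vpiSysTask;
      tf_data.sysfunctype = 0;
      tf_data.tfname    = (char*)"$show_auto";
      tf_data.calltf    = show_auto_calltf;
      tf_data.compiletf = 0;
      tf_data.sizetf    = 0;
      tf_data.user_data = 0;
      vpi_register_systf(&tf_data);
}

void (*vlog_startup_routines[])(void) = { show_auto_register, 0 };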
*/ vvp_net_t*node; }; diff --git a/vvp/vpi_real.cc b/vvp/vpi_real.cc index 4c4dc5f98..6965ed145 100644 --- a/vvp/vpi_real.cc +++ b/vvp/vpi_real.cc @@ -43,6 +43,9 @@ static int real_var_get(int code, vpiHandle ref) case vpiLineNo: return 0; // Not implemented for now! + + case vpiAutomatic: + return (int) rfp->scope->is_automatic; } return 0; @@ -131,11 +134,13 @@ static vpiHandle real_var_put_value(vpiHandle ref, p_vpi_value vp, int) switch (vp->format) { case vpiRealVal: - vvp_send_real(destination, vp->value.real); + vvp_send_real(destination, vp->value.real, + vthread_get_wt_context()); break; case vpiIntVal: - vvp_send_real(destination, (double)vp->value.integer); + vvp_send_real(destination, (double)vp->value.integer, + vthread_get_wt_context()); break; default: diff --git a/vvp/vpi_scope.cc b/vvp/vpi_scope.cc index 1a18e0f56..f12103753 100644 --- a/vvp/vpi_scope.cc +++ b/vvp/vpi_scope.cc @@ -316,26 +316,6 @@ static void attach_to_scope_(struct __vpiScope*scope, vpiHandle obj) scope->intern[idx] = obj; } -static void add_item_to_scope_(struct __vpiScope*scope, automatic_hooks_s*item) -{ - assert(scope); - - // there is no need to record items for static scopes - if (!scope->is_automatic) return; - - unsigned idx = scope->nitem++; - item->context_idx = 1 + idx; - - if (scope->item == 0) - scope->item = (automatic_hooks_s**) - malloc(sizeof(automatic_hooks_s*)); - else - scope->item = (automatic_hooks_s**) - realloc(scope->item, sizeof(automatic_hooks_s*)*scope->nitem); - - scope->item[idx] = item; -} - /* * When the compiler encounters a scope declaration, this function * creates and initializes a __vpiScope object with the requested name @@ -392,7 +372,8 @@ compile_scope_decl(char*label, char*type, char*name, const char*tname, scope->nintern = 0; scope->item = 0; scope->nitem = 0; - scope->free_context = 0; + scope->live_contexts = 0; + scope->free_contexts = 0; scope->threads = 0; current_scope = scope; @@ -415,6 +396,10 @@ compile_scope_decl(char*label, char*type, char*name, const char*tname, scope->time_units = sp->time_units; scope->time_precision = sp->time_precision; + /* Scopes within automatic scopes are themselves automatic. */ + if (sp->is_automatic) + scope->is_automatic = true; + } else { scope->scope = 0x0; @@ -458,7 +443,38 @@ void vpip_attach_to_current_scope(vpiHandle obj) attach_to_scope_(current_scope, obj); } -void vpip_add_item_to_current_scope(automatic_hooks_s*item) +struct __vpiScope* vpip_peek_context_scope(void) { - add_item_to_scope_(current_scope, item); + struct __vpiScope*scope = current_scope; + + /* A context is allocated for each automatic task or function. + Storage for nested scopes (named blocks) is allocated in + the parent context. */ + while (scope->scope && scope->scope->is_automatic) + scope = scope->scope; + + return scope; } + +unsigned vpip_add_item_to_context(automatic_hooks_s*item, + struct __vpiScope*scope) +{ + assert(scope); + assert(scope->is_automatic); + + unsigned idx = scope->nitem++; + + if (scope->item == 0) + scope->item = (automatic_hooks_s**) + malloc(sizeof(automatic_hooks_s*)); + else + scope->item = (automatic_hooks_s**) + realloc(scope->item, sizeof(automatic_hooks_s*)*scope->nitem); + + scope->item[idx] = item; + + /* Offset the context index by 2 to leave space for the list links. 
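That comment fixes the context memory layout the rest of the patch relies on: a context is a block of item slots whose first two entries are reserved for links (one for the scope's live/free context lists, one for the per-thread context stacks), so per-item storage starts at index 2, hence the returned 2 + idx. A standalone model of that layout; the helper names and which reserved slot holds which link are assumptions, since the real accessors live in vvp_net.h/vvp_net.cc.

// Standalone model of a context as an array of item slots, with the first
// two slots reserved for links and items stored from index 2 upward.
#include <cstdio>
#include <cstdlib>

typedef void**Context;                 // slot array; stand-in for vvp_context_t

enum { SLOT_NEXT = 0, SLOT_STACK = 1, SLOT_ITEM_BASE = 2 };

static Context context_new(unsigned nitems)
{
      return (Context) calloc(SLOT_ITEM_BASE + nitems, sizeof(void*));
}

static void context_set_item(Context ctx, unsigned idx, void*item)
{
      ctx[idx] = item;                 // idx is already offset past the links
}

static void* context_get_item(Context ctx, unsigned idx)
{
      return ctx[idx];
}

int main()
{
      // A scope with one registered item hands that item index 2.
      unsigned context_idx = SLOT_ITEM_BASE + 0;

      Context ctx = context_new(1);
      int value = 7;
      context_set_item(ctx, context_idx, &value);
      printf("item at %u -> %d\n", context_idx,
             *(int*) context_get_item(ctx, context_idx));
      free(ctx);
      return 0;
}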
*/ + return 2 + idx; +} + + diff --git a/vvp/vpi_signal.cc b/vvp/vpi_signal.cc index 02cd3dab0..575372434 100644 --- a/vvp/vpi_signal.cc +++ b/vvp/vpi_signal.cc @@ -531,6 +531,8 @@ static int signal_get(int code, vpiHandle ref) case vpiLeftRange: return rfp->msb; case vpiRightRange: return rfp->lsb; + case vpiAutomatic: return rfp->is_automatic; + case _vpiNexusId: if (rfp->msb == rfp->lsb) return (int) (unsigned long) rfp->node; @@ -765,7 +767,7 @@ static vpiHandle signal_put_value(vpiHandle ref, s_vpi_value*vp, int flags) port-0. This is the port where signals receive input. */ vvp_net_ptr_t destination (rfp->node, dest_port); - vvp_send_vec4(destination, val); + vvp_send_vec4(destination, val, vthread_get_wt_context()); return ref; } @@ -861,6 +863,7 @@ vpiHandle vpip_make_int(const char*name, int msb, int lsb, vvp_net_t*vec) struct __vpiSignal*rfp = (struct __vpiSignal*)obj; obj->vpi_type = &vpip_reg_rt; rfp->isint_ = true; + rfp->is_automatic = vpip_peek_current_scope()->is_automatic; return obj; } @@ -871,7 +874,9 @@ vpiHandle vpip_make_reg(const char*name, int msb, int lsb, bool signed_flag, vvp_net_t*vec) { vpiHandle obj = vpip_make_net(name, msb,lsb, signed_flag, vec); + struct __vpiSignal*rfp = (struct __vpiSignal*)obj; obj->vpi_type = &vpip_reg_rt; + rfp->is_automatic = vpip_peek_current_scope()->is_automatic; return obj; } @@ -910,6 +915,7 @@ vpiHandle vpip_make_net(const char*name, int msb, int lsb, obj->signed_flag = signed_flag? 1 : 0; obj->isint_ = 0; obj->is_netarray = 0; + obj->is_automatic = vpip_peek_current_scope()->is_automatic; obj->node = node; // Place this object within a scope. If this object is @@ -1100,9 +1106,10 @@ static vpiHandle PV_put_value(vpiHandle ref, p_vpi_value vp, int) vvp_net_ptr_t dest(rfp->net, 0); if (full_sig) { - vvp_send_vec4(dest, val); + vvp_send_vec4(dest, val, vthread_get_wt_context()); } else { - vvp_send_vec4_pv(dest, val, base, width, sig_size); + vvp_send_vec4_pv(dest, val, base, width, sig_size, + vthread_get_wt_context()); } return 0; diff --git a/vvp/vpi_tasks.cc b/vvp/vpi_tasks.cc index 403125406..6e6fd2638 100644 --- a/vvp/vpi_tasks.cc +++ b/vvp/vpi_tasks.cc @@ -403,7 +403,7 @@ static vpiHandle sysfunc_put_4net_value(vpiHandle ref, p_vpi_value vp, int) assert(0); } - vvp_send_vec4(rfp->fnet->out, val); + vvp_send_vec4(rfp->fnet->out, val, vthread_get_wt_context()); return 0; } @@ -427,7 +427,7 @@ static vpiHandle sysfunc_put_rnet_value(vpiHandle ref, p_vpi_value vp, int) assert(0); } - vvp_send_real(rfp->fnet->out, val); + vvp_send_real(rfp->fnet->out, val, vthread_get_wt_context()); return 0; } diff --git a/vvp/vthread.cc b/vvp/vthread.cc index 4a0b89249..65a5a4250 100644 --- a/vvp/vthread.cc +++ b/vvp/vthread.cc @@ -303,15 +303,16 @@ static void multiply_array_imm(unsigned long*res, unsigned long*val, /* * Allocate a context for use by a child thread. By preference, use - * the last freed context. If none available, create a new one. + * the last freed context. If none available, create a new one. Add + * it to the list of live contexts in that scope. 
*/ -static vvp_context_t vthread_alloc_context(__vpiScope*scope) +static vvp_context_t vthread_alloc_context(struct __vpiScope*scope) { assert(scope->is_automatic); - vvp_context_t context = scope->free_context; + vvp_context_t context = scope->free_contexts; if (context) { - scope->free_context = vvp_get_next_context(context); + scope->free_contexts = vvp_get_next_context(context); for (unsigned idx = 0 ; idx < scope->nitem ; idx += 1) { scope->item[idx]->reset_instance(context); } @@ -322,20 +323,35 @@ static vvp_context_t vthread_alloc_context(__vpiScope*scope) } } + vvp_set_next_context(context, scope->live_contexts); + scope->live_contexts = context; + return context; } /* * Free a context previously allocated to a child thread by pushing it - * onto the freed context stack. + * onto the freed context stack. Remove it from the list of live contexts + * in that scope. */ -static void vthread_free_context(vvp_context_t context, __vpiScope*scope) +static void vthread_free_context(vvp_context_t context, struct __vpiScope*scope) { assert(scope->is_automatic); assert(context); - vvp_set_next_context(context, scope->free_context); - scope->free_context = context; + if (context == scope->live_contexts) { + scope->live_contexts = vvp_get_next_context(context); + } else { + vvp_context_t tmp = scope->live_contexts; + while (context != vvp_get_next_context(tmp)) { + assert(tmp); + tmp = vvp_get_next_context(tmp); + } + vvp_set_next_context(tmp, vvp_get_next_context(context)); + } + + vvp_set_next_context(context, scope->free_contexts); + scope->free_contexts = context; } /* @@ -530,6 +546,22 @@ void vthread_schedule_list(vthread_t thr) schedule_vthread(thr, 0); } +vvp_context_t vthread_get_wt_context() +{ + if (running_thread) + return running_thread->wt_context; + else + return 0; +} + +vvp_context_t vthread_get_rd_context() +{ + if (running_thread) + return running_thread->rd_context; + else + return 0; +} + vvp_context_item_t vthread_get_wt_context_item(unsigned context_idx) { assert(running_thread && running_thread->wt_context); @@ -557,7 +589,7 @@ bool of_ALLOC(vthread_t thr, vvp_code_t cp) vvp_context_t child_context = vthread_alloc_context(cp->scope); /* Push the allocated context onto the write context stack. */ - vvp_set_next_context(child_context, thr->wt_context); + vvp_set_stacked_context(child_context, thr->wt_context); thr->wt_context = child_context; return true; @@ -1205,7 +1237,7 @@ bool of_CASSIGN_V(vthread_t thr, vvp_code_t cp) /* set the value into port 1 of the destination. */ vvp_net_ptr_t ptr (net, 1); - vvp_send_vec4(ptr, value); + vvp_send_vec4(ptr, value, 0); return true; } @@ -1217,7 +1249,7 @@ bool of_CASSIGN_WR(vthread_t thr, vvp_code_t cp) /* Set the value into port 1 of the destination. */ vvp_net_ptr_t ptr (net, 1); - vvp_send_real(ptr, value); + vvp_send_real(ptr, value, 0); return true; } @@ -1251,7 +1283,7 @@ bool of_CASSIGN_X0(vthread_t thr, vvp_code_t cp) vvp_vector4_t vector = vthread_bits_to_vector(thr, base, wid); vvp_net_ptr_t ptr (net, 1); - vvp_send_vec4_pv(ptr, vector, index, wid, sig->size()); + vvp_send_vec4_pv(ptr, vector, index, wid, sig->size(), 0); return true; } @@ -2315,7 +2347,7 @@ bool of_FORCE_V(vthread_t thr, vvp_code_t cp) /* Set the value into port 2 of the destination. */ vvp_net_ptr_t ptr (net, 2); - vvp_send_vec4(ptr, value); + vvp_send_vec4(ptr, value, 0); return true; } @@ -2327,7 +2359,7 @@ bool of_FORCE_WR(vthread_t thr, vvp_code_t cp) /* Set the value into port 2 of the destination. 
*/ vvp_net_ptr_t ptr (net, 2); - vvp_send_real(ptr, value); + vvp_send_real(ptr, value, 0); return true; } @@ -2362,7 +2394,7 @@ bool of_FORCE_X0(vthread_t thr, vvp_code_t cp) vvp_vector4_t vector = vthread_bits_to_vector(thr, base, wid); vvp_net_ptr_t ptr (net, 2); - vvp_send_vec4_pv(ptr, vector, index, wid, sig->size()); + vvp_send_vec4_pv(ptr, vector, index, wid, sig->size(), 0); return true; } @@ -2410,7 +2442,7 @@ bool of_FREE(vthread_t thr, vvp_code_t cp) { /* Pop the child context from the read context stack. */ vvp_context_t child_context = thr->rd_context; - thr->rd_context = vvp_get_next_context(child_context); + thr->rd_context = vvp_get_stacked_context(child_context); /* Free the context. */ vthread_free_context(child_context, cp->scope); @@ -2701,10 +2733,10 @@ bool of_JOIN(vthread_t thr, vvp_code_t cp) if (thr->wt_context != thr->rd_context) { /* Pop the child context from the write context stack. */ vvp_context_t child_context = thr->wt_context; - thr->wt_context = vvp_get_next_context(child_context); + thr->wt_context = vvp_get_stacked_context(child_context); /* Push the child context onto the read context stack */ - vvp_set_next_context(child_context, thr->rd_context); + vvp_set_stacked_context(child_context, thr->rd_context); thr->rd_context = child_context; } @@ -3944,19 +3976,18 @@ bool of_SET_VEC(vthread_t thr, vvp_code_t cp) /* set the value into port 0 of the destination. */ vvp_net_ptr_t ptr (cp->net, 0); - vvp_send_vec4(ptr, vthread_bits_to_vector(thr, bit, wid)); + vvp_send_vec4(ptr, vthread_bits_to_vector(thr, bit, wid), + thr->wt_context); return true; } bool of_SET_WORDR(vthread_t thr, vvp_code_t cp) { - struct __vpiHandle*tmp = cp->handle; - t_vpi_value val; + /* set the value into port 0 of the destination. */ + vvp_net_ptr_t ptr (cp->net, 0); - val.format = vpiRealVal; - val.value.real = thr->words[cp->bit_idx[0]].w_real; - vpi_put_value(tmp, &val, 0, vpiNoDelay); + vvp_send_real(ptr, thr->words[cp->bit_idx[0]].w_real, thr->wt_context); return true; } @@ -4015,7 +4046,7 @@ bool of_SET_X0(vthread_t thr, vvp_code_t cp) } vvp_net_ptr_t ptr (net, 0); - vvp_send_vec4_pv(ptr, bit_vec, index, wid, sig->size()); + vvp_send_vec4_pv(ptr, bit_vec, index, wid, sig->size(), thr->wt_context); return true; } @@ -4201,18 +4232,10 @@ bool of_WAIT(vthread_t thr, vvp_code_t cp) thr->waiting_for_event = 1; /* Add this thread to the list in the event. */ - vvp_net_fun_t*fun = cp->net->fun; - if (fun->context_idx) { - waitable_state_s*es = static_cast - (vthread_get_wt_context_item(fun->context_idx)); - thr->wait_next = es->threads; - es->threads = thr; - } else { - waitable_hooks_s*ep = dynamic_cast (fun); - assert(ep); - thr->wait_next = ep->threads; - ep->threads = thr; - } + waitable_hooks_s*ep = dynamic_cast (cp->net->fun); + assert(ep); + thr->wait_next = ep->add_waiting_thread(thr); + /* Return false to suspend this thread. */ return false; } @@ -4316,7 +4339,7 @@ bool of_EXEC_UFUNC(vthread_t thr, vvp_code_t cp) /* Copy all the inputs to the ufunc object to the port variables of the function. This copies all the values atomically. */ - cp->ufunc_core_ptr->assign_bits_to_ports(); + cp->ufunc_core_ptr->assign_bits_to_ports(child_context); /* Create a temporary thread and run it immediately. 
A function may not contain any blocking statements, so vthread_run() can diff --git a/vvp/vthread.h b/vvp/vthread.h index f578b4ef8..46999fa57 100644 --- a/vvp/vthread.h +++ b/vvp/vthread.h @@ -62,27 +62,37 @@ extern void vthread_run(vthread_t thr); */ extern void vthread_schedule_list(vthread_t thr); +/* + * This function returns a handle to the writable context of the currently + * running thread. Normally the writable context is the context allocated + * to the scope associated with that thread. However, between executing a + * %alloc instruction and executing the associated %fork instruction, the + * writable context changes to the newly allocated context, thus allowing + * the input parameters of an automatic task or function to be written to + * the task/function local variables. + */ +extern vvp_context_t vthread_get_wt_context(); + +/* + * This function returns a handle to the readable context of the currently + * running thread. Normally the readable context is the context allocated + * to the scope associated with that thread. However, between executing a + * %join instruction and executing the associated %free instruction, the + * readable context changes to the context allocated to the newly joined + * thread, thus allowing the output parameters of an automatic task or + * function to be read from the task/function local variables. + */ +extern vvp_context_t vthread_get_rd_context(); + /* * This function returns a handle to an item in the writable context - * of the currently running thread. Normally the writable context is - * the context allocated to the scope associated with that thread. - * However, between executing a %alloc instruction and executing the - * associated %fork instruction, the writable context changes to the - * newly allocated context, thus allowing the input parameters of an - * automatic task or function to be written to the task/function local - * variables. + * of the currently running thread. */ extern vvp_context_item_t vthread_get_wt_context_item(unsigned context_idx); /* * This function returns a handle to an item in the readable context - * of the currently running thread. Normally the readable context is - * the context allocated to the scope associated with that thread. - * However, between executing a %join instruction and executing the - * associated %free instruction, the readable context changes to the - * context allocated to the newly joined thread, thus allowing the - * output parameters of an automatic task or function to be read from - * the task/function local variables. + * of the currently running thread. 
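/* Illustrative sketch, not part of the patch: a minimal standalone model of
 * how a child context is expected to move between the parent thread's write
 * and read stacks across %alloc, %join and %free, using slot 1 of the
 * context as the stack link.  The fake_thread/demo_* names are invented for
 * this example; only the slot-1 manipulation mirrors the of_ALLOC, of_JOIN
 * and of_FREE changes above. */

#include <stdlib.h>

typedef void** demo_context_t;   /* stands in for vvp_context_t */

static demo_context_t demo_get_stacked(demo_context_t ctx)
{ return (demo_context_t) ctx[1]; }

static void demo_set_stacked(demo_context_t ctx, demo_context_t stack)
{ ctx[1] = stack; }

struct fake_thread {
      demo_context_t wt_context;
      demo_context_t rd_context;
};

/* %alloc: push the freshly allocated child context onto the write stack. */
static void demo_alloc(fake_thread*thr, demo_context_t child)
{
      demo_set_stacked(child, thr->wt_context);
      thr->wt_context = child;
}

/* %join: pop the child from the write stack and push it onto the read
   stack.  (The real of_JOIN only does this when wt_context differs from
   rd_context, i.e. when the joined thread had its own context.) */
static void demo_join(fake_thread*thr)
{
      demo_context_t child = thr->wt_context;
      thr->wt_context = demo_get_stacked(child);
      demo_set_stacked(child, thr->rd_context);
      thr->rd_context = child;
}

/* %free: pop the child from the read stack; the real of_FREE then hands it
   back to the scope with vthread_free_context(). */
static demo_context_t demo_free_ctx(fake_thread*thr)
{
      demo_context_t child = thr->rd_context;
      thr->rd_context = demo_get_stacked(child);
      return child;
}

int main()
{
      fake_thread thr = { 0, 0 };
      /* Two link slots plus one item slot, as vvp_allocate_context(1)
         would reserve. */
      demo_context_t child = (demo_context_t) calloc(3, sizeof(void*));
      demo_alloc(&thr, child);
      demo_join(&thr);
      free(demo_free_ctx(&thr));
      return 0;
}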
*/ extern vvp_context_item_t vthread_get_rd_context_item(unsigned context_idx); diff --git a/vvp/vvp_island.cc b/vvp/vvp_island.cc index 411aadcda..dc0e41c7b 100644 --- a/vvp/vvp_island.cc +++ b/vvp/vvp_island.cc @@ -124,9 +124,11 @@ class vvp_island_port : public vvp_net_fun_t { explicit vvp_island_port(vvp_island*ip); ~vvp_island_port(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); virtual void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t); virtual void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); vvp_vector8_t invalue; @@ -309,13 +311,15 @@ vvp_island_port::~vvp_island_port() { } -void vvp_island_port::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_island_port::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { recv_vec8(port, vvp_vector8_t(bit, 6, 6)); } void vvp_island_port::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { vvp_vector8_t tmp(bit, 6, 6); if (invalue.size()==0) { diff --git a/vvp/vvp_net.cc b/vvp/vvp_net.cc index b998e13b7..9f08f8dd2 100644 --- a/vvp/vvp_net.cc +++ b/vvp/vvp_net.cc @@ -19,6 +19,7 @@ # include "config.h" # include "vvp_net.h" +# include "vpi_priv.h" # include "schedule.h" # include "statistics.h" # include @@ -227,13 +228,13 @@ void vvp_send_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&val) } } -void vvp_send_real(vvp_net_ptr_t ptr, double val) +void vvp_send_real(vvp_net_ptr_t ptr, double val, vvp_context_t context) { while (struct vvp_net_t*cur = ptr.ptr()) { vvp_net_ptr_t next = cur->port[ptr.port()]; if (cur->fun) - cur->fun->recv_real(ptr, val); + cur->fun->recv_real(ptr, val, context); ptr = next; } @@ -1298,12 +1299,65 @@ bool vector4_to_value(const vvp_vector4_t&vec, double&val, bool signed_flag) return flag; } -vvp_vector4array_t::vvp_vector4array_t(unsigned width__, unsigned words__, - bool is_automatic) -: width_(width__), words_(words__), array_(0) +vvp_vector4array_t::vvp_vector4array_t(unsigned width__, unsigned words__) +: width_(width__), words_(words__) { - if (is_automatic) return; +} +vvp_vector4array_t::~vvp_vector4array_t() +{ +} + +void vvp_vector4array_t::set_word_(v4cell*cell, const vvp_vector4_t&that) +{ + assert(that.size_ == width_); + + if (width_ <= vvp_vector4_t::BITS_PER_WORD) { + cell->abits_val_ = that.abits_val_; + cell->bbits_val_ = that.bbits_val_; + return; + } + + unsigned cnt = (width_ + vvp_vector4_t::BITS_PER_WORD-1)/vvp_vector4_t::BITS_PER_WORD; + + if (cell->abits_ptr_ == 0) { + cell->abits_ptr_ = new unsigned long[2*cnt]; + cell->bbits_ptr_ = cell->abits_ptr_ + cnt; + } + + for (unsigned idx = 0 ; idx < cnt ; idx += 1) + cell->abits_ptr_[idx] = that.abits_ptr_[idx]; + for (unsigned idx = 0 ; idx < cnt ; idx += 1) + cell->bbits_ptr_[idx] = that.bbits_ptr_[idx]; +} + +vvp_vector4_t vvp_vector4array_t::get_word_(v4cell*cell) const +{ + if (width_ <= vvp_vector4_t::BITS_PER_WORD) { + vvp_vector4_t res; + res.size_ = width_; + res.abits_val_ = cell->abits_val_; + res.bbits_val_ = cell->bbits_val_; + return res; + } + + vvp_vector4_t res (width_, BIT4_X); + if (cell->abits_ptr_ == 0) + return res; + + unsigned cnt = (width_ + vvp_vector4_t::BITS_PER_WORD-1)/vvp_vector4_t::BITS_PER_WORD; + + for (unsigned idx = 0 ; idx 
< cnt ; idx += 1) + res.abits_ptr_[idx] = cell->abits_ptr_[idx]; + for (unsigned idx = 0 ; idx < cnt ; idx += 1) + res.bbits_ptr_[idx] = cell->bbits_ptr_[idx]; + + return res; +} + +vvp_vector4array_sa::vvp_vector4array_sa(unsigned width__, unsigned words__) +: vvp_vector4array_t(width__, words__) +{ array_ = new v4cell[words_]; if (width_ <= vvp_vector4_t::BITS_PER_WORD) { @@ -1319,7 +1373,7 @@ vvp_vector4array_t::vvp_vector4array_t(unsigned width__, unsigned words__, } } -vvp_vector4array_t::~vvp_vector4array_t() +vvp_vector4array_sa::~vvp_vector4array_sa() { if (array_) { if (width_ > vvp_vector4_t::BITS_PER_WORD) { @@ -1331,7 +1385,38 @@ vvp_vector4array_t::~vvp_vector4array_t() } } -void vvp_vector4array_t::alloc_instance(vvp_context_t context) +void vvp_vector4array_sa::set_word(unsigned index, const vvp_vector4_t&that) +{ + assert(index < words_); + + v4cell*cell = &array_[index]; + + set_word_(cell, that); +} + +vvp_vector4_t vvp_vector4array_sa::get_word(unsigned index) const +{ + if (index >= words_) + return vvp_vector4_t(width_, BIT4_X); + + assert(index < words_); + + v4cell*cell = &array_[index]; + + return get_word_(cell); +} + +vvp_vector4array_aa::vvp_vector4array_aa(unsigned width__, unsigned words__) +: vvp_vector4array_t(width__, words__) +{ + context_idx_ = vpip_add_item_to_context(this, vpip_peek_context_scope()); +} + +vvp_vector4array_aa::~vvp_vector4array_aa() +{ +} + +void vvp_vector4array_aa::alloc_instance(vvp_context_t context) { v4cell*array = new v4cell[words_]; @@ -1347,13 +1432,13 @@ void vvp_vector4array_t::alloc_instance(vvp_context_t context) } } - vvp_set_context_item(context, context_idx, array); + vvp_set_context_item(context, context_idx_, array); } -void vvp_vector4array_t::reset_instance(vvp_context_t context) +void vvp_vector4array_aa::reset_instance(vvp_context_t context) { v4cell*cell = static_cast - (vvp_get_context_item(context, context_idx)); + (vvp_get_context_item(context, context_idx_)); if (width_ <= vvp_vector4_t::BITS_PER_WORD) { for (unsigned idx = 0 ; idx < words_ ; idx += 1) { @@ -1375,72 +1460,27 @@ void vvp_vector4array_t::reset_instance(vvp_context_t context) } } -void vvp_vector4array_t::set_word(unsigned index, const vvp_vector4_t&that) +void vvp_vector4array_aa::set_word(unsigned index, const vvp_vector4_t&that) { assert(index < words_); - assert(that.size_ == width_); - v4cell*cell; - if (context_idx) - cell = static_cast - (vthread_get_wt_context_item(context_idx)) + index; - else - cell = &(array_[index]); + v4cell*cell = static_cast + (vthread_get_wt_context_item(context_idx_)) + index; - if (width_ <= vvp_vector4_t::BITS_PER_WORD) { - cell->abits_val_ = that.abits_val_; - cell->bbits_val_ = that.bbits_val_; - return; - } - - unsigned cnt = (width_ + vvp_vector4_t::BITS_PER_WORD-1)/vvp_vector4_t::BITS_PER_WORD; - - if (cell->abits_ptr_ == 0) { - cell->abits_ptr_ = new unsigned long[2*cnt]; - cell->bbits_ptr_ = cell->abits_ptr_ + cnt; - } - - for (unsigned idx = 0 ; idx < cnt ; idx += 1) - cell->abits_ptr_[idx] = that.abits_ptr_[idx]; - for (unsigned idx = 0 ; idx < cnt ; idx += 1) - cell->bbits_ptr_[idx] = that.bbits_ptr_[idx]; + set_word_(cell, that); } -vvp_vector4_t vvp_vector4array_t::get_word(unsigned index) const +vvp_vector4_t vvp_vector4array_aa::get_word(unsigned index) const { if (index >= words_) return vvp_vector4_t(width_, BIT4_X); assert(index < words_); - v4cell*cell; - if (context_idx) - cell = static_cast - (vthread_get_rd_context_item(context_idx)) + index; - else - cell = &(array_[index]); - - if (width_ 
<= vvp_vector4_t::BITS_PER_WORD) { - vvp_vector4_t res; - res.size_ = width_; - res.abits_val_ = cell->abits_val_; - res.bbits_val_ = cell->bbits_val_; - return res; - } - - vvp_vector4_t res (width_, BIT4_X); - if (cell->abits_ptr_ == 0) - return res; - - unsigned cnt = (width_ + vvp_vector4_t::BITS_PER_WORD-1)/vvp_vector4_t::BITS_PER_WORD; - - for (unsigned idx = 0 ; idx < cnt ; idx += 1) - res.abits_ptr_[idx] = cell->abits_ptr_[idx]; - for (unsigned idx = 0 ; idx < cnt ; idx += 1) - res.bbits_ptr_[idx] = cell->bbits_ptr_[idx]; - - return res; + v4cell*cell = static_cast + (vthread_get_rd_context_item(context_idx_)) + index; + return get_word_(cell); } template T coerce_to_width(const T&that, unsigned width) @@ -2257,15 +2297,17 @@ vvp_net_fun_t::~vvp_net_fun_t() { } -void vvp_net_fun_t::recv_vec4(vvp_net_ptr_t, const vvp_vector4_t&) +void vvp_net_fun_t::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { fprintf(stderr, "internal error: %s: recv_vec4 not implemented\n", typeid(*this).name()); assert(0); } -void vvp_net_fun_t::recv_vec4_pv(vvp_net_ptr_t, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) +void vvp_net_fun_t::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { cerr << "internal error: " << typeid(*this).name() << ": " << "recv_vec4_pv(" << bit << ", " << base @@ -2273,18 +2315,18 @@ void vvp_net_fun_t::recv_vec4_pv(vvp_net_ptr_t, const vvp_vector4_t&bit, assert(0); } +void vvp_net_fun_t::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) +{ + recv_vec4(port, reduce4(bit), 0); +} + void vvp_net_fun_t::recv_vec8_pv(vvp_net_ptr_t port, const vvp_vector8_t&bit, unsigned base, unsigned wid, unsigned vwid) { - recv_vec4_pv(port, reduce4(bit), base, wid, vwid); + recv_vec4_pv(port, reduce4(bit), base, wid, vwid, 0); } -void vvp_net_fun_t::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) -{ - recv_vec4(port, reduce4(bit)); -} - -void vvp_net_fun_t::recv_real(vvp_net_ptr_t, double bit) +void vvp_net_fun_t::recv_real(vvp_net_ptr_t port, double bit, vvp_context_t) { fprintf(stderr, "internal error: %s: recv_real(%f) not implemented\n", typeid(*this).name(), bit); @@ -2320,7 +2362,8 @@ vvp_fun_drive::~vvp_fun_drive() { } -void vvp_fun_drive::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_drive::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { assert(port.port() == 0); vvp_send_vec8(port.ptr()->out, vvp_vector8_t(bit, drive0_, drive1_)); @@ -2416,26 +2459,11 @@ void vvp_fun_signal_base::recv_long_pv(vvp_net_ptr_t ptr, long bit, } } -vvp_fun_signal::vvp_fun_signal(unsigned wid, vvp_bit4_t init) +vvp_fun_signal4_sa::vvp_fun_signal4_sa(unsigned wid, vvp_bit4_t init) : bits4_(wid, init) { } -void vvp_fun_signal::alloc_instance(vvp_context_t context) -{ - unsigned wid = bits4_.size(); - - vvp_set_context_item(context, context_idx, new vvp_vector4_t(wid)); -} - -void vvp_fun_signal::reset_instance(vvp_context_t context) -{ - vvp_vector4_t*bits = static_cast - (vvp_get_context_item(context, context_idx)); - - bits->set_to_x(); -} - /* * Nets simply reflect their input to their output. * @@ -2447,12 +2475,9 @@ void vvp_fun_signal::reset_instance(vvp_context_t context) * herein is to keep a "needs_init_" flag that is turned false after * the first propagation, and forces the first propagation to happen * even if it matches the initial value. 
- * - * Continuous and forced assignments are not permitted on automatic - * variables. So we only need incur the overhead of checking for an - * automatic variable when we are doing a normal unmasked assign. */ -void vvp_fun_signal::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_signal4_sa::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { switch (ptr.port()) { case 0: // Normal input (feed from net, or set from process) @@ -2460,15 +2485,8 @@ void vvp_fun_signal::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) copy the bits, otherwise we need to see if there are any holes in the mask so we can set those bits. */ if (assign_mask_.size() == 0) { - vvp_vector4_t*bits4; - if (context_idx) { - bits4 = static_cast - (vthread_get_wt_context_item(context_idx)); - } else { - bits4 = &bits4_; - } - if (needs_init_ || !bits4->eeq(bit)) { - *bits4 = bit; + if (needs_init_ || !bits4_.eeq(bit)) { + bits4_ = bit; needs_init_ = false; calculate_output_(ptr); } @@ -2514,8 +2532,14 @@ void vvp_fun_signal::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) } } -void vvp_fun_signal::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) +void vvp_fun_signal4_sa::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) +{ + recv_vec4(ptr, reduce4(bit), 0); +} + +void vvp_fun_signal4_sa::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { assert(bit.size() == wid); assert(bits4_.size() == vwid); @@ -2523,16 +2547,9 @@ void vvp_fun_signal::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, switch (ptr.port()) { case 0: // Normal input if (assign_mask_.size() == 0) { - vvp_vector4_t*bits4; - if (context_idx) { - bits4 = static_cast - (vthread_get_wt_context_item(context_idx)); - } else { - bits4 = &bits4_; - } - for (unsigned idx = 0 ; idx < wid ; idx += 1) { - if (base+idx >= bits4->size()) break; - bits4->set_bit(base+idx, bit.value(idx)); + for (unsigned idx = 0 ; idx < wid ; idx += 1) { + if (base+idx >= bits4_.size()) break; + bits4_.set_bit(base+idx, bit.value(idx)); } needs_init_ = false; calculate_output_(ptr); @@ -2586,13 +2603,13 @@ void vvp_fun_signal::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, } } -void vvp_fun_signal::recv_vec8_pv(vvp_net_ptr_t ptr, const vvp_vector8_t&bit, - unsigned base, unsigned wid, unsigned vwid) +void vvp_fun_signal4_sa::recv_vec8_pv(vvp_net_ptr_t ptr, const vvp_vector8_t&bit, + unsigned base, unsigned wid, unsigned vwid) { - recv_vec4_pv(ptr, reduce4(bit), base, wid, vwid); + recv_vec4_pv(ptr, reduce4(bit), base, wid, vwid, 0); } -void vvp_fun_signal::calculate_output_(vvp_net_ptr_t ptr) +void vvp_fun_signal4_sa::calculate_output_(vvp_net_ptr_t ptr) { if (force_mask_.size()) { assert(bits4_.size() == force_mask_.size()); @@ -2602,37 +2619,27 @@ void vvp_fun_signal::calculate_output_(vvp_net_ptr_t ptr) if (force_mask_.value(idx)) bits.set_bit(idx, force_.value(idx)); } - vvp_send_vec4(ptr.ptr()->out, bits); - } else if (context_idx) { - vvp_vector4_t*bits4 = static_cast - (vthread_get_wt_context_item(context_idx)); - vvp_send_vec4(ptr.ptr()->out, *bits4); + vvp_send_vec4(ptr.ptr()->out, bits, 0); } else { - vvp_send_vec4(ptr.ptr()->out, bits4_); + vvp_send_vec4(ptr.ptr()->out, bits4_, 0); } run_vpi_callbacks(); } -void vvp_fun_signal::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) -{ - recv_vec4(ptr, reduce4(bit)); -} - - -void vvp_fun_signal::release(vvp_net_ptr_t ptr, bool net) 
+void vvp_fun_signal4_sa::release(vvp_net_ptr_t ptr, bool net) { force_mask_ = vvp_vector2_t(); if (net) { - vvp_send_vec4(ptr.ptr()->out, bits4_); + vvp_send_vec4(ptr.ptr()->out, bits4_, 0); run_vpi_callbacks(); } else { bits4_ = force_; } } -void vvp_fun_signal::release_pv(vvp_net_ptr_t ptr, bool net, - unsigned base, unsigned wid) +void vvp_fun_signal4_sa::release_pv(vvp_net_ptr_t ptr, bool net, + unsigned base, unsigned wid) { assert(bits4_.size() >= base + wid); @@ -2645,7 +2652,7 @@ void vvp_fun_signal::release_pv(vvp_net_ptr_t ptr, bool net, if (net) calculate_output_(ptr); } -unsigned vvp_fun_signal::size() const +unsigned vvp_fun_signal4_sa::size() const { if (force_mask_.size()) return force_.size(); @@ -2653,33 +2660,25 @@ unsigned vvp_fun_signal::size() const return bits4_.size(); } -vvp_bit4_t vvp_fun_signal::value(unsigned idx) const +vvp_bit4_t vvp_fun_signal4_sa::value(unsigned idx) const { if (force_mask_.size() && force_mask_.value(idx)) { return force_.value(idx); - } else if (context_idx) { - vvp_vector4_t*bits4 = static_cast - (vthread_get_rd_context_item(context_idx)); - return bits4->value(idx); } else { return bits4_.value(idx); } } -vvp_scalar_t vvp_fun_signal::scalar_value(unsigned idx) const +vvp_scalar_t vvp_fun_signal4_sa::scalar_value(unsigned idx) const { if (force_mask_.size() && force_mask_.value(idx)) { return vvp_scalar_t(force_.value(idx), 6, 6); - } else if (context_idx) { - vvp_vector4_t*bits4 = static_cast - (vthread_get_rd_context_item(context_idx)); - return vvp_scalar_t(bits4->value(idx), 6, 6); } else { return vvp_scalar_t(bits4_.value(idx), 6, 6); } } -vvp_vector4_t vvp_fun_signal::vec4_value() const +vvp_vector4_t vvp_fun_signal4_sa::vec4_value() const { if (force_mask_.size()) { assert(bits4_.size() == force_mask_.size()); @@ -2690,21 +2689,117 @@ vvp_vector4_t vvp_fun_signal::vec4_value() const bits.set_bit(idx, force_.value(idx)); } return bits; - } else if (context_idx) { - vvp_vector4_t*bits4 = static_cast - (vthread_get_rd_context_item(context_idx)); - return *bits4; } else { return bits4_; } } +vvp_fun_signal4_aa::vvp_fun_signal4_aa(unsigned wid, vvp_bit4_t init) +{ + context_idx_ = vpip_add_item_to_context(this, vpip_peek_context_scope()); + size_ = wid; +} + +void vvp_fun_signal4_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new vvp_vector4_t(size_)); +} + +void vvp_fun_signal4_aa::reset_instance(vvp_context_t context) +{ + vvp_vector4_t*bits = static_cast + (vvp_get_context_item(context, context_idx_)); + + bits->set_to_x(); +} + +/* + * Continuous and forced assignments are not permitted on automatic + * variables. So we only expect to receive on port 0. 
+ */ +void vvp_fun_signal4_aa::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t context) +{ + assert(ptr.port() == 0); + assert(context); + + vvp_vector4_t*bits4 = static_cast + (vvp_get_context_item(context, context_idx_)); + + if (!bits4->eeq(bit)) { + *bits4 = bit; + vvp_send_vec4(ptr.ptr()->out, *bits4, context); + } +} + +void vvp_fun_signal4_aa::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context) +{ + assert(ptr.port() == 0); + assert(bit.size() == wid); + assert(size_ == vwid); + assert(context); + + vvp_vector4_t*bits4 = static_cast + (vvp_get_context_item(context, context_idx_)); + + for (unsigned idx = 0 ; idx < wid ; idx += 1) { + if (base+idx >= bits4->size()) break; + bits4->set_bit(base+idx, bit.value(idx)); + } + vvp_send_vec4(ptr.ptr()->out, *bits4, context); +} + +void vvp_fun_signal4_aa::release(vvp_net_ptr_t ptr, bool net) +{ + /* Automatic variables can't be forced. */ + assert(0); +} + +void vvp_fun_signal4_aa::release_pv(vvp_net_ptr_t ptr, bool net, + unsigned base, unsigned wid) +{ + /* Automatic variables can't be forced. */ + assert(0); +} + +unsigned vvp_fun_signal4_aa::size() const +{ + return size_; +} + +vvp_bit4_t vvp_fun_signal4_aa::value(unsigned idx) const +{ + vvp_vector4_t*bits4 = static_cast + (vthread_get_rd_context_item(context_idx_)); + + return bits4->value(idx); +} + +vvp_scalar_t vvp_fun_signal4_aa::scalar_value(unsigned idx) const +{ + vvp_vector4_t*bits4 = static_cast + (vthread_get_rd_context_item(context_idx_)); + + return vvp_scalar_t(bits4->value(idx), 6, 6); +} + +vvp_vector4_t vvp_fun_signal4_aa::vec4_value() const +{ + vvp_vector4_t*bits4 = static_cast + (vthread_get_rd_context_item(context_idx_)); + + return *bits4; +} + vvp_fun_signal8::vvp_fun_signal8(unsigned wid) : bits8_(wid) { } -void vvp_fun_signal8::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_signal8::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { recv_vec8(ptr, vvp_vector8_t(bit,6,6)); } @@ -2748,7 +2843,8 @@ void vvp_fun_signal8::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) } void vvp_fun_signal8::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { recv_vec8_pv(ptr, vvp_vector8_t(bit,6,6), base, wid, vwid); } @@ -2879,39 +2975,6 @@ vvp_scalar_t vvp_fun_signal8::scalar_value(unsigned idx) const return bits8_.value(idx); } -vvp_fun_signal_real::vvp_fun_signal_real() -{ - bits_ = 0.0; -} - -void vvp_fun_signal_real::alloc_instance(vvp_context_t context) -{ - double*bits = new double; - *bits = 0.0; - vvp_set_context_item(context, context_idx, bits); -} - -void vvp_fun_signal_real::reset_instance(vvp_context_t context) -{ - double*bits = static_cast - (vvp_get_context_item(context, context_idx)); - - *bits = 0.0; -} - -double vvp_fun_signal_real::real_value() const -{ - if (force_mask_.size()) { - return force_; - } else if (context_idx) { - double*bits = static_cast - (vthread_get_rd_context_item(context_idx)); - return *bits; - } else { - return bits_; - } -} - /* * Testing for equality, we want a bitwise test instead of an * arithmetic test because we want to treat for example -0 different @@ -2922,22 +2985,29 @@ bool bits_equal(double a, double b) return memcmp(&a, &b, sizeof a) == 0; } -void vvp_fun_signal_real::recv_real(vvp_net_ptr_t ptr, double bit) +vvp_fun_signal_real_sa::vvp_fun_signal_real_sa() +{ + 
bits_ = 0.0; +} + +double vvp_fun_signal_real_sa::real_value() const +{ + if (force_mask_.size()) + return force_; + else + return bits_; +} + +void vvp_fun_signal_real_sa::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { switch (ptr.port()) { case 0: if (!continuous_assign_active_) { - double*bits; - if (context_idx) { - bits = static_cast - (vthread_get_wt_context_item(context_idx)); - } else { - bits = &bits_; - } - if (needs_init_ || !bits_equal(*bits,bit)) { - *bits = bit; + if (needs_init_ || !bits_equal(bits_, bit)) { + bits_ = bit; needs_init_ = false; - vvp_send_real(ptr.ptr()->out, bit); + vvp_send_real(ptr.ptr()->out, bit, 0); run_vpi_callbacks(); } } @@ -2946,14 +3016,14 @@ void vvp_fun_signal_real::recv_real(vvp_net_ptr_t ptr, double bit) case 1: // Continuous assign value continuous_assign_active_ = true; bits_ = bit; - vvp_send_real(ptr.ptr()->out, bit); + vvp_send_real(ptr.ptr()->out, bit, 0); run_vpi_callbacks(); break; case 2: // Force value force_mask_ = vvp_vector2_t(1, 1); force_ = bit; - vvp_send_real(ptr.ptr()->out, bit); + vvp_send_real(ptr.ptr()->out, bit, 0); run_vpi_callbacks(); break; @@ -2964,24 +3034,81 @@ void vvp_fun_signal_real::recv_real(vvp_net_ptr_t ptr, double bit) } } -void vvp_fun_signal_real::release(vvp_net_ptr_t ptr, bool net) +void vvp_fun_signal_real_sa::release(vvp_net_ptr_t ptr, bool net) { force_mask_ = vvp_vector2_t(); if (net) { - vvp_send_real(ptr.ptr()->out, bits_); + vvp_send_real(ptr.ptr()->out, bits_, 0); run_vpi_callbacks(); } else { bits_ = force_; } } -void vvp_fun_signal_real::release_pv(vvp_net_ptr_t ptr, bool net, - unsigned base, unsigned wid) +void vvp_fun_signal_real_sa::release_pv(vvp_net_ptr_t ptr, bool net, + unsigned base, unsigned wid) { fprintf(stderr, "Error: cannot take bit/part select of a real value!\n"); assert(0); } +vvp_fun_signal_real_aa::vvp_fun_signal_real_aa() +{ + context_idx_ = vpip_add_item_to_context(this, vpip_peek_context_scope()); +} + +void vvp_fun_signal_real_aa::alloc_instance(vvp_context_t context) +{ + double*bits = new double; + vvp_set_context_item(context, context_idx_, bits); + + *bits = 0.0; +} + +void vvp_fun_signal_real_aa::reset_instance(vvp_context_t context) +{ + double*bits = static_cast + (vvp_get_context_item(context, context_idx_)); + + *bits = 0.0; +} + +double vvp_fun_signal_real_aa::real_value() const +{ + double*bits = static_cast + (vthread_get_rd_context_item(context_idx_)); + + return *bits; +} + +void vvp_fun_signal_real_aa::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t context) +{ + assert(ptr.port() == 0); + assert(context); + + double*bits = static_cast + (vvp_get_context_item(context, context_idx_)); + + if (!bits_equal(*bits,bit)) { + *bits = bit; + vvp_send_real(ptr.ptr()->out, bit, context); + } +} + +void vvp_fun_signal_real_aa::release(vvp_net_ptr_t ptr, bool net) +{ + /* Automatic variables can't be forced. */ + assert(0); +} + +void vvp_fun_signal_real_aa::release_pv(vvp_net_ptr_t ptr, bool net, + unsigned base, unsigned wid) +{ + /* Automatic variables can't be forced. 
*/ + assert(0); +} + /* **** vvp_wide_fun_* methods **** */ vvp_wide_fun_core::vvp_wide_fun_core(vvp_net_t*net, unsigned nports) @@ -3004,7 +3131,7 @@ void vvp_wide_fun_core::propagate_vec4(const vvp_vector4_t&bit, if (delay) schedule_assign_plucked_vector(ptr_->out, delay, bit, 0, bit.size()); else - vvp_send_vec4(ptr_->out, bit); + vvp_send_vec4(ptr_->out, bit, 0); } void vvp_wide_fun_core::propagate_real(double bit, @@ -3014,7 +3141,7 @@ void vvp_wide_fun_core::propagate_real(double bit, // schedule_assign_vector(ptr_->out, bit, delay); assert(0); // Need a real-value version of assign_vector. } else { - vvp_send_real(ptr_->out, bit); + vvp_send_real(ptr_->out, bit, 0); } } @@ -3069,19 +3196,20 @@ vvp_wide_fun_t::~vvp_wide_fun_t() { } -void vvp_wide_fun_t::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_wide_fun_t::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { unsigned pidx = port_base_ + port.port(); core_->dispatch_vec4_from_input_(pidx, bit); } -void vvp_wide_fun_t::recv_real(vvp_net_ptr_t port, double bit) +void vvp_wide_fun_t::recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t) { unsigned pidx = port_base_ + port.port(); core_->dispatch_real_from_input_(pidx, bit); } - /* **** vvp_scalar_t methods **** */ /* diff --git a/vvp/vvp_net.h b/vvp/vvp_net.h index 24b888c85..a4a21afcf 100644 --- a/vvp/vvp_net.h +++ b/vvp/vvp_net.h @@ -52,7 +52,9 @@ class vvp_delay_t; /* * Storage for items declared in automatically allocated scopes (i.e. automatic - * tasks and functions). + * tasks and functions). The first two slots in each context are reserved for + * linking to other contexts. The function that adds items to a context knows + * this, and allocates context indices accordingly. */ typedef void**vvp_context_t; @@ -60,7 +62,8 @@ typedef void*vvp_context_item_t; inline vvp_context_t vvp_allocate_context(unsigned nitem) { - return (vvp_context_t)malloc((1 + nitem) * sizeof(void*)); + + return (vvp_context_t)malloc((2 + nitem) * sizeof(void*)); } inline vvp_context_t vvp_get_next_context(vvp_context_t context) @@ -73,6 +76,16 @@ inline void vvp_set_next_context(vvp_context_t context, vvp_context_t next) context[0] = next; } +inline vvp_context_t vvp_get_stacked_context(vvp_context_t context) +{ + return (vvp_context_t)context[1]; +} + +inline void vvp_set_stacked_context(vvp_context_t context, vvp_context_t stack) +{ + context[1] = stack; +} + inline vvp_context_item_t vvp_get_context_item(vvp_context_t context, unsigned item_idx) { @@ -88,18 +101,15 @@ inline void vvp_set_context_item(vvp_context_t context, unsigned item_idx, /* * An "automatic" functor is one which may be associated with an automatically * allocated scope item. This provides the infrastructure needed to allocate - * and access the state information for individual instances of the item. A - * context_idx value of 0 indicates a statically allocated item. + * the state information for individual instances of the item. 
*/ struct automatic_hooks_s { - automatic_hooks_s() : context_idx(0) {} + automatic_hooks_s() {} virtual ~automatic_hooks_s() {} - virtual void alloc_instance(vvp_context_t context) {} - virtual void reset_instance(vvp_context_t context) {} - - unsigned context_idx; + virtual void alloc_instance(vvp_context_t context) = 0; + virtual void reset_instance(vvp_context_t context) = 0; }; /* @@ -179,6 +189,8 @@ class vvp_vector4_t { friend vvp_vector4_t operator ~(const vvp_vector4_t&that); friend class vvp_vector4array_t; + friend class vvp_vector4array_sa; + friend class vvp_vector4array_aa; public: explicit vvp_vector4_t(unsigned size =0, vvp_bit4_t bits =BIT4_X); @@ -458,22 +470,19 @@ extern bool vector4_to_value(const vvp_vector4_t&a, double&val, bool is_signed); /* * vvp_vector4array_t */ -class vvp_vector4array_t : public automatic_hooks_s { +class vvp_vector4array_t { public: - vvp_vector4array_t(unsigned width, unsigned words, bool is_automatic); + vvp_vector4array_t(unsigned width, unsigned words); ~vvp_vector4array_t(); - void alloc_instance(vvp_context_t context); - void reset_instance(vvp_context_t context); - unsigned width() const { return width_; } unsigned words() const { return words_; } - vvp_vector4_t get_word(unsigned idx) const; - void set_word(unsigned idx, const vvp_vector4_t&that); + virtual vvp_vector4_t get_word(unsigned idx) const = 0; + virtual void set_word(unsigned idx, const vvp_vector4_t&that) = 0; - private: + protected: struct v4cell { union { unsigned long abits_val_; @@ -485,15 +494,52 @@ class vvp_vector4array_t : public automatic_hooks_s { }; }; + vvp_vector4_t get_word_(v4cell*cell) const; + void set_word_(v4cell*cell, const vvp_vector4_t&that); + unsigned width_; unsigned words_; - v4cell* array_; private: // Not implemented vvp_vector4array_t(const vvp_vector4array_t&); vvp_vector4array_t& operator = (const vvp_vector4array_t&); }; +/* + * Statically allocated vvp_vector4array_t + */ +class vvp_vector4array_sa : public vvp_vector4array_t { + + public: + vvp_vector4array_sa(unsigned width, unsigned words); + ~vvp_vector4array_sa(); + + vvp_vector4_t get_word(unsigned idx) const; + void set_word(unsigned idx, const vvp_vector4_t&that); + + private: + v4cell* array_; +}; + +/* + * Automatically allocated vvp_vector4array_t + */ +class vvp_vector4array_aa : public vvp_vector4array_t, public automatic_hooks_s { + + public: + vvp_vector4array_aa(unsigned width, unsigned words); + ~vvp_vector4array_aa(); + + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + vvp_vector4_t get_word(unsigned idx) const; + void set_word(unsigned idx, const vvp_vector4_t&that); + + private: + unsigned context_idx_; +}; + /* vvp_vector2_t */ class vvp_vector2_t { @@ -924,21 +970,34 @@ struct vvp_net_t { * default behavior for recv_vec8 and recv_vec8_pv is to reduce the * operand to a vvp_vector4_t and pass it on to the recv_vec4 or * recv_vec4_pv method. + * + * The recv_vec4, recv_vec4_pv, and recv_real methods are also + * passed a context pointer. When the received bit has propagated + * from a statically allocated node, this will be a null pointer. + * When the received bit has propagated from an automatically + * allocated node, this will be a pointer to the context that + * contains the instance of that bit that has just been modified. + * When the received bit was from a procedural assignment or from + * a VPI set_value() operation, this will be a pointer to the + * writable context associated with the currently running thread. 
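/* Illustrative sketch, not part of the patch: the assumed layout of one
 * automatically allocated context under this scheme.  Slot 0 carries the
 * free/live-list link, slot 1 the wt/rd stack link, and the scope's items
 * occupy slots 2 onwards, which is why vpip_add_item_to_context() hands
 * out indices starting at 2 and vvp_allocate_context() reserves two extra
 * slots.  The demo_* names are invented for this example, and the direct
 * indexing stands in for vvp_set_context_item()/vvp_get_context_item(). */

#include <stdlib.h>

typedef void** demo_context_t;   /* stands in for vvp_context_t */

/* Mirrors vvp_allocate_context(): two link slots plus one slot per item. */
static demo_context_t demo_allocate_context(unsigned nitem)
{
      return (demo_context_t) malloc((2 + nitem) * sizeof(void*));
}

int main()
{
      /* A scope with two automatic items, e.g. a vector and a real. */
      demo_context_t ctx = demo_allocate_context(2);

      ctx[0] = 0;   /* next-context link: the scope free or live list */
      ctx[1] = 0;   /* stacked-context link: the thread wt/rd stacks  */

      /* Per-instance storage for the first item, addressed by the index
         returned when the item was added to the scope (2 for the first
         item, 3 for the second). */
      double*bits = new double(0.0);
      ctx[2] = bits;
      *static_cast<double*>(ctx[2]) = 1.0;

      delete bits;
      free(ctx);
      return 0;
}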
*/ -class vvp_net_fun_t : public automatic_hooks_s { +class vvp_net_fun_t { public: vvp_net_fun_t(); virtual ~vvp_net_fun_t(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); virtual void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); - virtual void recv_real(vvp_net_ptr_t port, double bit); + virtual void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t context); virtual void recv_long(vvp_net_ptr_t port, long bit); // Part select variants of above virtual void recv_vec4_pv(vvp_net_ptr_t p, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context); virtual void recv_vec8_pv(vvp_net_ptr_t p, const vvp_vector8_t&bit, unsigned base, unsigned wid, unsigned vwid); virtual void recv_long_pv(vvp_net_ptr_t port, long bit, @@ -974,7 +1033,8 @@ class vvp_fun_concat : public vvp_net_fun_t { unsigned w2, unsigned w3); ~vvp_fun_concat(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); private: unsigned wid_[4]; @@ -993,7 +1053,8 @@ class vvp_fun_repeat : public vvp_net_fun_t { vvp_fun_repeat(unsigned width, unsigned repeat); ~vvp_fun_repeat(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); private: unsigned wid_; @@ -1017,7 +1078,8 @@ class vvp_fun_drive : public vvp_net_fun_t { vvp_fun_drive(vvp_bit4_t init, unsigned str0 =6, unsigned str1 =6); ~vvp_fun_drive(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); //void recv_long(vvp_net_ptr_t port, long bit); private: @@ -1037,7 +1099,8 @@ class vvp_fun_extend_signed : public vvp_net_fun_t { explicit vvp_fun_extend_signed(unsigned wid); ~vvp_fun_extend_signed(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); private: unsigned width_; @@ -1174,20 +1237,31 @@ class vvp_fun_signal_vec : public vvp_fun_signal_base { virtual vvp_vector4_t vec4_value() const =0; }; -class vvp_fun_signal : public vvp_fun_signal_vec { +class vvp_fun_signal4 : public vvp_fun_signal_vec { public: - explicit vvp_fun_signal(unsigned wid, vvp_bit4_t init=BIT4_X); + explicit vvp_fun_signal4() {}; - void alloc_instance(vvp_context_t context); - void reset_instance(vvp_context_t context); + void get_value(struct t_vpi_value*value); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); +}; + +/* + * Statically allocated vvp_fun_signal4. 
+ */ +class vvp_fun_signal4_sa : public vvp_fun_signal4 { + + public: + explicit vvp_fun_signal4_sa(unsigned wid, vvp_bit4_t init=BIT4_X); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); // Part select variants of above void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t); void recv_vec8_pv(vvp_net_ptr_t port, const vvp_vector8_t&bit, unsigned base, unsigned wid, unsigned vwid); @@ -1202,8 +1276,6 @@ class vvp_fun_signal : public vvp_fun_signal_vec { void release_pv(vvp_net_ptr_t port, bool net, unsigned base, unsigned wid); - void get_value(struct t_vpi_value*value); - private: void calculate_output_(vvp_net_ptr_t ptr); @@ -1211,17 +1283,54 @@ class vvp_fun_signal : public vvp_fun_signal_vec { vvp_vector4_t force_; }; +/* + * Automatically allocated vvp_fun_signal4. + */ +class vvp_fun_signal4_aa : public vvp_fun_signal4, public automatic_hooks_s { + + public: + explicit vvp_fun_signal4_aa(unsigned wid, vvp_bit4_t init=BIT4_X); + + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + // Part select variants of above + void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t); + + // Get information about the vector value. + unsigned size() const; + vvp_bit4_t value(unsigned idx) const; + vvp_scalar_t scalar_value(unsigned idx) const; + vvp_vector4_t vec4_value() const; + + // Commands + void release(vvp_net_ptr_t port, bool net); + void release_pv(vvp_net_ptr_t port, bool net, + unsigned base, unsigned wid); + + private: + unsigned context_idx_; + unsigned size_; +}; + class vvp_fun_signal8 : public vvp_fun_signal_vec { public: explicit vvp_fun_signal8(unsigned wid); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); // Part select variants of above void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context); void recv_vec8_pv(vvp_net_ptr_t port, const vvp_vector8_t&bit, unsigned base, unsigned wid, unsigned vwid); @@ -1245,16 +1354,27 @@ class vvp_fun_signal8 : public vvp_fun_signal_vec { vvp_vector8_t force_; }; -class vvp_fun_signal_real : public vvp_fun_signal_base { +class vvp_fun_signal_real : public vvp_fun_signal_base { public: - explicit vvp_fun_signal_real(); + explicit vvp_fun_signal_real() {}; - void alloc_instance(vvp_context_t context); - void reset_instance(vvp_context_t context); + // Get information about the vector value. + virtual double real_value() const = 0; - //void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t port, double bit); + void get_value(struct t_vpi_value*value); +}; + +/* + * Statically allocated vvp_fun_signal_real. + */ +class vvp_fun_signal_real_sa : public vvp_fun_signal_real { + + public: + explicit vvp_fun_signal_real_sa(); + + void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t); // Get information about the vector value. 
double real_value() const; @@ -1264,13 +1384,37 @@ class vvp_fun_signal_real : public vvp_fun_signal_base { void release_pv(vvp_net_ptr_t port, bool net, unsigned base, unsigned wid); - void get_value(struct t_vpi_value*value); - private: double bits_; double force_; }; +/* + * Automatically allocated vvp_fun_signal_real. + */ +class vvp_fun_signal_real_aa : public vvp_fun_signal_real, public automatic_hooks_s { + + public: + explicit vvp_fun_signal_real_aa(); + + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t context); + + // Get information about the vector value. + double real_value() const; + + // Commands + void release(vvp_net_ptr_t port, bool net); + void release_pv(vvp_net_ptr_t port, bool net, + unsigned base, unsigned wid); + + private: + unsigned context_idx_; +}; + /* * Wide Functors: * Wide functors represent special devices that may have more than 4 @@ -1340,8 +1484,10 @@ class vvp_wide_fun_t : public vvp_net_fun_t { vvp_wide_fun_t(vvp_wide_fun_core*c, unsigned base); ~vvp_wide_fun_t(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t port, double bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t context); private: vvp_wide_fun_core*core_; @@ -1349,20 +1495,22 @@ class vvp_wide_fun_t : public vvp_net_fun_t { }; -inline void vvp_send_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&val) +inline void vvp_send_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&val, + vvp_context_t context) { while (struct vvp_net_t*cur = ptr.ptr()) { vvp_net_ptr_t next = cur->port[ptr.port()]; if (cur->fun) - cur->fun->recv_vec4(ptr, val); + cur->fun->recv_vec4(ptr, val, context); ptr = next; } } extern void vvp_send_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&val); -extern void vvp_send_real(vvp_net_ptr_t ptr, double val); +extern void vvp_send_real(vvp_net_ptr_t ptr, double val, + vvp_context_t context); extern void vvp_send_long(vvp_net_ptr_t ptr, long val); extern void vvp_send_long_pv(vvp_net_ptr_t ptr, long val, unsigned base, unsigned width); @@ -1387,20 +1535,21 @@ extern void vvp_send_long_pv(vvp_net_ptr_t ptr, long val, * mirror of the destination vector. 
*/ inline void vvp_send_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&val, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context) { while (struct vvp_net_t*cur = ptr.ptr()) { vvp_net_ptr_t next = cur->port[ptr.port()]; if (cur->fun) - cur->fun->recv_vec4_pv(ptr, val, base, wid, vwid); + cur->fun->recv_vec4_pv(ptr, val, base, wid, vwid, context); ptr = next; } } inline void vvp_send_vec8_pv(vvp_net_ptr_t ptr, const vvp_vector8_t&val, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid) { while (struct vvp_net_t*cur = ptr.ptr()) { vvp_net_ptr_t next = cur->port[ptr.port()]; diff --git a/vvp/words.cc b/vvp/words.cc index 8e61931ef..4537e2368 100644 --- a/vvp/words.cc +++ b/vvp/words.cc @@ -34,10 +34,15 @@ static void __compile_var_real(char*label, char*name, vvp_array_t array, unsigned long array_addr, int msb, int lsb) { - vvp_fun_signal_real*fun = new vvp_fun_signal_real; + vvp_fun_signal_real*fun; + if (vpip_peek_current_scope()->is_automatic) { + fun = new vvp_fun_signal_real_aa; + } else { + fun = new vvp_fun_signal_real_sa; + } vvp_net_t*net = new vvp_net_t; net->fun = fun; - vpip_add_item_to_current_scope(fun); + define_functor_symbol(label, net); vpiHandle obj = vpip_make_real_var(name, net); @@ -79,11 +84,15 @@ static void __compile_var(char*label, char*name, { unsigned wid = ((msb > lsb)? msb-lsb : lsb-msb) + 1; - vvp_fun_signal*vsig = new vvp_fun_signal(wid); + vvp_fun_signal_vec*vsig; + if (vpip_peek_current_scope()->is_automatic) { + vsig = new vvp_fun_signal4_aa(wid); + } else { + vsig = new vvp_fun_signal4_sa(wid); + } vvp_net_t*node = new vvp_net_t; - node->fun = vsig; - vpip_add_item_to_current_scope(vsig); + define_functor_symbol(label, node); vpiHandle obj = 0; @@ -159,7 +168,7 @@ static void __compile_net(char*label, char*name, vvp_fun_signal_base*vsig = net8_flag ? dynamic_cast(new vvp_fun_signal8(wid)) - : dynamic_cast(new vvp_fun_signal(wid,BIT4_Z)); + : dynamic_cast(new vvp_fun_signal4_sa(wid,BIT4_Z)); node->fun = vsig; /* Add the label into the functor symbol table. */ @@ -220,7 +229,7 @@ static void __compile_real(char*label, char*name, vvp_array_t array = array_label ? array_find(array_label) : 0; assert(array_label ? array!=0 : true); - vvp_fun_signal_real*fun = new vvp_fun_signal_real; + vvp_fun_signal_real*fun = new vvp_fun_signal_real_sa; net->fun = fun; /* Add the label into the functor symbol table. */
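/* Illustrative sketch, not part of the patch: the general shape an
 * automatically allocated ("_aa") item is assumed to follow, mirroring
 * vvp_fun_signal_real_aa above -- the compiler picks the _aa variant when
 * vpip_peek_current_scope()->is_automatic is set, the item registers with
 * the context scope on construction, creates and resets per-instance
 * storage in alloc_instance()/reset_instance(), and always resolves state
 * through a context instead of member data.  demo_counter_aa and its long
 * payload are invented for this example, the constructor parameter replaces
 * the call to vpip_add_item_to_context(this, vpip_peek_context_scope()),
 * and direct indexing stands in for the context item accessors, all just
 * to keep the sketch self-contained. */

#include <stdlib.h>

typedef void**vvp_context_t;   /* as in vvp_net.h */

/* Copied from the reworked automatic_hooks_s above. */
struct automatic_hooks_s {
      automatic_hooks_s() {}
      virtual ~automatic_hooks_s() {}
      virtual void alloc_instance(vvp_context_t context) = 0;
      virtual void reset_instance(vvp_context_t context) = 0;
};

class demo_counter_aa : public automatic_hooks_s {

    public:
      /* The real _aa functors obtain this index in their constructor via
         vpip_add_item_to_context(this, vpip_peek_context_scope()). */
      explicit demo_counter_aa(unsigned context_idx)
      : context_idx_(context_idx) { }

      /* Called when a brand new context is allocated for the scope. */
      void alloc_instance(vvp_context_t context)
      { context[context_idx_] = new long(0); }

      /* Called when a recycled context is handed to a new thread. */
      void reset_instance(vvp_context_t context)
      { *static_cast<long*>(context[context_idx_]) = 0; }

      /* Reads always go through a context (e.g. the running thread's). */
      long value(vvp_context_t context) const
      { return *static_cast<long*>(context[context_idx_]); }

    private:
      unsigned context_idx_;
};

int main()
{
      /* One item in the scope: two link slots plus slot 2 for the item. */
      vvp_context_t ctx = (vvp_context_t) malloc(3 * sizeof(void*));
      demo_counter_aa item (2);

      item.alloc_instance(ctx);
      item.reset_instance(ctx);
      long value = item.value(ctx);   /* 0 for a fresh instance */

      delete static_cast<long*>(ctx[2]);
      free(ctx);
      return (int) value;
}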