diff --git a/PExpr.cc b/PExpr.cc index dbaed82db..3db0af799 100644 --- a/PExpr.cc +++ b/PExpr.cc @@ -73,6 +73,16 @@ PEBComp::~PEBComp() { } +PEBLogic::PEBLogic(char op, PExpr*l, PExpr*r) +: PEBinary(op, l, r) +{ + assert(op == 'a' || op == 'o'); +} + +PEBLogic::~PEBLogic() +{ +} + PEBShift::PEBShift(char op, PExpr*l, PExpr*r) : PEBinary(op, l, r) { diff --git a/PExpr.h b/PExpr.h index 5b9ba3610..a7d2b54db 100644 --- a/PExpr.h +++ b/PExpr.h @@ -494,6 +494,26 @@ class PEBComp : public PEBinary { NetExpr* elaborate_expr(Design*des, NetScope*scope, int expr_width, bool sys_task_arg) const; + NetExpr*elaborate_pexpr(Design*des, NetScope*sc) const; +}; + +/* + * This derived class is for handling logical expressions: && and ||. +*/ +class PEBLogic : public PEBinary { + + public: + explicit PEBLogic(char op, PExpr*l, PExpr*r); + ~PEBLogic(); + + virtual unsigned test_width(Design*des, NetScope*scope, + unsigned min, unsigned lval, + ivl_variable_type_t&expr_type, + bool&flag); + + NetExpr* elaborate_expr(Design*des, NetScope*scope, + int expr_width, bool sys_task_arg) const; + NetExpr*elaborate_pexpr(Design*des, NetScope*sc) const; }; class PEBShift : public PEBinary { diff --git a/compiler.h b/compiler.h index 2b38802be..ba0d371f6 100644 --- a/compiler.h +++ b/compiler.h @@ -87,6 +87,7 @@ extern bool debug_eval_tree; extern bool debug_elaborate; extern bool debug_synth2; extern bool debug_optimizer; +extern bool debug_automatic; /* Path to a directory useful for finding subcomponents. */ extern const char*basedir; diff --git a/developer-quick-start.txt b/developer-quick-start.txt index 48ffa1f9b..33506b62c 100644 --- a/developer-quick-start.txt +++ b/developer-quick-start.txt @@ -10,7 +10,7 @@ participating in the Icarus Verilog development process. That information will not be repeated here. What this documentation *will* cover is the gross structure of the -Icarus Verilog core compiler source. This will help orient you to the +Icarus Verilog compiler source. This will help orient you to the source code itself, so that you can find the global parts where you can look for even better detail. @@ -40,7 +40,7 @@ on the core itself. - The loadable code generators (tgt-*/) This core compiler, after it is finished with parsing and semantic -analysis, uses loadable code generators to emit code for suppoted +analysis, uses loadable code generators to emit code for supported targets. The tgt-*/ directories contains the source for the target code generators that are bundled with Icarus Verilog. The tgt-vvp/ directory in particular contains the code generator for the vvp @@ -65,20 +65,20 @@ and the source is in this subdirectory. The Icarus Verilog support for the deprecated PLI-1 is in this subdirectory. The vvp runtime does not directly support the -PLI-1. Insead, the libveriuser library emulates it using the builtin +PLI-1. Instead, the libveriuser library emulates it using the builtin PLI-2 support. - The Cadence PLI module compatibility module (cadpli/) It is possible in some specialized situations to load and execute -PLI-1 code writen for Verilog-XL. This directory contains the source +PLI-1 code written for Verilog-XL. This directory contains the source for the module that provides the Cadence PLI interface. * The Core Compiler The "ivl" binary is the core compiler that does the heavy lifting of -compiling the Veriog source (including libraries) and generating the +compiling the Verilog source (including libraries) and generating the output. 
This is the most complex component of the Icarus Verilog compilation system. @@ -86,8 +86,8 @@ The process in the abstract starts with the Verilog lexical analysis and parsing to generate an internal "pform". The pform is then translated by elaboration into the "netlist" form. The netlist is processed by some functors (which include some optimizations and -optional synthesys) then is translated into the ivl_target internal -form. And finallly, the ivl_target form is passed via the ivl_target.h +optional synthesis) then is translated into the ivl_target internal +form. And finally, the ivl_target form is passed via the ivl_target.h API to the code generators. - Lexical Analysis @@ -105,9 +105,9 @@ large set of potential keywords. - Parsing The parser input file "parse.y" is passed to the "bison" program to -generate the parser. The parser uses the functions in parse*., -parse*.cc, pform*.h and pform*.cc to generate the pform from the -stream of input tokens. The pfrom is what compiler writers call a +generate the parser. The parser uses the functions in parse*.h, +parse*.cc, pform.h, and pform*.cc to generate the pform from the +stream of input tokens. The pform is what compiler writers call a "decorated parse tree". The pform itself is described by the classes in the header files diff --git a/dup_expr.cc b/dup_expr.cc index 91b6bc867..45ad48d5d 100644 --- a/dup_expr.cc +++ b/dup_expr.cc @@ -138,6 +138,14 @@ NetEUFunc* NetEUFunc::dup_expr() const return tmp; } +NetEUBits* NetEUBits::dup_expr() const +{ + NetEUBits*tmp = new NetEUBits(op_, expr_->dup_expr()); + assert(tmp); + tmp->set_line(*this); + return tmp; +} + NetEUnary* NetEUnary::dup_expr() const { NetEUnary*tmp = new NetEUnary(op_, expr_->dup_expr()); diff --git a/elab_anet.cc b/elab_anet.cc index 35bc2cf68..7323c7ee3 100644 --- a/elab_anet.cc +++ b/elab_anet.cc @@ -115,7 +115,7 @@ NetNet* PEIdent::elaborate_anet(Design*des, NetScope*scope) const const NetExpr*par = 0; NetEvent* eve = 0; - symbol_search(des, scope, path_, sig, mem, par, eve); + symbol_search(this, des, scope, path_, sig, mem, par, eve); if (mem != 0) { @@ -211,4 +211,3 @@ NetNet* PEIdent::elaborate_anet(Design*des, NetScope*scope) const * Check lvalue of procedural continuous assign (PR#29) * */ - diff --git a/elab_expr.cc b/elab_expr.cc index a21a7eb42..0513c1a43 100644 --- a/elab_expr.cc +++ b/elab_expr.cc @@ -279,8 +279,6 @@ NetExpr* PEBinary::elaborate_expr_base_(Design*des, NetExpr*lp, NetExpr*rp, int expr_wid) const { - bool flag; - if (debug_elaborate) { cerr << get_fileline() << ": debug: elaborate expression " << *this << " expr_wid=" << expr_wid << endl; @@ -296,11 +294,12 @@ NetExpr* PEBinary::elaborate_expr_base_(Design*des, case 'a': case 'o': - lp = condition_reduce(lp); - rp = condition_reduce(rp); - tmp = new NetEBLogic(op_, lp, rp); - tmp->set_line(*this); - break; + cerr << get_fileline() << ": internal error: " + << "Elaboration of " << human_readable_op(op_) + << " Should have been handled in NetEBLogic::elaborate." + << endl; + des->errors += 1; + return 0; case 'p': tmp = new NetEBPow(op_, lp, rp); @@ -341,38 +340,18 @@ NetExpr* PEBinary::elaborate_expr_base_(Design*des, case 'E': /* === */ case 'N': /* !== */ - if (lp->expr_type() == IVL_VT_REAL || - rp->expr_type() == IVL_VT_REAL) { - cerr << get_fileline() << ": error: " - << human_readable_op(op_) - << "may not have real operands." << endl; - return 0; - } - /* Fall through... 
*/ case 'e': /* == */ case 'n': /* != */ - if (dynamic_cast(rp) - && (lp->expr_width() > rp->expr_width())) - rp->set_width(lp->expr_width()); - - if (dynamic_cast(lp) - && (lp->expr_width() < rp->expr_width())) - lp->set_width(rp->expr_width()); - - /* from here, handle this like other compares. */ case 'L': /* <= */ case 'G': /* >= */ case '<': case '>': - tmp = new NetEBComp(op_, lp, rp); - tmp->set_line(*this); - flag = tmp->set_width(1); - if (flag == false) { - cerr << get_fileline() << ": internal error: " - "expression bit width of comparison != 1." << endl; - des->errors += 1; - } - break; + cerr << get_fileline() << ": internal error: " + << "Elaboration of " << human_readable_op(op_) + << " Should have been handled in NetEBComp::elaborate." + << endl; + des->errors += 1; + return 0; case 'm': // min(l,r) case 'M': // max(l,r) @@ -425,6 +404,16 @@ NetExpr* PEBinary::elaborate_expr_base_div_(Design*des, } } + /* The original elaboration of the left and right expressions + already tried to elaborate to the expr_wid. If the + expressions are not that width by now, then they need to be + padded. The divide expression operands must be the width + of the output. */ + if (expr_wid > 0) { + lp = pad_to_width(lp, expr_wid); + rp = pad_to_width(rp, expr_wid); + } + NetEBDiv*tmp = new NetEBDiv(op_, lp, rp); tmp->set_line(*this); @@ -790,7 +779,83 @@ NetExpr* PEBComp::elaborate_expr(Design*des, NetScope*scope, if (type_is_vectorable(rp->expr_type())) rp = pad_to_width(rp, use_wid); - return elaborate_eval_expr_base_(des, lp, rp, use_wid); + eval_expr(lp, use_wid); + eval_expr(rp, use_wid); + + // Handle some operand-specific special cases... + switch (op_) { + case 'E': /* === */ + case 'N': /* !== */ + if (lp->expr_type() == IVL_VT_REAL || + rp->expr_type() == IVL_VT_REAL) { + cerr << get_fileline() << ": error: " + << human_readable_op(op_) + << "may not have real operands." << endl; + return 0; + } + break; + default: + break; + } + + NetEBComp*tmp = new NetEBComp(op_, lp, rp); + tmp->set_line(*this); + bool flag = tmp->set_width(1); + if (flag == false) { + cerr << get_fileline() << ": internal error: " + "expression bit width of comparison != 1." << endl; + des->errors += 1; + } + + return tmp; +} + +unsigned PEBLogic::test_width(Design*des, NetScope*scope, + unsigned min, unsigned lval, + ivl_variable_type_t&expr_type_out, + bool&unsized_flag) +{ + expr_type_ = IVL_VT_LOGIC; + expr_width_ = 1; + expr_type_out = expr_type_; + return expr_width_; +} + +NetExpr*PEBLogic::elaborate_expr(Design*des, NetScope*scope, + int expr_width_dummp, bool sys_task_arg) const +{ + assert(left_); + assert(right_); + + // The left and right expressions are self-determined and + // independent. Run their test_width methods independently. We + // don't need the widths here, but we do need the expressions + // to calculate their self-determined width and type. 
+ + bool left_flag = false; + ivl_variable_type_t left_type = IVL_VT_NO_TYPE; + left_->test_width(des, scope, 0, 0, left_type, left_flag); + + bool right_flag = false; + ivl_variable_type_t right_type = IVL_VT_NO_TYPE; + right_->test_width(des, scope, 0, 0, right_type, right_flag); + + NetExpr*lp = elab_and_eval(des, scope, left_, -1); + NetExpr*rp = elab_and_eval(des, scope, right_, -1); + if ((lp == 0) || (rp == 0)) { + delete lp; + delete rp; + return 0; + } + + lp = condition_reduce(lp); + rp = condition_reduce(rp); + + NetEBLogic*tmp = new NetEBLogic(op_, lp, rp); + tmp->set_line(*this); + tmp->set_width(1); + + return tmp; } unsigned PEBShift::test_width(Design*des, NetScope*scope, @@ -1586,7 +1651,7 @@ unsigned PEIdent::test_width(Design*des, NetScope*scope, const NetExpr*ex1, *ex2; - symbol_search(des, scope, path_, net, par, eve, ex1, ex2); + symbol_search(0, des, scope, path_, net, par, eve, ex1, ex2); // If there is a part/bit select expression, then process it // here. This constrains the results no matter what kind the @@ -1699,7 +1764,7 @@ NetExpr* PEIdent::elaborate_expr(Design*des, NetScope*scope, const NetExpr*ex1, *ex2; - NetScope*found_in = symbol_search(des, scope, path_, + NetScope*found_in = symbol_search(this, des, scope, path_, net, par, eve, ex1, ex2); diff --git a/elab_lval.cc b/elab_lval.cc index 174af3260..8cdc13611 100644 --- a/elab_lval.cc +++ b/elab_lval.cc @@ -152,7 +152,7 @@ NetAssign_* PEIdent::elaborate_lval(Design*des, const NetExpr*par = 0; NetEvent* eve = 0; - symbol_search(des, scope, path_, reg, par, eve); + symbol_search(this, des, scope, path_, reg, par, eve); if (reg == 0) { cerr << get_fileline() << ": error: Could not find variable ``" << path_ << "'' in ``" << scope_path(scope) << diff --git a/elab_net.cc b/elab_net.cc index 4e0983c30..fa1d16f4a 100644 --- a/elab_net.cc +++ b/elab_net.cc @@ -374,7 +374,7 @@ NetNet* PEIdent::elaborate_lnet_common_(Design*des, NetScope*scope, const NetExpr*par = 0; NetEvent* eve = 0; - symbol_search(des, scope, path_, sig, par, eve); + symbol_search(this, des, scope, path_, sig, par, eve); if (eve != 0) { cerr << get_fileline() << ": error: named events (" << path_ @@ -631,4 +631,3 @@ NetNet* PEIdent::elaborate_port(Design*des, NetScope*scope) const return sig; } - diff --git a/elab_pexpr.cc b/elab_pexpr.cc index 143acc71d..a8728cafc 100644 --- a/elab_pexpr.cc +++ b/elab_pexpr.cc @@ -57,6 +57,52 @@ NetExpr*PEBinary::elaborate_pexpr (Design*des, NetScope*scope) const return tmp; } +NetExpr*PEBComp::elaborate_pexpr(Design*des, NetScope*scope) const +{ + NetExpr*lp = left_->elaborate_pexpr(des, scope); + NetExpr*rp = right_->elaborate_pexpr(des, scope); + if ((lp == 0) || (rp == 0)) { + delete lp; + delete rp; + return 0; + } + + suppress_binary_operand_sign_if_needed_(lp, rp); + + NetEBComp*tmp = new NetEBComp(op_, lp, rp); + tmp->set_line(*this); + bool flag = tmp->set_width(1); + if (flag == false) { + cerr << get_fileline() << ": internal error: " + "expression bit width of comparison != 1." << endl; + des->errors += 1; + } + + return tmp; +} + +NetExpr*PEBLogic::elaborate_pexpr(Design*des, NetScope*scope) const +{ + NetExpr*lp = left_->elaborate_pexpr(des, scope); + NetExpr*rp = right_->elaborate_pexpr(des, scope); + if ((lp == 0) || (rp == 0)) { + delete lp; + delete rp; + return 0; + } + + NetEBLogic*tmp = new NetEBLogic(op_, lp, rp); + tmp->set_line(*this); + bool flag = tmp->set_width(1); + if (flag == false) { + cerr << get_fileline() << ": internal error: " + "expression bit width of comparison != 1." 
<< endl; + des->errors += 1; + } + + return tmp; +} + /* * Event though parameters are not generally sized, parameter * expressions can include concatenation expressions. This requires diff --git a/elab_sig.cc b/elab_sig.cc index cf323c783..359985b51 100644 --- a/elab_sig.cc +++ b/elab_sig.cc @@ -266,7 +266,7 @@ bool PEIdent::elaborate_sig(Design*des, NetScope*scope) const if (error_implicit) return true; - symbol_search(des, scope, path_, sig, par, eve); + symbol_search(this, des, scope, path_, sig, par, eve); if (eve != 0) return false; diff --git a/elaborate.cc b/elaborate.cc index 1adb217d7..51e018861 100644 --- a/elaborate.cc +++ b/elaborate.cc @@ -2861,7 +2861,8 @@ NetProc* PEventStatement::elaborate_st(Design*des, NetScope*scope, const NetExpr*par = 0; NetEvent* eve = 0; - NetScope*found_in = symbol_search(des, scope, id->path(), + NetScope*found_in = symbol_search(this, des, scope, + id->path(), sig, par, eve); if (found_in && eve) { @@ -3463,7 +3464,7 @@ NetProc* PTrigger::elaborate(Design*des, NetScope*scope) const const NetExpr*par = 0; NetEvent* eve = 0; - NetScope*found_in = symbol_search(des, scope, event_, + NetScope*found_in = symbol_search(this, des, scope, event_, sig, par, eve); if (found_in == 0) { diff --git a/eval.cc b/eval.cc index 41cb60a5b..1bf506552 100644 --- a/eval.cc +++ b/eval.cc @@ -185,7 +185,7 @@ verinum* PEIdent::eval_const(Design*des, NetScope*scope) const return new verinum(scope->genvar_tmp_val); } - symbol_search(des, scope, path_, net, expr, eve); + symbol_search(this, des, scope, path_, net, expr, eve); if (expr == 0) return 0; diff --git a/eval_tree.cc b/eval_tree.cc index be195159f..40cdf517a 100644 --- a/eval_tree.cc +++ b/eval_tree.cc @@ -858,8 +858,10 @@ NetExpr* NetEBDiv::eval_tree(int prune_to_width) NetEConst*rc = dynamic_cast(right_); if (rc == 0) return 0; - verinum lval = lc->value(); - verinum rval = rc->value(); + // Make sure the expression is evaluated at the + // expression width. + verinum lval = pad_to_width(lc->value(), expr_width()); + verinum rval = pad_to_width(rc->value(), expr_width()); NetExpr*tmp = 0; switch (op_) { diff --git a/main.cc b/main.cc index 99bfa4a2c..fb4ec97ef 100644 --- a/main.cc +++ b/main.cc @@ -124,6 +124,7 @@ bool debug_eval_tree = false; bool debug_elaborate = false; bool debug_synth2 = false; bool debug_optimizer = false; +bool debug_automatic = false; /* * Verbose messages enabled. @@ -392,6 +393,8 @@ static void read_iconfig_file(const char*ipath) } else if (strcmp(cp,"optimizer") == 0) { debug_optimizer = true; cerr << "debug: Enable optimizer debug" << endl; + } else if (strcmp(cp,"automatic") == 0) { + debug_automatic = true; } else { } diff --git a/net_event.cc b/net_event.cc index 3804ff8fb..ba9c5d3cc 100644 --- a/net_event.cc +++ b/net_event.cc @@ -173,6 +173,12 @@ void NetEvent::find_similar_event(list&event_list) if (tmp == this) continue; + /* For automatic tasks, the VVP runtime holds state for events + in the automatically allocated context. This means we can't + merge similar events in different automatic tasks. */ + if (scope()->is_auto() && (tmp->scope() != scope())) + continue; + if ((*idx).second != probe_count) continue; @@ -553,4 +559,3 @@ NetProc* NetEvWait::statement() * Simulate named event trigger and waits. 
* */ - diff --git a/netlist.h b/netlist.h index 04eb9bfa7..d97faed80 100644 --- a/netlist.h +++ b/netlist.h @@ -3275,7 +3275,7 @@ class NetEBComp : public NetEBinary { NetEBComp(char op, NetExpr*l, NetExpr*r); ~NetEBComp(); - virtual bool set_width(unsigned w, bool last_chance); + virtual bool set_width(unsigned w, bool last_chance =false); /* A compare expression has a definite width. */ virtual bool has_width() const; @@ -3312,7 +3312,7 @@ class NetEBLogic : public NetEBinary { NetEBLogic(char op, NetExpr*l, NetExpr*r); ~NetEBLogic(); - virtual bool set_width(unsigned w, bool last_chance); + virtual bool set_width(unsigned w, bool last_chance =false); virtual NetEBLogic* dup_expr() const; virtual NetEConst* eval_tree(int prune_to_width = -1); virtual NetNet* synthesize(Design*, NetScope*scope); @@ -3697,6 +3697,7 @@ class NetEUBits : public NetEUnary { virtual NetNet* synthesize(Design*, NetScope*scope); + virtual NetEUBits* dup_expr() const; virtual NetExpr* eval_tree(int prune_to_width = -1); virtual ivl_variable_type_t expr_type() const; }; diff --git a/netmisc.h b/netmisc.h index 97792d8f7..29333b063 100644 --- a/netmisc.h +++ b/netmisc.h @@ -36,21 +36,25 @@ * ex2 is the lsb expression for the range. If there is no range, then * these values are set to 0. */ -extern NetScope* symbol_search(Design*des, - NetScope*start, pform_name_t path, +extern NetScope* symbol_search(const LineInfo*li, + Design*des, + NetScope*start, + pform_name_t path, NetNet*&net, /* net/reg */ const NetExpr*&par,/* parameter */ NetEvent*&eve, /* named event */ const NetExpr*&ex1, const NetExpr*&ex2); -inline NetScope* symbol_search(Design*des, - NetScope*start, const pform_name_t&path, +inline NetScope* symbol_search(const LineInfo*li, + Design*des, + NetScope*start, + const pform_name_t&path, NetNet*&net, /* net/reg */ const NetExpr*&par,/* parameter */ NetEvent*&eve /* named event */) { const NetExpr*ex1, *ex2; - return symbol_search(des, start, path, net, par, eve, ex1, ex2); + return symbol_search(li, des, start, path, net, par, eve, ex1, ex2); } /* diff --git a/parse.y b/parse.y index 6b767d045..a4bc148a3 100644 --- a/parse.y +++ b/parse.y @@ -1033,12 +1033,12 @@ expression $$ = tmp; } | expression K_LOR expression - { PEBinary*tmp = new PEBinary('o', $1, $3); + { PEBinary*tmp = new PEBLogic('o', $1, $3); FILE_NAME(tmp, @2); $$ = tmp; } | expression K_LAND expression - { PEBinary*tmp = new PEBinary('a', $1, $3); + { PEBinary*tmp = new PEBLogic('a', $1, $3); FILE_NAME(tmp, @2); $$ = tmp; } @@ -2977,7 +2977,13 @@ real_variable { perm_string name = lex_strings.make($1); pform_makewire(@1, name, NetNet::REG, NetNet::NOT_A_PORT, IVL_VT_REAL, 0); if ($2 != 0) { - yyerror(@2, "sorry: real variables do not currently support arrays."); + index_component_t index; + if ($2->size() > 1) { + yyerror(@2, "sorry: only 1 dimensional arrays " + "are currently supported."); + } + index = $2->front(); + pform_set_reg_idx(name, index.msb, index.lsb); delete $2; } $$ = $1; diff --git a/pform.cc b/pform.cc index 0cd16c451..0eb67d899 100644 --- a/pform.cc +++ b/pform.cc @@ -110,11 +110,12 @@ PTask* pform_push_task_scope(char*name, bool is_auto) PTask*task; if (pform_cur_generate) { task = new PTask(task_name, pform_cur_generate->lexical_scope, - is_auto); + is_auto || debug_automatic); pform_cur_generate->tasks[task->pscope_name()] = task; pform_cur_generate->lexical_scope = task; } else { - task = new PTask(task_name, lexical_scope, is_auto); + task = new PTask(task_name, lexical_scope, + is_auto || debug_automatic); 
pform_cur_module->tasks[task->pscope_name()] = task; lexical_scope = task; } @@ -129,11 +130,12 @@ PFunction* pform_push_function_scope(char*name, bool is_auto) PFunction*func; if (pform_cur_generate) { func = new PFunction(func_name, pform_cur_generate->lexical_scope, - is_auto); + is_auto || debug_automatic); pform_cur_generate->funcs[func->pscope_name()] = func; pform_cur_generate->lexical_scope = func; } else { - func = new PFunction(func_name, lexical_scope, is_auto); + func = new PFunction(func_name, lexical_scope, + is_auto || debug_automatic); pform_cur_module->funcs[func->pscope_name()] = func; lexical_scope = func; } @@ -181,6 +183,20 @@ static LexicalScope*pform_get_cur_scope() return lexical_scope; } +static bool pform_at_module_level() +{ + if (pform_cur_generate) + if (pform_cur_generate->lexical_scope) + return false; + else + return true; + else + if (lexical_scope->pscope_parent()) + return false; + else + return true; +} + PWire*pform_get_wire_in_scope(perm_string name) { /* Note that if we are processing a generate, then the @@ -1293,6 +1309,13 @@ void pform_make_pgassign_list(svector*alist, void pform_make_reginit(const struct vlltype&li, perm_string name, PExpr*expr) { + if (! pform_at_module_level()) { + VLerror(li, "variable declaration assignments are only " + "allowed at the module level."); + delete expr; + return; + } + PWire*cur = pform_get_wire_in_scope(name); if (cur == 0) { VLerror(li, "internal error: reginit to non-register?"); diff --git a/symbol_search.cc b/symbol_search.cc index bc723d81d..0355fb6bb 100644 --- a/symbol_search.cc +++ b/symbol_search.cc @@ -28,7 +28,8 @@ /* * Search for the hierarchical name. */ -NetScope*symbol_search(Design*des, NetScope*scope, pform_name_t path, +NetScope*symbol_search(const LineInfo*li, Design*des, NetScope*scope, + pform_name_t path, NetNet*&net, const NetExpr*&par, NetEvent*&eve, @@ -57,6 +58,13 @@ NetScope*symbol_search(Design*des, NetScope*scope, pform_name_t path, return 0; scope = des->find_scope(scope, path_list); + + if (scope->is_auto() && li) { + cerr << li->get_fileline() << ": error: Hierarchical " + "reference to automatically allocated item " + "`" << key << "' in path `" << path << "'" << endl; + des->errors += 1; + } } while (scope) { diff --git a/tgt-vhdl/cast.cc b/tgt-vhdl/cast.cc index e3006e9e0..d01016c6a 100644 --- a/tgt-vhdl/cast.cc +++ b/tgt-vhdl/cast.cc @@ -198,11 +198,11 @@ vhdl_expr *vhdl_expr::resize(int newwidth) else return this; // Doesn't make sense to resize non-vector type - vhdl_fcall *resize = new vhdl_fcall("Resize", rtype); - resize->add_expr(this); - resize->add_expr(new vhdl_const_int(newwidth)); + vhdl_fcall *resizef = new vhdl_fcall("Resize", rtype); + resizef->add_expr(this); + resizef->add_expr(new vhdl_const_int(newwidth)); - return resize; + return resizef; } vhdl_expr *vhdl_const_int::to_vector(vhdl_type_name_t name, int w) diff --git a/tgt-vhdl/display.cc b/tgt-vhdl/display.cc index 05bbe944a..5073ab634 100644 --- a/tgt-vhdl/display.cc +++ b/tgt-vhdl/display.cc @@ -157,10 +157,10 @@ int draw_stask_display(vhdl_procedural *proc, stmt_container *container, // function in VHDL assert(i < count); - ivl_expr_t net = ivl_stmt_parm(stmt, i++); - assert(net); + ivl_expr_t netp = ivl_stmt_parm(stmt, i++); + assert(netp); - vhdl_expr *base = translate_expr(net); + vhdl_expr *base = translate_expr(netp); if (NULL == base) return 1; diff --git a/tgt-vhdl/scope.cc b/tgt-vhdl/scope.cc index 116382eeb..2d4272049 100644 --- a/tgt-vhdl/scope.cc +++ b/tgt-vhdl/scope.cc @@ -698,11 +698,11 @@ 
static int draw_constant_drivers(ivl_scope_t scope, void *_parent) for (int i = 0; i < nsigs; i++) { ivl_signal_t sig = ivl_scope_sig(scope, i); - for (unsigned i = ivl_signal_array_base(sig); - i < ivl_signal_array_count(sig); - i++) { + for (unsigned j = ivl_signal_array_base(sig); + j < ivl_signal_array_count(sig); + j++) { // Make sure the nexus code is generated - ivl_nexus_t nex = ivl_signal_nex(sig, i); + ivl_nexus_t nex = ivl_signal_nex(sig, j); seen_nexus(nex); nexus_private_t *priv = @@ -712,7 +712,7 @@ static int draw_constant_drivers(ivl_scope_t scope, void *_parent) vhdl_scope *arch_scope = ent->get_arch()->get_scope(); if (priv->const_driver) { - assert(i == 0); // TODO: Make work for more words + assert(j == 0); // TODO: Make work for more words vhdl_var_ref *ref = nexus_to_var_ref(arch_scope, nex); diff --git a/tgt-vhdl/stmt.cc b/tgt-vhdl/stmt.cc index 738646ec1..0e86ebf6c 100644 --- a/tgt-vhdl/stmt.cc +++ b/tgt-vhdl/stmt.cc @@ -424,8 +424,8 @@ static int draw_wait(vhdl_procedural *_proc, stmt_container *container, ivl_event_t event = ivl_stmt_events(stmt, i); int nany = ivl_event_nany(event); - for (int i = 0; i < nany; i++) { - ivl_nexus_t nexus = ivl_event_any(event, i); + for (int j = 0; j < nany; j++) { + ivl_nexus_t nexus = ivl_event_any(event, j); vhdl_var_ref *ref = nexus_to_var_ref(proc->get_scope(), nexus); wait->add_sensitivity(ref->get_name()); @@ -441,8 +441,8 @@ static int draw_wait(vhdl_procedural *_proc, stmt_container *container, ivl_event_t event = ivl_stmt_events(stmt, i); int nany = ivl_event_nany(event); - for (int i = 0; i < nany; i++) { - ivl_nexus_t nexus = ivl_event_any(event, i); + for (int j = 0; j < nany; j++) { + ivl_nexus_t nexus = ivl_event_any(event, j); vhdl_var_ref *ref = nexus_to_var_ref(proc->get_scope(), nexus); ref->set_name(ref->get_name() + "'Event"); @@ -450,8 +450,8 @@ static int draw_wait(vhdl_procedural *_proc, stmt_container *container, } int nneg = ivl_event_nneg(event); - for (int i = 0; i < nneg; i++) { - ivl_nexus_t nexus = ivl_event_neg(event, i); + for (int j = 0; j < nneg; j++) { + ivl_nexus_t nexus = ivl_event_neg(event, j); vhdl_var_ref *ref = nexus_to_var_ref(proc->get_scope(), nexus); vhdl_fcall *detect = new vhdl_fcall("falling_edge", vhdl_type::boolean()); @@ -461,8 +461,8 @@ static int draw_wait(vhdl_procedural *_proc, stmt_container *container, } int npos = ivl_event_npos(event); - for (int i = 0; i < npos; i++) { - ivl_nexus_t nexus = ivl_event_pos(event, i); + for (int j = 0; j < npos; j++) { + ivl_nexus_t nexus = ivl_event_pos(event, j); vhdl_var_ref *ref = nexus_to_var_ref(proc->get_scope(), nexus); vhdl_fcall *detect = new vhdl_fcall("rising_edge", vhdl_type::boolean()); @@ -501,12 +501,12 @@ static int draw_if(vhdl_procedural *proc, stmt_container *container, return 0; } -static int draw_case(vhdl_procedural *proc, stmt_container *container, - ivl_statement_t stmt, bool is_last) +static vhdl_var_ref *draw_case_test(vhdl_procedural *proc, stmt_container *container, + ivl_statement_t stmt) { vhdl_expr *test = translate_expr(ivl_stmt_cond_expr(stmt)); if (NULL == test) - return 1; + return NULL; // VHDL case expressions are required to be quite simple: variable // references or slices. 
So we may need to create a temporary @@ -523,8 +523,18 @@ static int draw_case(vhdl_procedural *proc, stmt_container *container, vhdl_var_ref *tmp_ref = new vhdl_var_ref(tmp_name, NULL); container->add_stmt(new vhdl_assign_stmt(tmp_ref, test)); - test = new vhdl_var_ref(tmp_name, test_type); + return new vhdl_var_ref(tmp_name, test_type); } + else + return dynamic_cast(test); +} + +static int draw_case(vhdl_procedural *proc, stmt_container *container, + ivl_statement_t stmt, bool is_last) +{ + vhdl_var_ref *test = draw_case_test(proc, container, stmt); + if (NULL == test) + return 1; vhdl_case_stmt *vhdlcase = new vhdl_case_stmt(test); container->add_stmt(vhdlcase); @@ -565,6 +575,100 @@ static int draw_case(vhdl_procedural *proc, stmt_container *container, return 0; } +/* + * A casex statement cannot be directly translated to a VHDL case + * statement as VHDL does not treat the don't-care bit as special. + * The solution here is to generate an if statement from the casex + * which compares only the non-don't-care bit positions. + */ +int draw_casezx(vhdl_procedural *proc, stmt_container *container, + ivl_statement_t stmt, bool is_last) +{ + vhdl_var_ref *test = draw_case_test(proc, container, stmt); + if (NULL == test) + return 1; + + vhdl_if_stmt *result = NULL; + + int nbranches = ivl_stmt_case_count(stmt); + for (int i = 0; i < nbranches; i++) { + stmt_container *where = NULL; + + ivl_expr_t net = ivl_stmt_case_expr(stmt, i); + if (net) { + // The net must be a constant value otherwise we can't + // generate the terms for the comparison expression + if (ivl_expr_type(net) != IVL_EX_NUMBER) { + error("Sorry, only casex statements with constant labels can " + "be translated to VHDL"); + return 1; + } + + const char *bits = ivl_expr_bits(net); + + vhdl_binop_expr *all = + new vhdl_binop_expr(VHDL_BINOP_AND, vhdl_type::boolean()); + for (unsigned i = 0; i < ivl_expr_width(net); i++) { + switch (bits[i]) { + case '?': + case 'z': + case 'x': + // Ignore it + break; + default: + { + // Generate a comparison for this bit position + vhdl_binop_expr *cmp = + new vhdl_binop_expr(VHDL_BINOP_EQ, vhdl_type::boolean()); + + vhdl_type *type = vhdl_type::nunsigned(ivl_expr_width(net)); + vhdl_var_ref *lhs = + new vhdl_var_ref(test->get_name().c_str(), type); + lhs->set_slice(new vhdl_const_int(i)); + + cmp->add_expr(lhs); + cmp->add_expr(new vhdl_const_bit(bits[i])); + + all->add_expr(cmp); + } + } + } + + if (result) + where = result->add_elsif(all); + else { + result = new vhdl_if_stmt(all); + where = result->get_then_container(); + } + } + else { + // This the default case and therefore the `else' branch + assert(result); + where = result->get_else_container(); + } + + // `where' now points to a branch of an if statement which + // corresponds to this casex/z branch + assert(where); + draw_stmt(proc, where, ivl_stmt_case_stmt(stmt, i), is_last); + } + + // Add a comment to say that this corresponds to a casex/z statement + // as this may not be obvious + ostringstream ss; + ss << "Generated from case" + << (ivl_statement_type(stmt) == IVL_ST_CASEX ? 
'x' : 'z') + << " statement at " << ivl_stmt_file(stmt) << ":" << ivl_stmt_lineno(stmt); + result->set_comment(ss.str()); + + container->add_stmt(result); + + // We don't actually use the generated `test' expression + delete test; + + return 0; +} + int draw_while(vhdl_procedural *proc, stmt_container *container, ivl_statement_t stmt) { @@ -685,11 +789,8 @@ int draw_stmt(vhdl_procedural *proc, stmt_container *container, error("disable statement cannot be translated to VHDL"); return 1; case IVL_ST_CASEX: - error("casex statement cannot be translated to VHDL"); - return 1; case IVL_ST_CASEZ: - error("casez statement cannot be translated to VHDL"); - return 1; + return draw_casezx(proc, container, stmt, is_last); case IVL_ST_FORK: error("fork statement cannot be translated to VHDL"); return 1; diff --git a/tgt-vhdl/vhdl_syntax.cc b/tgt-vhdl/vhdl_syntax.cc index 5ca30e1fc..b40176b85 100644 --- a/tgt-vhdl/vhdl_syntax.cc +++ b/tgt-vhdl/vhdl_syntax.cc @@ -664,12 +664,30 @@ vhdl_if_stmt::~vhdl_if_stmt() delete test_; } +stmt_container *vhdl_if_stmt::add_elsif(vhdl_expr *test) +{ + elsif ef = { test, new stmt_container }; + elsif_parts_.push_back(ef); + return ef.container; +} + void vhdl_if_stmt::emit(std::ostream &of, int level) const { + emit_comment(of, level); + of << "if "; test_->emit(of, level); of << " then"; then_part_.emit(of, level); + + std::list::const_iterator it; + for (it = elsif_parts_.begin(); it != elsif_parts_.end(); ++it) { + of << "elsif "; + (*it).test->emit(of, level); + of << " then"; + (*it).container->emit(of, level); + } + if (!else_part_.empty()) { of << "else"; else_part_.emit(of, level); diff --git a/tgt-vhdl/vhdl_syntax.hh b/tgt-vhdl/vhdl_syntax.hh index 0c0d9bf25..9b8f1a325 100644 --- a/tgt-vhdl/vhdl_syntax.hh +++ b/tgt-vhdl/vhdl_syntax.hh @@ -427,10 +427,17 @@ public: stmt_container *get_then_container() { return &then_part_; } stmt_container *get_else_container() { return &else_part_; } + stmt_container *add_elsif(vhdl_expr *test); void emit(std::ostream &of, int level) const; private: + struct elsif { + vhdl_expr *test; + stmt_container *container; + }; + vhdl_expr *test_; stmt_container then_part_, else_part_; + std::list elsif_parts_; }; diff --git a/tgt-vvp/eval_real.c b/tgt-vvp/eval_real.c index de9ce577e..f2b577ba1 100644 --- a/tgt-vvp/eval_real.c +++ b/tgt-vvp/eval_real.c @@ -354,23 +354,17 @@ static int draw_signal_real_real(ivl_expr_t exp) { ivl_signal_t sig = ivl_expr_signal(exp); int res = allocate_word(); - unsigned long word = 0; - if (ivl_signal_dimensions(sig) > 0) { - ivl_expr_t ix = ivl_expr_oper1(exp); - if (!number_is_immediate(ix, IMM_WID, 0)) { - /* XXXX Need to generate a %load/ar instruction. */ - assert(0); - return res; - } - - /* The index is constant, so we can return to direct - readout with the specific word selected. 
*/ - word = get_number_immediate(ix); + if (ivl_signal_dimensions(sig) == 0) { + fprintf(vvp_out, " %%load/wr %d, v%p_0;\n", res, sig); + return res; } - fprintf(vvp_out, " %%load/wr %d, v%p_%lu;\n", res, sig, word); - + ivl_expr_t word_ex = ivl_expr_oper1(exp); + int word_ix = allocate_word(); + draw_eval_expr_into_integer(word_ex, word_ix); + fprintf(vvp_out, " %%load/ar %d, v%p, %d;\n", res, sig, word_ix); + clr_word(word_ix); return res; } diff --git a/tgt-vvp/vvp_process.c b/tgt-vvp/vvp_process.c index 1d643454f..eacf35547 100644 --- a/tgt-vvp/vvp_process.c +++ b/tgt-vvp/vvp_process.c @@ -497,16 +497,30 @@ static int show_stmt_assign_sig_real(ivl_statement_t net) ivl_signal_t var; res = draw_eval_real(ivl_stmt_rval(net)); - clr_word(res); assert(ivl_stmt_lvals(net) == 1); lval = ivl_stmt_lval(net, 0); var = ivl_lval_sig(lval); assert(var != 0); - assert(ivl_signal_dimensions(var) == 0); + if (ivl_signal_dimensions(var) == 0) { + clr_word(res); + fprintf(vvp_out, " %%set/wr v%p_0, %d;\n", var, res); + return 0; + } - fprintf(vvp_out, " %%set/wr v%p_0, %d;\n", var, res); + // For now, only support 1-dimensional arrays. + assert(ivl_signal_dimensions(var) == 1); + + // Calculate the word index into an index register + ivl_expr_t word_ex = ivl_lval_idx(lval); + int word_ix = allocate_word(); + draw_eval_expr_into_integer(word_ex, word_ix); + // Generate an assignment to write to the array. + fprintf(vvp_out, " %%set/ar v%p, %d, %d;\n", var, word_ix, res); + + clr_word(res); + clr_word(word_ix); return 0; } diff --git a/vpi/sys_lxt.c b/vpi/sys_lxt.c index 6521d749b..b3bdb73f7 100644 --- a/vpi/sys_lxt.c +++ b/vpi/sys_lxt.c @@ -546,7 +546,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) case vpiTimeVar: case vpiReg: type = "reg"; } - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); nexus_id = vpi_get(_vpiNexusId, item); @@ -593,7 +593,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) case vpiRealVar: - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); { char*tmp = create_full_name(name); diff --git a/vpi/sys_lxt2.c b/vpi/sys_lxt2.c index 797078502..73135db04 100644 --- a/vpi/sys_lxt2.c +++ b/vpi/sys_lxt2.c @@ -552,7 +552,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) case vpiTimeVar: case vpiReg: type = "reg"; } - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); nexus_id = vpi_get(_vpiNexusId, item); @@ -603,7 +603,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) case vpiRealVar: - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); { char*tmp = create_full_name(name); diff --git a/vpi/sys_vcd.c b/vpi/sys_vcd.c index 222745167..c77044ed4 100644 --- a/vpi/sys_vcd.c +++ b/vpi/sys_vcd.c @@ -513,7 +513,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) break; } - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; name = vpi_get_str(vpiName, item); prefix = is_escaped_id(name) ? "\\" : ""; @@ -578,7 +578,7 @@ static void scan_item(unsigned depth, vpiHandle item, int skip) break; } - if (skip) break; + if (skip || vpi_get(vpiAutomatic, item)) break; /* Declare the variable in the VCD file. 
*/ name = vpi_get_str(vpiName, item); diff --git a/vpi_user.h b/vpi_user.h index d5660b453..f1157e1ac 100644 --- a/vpi_user.h +++ b/vpi_user.h @@ -348,6 +348,7 @@ typedef struct t_vpi_delay { # define vpiSysFuncReal vpiRealFunc # define vpiSysFuncTime vpiTimeFunc # define vpiSysFuncSized vpiSizedFunc +#define vpiAutomatic 50 #define vpiConstantSelect 53 #define vpiSigned 65 /* IVL private properties */ diff --git a/vvp/arith.cc b/vvp/arith.cc index 126cfdaa1..3945ea076 100644 --- a/vvp/arith.cc +++ b/vvp/arith.cc @@ -63,7 +63,8 @@ vvp_arith_abs::~vvp_arith_abs() { } -void vvp_arith_abs::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_abs::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { vvp_vector4_t out (bit.size(), BIT4_0);; @@ -81,13 +82,14 @@ void vvp_arith_abs::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) break; } - vvp_send_vec4(ptr.ptr()->out, out); + vvp_send_vec4(ptr.ptr()->out, out, 0); } -void vvp_arith_abs::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_abs::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { double out = fabs(bit); - vvp_send_real(ptr.ptr()->out, out); + vvp_send_real(ptr.ptr()->out, out, 0); } vvp_arith_cast_int::vvp_arith_cast_int(unsigned wid) @@ -99,9 +101,10 @@ vvp_arith_cast_int::~vvp_arith_cast_int() { } -void vvp_arith_cast_int::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_cast_int::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { - vvp_send_vec4(ptr.ptr()->out, vvp_vector4_t(wid_, bit)); + vvp_send_vec4(ptr.ptr()->out, vvp_vector4_t(wid_, bit), 0); } vvp_arith_cast_real::vvp_arith_cast_real(bool signed_flag) @@ -113,11 +116,12 @@ vvp_arith_cast_real::~vvp_arith_cast_real() { } -void vvp_arith_cast_real::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_cast_real::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { double val; vector4_to_value(bit, val, signed_); - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } // Division @@ -135,21 +139,22 @@ void vvp_arith_div::wide4_(vvp_net_ptr_t ptr) { vvp_vector2_t a2 (op_a_); if (a2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t b2 (op_b_); if (b2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t res2 = a2 / b2; - vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res2, wid_)); + vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res2, wid_), 0); } -void vvp_arith_div::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_div::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -160,13 +165,13 @@ void vvp_arith_div::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) unsigned long a; if (! vector4_to_value(op_a_, a)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } unsigned long b; if (! 
vector4_to_value(op_b_, b)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -202,7 +207,7 @@ void vvp_arith_div::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) val >>= 1; } - vvp_send_vec4(ptr.ptr()->out, vval); + vvp_send_vec4(ptr.ptr()->out, vval, 0); } @@ -219,21 +224,22 @@ void vvp_arith_mod::wide_(vvp_net_ptr_t ptr) { vvp_vector2_t a2 (op_a_); if (a2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t b2 (op_b_); if (b2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t res = a2 % b2; - vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res, res.size())); + vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res, res.size()), 0); } -void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -244,13 +250,13 @@ void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) unsigned long a; if (! vector4_to_value(op_a_, a)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } unsigned long b; if (! vector4_to_value(op_b_, b)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -275,7 +281,7 @@ void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) for (unsigned idx = 0 ; idx < wid_ ; idx += 1) xval.set_bit(idx, BIT4_X); - vvp_send_vec4(ptr.ptr()->out, xval); + vvp_send_vec4(ptr.ptr()->out, xval, 0); return; } @@ -295,7 +301,7 @@ void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) val >>= 1; } - vvp_send_vec4(ptr.ptr()->out, vval); + vvp_send_vec4(ptr.ptr()->out, vval, 0); } @@ -316,17 +322,18 @@ void vvp_arith_mult::wide_(vvp_net_ptr_t ptr) vvp_vector2_t b2 (op_b_); if (a2.is_NaN() || b2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } vvp_vector2_t result = a2 * b2; vvp_vector4_t res4 = vector2_to_vector4(result, wid_); - vvp_send_vec4(ptr.ptr()->out, res4); + vvp_send_vec4(ptr.ptr()->out, res4, 0); } -void vvp_arith_mult::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_mult::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -337,13 +344,13 @@ void vvp_arith_mult::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) long a; if (! vector4_to_value(op_a_, a, false, true)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } long b; if (! 
vector4_to_value(op_b_, b, false, true)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -360,7 +367,7 @@ void vvp_arith_mult::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) val >>= 1; } - vvp_send_vec4(ptr.ptr()->out, vval); + vvp_send_vec4(ptr.ptr()->out, vval, 0); } @@ -375,14 +382,15 @@ vvp_arith_pow::~vvp_arith_pow() { } -void vvp_arith_pow::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_pow::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); vvp_vector4_t res4; if (signed_flag_) { if (op_a_.has_xz() || op_b_.has_xz()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -396,7 +404,7 @@ void vvp_arith_pow::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_vector2_t b2 (op_b_); if (a2.is_NaN() || b2.is_NaN()) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -404,7 +412,7 @@ void vvp_arith_pow::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) res4 = vector2_to_vector4(result, wid_); } - vvp_send_vec4(ptr.ptr()->out, res4); + vvp_send_vec4(ptr.ptr()->out, res4, 0); } @@ -419,7 +427,8 @@ vvp_arith_sum::~vvp_arith_sum() { } -void vvp_arith_sum::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_sum::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -438,14 +447,14 @@ void vvp_arith_sum::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_bit4_t cur = add_with_carry(a, b, carry); if (cur == BIT4_X) { - vvp_send_vec4(net->out, x_val_); + vvp_send_vec4(net->out, x_val_, 0); return; } value.set_bit(idx, cur); } - vvp_send_vec4(net->out, value); + vvp_send_vec4(net->out, value, 0); } vvp_arith_sub::vvp_arith_sub(unsigned wid) @@ -463,7 +472,8 @@ vvp_arith_sub::~vvp_arith_sub() * further reduce the operation to adding in the inverted value and * adding a correction. 
*/ -void vvp_arith_sub::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_arith_sub::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -482,14 +492,14 @@ void vvp_arith_sub::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_bit4_t cur = add_with_carry(a, b, carry); if (cur == BIT4_X) { - vvp_send_vec4(net->out, x_val_); + vvp_send_vec4(net->out, x_val_, 0); return; } value.set_bit(idx, cur); } - vvp_send_vec4(net->out, value); + vvp_send_vec4(net->out, value, 0); } vvp_cmp_eeq::vvp_cmp_eeq(unsigned wid) @@ -497,7 +507,8 @@ vvp_cmp_eeq::vvp_cmp_eeq(unsigned wid) { } -void vvp_cmp_eeq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_eeq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -513,7 +524,7 @@ void vvp_cmp_eeq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_net_t*net = ptr.ptr(); - vvp_send_vec4(net->out, eeq); + vvp_send_vec4(net->out, eeq, 0); } vvp_cmp_nee::vvp_cmp_nee(unsigned wid) @@ -521,7 +532,8 @@ vvp_cmp_nee::vvp_cmp_nee(unsigned wid) { } -void vvp_cmp_nee::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_nee::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -537,7 +549,7 @@ void vvp_cmp_nee::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) vvp_net_t*net = ptr.ptr(); - vvp_send_vec4(net->out, eeq); + vvp_send_vec4(net->out, eeq, 0); } vvp_cmp_eq::vvp_cmp_eq(unsigned wid) @@ -551,7 +563,8 @@ vvp_cmp_eq::vvp_cmp_eq(unsigned wid) * there are X/Z bits anywhere in A or B, the result is X. Finally, * the result is 1. */ -void vvp_cmp_eq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_eq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -583,7 +596,7 @@ void vvp_cmp_eq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) } vvp_net_t*net = ptr.ptr(); - vvp_send_vec4(net->out, res); + vvp_send_vec4(net->out, res, 0); } @@ -598,7 +611,8 @@ vvp_cmp_ne::vvp_cmp_ne(unsigned wid) * there are X/Z bits anywhere in A or B, the result is X. Finally, * the result is 0. 
*/ -void vvp_cmp_ne::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_ne::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -630,7 +644,7 @@ void vvp_cmp_ne::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) } vvp_net_t*net = ptr.ptr(); - vvp_send_vec4(net->out, res); + vvp_send_vec4(net->out, res, 0); } @@ -651,7 +665,7 @@ void vvp_cmp_gtge_base_::recv_vec4_base_(vvp_net_ptr_t ptr, : compare_gtge(op_a_, op_b_, out_if_equal); vvp_vector4_t val (1); val.set_bit(0, out); - vvp_send_vec4(ptr.ptr()->out, val); + vvp_send_vec4(ptr.ptr()->out, val, 0); return; } @@ -662,7 +676,8 @@ vvp_cmp_ge::vvp_cmp_ge(unsigned wid, bool flag) { } -void vvp_cmp_ge::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_ge::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { recv_vec4_base_(ptr, bit, BIT4_1); } @@ -672,7 +687,8 @@ vvp_cmp_gt::vvp_cmp_gt(unsigned wid, bool flag) { } -void vvp_cmp_gt::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_cmp_gt::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { recv_vec4_base_(ptr, bit, BIT4_0); } @@ -687,7 +703,8 @@ vvp_shiftl::~vvp_shiftl() { } -void vvp_shiftl::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_shiftl::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -695,7 +712,7 @@ void vvp_shiftl::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) unsigned long shift; if (! vector4_to_value(op_b_, shift)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -708,7 +725,7 @@ void vvp_shiftl::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) for (unsigned idx = shift ; idx < out.size() ; idx += 1) out.set_bit(idx, op_a_.value(idx-shift)); - vvp_send_vec4(ptr.ptr()->out, out); + vvp_send_vec4(ptr.ptr()->out, out, 0); } vvp_shiftr::vvp_shiftr(unsigned wid, bool signed_flag) @@ -720,7 +737,8 @@ vvp_shiftr::~vvp_shiftr() { } -void vvp_shiftr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_shiftr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -728,7 +746,7 @@ void vvp_shiftr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) unsigned long shift; if (! vector4_to_value(op_b_, shift)) { - vvp_send_vec4(ptr.ptr()->out, x_val_); + vvp_send_vec4(ptr.ptr()->out, x_val_, 0); return; } @@ -745,7 +763,7 @@ void vvp_shiftr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) for (unsigned idx = 0 ; idx < shift ; idx += 1) out.set_bit(idx+out.size()-shift, pad); - vvp_send_vec4(ptr.ptr()->out, out); + vvp_send_vec4(ptr.ptr()->out, out, 0); } @@ -780,12 +798,13 @@ vvp_arith_mult_real::~vvp_arith_mult_real() { } -void vvp_arith_mult_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_mult_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = op_a_ * op_b_; - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real power. */ @@ -797,12 +816,13 @@ vvp_arith_pow_real::~vvp_arith_pow_real() { } -void vvp_arith_pow_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_pow_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = pow(op_a_, op_b_); - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real division. 
*/ @@ -814,12 +834,13 @@ vvp_arith_div_real::~vvp_arith_div_real() { } -void vvp_arith_div_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_div_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = op_a_ / op_b_; - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real modulus. */ @@ -831,12 +852,13 @@ vvp_arith_mod_real::~vvp_arith_mod_real() { } -void vvp_arith_mod_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_mod_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = fmod(op_a_, op_b_); - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real summation. */ @@ -848,12 +870,13 @@ vvp_arith_sum_real::~vvp_arith_sum_real() { } -void vvp_arith_sum_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_sum_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = op_a_ + op_b_; - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real subtraction. */ @@ -865,12 +888,13 @@ vvp_arith_sub_real::~vvp_arith_sub_real() { } -void vvp_arith_sub_real::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_arith_sub_real::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); double val = op_a_ - op_b_; - vvp_send_real(ptr.ptr()->out, val); + vvp_send_real(ptr.ptr()->out, val, 0); } /* Real compare equal. */ @@ -878,7 +902,8 @@ vvp_cmp_eq_real::vvp_cmp_eq_real() { } -void vvp_cmp_eq_real::recv_real(vvp_net_ptr_t ptr, const double bit) +void vvp_cmp_eq_real::recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -886,7 +911,7 @@ void vvp_cmp_eq_real::recv_real(vvp_net_ptr_t ptr, const double bit) if (op_a_ == op_b_) res.set_bit(0, BIT4_1); else res.set_bit(0, BIT4_0); - vvp_send_vec4(ptr.ptr()->out, res); + vvp_send_vec4(ptr.ptr()->out, res, 0); } /* Real compare not equal. */ @@ -894,7 +919,8 @@ vvp_cmp_ne_real::vvp_cmp_ne_real() { } -void vvp_cmp_ne_real::recv_real(vvp_net_ptr_t ptr, const double bit) +void vvp_cmp_ne_real::recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -902,7 +928,7 @@ void vvp_cmp_ne_real::recv_real(vvp_net_ptr_t ptr, const double bit) if (op_a_ != op_b_) res.set_bit(0, BIT4_1); else res.set_bit(0, BIT4_0); - vvp_send_vec4(ptr.ptr()->out, res); + vvp_send_vec4(ptr.ptr()->out, res, 0); } /* Real compare greater than or equal. */ @@ -910,7 +936,8 @@ vvp_cmp_ge_real::vvp_cmp_ge_real() { } -void vvp_cmp_ge_real::recv_real(vvp_net_ptr_t ptr, const double bit) +void vvp_cmp_ge_real::recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -918,7 +945,7 @@ void vvp_cmp_ge_real::recv_real(vvp_net_ptr_t ptr, const double bit) if (op_a_ >= op_b_) res.set_bit(0, BIT4_1); else res.set_bit(0, BIT4_0); - vvp_send_vec4(ptr.ptr()->out, res); + vvp_send_vec4(ptr.ptr()->out, res, 0); } /* Real compare greater than. 
*/ @@ -926,7 +953,8 @@ vvp_cmp_gt_real::vvp_cmp_gt_real() { } -void vvp_cmp_gt_real::recv_real(vvp_net_ptr_t ptr, const double bit) +void vvp_cmp_gt_real::recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t) { dispatch_operand_(ptr, bit); @@ -934,5 +962,5 @@ void vvp_cmp_gt_real::recv_real(vvp_net_ptr_t ptr, const double bit) if (op_a_ > op_b_) res.set_bit(0, BIT4_1); else res.set_bit(0, BIT4_0); - vvp_send_vec4(ptr.ptr()->out, res); + vvp_send_vec4(ptr.ptr()->out, res, 0); } diff --git a/vvp/arith.h b/vvp/arith.h index cd05355ce..4f5812448 100644 --- a/vvp/arith.h +++ b/vvp/arith.h @@ -54,8 +54,10 @@ class vvp_arith_abs : public vvp_net_fun_t { explicit vvp_arith_abs(); ~vvp_arith_abs(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); private: }; @@ -65,7 +67,8 @@ class vvp_arith_cast_int : public vvp_net_fun_t { explicit vvp_arith_cast_int(unsigned wid); ~vvp_arith_cast_int(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); private: unsigned wid_; @@ -76,7 +79,8 @@ class vvp_arith_cast_real : public vvp_net_fun_t { explicit vvp_arith_cast_real(bool signed_flag); ~vvp_arith_cast_real(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: bool signed_; @@ -87,7 +91,8 @@ class vvp_arith_div : public vvp_arith_ { public: explicit vvp_arith_div(unsigned wid, bool signed_flag); ~vvp_arith_div(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: void wide4_(vvp_net_ptr_t ptr); bool signed_flag_; @@ -98,7 +103,8 @@ class vvp_arith_mod : public vvp_arith_ { public: explicit vvp_arith_mod(unsigned wid, bool signed_flag); ~vvp_arith_mod(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: void wide_(vvp_net_ptr_t ptr); bool signed_flag_; @@ -114,7 +120,8 @@ class vvp_cmp_eeq : public vvp_arith_ { public: explicit vvp_cmp_eeq(unsigned wid); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -122,7 +129,8 @@ class vvp_cmp_nee : public vvp_arith_ { public: explicit vvp_cmp_nee(unsigned wid); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -130,7 +138,8 @@ class vvp_cmp_eq : public vvp_arith_ { public: explicit vvp_cmp_eq(unsigned wid); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -138,7 +147,8 @@ class vvp_cmp_ne : public vvp_arith_ { public: explicit vvp_cmp_ne(unsigned wid); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -165,7 +175,8 @@ class vvp_cmp_ge : public vvp_cmp_gtge_base_ { public: explicit vvp_cmp_ge(unsigned wid, bool signed_flag); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -174,7 +185,8 @@ class vvp_cmp_gt : public 
vvp_cmp_gtge_base_ { public: explicit vvp_cmp_gt(unsigned wid, bool signed_flag); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); }; /* @@ -187,7 +199,8 @@ class vvp_arith_mult : public vvp_arith_ { public: explicit vvp_arith_mult(unsigned wid); ~vvp_arith_mult(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: void wide_(vvp_net_ptr_t ptr); }; @@ -197,7 +210,8 @@ class vvp_arith_pow : public vvp_arith_ { public: explicit vvp_arith_pow(unsigned wid, bool signed_flag); ~vvp_arith_pow(); - void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t); private: bool signed_flag_; }; @@ -207,7 +221,8 @@ class vvp_arith_sub : public vvp_arith_ { public: explicit vvp_arith_sub(unsigned wid); ~vvp_arith_sub(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -216,7 +231,8 @@ class vvp_arith_sum : public vvp_arith_ { public: explicit vvp_arith_sum(unsigned wid); ~vvp_arith_sum(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); }; @@ -225,7 +241,8 @@ class vvp_shiftl : public vvp_arith_ { public: explicit vvp_shiftl(unsigned wid); ~vvp_shiftl(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); }; class vvp_shiftr : public vvp_arith_ { @@ -233,7 +250,8 @@ class vvp_shiftr : public vvp_arith_ { public: explicit vvp_shiftr(unsigned wid, bool signed_flag); ~vvp_shiftr(); - virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + virtual void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); private: bool signed_flag_; @@ -263,7 +281,8 @@ class vvp_arith_sum_real : public vvp_arith_real_ { public: explicit vvp_arith_sum_real(); ~vvp_arith_sum_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_arith_div_real : public vvp_arith_real_ { @@ -271,7 +290,8 @@ class vvp_arith_div_real : public vvp_arith_real_ { public: explicit vvp_arith_div_real(); ~vvp_arith_div_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_arith_mod_real : public vvp_arith_real_ { @@ -279,7 +299,8 @@ class vvp_arith_mod_real : public vvp_arith_real_ { public: explicit vvp_arith_mod_real(); ~vvp_arith_mod_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_arith_mult_real : public vvp_arith_real_ { @@ -287,7 +308,8 @@ class vvp_arith_mult_real : public vvp_arith_real_ { public: explicit vvp_arith_mult_real(); ~vvp_arith_mult_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_arith_pow_real : public vvp_arith_real_ { @@ -295,7 +317,8 @@ class vvp_arith_pow_real : public vvp_arith_real_ { public: explicit vvp_arith_pow_real(); ~vvp_arith_pow_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class 
vvp_arith_sub_real : public vvp_arith_real_ { @@ -303,35 +326,40 @@ class vvp_arith_sub_real : public vvp_arith_real_ { public: explicit vvp_arith_sub_real(); ~vvp_arith_sub_real(); - void recv_real(vvp_net_ptr_t ptr, double bit); + void recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t); }; class vvp_cmp_eq_real : public vvp_arith_real_ { public: explicit vvp_cmp_eq_real(); - void recv_real(vvp_net_ptr_t ptr, const double bit); + void recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t); }; class vvp_cmp_ne_real : public vvp_arith_real_ { public: explicit vvp_cmp_ne_real(); - void recv_real(vvp_net_ptr_t ptr, const double bit); + void recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t); }; class vvp_cmp_ge_real : public vvp_arith_real_ { public: explicit vvp_cmp_ge_real(); - void recv_real(vvp_net_ptr_t ptr, const double bit); + void recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t); }; class vvp_cmp_gt_real : public vvp_arith_real_ { public: explicit vvp_cmp_gt_real(); - void recv_real(vvp_net_ptr_t ptr, const double bit); + void recv_real(vvp_net_ptr_t ptr, const double bit, + vvp_context_t); }; #endif diff --git a/vvp/array.cc b/vvp/array.cc index b480b6004..0cd51b5ff 100644 --- a/vvp/array.cc +++ b/vvp/array.cc @@ -53,7 +53,28 @@ vvp_array_t array_find(const char*label) /* * The vpiArray object holds an array of vpi objects that themselves -* represent the words of the array. The vpi_array_t is a pointer to this. +* represent the words of the array. The vpi_array_t is a pointer to +* a struct __vpiArray. +* +* The details of the implementation depend on what this is an array +* of. The easiest case is if this is an array of nets. +* +* - Array of Nets: +* If this represents an array of nets, then the nets member points to +* an array of vpiHandle objects. Each vpiHandle is a word. This is +* done because typically each word of a net array is simultaneously +* driven and accessed by other means, so there is no advantage to +* compacting the array in any other way. +* +* - Array of vector4 words. +* In this case, the nets pointer is nil, and the vals4 member points +* to a vvp_vector4array_t object that is a compact representation of +* an array of vvp_vector4_t vectors. +* +* - Array of real variables +* The valsr member points to a vvp_realarray_t object that has an +* array of double variables. This is very much like the way the +* vector4 array works. */ struct __vpiArray { __vpiArray() { } @@ -70,7 +91,8 @@ struct __vpiArray { // If this is a net array, nets lists the handles. vpiHandle*nets; // If this is a var array, then these are used instead of nets. - vvp_vector4array_t *vals; + vvp_vector4array_t *vals4; + vvp_realarray_t *valsr; struct __vpiArrayWord*vals_words; class vvp_fun_arrayport*ports_; @@ -127,7 +149,7 @@ struct __vpiArrayVthrA { /* Get the array word size. This has only been checked for reg arrays. 
*/ unsigned get_array_word_size(vvp_array_t array) { - assert(array->vals); + assert(array->vals4); return array->vals_width; } @@ -332,6 +354,9 @@ static int vpi_array_get(int code, vpiHandle ref) case vpiSize: return (int) obj->array_count; + case vpiAutomatic: + return (int) obj->scope->is_automatic; + default: return 0; } @@ -421,7 +446,7 @@ static int vpi_array_var_word_get(int code, vpiHandle ref) switch (code) { case vpiSize: - return (int) parent->vals->width(); + return (int) parent->vals4->width(); case vpiLeftRange: return parent->msb.value; @@ -458,9 +483,9 @@ static void vpi_array_var_word_get_value(vpiHandle ref, p_vpi_value value) assert(obj); unsigned index = decode_array_word_pointer(obj, parent); - unsigned width = parent->vals->width(); + unsigned width = parent->vals4->width(); - vpip_vec4_get_value(parent->vals->get_word(index), width, + vpip_vec4_get_value(parent->vals4->get_word(index), width, parent->signed_flag, value); } @@ -536,7 +561,7 @@ static vpiHandle array_iterator_scan(vpiHandle ref, int) if (obj->array->nets) return obj->array->nets[obj->next]; - assert(obj->array->vals); + assert(obj->array->vals4); if (obj->array->vals_words == 0) array_make_vals_words(obj->array); @@ -699,10 +724,10 @@ void array_set_word(vvp_array_t arr, if (address >= arr->array_count) return; - if (arr->vals) { + if (arr->vals4) { assert(arr->nets == 0); if (part_off != 0 || val.size() != arr->vals_width) { - vvp_vector4_t tmp = arr->vals->get_word(address); + vvp_vector4_t tmp = arr->vals4->get_word(address); if ((part_off + val.size()) > tmp.size()) { cerr << "part_off=" << part_off << " val.size()=" << val.size() @@ -711,9 +736,9 @@ void array_set_word(vvp_array_t arr, assert(0); } tmp.set_vec(part_off, val); - arr->vals->set_word(address, tmp); + arr->vals4->set_word(address, tmp); } else { - arr->vals->set_word(address, val); + arr->vals4->set_word(address, val); } array_word_change(arr, address); return; @@ -727,19 +752,26 @@ void array_set_word(vvp_array_t arr, assert(vsig); vvp_net_ptr_t ptr (vsig->node, 0); - vvp_send_vec4_pv(ptr, val, part_off, val.size(), vpip_size(vsig)); + vvp_send_vec4_pv(ptr, val, part_off, val.size(), vpip_size(vsig), 0); array_word_change(arr, address); } +void array_set_word(vvp_array_t arr, unsigned address, double val) +{ + assert(arr->valsr!= 0); + assert(arr->nets == 0); + + arr->valsr->set_word(address, val); +} + vvp_vector4_t array_get_word(vvp_array_t arr, unsigned address) { - if (arr->vals) { + if (arr->vals4) { assert(arr->nets == 0); - - return arr->vals->get_word(address); + return arr->vals4->get_word(address); } - assert(arr->vals == 0); + assert(arr->vals4 == 0); assert(arr->nets != 0); if (address >= arr->array_count) { @@ -764,6 +796,26 @@ vvp_vector4_t array_get_word(vvp_array_t arr, unsigned address) return val; } +double array_get_word_r(vvp_array_t arr, unsigned address) +{ + if (arr->valsr) { + assert(arr->vals4 == 0); + assert(arr->nets == 0); + return arr->valsr->get_word(address); + } + + assert(arr->nets); + vpiHandle word = arr->nets[address]; + struct __vpiRealVar*vsig = vpip_realvar_from_handle(word); + assert(vsig); + vvp_fun_signal_real*sig = dynamic_cast (vsig->net->fun); + assert(sig); + + double val = sig->real_value(); + return val; + +} + static vpiHandle vpip_make_array(char*label, const char*name, int first_addr, int last_addr, bool signed_flag) @@ -789,7 +841,8 @@ static vpiHandle vpip_make_array(char*label, const char*name, // Start off now knowing if we are nets or variables. 
obj->nets = 0; - obj->vals = 0; + obj->vals4 = 0; + obj->valsr = 0; obj->vals_width = 0; vpip_make_dec_const(&obj->msb, 0); vpip_make_dec_const(&obj->lsb, 0); @@ -839,6 +892,19 @@ void array_attach_word(vvp_array_t array, unsigned long addr, vpiHandle word) sig->is_netarray = 1; sig->within.parent = &array->base; sig->id.index = vpip_make_dec_const(addr + array->first_addr.value); + return; + } + + if (struct __vpiRealVar*sig = (struct __vpiRealVar*)word) { + vvp_net_t*net = sig->net; + assert(net); + vvp_fun_signal_base*fun = dynamic_cast(net->fun); + assert(fun); + fun->attach_as_word(array, addr); + sig->is_netarray = 1; + sig->within.parent = &array->base; + sig->id.index = vpip_make_dec_const(addr + array->first_addr.value); + return; } } @@ -852,9 +918,13 @@ void compile_var_array(char*label, char*name, int last, int first, /* Make the words. */ arr->vals_width = labs(msb-lsb) + 1; - arr->vals = new vvp_vector4array_t(arr->vals_width, arr->array_count, - vpip_peek_current_scope()->is_automatic); - vpip_add_item_to_current_scope(arr->vals); + if (vpip_peek_current_scope()->is_automatic) { + arr->vals4 = new vvp_vector4array_aa(arr->vals_width, + arr->array_count); + } else { + arr->vals4 = new vvp_vector4array_sa(arr->vals_width, + arr->array_count); + } vpip_make_dec_const(&arr->msb, msb); vpip_make_dec_const(&arr->lsb, lsb); @@ -871,14 +941,16 @@ void compile_real_array(char*label, char*name, int last, int first, vpiHandle obj = vpip_make_array(label, name, first, last, true); struct __vpiArray*arr = ARRAY_HANDLE(obj); - vvp_array_t array = array_find(label); /* Make the words. */ - for (unsigned idx = 0 ; idx < arr->array_count ; idx += 1) { - char buf[64]; - snprintf(buf, sizeof buf, "%s_%u", label, idx); - compile_varw_real(strdup(buf), array, idx, msb, lsb); - } + arr->valsr = new vvp_realarray_t(arr->array_count); + arr->vals_width = 1; + + /* Do these even make sense for real arrays? These are the + part select of a vector, but the real value is not + vectorable. 
*/ + vpip_make_dec_const(&arr->msb, msb); + vpip_make_dec_const(&arr->lsb, lsb); count_real_arrays += 1; count_real_array_words += arr->array_count; @@ -908,11 +980,9 @@ class vvp_fun_arrayport : public vvp_net_fun_t { explicit vvp_fun_arrayport(vvp_array_t mem, vvp_net_t*net, long addr); ~vvp_fun_arrayport(); - void check_word_change(unsigned long addr); + virtual void check_word_change(unsigned long addr) = 0; - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); - - private: + protected: vvp_array_t arr_; vvp_net_t *net_; unsigned long addr_; @@ -938,7 +1008,37 @@ vvp_fun_arrayport::~vvp_fun_arrayport() { } -void vvp_fun_arrayport::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +class vvp_fun_arrayport_sa : public vvp_fun_arrayport { + + public: + explicit vvp_fun_arrayport_sa(vvp_array_t mem, vvp_net_t*net); + explicit vvp_fun_arrayport_sa(vvp_array_t mem, vvp_net_t*net, long addr); + ~vvp_fun_arrayport_sa(); + + void check_word_change(unsigned long addr); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); + + private: +}; + +vvp_fun_arrayport_sa::vvp_fun_arrayport_sa(vvp_array_t mem, vvp_net_t*net) +: vvp_fun_arrayport(mem, net) +{ +} + +vvp_fun_arrayport_sa::vvp_fun_arrayport_sa(vvp_array_t mem, vvp_net_t*net, long addr) +: vvp_fun_arrayport(mem, net, addr) +{ +} + +vvp_fun_arrayport_sa::~vvp_fun_arrayport_sa() +{ +} + +void vvp_fun_arrayport_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { bool addr_valid_flag; @@ -948,7 +1048,7 @@ void vvp_fun_arrayport::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) addr_valid_flag = vector4_to_value(bit, addr_); if (! addr_valid_flag) addr_ = arr_->array_count; - vvp_send_vec4(port.ptr()->out, array_get_word(arr_,addr_)); + vvp_send_vec4(port.ptr()->out, array_get_word(arr_,addr_), 0); break; default: @@ -957,13 +1057,111 @@ void vvp_fun_arrayport::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) } } -void vvp_fun_arrayport::check_word_change(unsigned long addr) +void vvp_fun_arrayport_sa::check_word_change(unsigned long addr) { if (addr != addr_) return; vvp_vector4_t bit = array_get_word(arr_, addr_); - vvp_send_vec4(net_->out, bit); + vvp_send_vec4(net_->out, bit, 0); +} + +class vvp_fun_arrayport_aa : public vvp_fun_arrayport, public automatic_hooks_s { + + public: + explicit vvp_fun_arrayport_aa(vvp_array_t mem, vvp_net_t*net); + explicit vvp_fun_arrayport_aa(vvp_array_t mem, vvp_net_t*net, long addr); + ~vvp_fun_arrayport_aa(); + + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + void check_word_change(unsigned long addr); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); + + private: + struct __vpiScope*context_scope_; + unsigned context_idx_; +}; + +vvp_fun_arrayport_aa::vvp_fun_arrayport_aa(vvp_array_t mem, vvp_net_t*net) +: vvp_fun_arrayport(mem, net) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_arrayport_aa::vvp_fun_arrayport_aa(vvp_array_t mem, vvp_net_t*net, long addr) +: vvp_fun_arrayport(mem, net, addr) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_arrayport_aa::~vvp_fun_arrayport_aa() +{ +} + +void vvp_fun_arrayport_aa::alloc_instance(vvp_context_t context) +{ + unsigned long*addr = new unsigned long; + vvp_set_context_item(context, context_idx_, addr); + + *addr = addr_; +} + +void 
vvp_fun_arrayport_aa::reset_instance(vvp_context_t context) +{ + unsigned long*addr = static_cast + (vvp_get_context_item(context, context_idx_)); + + *addr = addr_; +} + +void vvp_fun_arrayport_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + unsigned long*addr = static_cast + (vvp_get_context_item(context, context_idx_)); + + bool addr_valid_flag; + + switch (port.port()) { + + case 0: // Address input + addr_valid_flag = vector4_to_value(bit, *addr); + if (! addr_valid_flag) + *addr = arr_->array_count; + vvp_send_vec4(port.ptr()->out, array_get_word(arr_,*addr), + context); + break; + + default: + fprintf(stdout, "XXXX write ports not implemented.\n"); + assert(0); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} + +void vvp_fun_arrayport_aa::check_word_change(unsigned long addr) +{ + unsigned long*port_addr = static_cast + (vthread_get_wt_context_item(context_idx_)); + + if (addr != *port_addr) + return; + + vvp_vector4_t bit = array_get_word(arr_, addr); + vvp_send_vec4(net_->out, bit, vthread_get_wt_context()); } static void array_attach_port(vvp_array_t array, vvp_fun_arrayport*fun) @@ -1000,7 +1198,7 @@ void array_word_change(vvp_array_t array, unsigned long addr) if (cur->cb_data.cb_rtn != 0) { if (cur->cb_data.value) - vpip_vec4_get_value(array->vals->get_word(addr), + vpip_vec4_get_value(array->vals4->get_word(addr), array->vals_width, array->signed_flag, cur->cb_data.value); @@ -1046,9 +1244,15 @@ bool array_port_resolv_list_t::resolve(bool mes) vvp_fun_arrayport*fun; if (use_addr) - fun = new vvp_fun_arrayport(mem, ptr, addr); + if (vpip_peek_current_scope()->is_automatic) + fun = new vvp_fun_arrayport_aa(mem, ptr, addr); + else + fun = new vvp_fun_arrayport_sa(mem, ptr, addr); else - fun = new vvp_fun_arrayport(mem, ptr); + if (vpip_peek_current_scope()->is_automatic) + fun = new vvp_fun_arrayport_aa(mem, ptr); + else + fun = new vvp_fun_arrayport_sa(mem, ptr); ptr->fun = fun; array_attach_port(mem, fun); @@ -1130,7 +1334,7 @@ void compile_array_alias(char*label, char*name, char*src) // Share the words with the source array. 
obj->nets = mem->nets; - obj->vals = mem->vals; + obj->vals4 = mem->vals4; obj->ports_ = 0; diff --git a/vvp/array.h b/vvp/array.h index 7bac81088..167fc9387 100644 --- a/vvp/array.h +++ b/vvp/array.h @@ -39,12 +39,13 @@ extern void array_attach_word(vvp_array_t array, unsigned long addr, extern void array_alias_word(vvp_array_t array, unsigned long addr, vpiHandle word); -extern void array_set_word(vvp_array_t arr, - unsigned idx, - unsigned off, - vvp_vector4_t val); +extern void array_set_word(vvp_array_t arr, unsigned idx, + unsigned off, vvp_vector4_t val); +extern void array_set_word(vvp_array_t arr, unsigned idx, + double val); extern vvp_vector4_t array_get_word(vvp_array_t array, unsigned address); +extern double array_get_word_r(vvp_array_t array, unsigned address); /* VPI hooks */ diff --git a/vvp/bufif.cc b/vvp/bufif.cc index df356f812..9b11e2ee3 100644 --- a/vvp/bufif.cc +++ b/vvp/bufif.cc @@ -35,7 +35,8 @@ vvp_fun_bufif::vvp_fun_bufif(bool en_invert, bool out_invert, count_functors_bufif += 1; } -void vvp_fun_bufif::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_bufif::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { switch (ptr.port()) { case 0: @@ -115,4 +116,3 @@ void vvp_fun_bufif::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) * Revision 1.8 2002/08/12 01:35:07 steve * conditional ident string using autoconfig. */ - diff --git a/vvp/bufif.h b/vvp/bufif.h index 05f242ef5..6c55c21f8 100644 --- a/vvp/bufif.h +++ b/vvp/bufif.h @@ -40,7 +40,8 @@ class vvp_fun_bufif : public vvp_net_fun_t { vvp_fun_bufif(bool en_invert, bool out_invert, unsigned str0, unsigned str1); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); private: vvp_vector4_t bit_; diff --git a/vvp/codes.h b/vvp/codes.h index 769b3a915..0e634a95b 100644 --- a/vvp/codes.h +++ b/vvp/codes.h @@ -107,6 +107,7 @@ extern bool of_JMP0(vthread_t thr, vvp_code_t code); extern bool of_JMP0XZ(vthread_t thr, vvp_code_t code); extern bool of_JMP1(vthread_t thr, vvp_code_t code); extern bool of_JOIN(vthread_t thr, vvp_code_t code); +extern bool of_LOAD_AR(vthread_t thr, vvp_code_t code); extern bool of_LOAD_AV(vthread_t thr, vvp_code_t code); extern bool of_LOAD_AVP0(vthread_t thr, vvp_code_t code); extern bool of_LOAD_AVP0_S(vthread_t thr, vvp_code_t code); @@ -138,6 +139,7 @@ extern bool of_POW_WR(vthread_t thr, vvp_code_t code); extern bool of_RELEASE_NET(vthread_t thr, vvp_code_t code); extern bool of_RELEASE_REG(vthread_t thr, vvp_code_t code); extern bool of_RELEASE_WR(vthread_t thr, vvp_code_t code); +extern bool of_SET_AR(vthread_t thr, vvp_code_t code); extern bool of_SET_AV(vthread_t thr, vvp_code_t code); extern bool of_SET_VEC(vthread_t thr, vvp_code_t code); extern bool of_SET_WORDR(vthread_t thr, vvp_code_t code); diff --git a/vvp/compile.cc b/vvp/compile.cc index 28ce20ce4..75a84311e 100644 --- a/vvp/compile.cc +++ b/vvp/compile.cc @@ -83,6 +83,7 @@ const static struct opcode_table_s opcode_table[] = { { "%add", of_ADD, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, { "%add/wr", of_ADD_WR, 2, {OA_BIT1, OA_BIT2, OA_NONE} }, { "%addi", of_ADDI, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, + { "%alloc", of_ALLOC, 1, {OA_VPI_PTR, OA_NONE, OA_NONE} }, { "%and", of_AND, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, { "%and/r", of_ANDR, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, { "%andi", of_ANDI, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, @@ -134,6 +135,7 @@ const static struct opcode_table_s opcode_table[] = { { 
"%force/v",of_FORCE_V,3, {OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, { "%force/wr",of_FORCE_WR,2, {OA_FUNC_PTR, OA_BIT1, OA_NONE} }, { "%force/x0",of_FORCE_X0,3,{OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, + { "%free", of_FREE, 1, {OA_VPI_PTR, OA_NONE, OA_NONE} }, { "%inv", of_INV, 2, {OA_BIT1, OA_BIT2, OA_NONE} }, { "%ix/add", of_IX_ADD, 2, {OA_BIT1, OA_NUMBER, OA_NONE} }, { "%ix/get", of_IX_GET, 3, {OA_BIT1, OA_BIT2, OA_NUMBER} }, @@ -148,6 +150,7 @@ const static struct opcode_table_s opcode_table[] = { { "%jmp/0xz",of_JMP0XZ, 2, {OA_CODE_PTR, OA_BIT1, OA_NONE} }, { "%jmp/1", of_JMP1, 2, {OA_CODE_PTR, OA_BIT1, OA_NONE} }, { "%join", of_JOIN, 0, {OA_NONE, OA_NONE, OA_NONE} }, + { "%load/ar",of_LOAD_AR,3, {OA_BIT1, OA_ARR_PTR, OA_BIT2} }, { "%load/av",of_LOAD_AV,3, {OA_BIT1, OA_ARR_PTR, OA_BIT2} }, { "%load/avp0",of_LOAD_AVP0,3, {OA_BIT1, OA_ARR_PTR, OA_BIT2} }, { "%load/avp0/s",of_LOAD_AVP0_S,3,{OA_BIT1, OA_ARR_PTR, OA_BIT2} }, @@ -179,11 +182,11 @@ const static struct opcode_table_s opcode_table[] = { { "%release/net",of_RELEASE_NET,3,{OA_FUNC_PTR,OA_BIT1,OA_BIT2} }, { "%release/reg",of_RELEASE_REG,3,{OA_FUNC_PTR,OA_BIT1,OA_BIT2} }, { "%release/wr",of_RELEASE_WR,2,{OA_FUNC_PTR,OA_BIT1,OA_NONE} }, + { "%set/ar", of_SET_AR, 3, {OA_ARR_PTR, OA_BIT1, OA_BIT2} }, { "%set/av", of_SET_AV, 3, {OA_ARR_PTR, OA_BIT1, OA_BIT2} }, { "%set/v", of_SET_VEC,3, {OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, - { "%set/wr", of_SET_WORDR,2,{OA_VPI_PTR, OA_BIT1, OA_NONE} }, + { "%set/wr", of_SET_WORDR,2,{OA_FUNC_PTR, OA_BIT1, OA_NONE} }, { "%set/x0", of_SET_X0, 3, {OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, -// { "%set/x0/x",of_SET_X0_X,3,{OA_FUNC_PTR, OA_BIT1, OA_BIT2} }, { "%shiftl/i0", of_SHIFTL_I0, 2, {OA_BIT1,OA_NUMBER, OA_NONE} }, { "%shiftr/i0", of_SHIFTR_I0, 2, {OA_BIT1,OA_NUMBER, OA_NONE} }, { "%shiftr/s/i0", of_SHIFTR_S_I0,2,{OA_BIT1,OA_NUMBER, OA_NONE} }, @@ -1687,34 +1690,6 @@ void compile_fork(char*label, struct symb_s dest, struct symb_s scope) compile_vpi_lookup(&code->handle, scope.text); } -void compile_alloc(char*label, struct symb_s scope) -{ - if (label) - compile_codelabel(label); - - - /* Fill in the basics of the %alloc in the instruction. */ - vvp_code_t code = codespace_allocate(); - code->opcode = of_ALLOC; - - /* Figure out the target SCOPE. */ - compile_vpi_lookup(&code->handle, scope.text); -} - -void compile_free(char*label, struct symb_s scope) -{ - if (label) - compile_codelabel(label); - - - /* Fill in the basics of the %free in the instruction. */ - vvp_code_t code = codespace_allocate(); - code->opcode = of_FREE; - - /* Figure out the target SCOPE. */ - compile_vpi_lookup(&code->handle, scope.text); -} - void compile_vpi_call(char*label, char*name, long file_idx, long lineno, unsigned argc, vpiHandle*argv) diff --git a/vvp/compile.h b/vvp/compile.h index 05d568115..f6bb6a89e 100644 --- a/vvp/compile.h +++ b/vvp/compile.h @@ -356,8 +356,7 @@ extern void compile_ufunc(char*label, char*code, unsigned wid, * the threads. */ extern void compile_event(char*label, char*type, - unsigned argc, struct symb_s*argv, - bool debug_flag); + unsigned argc, struct symb_s*argv); extern void compile_named_event(char*label, char*type); @@ -406,9 +405,6 @@ extern void compile_fork(char*label, struct symb_s targ_s, struct symb_s scope_s); extern void compile_codelabel(char*label); -extern void compile_alloc(char*label, struct symb_s scope_s); -extern void compile_free(char*label, struct symb_s scope_s); - /* * The parser uses these functions to compile .scope statements. * The implementations of these live in the vpi_scope.cc file. 
@@ -443,12 +439,13 @@ extern void compile_net_real(char*label, char*name, extern void compile_netw(char*label, char*array_symbol, unsigned long array_addr, - int msb, int lsb, bool signed_flag, - bool net8_flag, - unsigned argc, struct symb_s*argv); + int msb, int lsb, bool signed_flag, + bool net8_flag, + unsigned argc, struct symb_s*argv); extern void compile_netw_real(char*label, char*array_symbol, - int msb, int lsb, - unsigned argc, struct symb_s*argv); + unsigned long array_addr, + int msb, int lsb, + unsigned argc, struct symb_s*argv); extern void compile_alias(char*label, char*name, int msb, int lsb, bool signed_flag, diff --git a/vvp/concat.cc b/vvp/concat.cc index 0a8c5716a..b17562459 100644 --- a/vvp/concat.cc +++ b/vvp/concat.cc @@ -45,7 +45,8 @@ vvp_fun_concat::~vvp_fun_concat() { } -void vvp_fun_concat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_concat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { unsigned pdx = port.port(); @@ -64,7 +65,7 @@ void vvp_fun_concat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) val_.set_bit(off+idx, bit.value(idx)); } - vvp_send_vec4(port.ptr()->out, val_); + vvp_send_vec4(port.ptr()->out, val_, 0); } void compile_concat(char*label, unsigned w0, unsigned w1, @@ -91,7 +92,8 @@ vvp_fun_repeat::~vvp_fun_repeat() { } -void vvp_fun_repeat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_repeat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { assert(bit.size() == wid_/rep_); @@ -105,7 +107,7 @@ void vvp_fun_repeat::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) } - vvp_send_vec4(port.ptr()->out, val); + vvp_send_vec4(port.ptr()->out, val, 0); } void compile_repeat(char*label, long width, long repeat, struct symb_s arg) @@ -140,4 +142,3 @@ void compile_repeat(char*label, long width, long repeat, struct symb_s arg) * Add missing concat.cc to cvs * */ - diff --git a/vvp/delay.cc b/vvp/delay.cc index d68c11820..66f3a0ffe 100644 --- a/vvp/delay.cc +++ b/vvp/delay.cc @@ -183,7 +183,8 @@ void vvp_fun_delay::clean_pulse_events_(vvp_time64_t use_delay) * wrong. What should happen is that if there are multiple changes, * multiple vectors approaching the result should be scheduled. 
*/ -void vvp_fun_delay::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_delay::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { if (port.port() > 0) { // Get the integer value of the bit vector, or 0 if @@ -248,7 +249,7 @@ void vvp_fun_delay::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) if (use_delay == 0) { cur_vec4_ = bit; initial_ = false; - vvp_send_vec4(net_->out, cur_vec4_); + vvp_send_vec4(net_->out, cur_vec4_, 0); } else { struct event_*cur = new struct event_(use_simtime); cur->run_run_ptr = &vvp_fun_delay::run_run_vec4_; @@ -283,7 +284,8 @@ void vvp_fun_delay::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) } } -void vvp_fun_delay::recv_real(vvp_net_ptr_t port, double bit) +void vvp_fun_delay::recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t) { if (port.port() > 0) { /* If the port is not 0, then this is a delay value that @@ -328,7 +330,7 @@ void vvp_fun_delay::recv_real(vvp_net_ptr_t port, double bit) if (use_delay == 0) { cur_real_ = bit; initial_ = false; - vvp_send_real(net_->out, cur_real_); + vvp_send_real(net_->out, cur_real_, 0); } else { struct event_*cur = new struct event_(use_simtime); cur->run_run_ptr = &vvp_fun_delay::run_run_real_; @@ -357,7 +359,7 @@ void vvp_fun_delay::run_run() void vvp_fun_delay::run_run_vec4_(struct event_*cur) { cur_vec4_ = cur->ptr_vec4; - vvp_send_vec4(net_->out, cur_vec4_); + vvp_send_vec4(net_->out, cur_vec4_, 0); } void vvp_fun_delay::run_run_vec8_(struct vvp_fun_delay::event_*cur) @@ -369,7 +371,7 @@ void vvp_fun_delay::run_run_vec8_(struct vvp_fun_delay::event_*cur) void vvp_fun_delay::run_run_real_(struct vvp_fun_delay::event_*cur) { cur_real_ = cur->ptr_real; - vvp_send_real(net_->out, cur_real_); + vvp_send_real(net_->out, cur_real_, 0); } vvp_fun_modpath::vvp_fun_modpath(vvp_net_t*net) @@ -418,7 +420,8 @@ static vvp_time64_t delay_from_edge(vvp_bit4_t a, vvp_bit4_t b, return array[ edge_table[a][b] ]; } -void vvp_fun_modpath::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_modpath::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { /* Only the first port is used. */ if (port.port() > 0) @@ -532,7 +535,7 @@ void vvp_fun_modpath::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) void vvp_fun_modpath::run_run() { - vvp_send_vec4(net_->out, cur_vec4_); + vvp_send_vec4(net_->out, cur_vec4_, 0); } vvp_fun_modpath_src::vvp_fun_modpath_src(vvp_time64_t del[12]) @@ -561,7 +564,8 @@ void vvp_fun_modpath_src::put_delay12(const vvp_time64_t val[12]) delay_[idx] = val[idx]; } -void vvp_fun_modpath_src::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_modpath_src::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { if (port.port() == 0) { // The modpath input... 
diff --git a/vvp/delay.h b/vvp/delay.h index d886f0c94..f7da4a907 100644 --- a/vvp/delay.h +++ b/vvp/delay.h @@ -85,9 +85,11 @@ class vvp_fun_delay : public vvp_net_fun_t, private vvp_gen_event_s { vvp_fun_delay(vvp_net_t*net, vvp_bit4_t init, const vvp_delay_t&d); ~vvp_fun_delay(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); - void recv_real(vvp_net_ptr_t port, double bit); + void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t); //void recv_long(vvp_net_ptr_t port, long bit); private: @@ -153,7 +155,8 @@ class vvp_fun_modpath : public vvp_net_fun_t, private vvp_gen_event_s { void add_modpath_src(vvp_fun_modpath_src*that, bool ifnone); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); private: virtual void run_run(); @@ -181,7 +184,8 @@ class vvp_fun_modpath_src : public vvp_net_fun_t { ~vvp_fun_modpath_src(); public: - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); virtual bool test_vec4(const vvp_vector4_t&bit); void get_delay12(vvp_time64_t out[12]) const; diff --git a/vvp/dff.cc b/vvp/dff.cc index a726933ba..0b6b58725 100644 --- a/vvp/dff.cc +++ b/vvp/dff.cc @@ -39,7 +39,8 @@ vvp_dff::~vvp_dff() { } -void vvp_dff::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_dff::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { vvp_bit4_t tmp; @@ -57,7 +58,7 @@ void vvp_dff::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) tmp = clk_cur_; clk_cur_ = bit.value(0); if (clk_cur_ == BIT4_1 && tmp != BIT4_1) - vvp_send_vec4(port.ptr()->out, d_); + vvp_send_vec4(port.ptr()->out, d_, 0); break; case 2: // CE @@ -67,7 +68,7 @@ void vvp_dff::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) case 3: // Asynch-D d_ = bit; - vvp_send_vec4(port.ptr()->out, d_); + vvp_send_vec4(port.ptr()->out, d_, 0); break; } } diff --git a/vvp/dff.h b/vvp/dff.h index 95593fbdb..884541334 100644 --- a/vvp/dff.h +++ b/vvp/dff.h @@ -40,7 +40,8 @@ class vvp_dff : public vvp_net_fun_t { explicit vvp_dff(bool invert_clk =false, bool invert_ce =false); ~vvp_dff(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); private: bool iclk_, ice_; diff --git a/vvp/event.cc b/vvp/event.cc index 91e2c306a..bff6fb18f 100644 --- a/vvp/event.cc +++ b/vvp/event.cc @@ -32,7 +32,7 @@ # include -void waitable_hooks_s::run_waiting_threads_(unsigned context_idx) +void waitable_hooks_s::run_waiting_threads_(vthread_t&threads) { // Run the non-blocking event controls. 
last = &event_ctls; @@ -48,17 +48,11 @@ void waitable_hooks_s::run_waiting_threads_(unsigned context_idx) } } - vthread_t tmp; - if (context_idx) { - waitable_state_s*state = static_cast - (vthread_get_wt_context_item(context_idx)); - tmp = state->threads; - state->threads = 0; - } else { - tmp = threads; - threads = 0; - } - if (tmp) vthread_schedule_list(tmp); + vthread_t tmp = threads; + if (tmp == 0) return; + threads = 0; + + vthread_schedule_list(tmp); } evctl::evctl(unsigned long ecount) @@ -117,9 +111,9 @@ evctl_vector::evctl_vector(vvp_net_ptr_t ptr, const vvp_vector4_t&value, void evctl_vector::run_run() { if (wid_ != 0) { - vvp_send_vec4_pv(ptr_, value_, off_, value_.size(), wid_); + vvp_send_vec4_pv(ptr_, value_, off_, value_.size(), wid_, 0); } else { - vvp_send_vec4(ptr_, value_); + vvp_send_vec4(ptr_, value_, 0); } } @@ -186,114 +180,156 @@ const vvp_fun_edge::edge_t vvp_edge_negedge const vvp_fun_edge::edge_t vvp_edge_none = 0; struct vvp_fun_edge_state_s : public waitable_state_s { - vvp_fun_edge_state_s() : bit(BIT4_X) {} + vvp_fun_edge_state_s() + { + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + bits[idx] = BIT4_X; + } - vvp_bit4_t bit; + vvp_bit4_t bits[4]; }; -vvp_fun_edge::vvp_fun_edge(edge_t e, bool debug_flag) -: edge_(e), debug_(debug_flag) +vvp_fun_edge::vvp_fun_edge(edge_t e) +: edge_(e) { - bits_[0] = BIT4_X; - bits_[1] = BIT4_X; - bits_[2] = BIT4_X; - bits_[3] = BIT4_X; } vvp_fun_edge::~vvp_fun_edge() { } -void vvp_fun_edge::alloc_instance(vvp_context_t context) +bool vvp_fun_edge::recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_bit4_t&old_bit, vthread_t&threads) { - vvp_set_context_item(context, context_idx, new vvp_fun_edge_state_s); -} - -void vvp_fun_edge::reset_instance(vvp_context_t context) -{ - vvp_fun_edge_state_s*state = static_cast - (vvp_get_context_item(context, context_idx)); - state->threads = 0; - state->bit = BIT4_X; -} - -void vvp_fun_edge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) -{ - vvp_bit4_t*old_bit; - if (context_idx) { - vvp_fun_edge_state_s*state = static_cast - (vthread_get_wt_context_item(context_idx)); - old_bit = &state->bit; - } else { - old_bit = &bits_[port.port()]; - } - /* See what kind of edge this represents. */ - edge_t mask = VVP_EDGE(*old_bit, bit.value(0)); + edge_t mask = VVP_EDGE(old_bit, bit.value(0)); /* Save the current input for the next time around. 
*/ - *old_bit = bit.value(0); + old_bit = bit.value(0); if ((edge_ == vvp_edge_none) || (edge_ & mask)) { - run_waiting_threads_(context_idx); + run_waiting_threads_(threads); + return true; + } + return false; +} +vvp_fun_edge_sa::vvp_fun_edge_sa(edge_t e) +: vvp_fun_edge(e), threads_(0) +{ + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + bits_[idx] = BIT4_X; +} + +vvp_fun_edge_sa::~vvp_fun_edge_sa() +{ +} + +vthread_t vvp_fun_edge_sa::add_waiting_thread(vthread_t thread) +{ + vthread_t tmp = threads_; + threads_ = thread; + + return tmp; +} + +void vvp_fun_edge_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + if (recv_vec4_(port, bit, bits_[port.port()], threads_)) { vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, bit); + vvp_send_vec4(net->out, bit, 0); } } +vvp_fun_edge_aa::vvp_fun_edge_aa(edge_t e) +: vvp_fun_edge(e) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_edge_aa::~vvp_fun_edge_aa() +{ +} + +void vvp_fun_edge_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new vvp_fun_edge_state_s); +} + +void vvp_fun_edge_aa::reset_instance(vvp_context_t context) +{ + vvp_fun_edge_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + state->threads = 0; + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + state->bits[idx] = BIT4_X; +} + + +vthread_t vvp_fun_edge_aa::add_waiting_thread(vthread_t thread) +{ + vvp_fun_edge_state_s*state = static_cast + (vthread_get_wt_context_item(context_idx_)); + + vthread_t tmp = state->threads; + state->threads = thread; + + return tmp; +} + +void vvp_fun_edge_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + vvp_fun_edge_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + if (recv_vec4_(port, bit, state->bits[port.port()], state->threads)) { + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} struct vvp_fun_anyedge_state_s : public waitable_state_s { - vvp_fun_anyedge_state_s() : bitsr(0.0) {} + vvp_fun_anyedge_state_s() + { + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + bitsr[idx] = 0.0; + } - vvp_vector4_t bits; - double bitsr; + vvp_vector4_t bits[4]; + double bitsr[4]; }; -vvp_fun_anyedge::vvp_fun_anyedge(bool debug_flag) -: debug_(debug_flag) +vvp_fun_anyedge::vvp_fun_anyedge() { - for (unsigned idx = 0 ; idx < 4 ; idx += 1) - bitsr_[idx] = 0.0; } vvp_fun_anyedge::~vvp_fun_anyedge() { } -void vvp_fun_anyedge::alloc_instance(vvp_context_t context) -{ - vvp_set_context_item(context, context_idx, new vvp_fun_anyedge_state_s); -} - -void vvp_fun_anyedge::reset_instance(vvp_context_t context) -{ - vvp_fun_anyedge_state_s*state = static_cast - (vvp_get_context_item(context, context_idx)); - state->threads = 0; - state->bits.set_to_x(); - state->bitsr = 0.0; -} - -void vvp_fun_anyedge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +bool vvp_fun_anyedge::recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_vector4_t&old_bits, vthread_t&threads) { bool flag = false; - vvp_vector4_t*old_bits; - if (context_idx) { - vvp_fun_anyedge_state_s*state = static_cast - (vthread_get_wt_context_item(context_idx)); - old_bits = &state->bits; - } else { - old_bits = &bits_[port.port()]; - } - - if 
(old_bits->size() != bit.size()) { + if (old_bits.size() != bit.size()) { flag = true; } else { for (unsigned idx = 0 ; idx < bit.size() ; idx += 1) { - if (old_bits->value(idx) != bit.value(idx)) { + if (old_bits.value(idx) != bit.value(idx)) { flag = true; break; } @@ -301,29 +337,136 @@ void vvp_fun_anyedge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) } if (flag) { - *old_bits = bit; - run_waiting_threads_(context_idx); + old_bits = bit; + run_waiting_threads_(threads); + } + + return flag; +} + +bool vvp_fun_anyedge::recv_real_(vvp_net_ptr_t port, double bit, + double&old_bits, vthread_t&threads) +{ + if (old_bits != bit) { + old_bits = bit; + run_waiting_threads_(threads); + return true; + } + return false; +} + +vvp_fun_anyedge_sa::vvp_fun_anyedge_sa() +: threads_(0) +{ + for (unsigned idx = 0 ; idx < 4 ; idx += 1) + bitsr_[idx] = 0.0; +} + +vvp_fun_anyedge_sa::~vvp_fun_anyedge_sa() +{ +} + +vthread_t vvp_fun_anyedge_sa::add_waiting_thread(vthread_t thread) +{ + vthread_t tmp = threads_; + threads_ = thread; + + return tmp; +} + +void vvp_fun_anyedge_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + if (recv_vec4_(port, bit, bits_[port.port()], threads_)) { vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, bit); + vvp_send_vec4(net->out, bit, 0); } } -void vvp_fun_anyedge::recv_real(vvp_net_ptr_t port, double bit) +void vvp_fun_anyedge_sa::recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t) { - double*old_bits; - if (context_idx) { - vvp_fun_anyedge_state_s*state = static_cast - (vthread_get_wt_context_item(context_idx)); - old_bits = &state->bitsr; - } else { - old_bits = &bitsr_[port.port()]; - } - - if (*old_bits != bit) { - *old_bits = bit; - run_waiting_threads_(context_idx); + if (recv_real_(port, bit, bitsr_[port.port()], threads_)) { vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, vvp_vector4_t()); + vvp_send_vec4(net->out, vvp_vector4_t(), 0); + } +} + +vvp_fun_anyedge_aa::vvp_fun_anyedge_aa() +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_anyedge_aa::~vvp_fun_anyedge_aa() +{ +} + +void vvp_fun_anyedge_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new vvp_fun_anyedge_state_s); +} + +void vvp_fun_anyedge_aa::reset_instance(vvp_context_t context) +{ + vvp_fun_anyedge_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + state->threads = 0; + for (unsigned idx = 0 ; idx < 4 ; idx += 1) { + state->bits[idx].set_to_x(); + state->bitsr[idx] = 0.0; + } +} + +vthread_t vvp_fun_anyedge_aa::add_waiting_thread(vthread_t thread) +{ + vvp_fun_anyedge_state_s*state = static_cast + (vthread_get_wt_context_item(context_idx_)); + + vthread_t tmp = state->threads; + state->threads = thread; + + return tmp; +} + +void vvp_fun_anyedge_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + vvp_fun_anyedge_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + if (recv_vec4_(port, bit, state->bits[port.port()], state->threads)) { + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} + +void vvp_fun_anyedge_aa::recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t context) +{ + if (context) { + vvp_fun_anyedge_state_s*state = 
static_cast + (vvp_get_context_item(context, context_idx_)); + + if (recv_real_(port, bit, state->bitsr[port.port()], state->threads)) { + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, vvp_vector4_t(), context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_real(port, bit, context); + context = vvp_get_next_context(context); + } } } @@ -335,23 +478,82 @@ vvp_fun_event_or::~vvp_fun_event_or() { } -void vvp_fun_event_or::alloc_instance(vvp_context_t context) +vvp_fun_event_or_sa::vvp_fun_event_or_sa() +: threads_(0) { - vvp_set_context_item(context, context_idx, new waitable_state_s); } -void vvp_fun_event_or::reset_instance(vvp_context_t context) +vvp_fun_event_or_sa::~vvp_fun_event_or_sa() +{ +} + +vthread_t vvp_fun_event_or_sa::add_waiting_thread(vthread_t thread) +{ + vthread_t tmp = threads_; + threads_ = thread; + + return tmp; +} + +void vvp_fun_event_or_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + run_waiting_threads_(threads_); + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, 0); +} + +vvp_fun_event_or_aa::vvp_fun_event_or_aa() +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_event_or_aa::~vvp_fun_event_or_aa() +{ +} + +void vvp_fun_event_or_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new waitable_state_s); +} + +void vvp_fun_event_or_aa::reset_instance(vvp_context_t context) { waitable_state_s*state = static_cast - (vvp_get_context_item(context, context_idx)); + (vvp_get_context_item(context, context_idx_)); + state->threads = 0; } -void vvp_fun_event_or::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +vthread_t vvp_fun_event_or_aa::add_waiting_thread(vthread_t thread) { - run_waiting_threads_(context_idx); - vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, bit); + waitable_state_s*state = static_cast + (vthread_get_wt_context_item(context_idx_)); + + vthread_t tmp = state->threads; + state->threads = thread; + + return tmp; +} + +void vvp_fun_event_or_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + waitable_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + run_waiting_threads_(state->threads); + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, context); + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } } vvp_named_event::vvp_named_event(struct __vpiHandle*h) @@ -363,25 +565,78 @@ vvp_named_event::~vvp_named_event() { } -void vvp_named_event::alloc_instance(vvp_context_t context) +vvp_named_event_sa::vvp_named_event_sa(struct __vpiHandle*h) +: vvp_named_event(h), threads_(0) { - vvp_set_context_item(context, context_idx, new waitable_state_s); } -void vvp_named_event::reset_instance(vvp_context_t context) +vvp_named_event_sa::~vvp_named_event_sa() +{ +} + +vthread_t vvp_named_event_sa::add_waiting_thread(vthread_t thread) +{ + vthread_t tmp = threads_; + threads_ = thread; + + return tmp; +} + +void vvp_named_event_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + run_waiting_threads_(threads_); + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, 0); + + vpip_run_named_event_callbacks(handle_); +} + +vvp_named_event_aa::vvp_named_event_aa(struct __vpiHandle*h) +: vvp_named_event(h) +{ + context_idx_ = 
vpip_add_item_to_context(this, vpip_peek_context_scope()); +} + +vvp_named_event_aa::~vvp_named_event_aa() +{ +} + +void vvp_named_event_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new waitable_state_s); +} + +void vvp_named_event_aa::reset_instance(vvp_context_t context) { waitable_state_s*state = static_cast - (vvp_get_context_item(context, context_idx)); + (vvp_get_context_item(context, context_idx_)); + state->threads = 0; } -void vvp_named_event::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +vthread_t vvp_named_event_aa::add_waiting_thread(vthread_t thread) { - run_waiting_threads_(context_idx); - vvp_net_t*net = port.ptr(); - vvp_send_vec4(net->out, bit); + waitable_state_s*state = static_cast + (vthread_get_wt_context_item(context_idx_)); - vpip_run_named_event_callbacks(handle_); + vthread_t tmp = state->threads; + state->threads = thread; + + return tmp; +} + +void vvp_named_event_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + assert(context); + + waitable_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + run_waiting_threads_(state->threads); + vvp_net_t*net = port.ptr(); + vvp_send_vec4(net->out, bit, context); } /* @@ -394,9 +649,7 @@ void vvp_named_event::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) static void compile_event_or(char*label, unsigned argc, struct symb_s*argv); -void compile_event(char*label, char*type, - unsigned argc, struct symb_s*argv, - bool debug_flag) +void compile_event(char*label, char*type, unsigned argc, struct symb_s*argv) { vvp_net_fun_t*fun = 0; @@ -408,9 +661,12 @@ void compile_event(char*label, char*type, if (strcmp(type,"edge") == 0) { free(type); - vvp_fun_anyedge*event_fun = new vvp_fun_anyedge(debug_flag); - vpip_add_item_to_current_scope(event_fun); - fun = event_fun; + + if (vpip_peek_current_scope()->is_automatic) { + fun = new vvp_fun_anyedge_aa; + } else { + fun = new vvp_fun_anyedge_sa; + } } else { @@ -424,9 +680,12 @@ void compile_event(char*label, char*type, assert(argc <= 4); free(type); - vvp_fun_edge*event_fun = new vvp_fun_edge(edge, debug_flag); - vpip_add_item_to_current_scope(event_fun); - fun = event_fun; + if (vpip_peek_current_scope()->is_automatic) { + fun = new vvp_fun_edge_aa(edge); + } else { + fun = new vvp_fun_edge_sa(edge); + } + } vvp_net_t* ptr = new vvp_net_t; @@ -440,11 +699,12 @@ void compile_event(char*label, char*type, static void compile_event_or(char*label, unsigned argc, struct symb_s*argv) { - vvp_fun_event_or*fun = new vvp_fun_event_or; vvp_net_t* ptr = new vvp_net_t; - ptr->fun = fun; - - vpip_add_item_to_current_scope(fun); + if (vpip_peek_current_scope()->is_automatic) { + ptr->fun = new vvp_fun_event_or_aa; + } else { + ptr->fun = new vvp_fun_event_or_sa; + } define_functor_symbol(label, ptr); free(label); @@ -466,10 +726,12 @@ void compile_named_event(char*label, char*name) vvp_net_t*ptr = new vvp_net_t; vpiHandle obj = vpip_make_named_event(name, ptr); - vvp_named_event*fun = new vvp_named_event(obj); - ptr->fun = fun; - vpip_add_item_to_current_scope(fun); + if (vpip_peek_current_scope()->is_automatic) { + ptr->fun = new vvp_named_event_aa(obj); + } else { + ptr->fun = new vvp_named_event_sa(obj); + } define_functor_symbol(label, ptr); compile_vpi_symbol(label, obj); vpip_attach_to_current_scope(obj); diff --git a/vvp/event.h b/vvp/event.h index 2d66416e3..8a769d8af 100644 --- a/vvp/event.h +++ b/vvp/event.h @@ -102,13 +102,15 @@ extern void 
schedule_evctl(vvp_array_t memory, unsigned index, struct waitable_hooks_s { public: - waitable_hooks_s() : threads(0), event_ctls(0) { last = &event_ctls; } - vthread_t threads; + waitable_hooks_s() : event_ctls(0) { last = &event_ctls; } + + virtual vthread_t add_waiting_thread(vthread_t thread) = 0; + evctl*event_ctls; evctl**last; protected: - void run_waiting_threads_(unsigned context_idx); + void run_waiting_threads_(vthread_t&threads); }; /* @@ -118,6 +120,7 @@ struct waitable_hooks_s { */ struct waitable_state_s { waitable_state_s() : threads(0) { } + vthread_t threads; }; @@ -130,24 +133,62 @@ class vvp_fun_edge : public vvp_net_fun_t, public waitable_hooks_s { public: typedef unsigned short edge_t; - explicit vvp_fun_edge(edge_t e, bool debug_flag); + explicit vvp_fun_edge(edge_t e); virtual ~vvp_fun_edge(); - void alloc_instance(vvp_context_t context); - void reset_instance(vvp_context_t context); - - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + protected: + bool recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_bit4_t&old_bit, vthread_t&threads); private: - vvp_bit4_t bits_[4]; edge_t edge_; - bool debug_; }; extern const vvp_fun_edge::edge_t vvp_edge_posedge; extern const vvp_fun_edge::edge_t vvp_edge_negedge; extern const vvp_fun_edge::edge_t vvp_edge_none; +/* + * Statically allocated vvp_fun_edge. + */ +class vvp_fun_edge_sa : public vvp_fun_edge { + + public: + explicit vvp_fun_edge_sa(edge_t e); + virtual ~vvp_fun_edge_sa(); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + private: + vthread_t threads_; + vvp_bit4_t bits_[4]; +}; + +/* + * Automatically allocated vvp_fun_edge. + */ +class vvp_fun_edge_aa : public vvp_fun_edge, public automatic_hooks_s { + + public: + explicit vvp_fun_edge_aa(edge_t e); + virtual ~vvp_fun_edge_aa(); + + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + private: + struct __vpiScope*context_scope_; + unsigned context_idx_; +}; + /* * The vvp_fun_anyedge functor checks to see if any value in an input * vector changes. Unlike the vvp_fun_edge, which watches for the LSB @@ -161,20 +202,63 @@ extern const vvp_fun_edge::edge_t vvp_edge_none; class vvp_fun_anyedge : public vvp_net_fun_t, public waitable_hooks_s { public: - explicit vvp_fun_anyedge(bool debug_flag); + explicit vvp_fun_anyedge(); virtual ~vvp_fun_anyedge(); + protected: + bool recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_vector4_t&old_bits, vthread_t&threads); + bool recv_real_(vvp_net_ptr_t port, double bit, + double&old_bits, vthread_t&threads); +}; + +/* + * Statically allocated vvp_fun_anyedge. + */ +class vvp_fun_anyedge_sa : public vvp_fun_anyedge { + + public: + explicit vvp_fun_anyedge_sa(); + virtual ~vvp_fun_anyedge_sa(); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t context); + + private: + vthread_t threads_; + vvp_vector4_t bits_[4]; + // In case I'm a real-valued event. + double bitsr_[4]; +}; + +/* + * Automatically allocated vvp_fun_anyedge. 
+ */ +class vvp_fun_anyedge_aa : public vvp_fun_anyedge, public automatic_hooks_s { + + public: + explicit vvp_fun_anyedge_aa(); + virtual ~vvp_fun_anyedge_aa(); + void alloc_instance(vvp_context_t context); void reset_instance(vvp_context_t context); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t port, double bit); + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + void recv_real(vvp_net_ptr_t port, double bit, + vvp_context_t context); private: - bool debug_; - vvp_vector4_t bits_[4]; - // In case I'm a real-valued event. - double bitsr_[4]; + struct __vpiScope*context_scope_; + unsigned context_idx_; }; /* @@ -186,13 +270,46 @@ class vvp_fun_event_or : public vvp_net_fun_t, public waitable_hooks_s { public: explicit vvp_fun_event_or(); ~vvp_fun_event_or(); +}; + +/* + * Statically allocated vvp_fun_event_or. + */ +class vvp_fun_event_or_sa : public vvp_fun_event_or { + + public: + explicit vvp_fun_event_or_sa(); + ~vvp_fun_event_or_sa(); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + private: + vthread_t threads_; +}; + +/* + * Automatically allocated vvp_fun_event_or. + */ +class vvp_fun_event_or_aa : public vvp_fun_event_or, public automatic_hooks_s { + + public: + explicit vvp_fun_event_or_aa(); + ~vvp_fun_event_or_aa(); void alloc_instance(vvp_context_t context); void reset_instance(vvp_context_t context); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); private: + struct __vpiScope*context_scope_; + unsigned context_idx_; }; /* @@ -206,13 +323,47 @@ class vvp_named_event : public vvp_net_fun_t, public waitable_hooks_s { explicit vvp_named_event(struct __vpiHandle*eh); ~vvp_named_event(); - void alloc_instance(vvp_context_t context); - void reset_instance(vvp_context_t context); - - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); - - private: + protected: struct __vpiHandle*handle_; }; +/* + * Statically allocated vvp_named_event. + */ +class vvp_named_event_sa : public vvp_named_event { + + public: + explicit vvp_named_event_sa(struct __vpiHandle*eh); + ~vvp_named_event_sa(); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); + + private: + vthread_t threads_; +}; + +/* + * Automatically allocated vvp_named_event. 
+ */ +class vvp_named_event_aa : public vvp_named_event, public automatic_hooks_s { + + public: + explicit vvp_named_event_aa(struct __vpiHandle*eh); + ~vvp_named_event_aa(); + + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + vthread_t add_waiting_thread(vthread_t thread); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + private: + unsigned context_idx_; +}; + #endif // __event_H diff --git a/vvp/extend.cc b/vvp/extend.cc index 1e5b13f04..4927026fa 100644 --- a/vvp/extend.cc +++ b/vvp/extend.cc @@ -35,10 +35,11 @@ vvp_fun_extend_signed::~vvp_fun_extend_signed() { } -void vvp_fun_extend_signed::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_extend_signed::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { if (bit.size() >= width_) { - vvp_send_vec4(port.ptr()->out, bit); + vvp_send_vec4(port.ptr()->out, bit, 0); return; } @@ -51,5 +52,5 @@ void vvp_fun_extend_signed::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bi for (unsigned idx = bit.size() ; idx < res.size() ; idx += 1) res.set_bit(idx, pad); - vvp_send_vec4(port.ptr()->out, res); + vvp_send_vec4(port.ptr()->out, res, 0); } diff --git a/vvp/lexor.lex b/vvp/lexor.lex index 0cf3fc272..a2c19b7d5 100644 --- a/vvp/lexor.lex +++ b/vvp/lexor.lex @@ -183,8 +183,6 @@ "%vpi_func/r" { return K_vpi_func_r; } "%disable" { return K_disable; } "%fork" { return K_fork; } -"%alloc" { return K_alloc; } -"%free" { return K_free; } /* Handle the specialized variable access functions. */ diff --git a/vvp/logic.cc b/vvp/logic.cc index 3ba7c965b..6eb59db4b 100644 --- a/vvp/logic.cc +++ b/vvp/logic.cc @@ -42,7 +42,8 @@ vvp_fun_boolean_::~vvp_fun_boolean_() { } -void vvp_fun_boolean_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_boolean_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { unsigned port = ptr.port(); if (input_[port] .eeq( bit )) @@ -56,7 +57,8 @@ void vvp_fun_boolean_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) } void vvp_fun_boolean_::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { unsigned port = ptr.port(); @@ -106,7 +108,7 @@ void vvp_fun_and::run_run() result.set_bit(idx, bitbit); } - vvp_send_vec4(ptr->out, result); + vvp_send_vec4(ptr->out, result, 0); } vvp_fun_buf::vvp_fun_buf() @@ -123,7 +125,8 @@ vvp_fun_buf::~vvp_fun_buf() * The buf functor is very simple--change the z bits to x bits in the * vector it passes, and propagate the result. */ -void vvp_fun_buf::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_buf::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { if (ptr.port() != 0) return; @@ -146,7 +149,7 @@ void vvp_fun_buf::run_run() vvp_vector4_t tmp (input_); tmp.change_z2x(); - vvp_send_vec4(ptr->out, tmp); + vvp_send_vec4(ptr->out, tmp, 0); } vvp_fun_bufz::vvp_fun_bufz() @@ -162,20 +165,22 @@ vvp_fun_bufz::~vvp_fun_bufz() * The bufz is similar to the buf device, except that it does not * bother translating z bits to x. 
*/ -void vvp_fun_bufz::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_bufz::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { if (ptr.port() != 0) return; - vvp_send_vec4(ptr.ptr()->out, bit); + vvp_send_vec4(ptr.ptr()->out, bit, 0); } -void vvp_fun_bufz::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_fun_bufz::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { if (ptr.port() != 0) return; - vvp_send_real(ptr.ptr()->out, bit); + vvp_send_real(ptr.ptr()->out, bit, 0); } vvp_fun_muxr::vvp_fun_muxr() @@ -190,7 +195,8 @@ vvp_fun_muxr::~vvp_fun_muxr() { } -void vvp_fun_muxr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_muxr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { /* The real valued mux can only take in the select as a vector4_t. The muxed data is real. */ @@ -219,7 +225,8 @@ void vvp_fun_muxr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) } } -void vvp_fun_muxr::recv_real(vvp_net_ptr_t ptr, double bit) +void vvp_fun_muxr::recv_real(vvp_net_ptr_t ptr, double bit, + vvp_context_t) { switch (ptr.port()) { case 0: @@ -252,16 +259,16 @@ void vvp_fun_muxr::run_run() switch (select_) { case SEL_PORT0: - vvp_send_real(ptr->out, a_); + vvp_send_real(ptr->out, a_, 0); break; case SEL_PORT1: - vvp_send_real(ptr->out, b_); + vvp_send_real(ptr->out, b_, 0); break; default: if (a_ == b_) { - vvp_send_real(ptr->out, a_); + vvp_send_real(ptr->out, a_, 0); } else { - vvp_send_real(ptr->out, 0.0); // Should this be NaN? + vvp_send_real(ptr->out, 0.0, 0); // Should this be NaN? } break; } @@ -284,7 +291,8 @@ vvp_fun_muxz::~vvp_fun_muxz() { } -void vvp_fun_muxz::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_muxz::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { switch (ptr.port()) { case 0: @@ -331,10 +339,10 @@ void vvp_fun_muxz::run_run() switch (select_) { case SEL_PORT0: - vvp_send_vec4(ptr->out, a_); + vvp_send_vec4(ptr->out, a_, 0); break; case SEL_PORT1: - vvp_send_vec4(ptr->out, b_); + vvp_send_vec4(ptr->out, b_, 0); break; default: { @@ -357,7 +365,7 @@ void vvp_fun_muxz::run_run() for (unsigned idx = min_size ; idx < max_size ; idx += 1) res.set_bit(idx, BIT4_X); - vvp_send_vec4(ptr->out, res); + vvp_send_vec4(ptr->out, res, 0); } break; } @@ -377,7 +385,8 @@ vvp_fun_not::~vvp_fun_not() * The buf functor is very simple--change the z bits to x bits in the * vector it passes, and propagate the result. 
*/ -void vvp_fun_not::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_not::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { if (ptr.port() != 0) return; @@ -404,7 +413,7 @@ void vvp_fun_not::run_run() result.set_bit(idx, bitbit); } - vvp_send_vec4(ptr->out, result); + vvp_send_vec4(ptr->out, result, 0); } vvp_fun_or::vvp_fun_or(unsigned wid, bool invert) @@ -440,7 +449,7 @@ void vvp_fun_or::run_run() result.set_bit(idx, bitbit); } - vvp_send_vec4(ptr->out, result); + vvp_send_vec4(ptr->out, result, 0); } vvp_fun_xor::vvp_fun_xor(unsigned wid, bool invert) @@ -476,7 +485,7 @@ void vvp_fun_xor::run_run() result.set_bit(idx, bitbit); } - vvp_send_vec4(ptr->out, result); + vvp_send_vec4(ptr->out, result, 0); } /* diff --git a/vvp/logic.h b/vvp/logic.h index 841ce4aaa..ddbb66d48 100644 --- a/vvp/logic.h +++ b/vvp/logic.h @@ -32,9 +32,11 @@ class vvp_fun_boolean_ : public vvp_net_fun_t, protected vvp_gen_event_s { explicit vvp_fun_boolean_(unsigned wid); ~vvp_fun_boolean_(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec4_pv(vvp_net_ptr_t p, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t); protected: vvp_vector4_t input_[4]; @@ -64,7 +66,8 @@ class vvp_fun_buf: public vvp_net_fun_t, private vvp_gen_event_s { explicit vvp_fun_buf(); virtual ~vvp_fun_buf(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); private: void run_run(); @@ -84,8 +87,10 @@ class vvp_fun_bufz: public vvp_net_fun_t { explicit vvp_fun_bufz(); virtual ~vvp_fun_bufz(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t p, double bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); + void recv_real(vvp_net_ptr_t p, double bit, + vvp_context_t); private: }; @@ -109,7 +114,8 @@ class vvp_fun_muxz : public vvp_net_fun_t, private vvp_gen_event_s { explicit vvp_fun_muxz(unsigned width); virtual ~vvp_fun_muxz(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); private: void run_run(); @@ -128,8 +134,10 @@ class vvp_fun_muxr : public vvp_net_fun_t, private vvp_gen_event_s { explicit vvp_fun_muxr(); virtual ~vvp_fun_muxr(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); - void recv_real(vvp_net_ptr_t p, double bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); + void recv_real(vvp_net_ptr_t p, double bit, + vvp_context_t); private: void run_run(); @@ -147,7 +155,8 @@ class vvp_fun_not: public vvp_net_fun_t, private vvp_gen_event_s { explicit vvp_fun_not(); virtual ~vvp_fun_not(); - void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t p, const vvp_vector4_t&bit, + vvp_context_t); private: void run_run(); diff --git a/vvp/npmos.cc b/vvp/npmos.cc index 2a1243bde..1d8db67b5 100644 --- a/vvp/npmos.cc +++ b/vvp/npmos.cc @@ -28,7 +28,8 @@ vvp_fun_pmos_::vvp_fun_pmos_(bool enable_invert) } -void vvp_fun_pmos_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) +void vvp_fun_pmos_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, + vvp_context_t) { /* Data input is processed through eh recv_vec8 method, because the strength must be preserved. 
*/ @@ -89,7 +90,7 @@ vvp_fun_pmos::vvp_fun_pmos(bool enable_invert) void vvp_fun_pmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) { if (ptr.port() == 1) { - recv_vec4(ptr, reduce4(bit)); + recv_vec4(ptr, reduce4(bit), 0); return; } @@ -108,7 +109,7 @@ vvp_fun_rpmos::vvp_fun_rpmos(bool enable_invert) void vvp_fun_rpmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) { if (ptr.port() == 1) { - recv_vec4(ptr, reduce4(bit)); + recv_vec4(ptr, reduce4(bit), 0); return; } @@ -128,7 +129,8 @@ vvp_fun_cmos_::vvp_fun_cmos_() { } -void vvp_fun_cmos_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t &bit) +void vvp_fun_cmos_::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t &bit, + vvp_context_t) { /* Data input is processed through the recv_vec8 method, because the strength must be preserved. */ @@ -190,7 +192,7 @@ vvp_fun_cmos::vvp_fun_cmos() void vvp_fun_cmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) { if (ptr.port() == 1 || ptr.port() == 2) { - recv_vec4(ptr, reduce4(bit)); + recv_vec4(ptr, reduce4(bit), 0); return; } @@ -209,7 +211,7 @@ vvp_fun_rcmos::vvp_fun_rcmos() void vvp_fun_rcmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) { if (ptr.port() == 1) { - recv_vec4(ptr, reduce4(bit)); + recv_vec4(ptr, reduce4(bit), 0); return; } @@ -219,4 +221,3 @@ void vvp_fun_rcmos::recv_vec8(vvp_net_ptr_t ptr, const vvp_vector8_t&bit) bit_ = resistive_reduction(bit); generate_output_(ptr); } - diff --git a/vvp/npmos.h b/vvp/npmos.h index dc97b7290..31d36400a 100644 --- a/vvp/npmos.h +++ b/vvp/npmos.h @@ -51,7 +51,8 @@ class vvp_fun_pmos_ : public vvp_net_fun_t { public: explicit vvp_fun_pmos_(bool enable_invert); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); protected: void generate_output_(vvp_net_ptr_t port); @@ -107,7 +108,8 @@ class vvp_fun_cmos_ : public vvp_net_fun_t { public: explicit vvp_fun_cmos_(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t &bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t &bit, + vvp_context_t); protected: void generate_output_(vvp_net_ptr_t port); diff --git a/vvp/opcodes.txt b/vvp/opcodes.txt index 1cf47d03c..c9419c405 100644 --- a/vvp/opcodes.txt +++ b/vvp/opcodes.txt @@ -512,9 +512,14 @@ The is, line the %load/v, the result width. But unlike the (%load/vp0/s) to the desired width. * %load/wr , +* %load/ar , , -This instruction reads a real value from the vpi-like object to a word -register. +The %load/wr instruction reads a real value from the vpi-like object +to a word register . + +The %load/ar instruction reads a real value from an array. The +is the index register that contains the canonical word address into +the array. * %load/x1p , , @@ -708,8 +713,17 @@ The address (in canonical form) is precalculated and loaded into index register 3. This is the address of the word within the array. * %set/wr , +* %set/ar , , -This instruction writes a real word to the specified VPI-like object. +The %set/wr instruction writes a real word to the specified VPI-like +object. + +The %set/ar instruction writes a real work to the specified array +word. The addresses the array, and the is the +name of the index register to address into the word. The index +register must contain an integer value that is the canonical address +of the array word. The is the index register that contains the +real value word to write. 
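
The runtime side of %load/ar appears later in this patch as the of_LOAD_AR handler in vthread.cc; a lightly condensed version is shown here next to the opcode description for reference. The names and operand registers are exactly those used by the patch.

// Lightly condensed from the of_LOAD_AR handler added in vthread.cc by this
// patch. bit_idx[0] names the destination word register; bit_idx[1] names
// the index register that holds the canonical word address into the array.
bool of_LOAD_AR(vthread_t thr, vvp_code_t cp)
{
      unsigned bit = cp->bit_idx[0];
      unsigned idx = cp->bit_idx[1];
      unsigned adr = thr->words[idx].w_int;

      thr->words[bit].w_real = array_get_word_r(cp->array, adr);
      return true;
}

Note that the word address comes from an integer index register rather than being encoded in the instruction, which is what lets the same opcode address any word of the array at run time.
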
* %set/x0 , , diff --git a/vvp/parse.y b/vvp/parse.y index ce75da3c1..cd4081bb5 100644 --- a/vvp/parse.y +++ b/vvp/parse.y @@ -85,7 +85,7 @@ static struct __vpiModPath*modpath_dst = 0; %token K_THREAD K_TIMESCALE K_TRAN K_TRANIF0 K_TRANIF1 K_TRANVP K_UFUNC %token K_UDP K_UDP_C K_UDP_S %token K_VAR K_VAR_S K_VAR_I K_VAR_R K_vpi_call K_vpi_func K_vpi_func_r -%token K_disable K_fork K_alloc K_free +%token K_disable K_fork %token K_vpi_module K_vpi_time_precision K_file_names %token T_INSTR @@ -483,16 +483,16 @@ statement named event instead. */ | T_LABEL K_EVENT T_SYMBOL ',' symbols ';' - { compile_event($1, $3, $5.cnt, $5.vect, false); } + { compile_event($1, $3, $5.cnt, $5.vect); } | T_LABEL K_EVENT K_DEBUG T_SYMBOL ',' symbols ';' - { compile_event($1, $4, $6.cnt, $6.vect, true); } + { compile_event($1, $4, $6.cnt, $6.vect); } | T_LABEL K_EVENT T_STRING ';' { compile_named_event($1, $3); } | T_LABEL K_EVENT_OR symbols ';' - { compile_event($1, 0, $3.cnt, $3.vect, false); } + { compile_event($1, 0, $3.cnt, $3.vect); } /* Instructions may have a label, and have zero or more @@ -533,12 +533,6 @@ statement | label_opt K_fork symbol ',' symbol ';' { compile_fork($1, $3, $5); } - | label_opt K_alloc symbol ';' - { compile_alloc($1, $3); } - - | label_opt K_free symbol ';' - { compile_free($1, $3); } - /* Scope statements come in two forms. There are the scope declaration and the scope recall. The declarations create the scope, with their association with a parent. The label of the @@ -668,6 +662,11 @@ statement symbols_net ';' { compile_netw($1, $3, $4, $6, $7, true, true, $9.cnt, $9.vect); } + | T_LABEL K_NET_R T_SYMBOL T_NUMBER ',' + signed_t_number signed_t_number ',' + symbols_net ';' + { compile_netw_real($1, $3, $4, $6, $7, $9.cnt, $9.vect); } + /* Array word versions of alias directives. */ | T_LABEL K_ALIAS T_SYMBOL T_NUMBER ',' diff --git a/vvp/part.cc b/vvp/part.cc index 3b7f6e500..0db98b927 100644 --- a/vvp/part.cc +++ b/vvp/part.cc @@ -27,17 +27,34 @@ # include # include +struct vvp_fun_part_state_s { + vvp_fun_part_state_s() : bitsr(0.0) {} + + vvp_vector4_t bits; + double bitsr; +}; + vvp_fun_part::vvp_fun_part(unsigned base, unsigned wid) : base_(base), wid_(wid) { - net_ = 0; } vvp_fun_part::~vvp_fun_part() { } -void vvp_fun_part::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +vvp_fun_part_sa::vvp_fun_part_sa(unsigned base, unsigned wid) +: vvp_fun_part(base, wid) +{ + net_ = 0; +} + +vvp_fun_part_sa::~vvp_fun_part_sa() +{ +} + +void vvp_fun_part_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { assert(port.port() == 0); @@ -55,11 +72,12 @@ void vvp_fun_part::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) /* * Handle the case that the part select node is actually fed by a part * select assignment. It's not exactly clear what might make this - * happen, but is does seem to happen and this should have sell + * happen, but is does seem to happen and this should have well * defined behavior. 
*/ -void vvp_fun_part::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) +void vvp_fun_part_sa::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { assert(bit.size() == wid); @@ -69,10 +87,10 @@ void vvp_fun_part::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, assert(tmp.size() == vwid); tmp.set_vec(base, bit); - recv_vec4(port, tmp); + recv_vec4(port, tmp, 0); } -void vvp_fun_part::run_run() +void vvp_fun_part_sa::run_run() { vvp_net_t*ptr = net_; net_ = 0; @@ -82,7 +100,90 @@ void vvp_fun_part::run_run() if (idx + base_ < val_.size()) res.set_bit(idx, val_.value(base_+idx)); } - vvp_send_vec4(ptr->out, res); + vvp_send_vec4(ptr->out, res, 0); +} + +vvp_fun_part_aa::vvp_fun_part_aa(unsigned base, unsigned wid) +: vvp_fun_part(base, wid) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_part_aa::~vvp_fun_part_aa() +{ +} + +void vvp_fun_part_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new vvp_vector4_t); +} + +void vvp_fun_part_aa::reset_instance(vvp_context_t context) +{ + vvp_vector4_t*val = static_cast + (vvp_get_context_item(context, context_idx_)); + + val->set_to_x(); +} + +void vvp_fun_part_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + assert(port.port() == 0); + + vvp_vector4_t*val = static_cast + (vvp_get_context_item(context, context_idx_)); + + vvp_vector4_t tmp (wid_, BIT4_X); + for (unsigned idx = 0 ; idx < wid_ ; idx += 1) { + if (idx + base_ < bit.size()) + tmp.set_bit(idx, bit.value(base_+idx)); + } + if (!val->eeq( tmp )) { + *val = tmp; + vvp_send_vec4(port.ptr()->out, tmp, context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} + +/* + * Handle the case that the part select node is actually fed by a part + * select assignment. It's not exactly clear what might make this + * happen, but is does seem to happen and this should have well + * defined behavior. 
+ */ +void vvp_fun_part_aa::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context) +{ + if (context) { + assert(bit.size() == wid); + + vvp_vector4_t*val = static_cast + (vvp_get_context_item(context, context_idx_)); + + vvp_vector4_t tmp = *val; + if (tmp.size() == 0) + tmp = vvp_vector4_t(vwid); + + assert(tmp.size() == vwid); + tmp.set_vec(base, bit); + recv_vec4(port, tmp, context); + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4_pv(port, bit, base, wid, vwid, context); + context = vvp_get_next_context(context); + } + } } vvp_fun_part_pv::vvp_fun_part_pv(unsigned b, unsigned w, unsigned v) @@ -94,7 +195,8 @@ vvp_fun_part_pv::~vvp_fun_part_pv() { } -void vvp_fun_part_pv::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void vvp_fun_part_pv::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) { assert(port.port() == 0); @@ -106,7 +208,7 @@ void vvp_fun_part_pv::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) } assert(bit.size() == wid_); - vvp_send_vec4_pv(port.ptr()->out, bit, base_, wid_, vwid_); + vvp_send_vec4_pv(port.ptr()->out, bit, base_, wid_, vwid_, context); } void vvp_fun_part_pv::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) @@ -125,7 +227,7 @@ void vvp_fun_part_pv::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) } vvp_fun_part_var::vvp_fun_part_var(unsigned w) -: base_(0), wid_(w) +: wid_(w) { } @@ -133,18 +235,20 @@ vvp_fun_part_var::~vvp_fun_part_var() { } -void vvp_fun_part_var::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +bool vvp_fun_part_var::recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned&base, vvp_vector4_t&source, + vvp_vector4_t&ref) { unsigned long tmp; switch (port.port()) { case 0: - source_ = bit; + source = bit; break; case 1: tmp = ULONG_MAX; vector4_to_value(bit, tmp); - if (tmp == base_) return; - base_ = tmp; + if (tmp == base) return false; + base = tmp; break; default: fprintf(stderr, "Unsupported port type %d.\n", port.port()); @@ -155,21 +259,40 @@ void vvp_fun_part_var::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) vvp_vector4_t res (wid_); for (unsigned idx = 0 ; idx < wid_ ; idx += 1) { - unsigned adr = base_+idx; - if (adr >= source_.size()) + unsigned adr = base+idx; + if (adr >= source.size()) break; - res.set_bit(idx, source_.value(adr)); + res.set_bit(idx, source.value(adr)); } - if (! ref_.eeq(res)) { - ref_ = res; - vvp_send_vec4(port.ptr()->out, res); + if (! 
ref.eeq(res)) { + ref = res; + return true; + } + return false; +} + +vvp_fun_part_var_sa::vvp_fun_part_var_sa(unsigned w) +: vvp_fun_part_var(w), base_(0) +{ +} + +vvp_fun_part_var_sa::~vvp_fun_part_var_sa() +{ +} + +void vvp_fun_part_var_sa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) +{ + if (recv_vec4_(port, bit, base_, source_, ref_)) { + vvp_send_vec4(port.ptr()->out, ref_, 0); } } -void vvp_fun_part_var::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) +void vvp_fun_part_var_sa::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { assert(bit.size() == wid); @@ -179,8 +302,86 @@ void vvp_fun_part_var::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, assert(tmp.size() == vwid); tmp.set_vec(base, bit); - recv_vec4(port, tmp); + recv_vec4(port, tmp, 0); +} +struct vvp_fun_part_var_state_s { + vvp_fun_part_var_state_s() : base(0) { } + + unsigned base; + vvp_vector4_t source; + vvp_vector4_t ref; +}; + +vvp_fun_part_var_aa::vvp_fun_part_var_aa(unsigned w) +: vvp_fun_part_var(w) +{ + context_scope_ = vpip_peek_context_scope(); + context_idx_ = vpip_add_item_to_context(this, context_scope_); +} + +vvp_fun_part_var_aa::~vvp_fun_part_var_aa() +{ +} + +void vvp_fun_part_var_aa::alloc_instance(vvp_context_t context) +{ + vvp_set_context_item(context, context_idx_, new vvp_fun_part_var_state_s); +} + +void vvp_fun_part_var_aa::reset_instance(vvp_context_t context) +{ + vvp_fun_part_var_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + state->base = 0; + state->source.set_to_x(); + state->ref.set_to_x(); +} + +void vvp_fun_part_var_aa::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context) +{ + if (context) { + vvp_fun_part_var_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + if (recv_vec4_(port, bit, state->base, state->source, state->ref)) { + vvp_send_vec4(port.ptr()->out, state->ref, context); + } + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } +} + +void vvp_fun_part_var_aa::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context) +{ + if (context) { + vvp_fun_part_var_state_s*state = static_cast + (vvp_get_context_item(context, context_idx_)); + + assert(bit.size() == wid); + + vvp_vector4_t tmp = state->source; + if (tmp.size() == 0) + tmp = vvp_vector4_t(vwid); + + assert(tmp.size() == vwid); + tmp.set_vec(base, bit); + recv_vec4(port, tmp, context); + } else { + context = context_scope_->live_contexts; + while (context) { + recv_vec4(port, bit, context); + context = vvp_get_next_context(context); + } + } } /* @@ -201,7 +402,12 @@ void link_node_1(char*label, char*source, vvp_net_fun_t*fun) void compile_part_select(char*label, char*source, unsigned base, unsigned wid) { - vvp_fun_part*fun = new vvp_fun_part(base, wid); + vvp_fun_part*fun = 0; + if (vpip_peek_current_scope()->is_automatic) { + fun = new vvp_fun_part_aa(base, wid); + } else { + fun = new vvp_fun_part_sa(base, wid); + } link_node_1(label, source, fun); } @@ -216,7 +422,12 @@ void compile_part_select_pv(char*label, char*source, void compile_part_select_var(char*label, char*source, char*var, unsigned wid) { - vvp_fun_part_var*fun = new vvp_fun_part_var(wid); + vvp_fun_part_var*fun = 0; + if 
(vpip_peek_current_scope()->is_automatic) { + fun = new vvp_fun_part_var_aa(wid); + } else { + fun = new vvp_fun_part_var_sa(wid); + } vvp_net_t*net = new vvp_net_t; net->fun = fun; @@ -226,4 +437,3 @@ void compile_part_select_var(char*label, char*source, char*var, input_connect(net, 0, source); input_connect(net, 1, var); } - diff --git a/vvp/part.h b/vvp/part.h index 3fdffd886..b6ff2bf10 100644 --- a/vvp/part.h +++ b/vvp/part.h @@ -27,32 +27,73 @@ * select starts. Input 2, which is typically constant, is the width * of the result. */ -class vvp_fun_part : public vvp_net_fun_t, private vvp_gen_event_s { +class vvp_fun_part : public vvp_net_fun_t { public: vvp_fun_part(unsigned base, unsigned wid); ~vvp_fun_part(); + protected: + unsigned base_; + unsigned wid_; +}; + +/* + * Statically allocated vvp_fun_part. + */ +class vvp_fun_part_sa : public vvp_fun_part, public vvp_gen_event_s { + public: - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + vvp_fun_part_sa(unsigned base, unsigned wid); + ~vvp_fun_part_sa(); + + public: + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned, unsigned, unsigned); + unsigned, unsigned, unsigned, + vvp_context_t); private: void run_run(); private: - unsigned base_; - unsigned wid_; vvp_vector4_t val_; vvp_net_t*net_; }; +/* + * Automatically allocated vvp_fun_part. + */ +class vvp_fun_part_aa : public vvp_fun_part, public automatic_hooks_s { + + public: + vvp_fun_part_aa(unsigned base, unsigned wid); + ~vvp_fun_part_aa(); + + public: + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned, unsigned, unsigned, + vvp_context_t context); + + private: + struct __vpiScope*context_scope_; + unsigned context_idx_; +}; + /* vvp_fun_part_pv * This node takes a vector input and turns it into the part select of * a wider output network. It used the recv_vec4_pv methods of the - * destination nodes to propagate the part select. + * destination nodes to propagate the part select. It can be used in + * both statically and automatically allocated scopes, as it has no + * dynamic state. */ class vvp_fun_part_pv : public vvp_net_fun_t { @@ -61,7 +102,9 @@ class vvp_fun_part_pv : public vvp_net_fun_t { ~vvp_fun_part_pv(); public: - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); private: @@ -81,18 +124,61 @@ class vvp_fun_part_var : public vvp_net_fun_t { explicit vvp_fun_part_var(unsigned wid); ~vvp_fun_part_var(); + protected: + bool recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned&base, vvp_vector4_t&source, + vvp_vector4_t&ref); + + unsigned wid_; +}; + +/* + * Statically allocated vvp_fun_part_var. 
+ */ +class vvp_fun_part_var_sa : public vvp_fun_part_var { + public: - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + explicit vvp_fun_part_var_sa(unsigned wid); + ~vvp_fun_part_var_sa(); + + public: + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned, unsigned, unsigned); + unsigned, unsigned, unsigned, + vvp_context_t); private: unsigned base_; - unsigned wid_; vvp_vector4_t source_; // Save the last output, for detecting change. vvp_vector4_t ref_; }; +/* + * Automatically allocated vvp_fun_part_var. + */ +class vvp_fun_part_var_aa : public vvp_fun_part_var, public automatic_hooks_s { + + public: + explicit vvp_fun_part_var_aa(unsigned wid); + ~vvp_fun_part_var_aa(); + + public: + void alloc_instance(vvp_context_t context); + void reset_instance(vvp_context_t context); + + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t context); + + void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, + unsigned, unsigned, unsigned, + vvp_context_t context); + + private: + struct __vpiScope*context_scope_; + unsigned context_idx_; +}; + #endif diff --git a/vvp/reduce.cc b/vvp/reduce.cc index 238193845..0ed283daf 100644 --- a/vvp/reduce.cc +++ b/vvp/reduce.cc @@ -34,7 +34,8 @@ * All the reduction operations take a single vector input and produce * a scalar result. The vvp_reduce_base class codifies these general * characteristics, leaving only the calculation of the result for the - * base class. + * base class. This can be used in both statically and automatically + * allocated scopes, as bits_ is only used for temporary storage. */ class vvp_reduce_base : public vvp_net_fun_t { @@ -42,9 +43,11 @@ class vvp_reduce_base : public vvp_net_fun_t { vvp_reduce_base(); virtual ~vvp_reduce_base(); - void recv_vec4(vvp_net_ptr_t prt, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t prt, const vvp_vector4_t&bit, + vvp_context_t context); void recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context); virtual vvp_bit4_t calculate_result() const =0; @@ -60,16 +63,18 @@ vvp_reduce_base::~vvp_reduce_base() { } -void vvp_reduce_base::recv_vec4(vvp_net_ptr_t prt, const vvp_vector4_t&bit) +void vvp_reduce_base::recv_vec4(vvp_net_ptr_t prt, const vvp_vector4_t&bit, + vvp_context_t context) { bits_ = bit; vvp_bit4_t res = calculate_result(); vvp_vector4_t rv (1, res); - vvp_send_vec4(prt.ptr()->out, rv); + vvp_send_vec4(prt.ptr()->out, rv, context); } void vvp_reduce_base::recv_vec4_pv(vvp_net_ptr_t prt, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t context) { if (bits_.size() == 0) { bits_ = vvp_vector4_t(vwid); @@ -80,7 +85,7 @@ void vvp_reduce_base::recv_vec4_pv(vvp_net_ptr_t prt, const vvp_vector4_t&bit, bits_.set_vec(base, bit); vvp_bit4_t res = calculate_result(); vvp_vector4_t rv (1, res); - vvp_send_vec4(prt.ptr()->out, rv); + vvp_send_vec4(prt.ptr()->out, rv, context); } class vvp_reduce_and : public vvp_reduce_base { diff --git a/vvp/resolv.cc b/vvp/resolv.cc index 3b779a093..4919a1223 100644 --- a/vvp/resolv.cc +++ b/vvp/resolv.cc @@ -35,13 +35,15 @@ resolv_functor::~resolv_functor() { } -void resolv_functor::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void resolv_functor::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + 
vvp_context_t) { recv_vec8(port, vvp_vector8_t(bit, 6,6 /* STRONG */)); } void resolv_functor::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid) + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t) { assert(bit.size() == wid); vvp_vector4_t res (vwid); @@ -55,7 +57,7 @@ void resolv_functor::recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, for (unsigned idx = base+wid ; idx < vwid ; idx += 1) res.set_bit(idx, BIT4_Z); - recv_vec4(port, res); + recv_vec4(port, res, 0); } void resolv_functor::recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit) @@ -123,7 +125,8 @@ resolv_wired_logic::~resolv_wired_logic() { } -void resolv_wired_logic::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) +void resolv_wired_logic::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t) { unsigned pdx = port.port(); vvp_net_t*ptr = port.ptr(); @@ -143,7 +146,7 @@ void resolv_wired_logic::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit) out = wired_logic_math_(out, val_[idx]); } - vvp_send_vec4(ptr->out, out); + vvp_send_vec4(ptr->out, out, 0); } vvp_vector4_t resolv_triand::wired_logic_math_(vvp_vector4_t&a, vvp_vector4_t&b) diff --git a/vvp/resolv.h b/vvp/resolv.h index 4f0e683c6..1766f6ea3 100644 --- a/vvp/resolv.h +++ b/vvp/resolv.h @@ -40,11 +40,13 @@ class resolv_functor : public vvp_net_fun_t { explicit resolv_functor(vvp_scalar_t hiz_value, const char* debug =0); ~resolv_functor(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit); void recv_vec4_pv(vvp_net_ptr_t port, const vvp_vector4_t&bit, - unsigned base, unsigned wid, unsigned vwid); + unsigned base, unsigned wid, unsigned vwid, + vvp_context_t); void recv_vec8_pv(vvp_net_ptr_t port, const vvp_vector8_t&bit, unsigned base, unsigned wid, unsigned vwid); @@ -62,7 +64,8 @@ class resolv_wired_logic : public vvp_net_fun_t { explicit resolv_wired_logic(void); ~resolv_wired_logic(); - void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit); + void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit, + vvp_context_t); protected: virtual vvp_vector4_t wired_logic_math_(vvp_vector4_t&a, vvp_vector4_t&b) =0; diff --git a/vvp/schedule.cc b/vvp/schedule.cc index 50944c695..27bacc263 100644 --- a/vvp/schedule.cc +++ b/vvp/schedule.cc @@ -141,9 +141,9 @@ void assign_vector4_event_s::run_run(void) { count_assign_events += 1; if (vwid > 0) - vvp_send_vec4_pv(ptr, val, base, val.size(), vwid); + vvp_send_vec4_pv(ptr, val, base, val.size(), vwid, 0); else - vvp_send_vec4(ptr, val); + vvp_send_vec4(ptr, val, 0); } static const size_t ASSIGN4_CHUNK_COUNT = 524288 / sizeof(struct assign_vector4_event_s); @@ -205,7 +205,7 @@ struct assign_real_event_s : public event_s { void assign_real_event_s::run_run(void) { count_assign_events += 1; - vvp_send_real(ptr, val); + vvp_send_real(ptr, val, 0); } static const size_t ASSIGNR_CHUNK_COUNT = 8192 / sizeof(struct assign_real_event_s); diff --git a/vvp/ufunc.cc b/vvp/ufunc.cc index caa7d78de..f7b0b01f2 100644 --- a/vvp/ufunc.cc +++ b/vvp/ufunc.cc @@ -17,6 +17,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ +# include "vvp_net.h" # include "compile.h" # include "symbols.h" # include "codes.h" @@ -61,15 +62,15 @@ ufunc_core::~ufunc_core() * input variables of the function for execution. 
The method copies * the input values collected by the core to the variables. */ -void ufunc_core::assign_bits_to_ports(void) +void ufunc_core::assign_bits_to_ports(vvp_context_t context) { for (unsigned idx = 0 ; idx < port_count() ; idx += 1) { vvp_net_t*net = ports_[idx]; vvp_net_ptr_t pp (net, 0); if (vvp_fun_signal_real*tmp = dynamic_cast(net->fun)) - tmp->recv_real(pp, value_r(idx)); + tmp->recv_real(pp, value_r(idx), context); if (vvp_fun_signal_vec*tmp = dynamic_cast(net->fun)) - tmp->recv_vec4(pp, value(idx)); + tmp->recv_vec4(pp, value(idx), context); } } diff --git a/vvp/ufunc.h b/vvp/ufunc.h index 28b88e9cd..1d7feebfc 100644 --- a/vvp/ufunc.h +++ b/vvp/ufunc.h @@ -61,7 +61,7 @@ class ufunc_core : public vvp_wide_fun_core { struct __vpiScope*call_scope() { return call_scope_; } struct __vpiScope*func_scope() { return func_scope_; } - void assign_bits_to_ports(void); + void assign_bits_to_ports(vvp_context_t context); void finish_thread(vthread_t thr); private: diff --git a/vvp/vpi_callback.cc b/vvp/vpi_callback.cc index a422399fa..c99cf3b03 100644 --- a/vvp/vpi_callback.cc +++ b/vvp/vpi_callback.cc @@ -569,7 +569,7 @@ void vvp_vpi_callback_wordable::attach_as_word(vvp_array_t arr, unsigned long ad array_word_ = addr; } -void vvp_fun_signal::get_value(struct t_vpi_value*vp) +void vvp_fun_signal4::get_value(struct t_vpi_value*vp) { switch (vp->format) { case vpiScalarVal: diff --git a/vvp/vpi_event.cc b/vvp/vpi_event.cc index bd99a7397..bb0385603 100644 --- a/vvp/vpi_event.cc +++ b/vvp/vpi_event.cc @@ -27,6 +27,21 @@ # include # include +static int named_event_get(int code, vpiHandle ref) +{ + assert((ref->vpi_type->type_code==vpiNamedEvent)); + + struct __vpiNamedEvent*obj = (struct __vpiNamedEvent*)ref; + + switch (code) { + + case vpiAutomatic: + return (int) obj->scope->is_automatic; + } + + return 0; +} + static char* named_event_get_str(int code, vpiHandle ref) { assert((ref->vpi_type->type_code==vpiNamedEvent)); @@ -57,7 +72,7 @@ static vpiHandle named_event_get_handle(int code, vpiHandle ref) static const struct __vpirt vpip_named_event_rt = { vpiNamedEvent, - 0, + named_event_get, named_event_get_str, 0, 0, @@ -122,4 +137,3 @@ void vpip_run_named_event_callbacks(vpiHandle ref) } } } - diff --git a/vvp/vpi_priv.cc b/vvp/vpi_priv.cc index f0338e909..27af301cf 100644 --- a/vvp/vpi_priv.cc +++ b/vvp/vpi_priv.cc @@ -58,6 +58,14 @@ struct __vpiScope* vpip_scope(__vpiSignal*sig) return sig->within.scope; } +struct __vpiScope* vpip_scope(__vpiRealVar*sig) +{ + if (sig->is_netarray) + return (struct __vpiScope*) vpi_handle(vpiScope, sig->within.parent); + else + return sig->within.scope; +} + const char *vpip_string(const char*str) { static vpip_string_chunk first_chunk = {0, {0}}; diff --git a/vvp/vpi_priv.h b/vvp/vpi_priv.h index e2b0634a7..a9ffd0369 100644 --- a/vvp/vpi_priv.h +++ b/vvp/vpi_priv.h @@ -183,8 +183,10 @@ struct __vpiScope { /* Keep an array of items to be automatically allocated */ struct automatic_hooks_s**item; unsigned nitem; + /* Keep a list of live contexts. */ + vvp_context_t live_contexts; /* Keep a list of freed contexts. */ - vvp_context_t free_context; + vvp_context_t free_contexts; /* Keep a list of threads in the scope. 
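
These live_contexts/free_contexts lists, together with the vpip_peek_context_scope() and vpip_add_item_to_context() entry points declared further down in this header, are what the automatically allocated functors hook into. The sketch below distills that usage pattern from the vvp_fun_part_aa code earlier in this patch; the class name and the choice of vvp_vector4_t as the per-context state are illustrative only.

# include "vvp_net.h"   // headers as used elsewhere in this patch
# include "vpi_priv.h"  // (ufunc.cc includes vvp_net.h; scope declarations live here)

// Illustrative automatically allocated functor. The constructor claims a
// slot in the owning context scope; alloc_instance/reset_instance manage
// the per-context copy of the state; recv_vec4 either works on the given
// context or fans the value out to every live context of the scope.
class example_fun_aa : public vvp_net_fun_t, public automatic_hooks_s {

    public:
      example_fun_aa()
      {
            context_scope_ = vpip_peek_context_scope();
            context_idx_ = vpip_add_item_to_context(this, context_scope_);
      }

      void alloc_instance(vvp_context_t context)
      {
            vvp_set_context_item(context, context_idx_, new vvp_vector4_t);
      }

      void reset_instance(vvp_context_t context)
      {
            vvp_vector4_t*val = static_cast<vvp_vector4_t*>
                  (vvp_get_context_item(context, context_idx_));
            val->set_to_x();
      }

      void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit,
                     vvp_context_t context)
      {
            if (context) {
                    // Update only this context's private copy of the state.
                  vvp_vector4_t*val = static_cast<vvp_vector4_t*>
                        (vvp_get_context_item(context, context_idx_));
                  if (! val->eeq(bit)) {
                        *val = bit;
                        vvp_send_vec4(port.ptr()->out, bit, context);
                  }
            } else {
                    // No context given: deliver to every live context.
                  context = context_scope_->live_contexts;
                  while (context) {
                        recv_vec4(port, bit, context);
                        context = vvp_get_next_context(context);
                  }
            }
      }

    private:
      struct __vpiScope*context_scope_;
      unsigned context_idx_;
};

A call that arrives with a non-null context (from a thread running in an automatic scope) updates only that instance; a call with a null context (from a static driver) fans the value out over live_contexts, so every active instance of the scope observes it.
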
*/ vthread_t threads; signed int time_units :8; @@ -193,7 +195,9 @@ struct __vpiScope { extern struct __vpiScope* vpip_peek_current_scope(void); extern void vpip_attach_to_current_scope(vpiHandle obj); -extern void vpip_add_item_to_current_scope(automatic_hooks_s*item); +extern struct __vpiScope* vpip_peek_context_scope(void); +extern unsigned vpip_add_item_to_context(automatic_hooks_s*item, + struct __vpiScope*scope); extern vpiHandle vpip_make_root_iterator(void); extern void vpip_make_root_iterator(struct __vpiHandle**&table, unsigned&ntable); @@ -219,6 +223,7 @@ struct __vpiSignal { unsigned signed_flag : 1; unsigned isint_ : 1; // original type was integer unsigned is_netarray : 1; // This is word of a net array + unsigned is_automatic : 1; /* The represented value is here. */ vvp_net_t*node; }; @@ -349,17 +354,22 @@ extern void vpip_real_value_change(struct __vpiCallback*cbh, */ struct __vpiRealVar { struct __vpiHandle base; - vpiHandle parent; - struct __vpiScope* scope; + union { // The scope or parent array that contains me. + vpiHandle parent; + struct __vpiScope* scope; + } within; /* The name of this variable, or the index for array words. */ union { const char*name; vpiHandle index; } id; + unsigned is_netarray : 1; // This is word of a net array vvp_net_t*net; }; +extern struct __vpiScope* vpip_scope(__vpiRealVar*sig); extern vpiHandle vpip_make_real_var(const char*name, vvp_net_t*net); +extern struct __vpiRealVar* vpip_realvar_from_handle(vpiHandle obj); /* * When a loaded VPI module announces a system task/function, one diff --git a/vvp/vpi_real.cc b/vvp/vpi_real.cc index a6d5f54c6..a0c83d466 100644 --- a/vvp/vpi_real.cc +++ b/vvp/vpi_real.cc @@ -28,21 +28,33 @@ #endif # include +struct __vpiRealVar* vpip_realvar_from_handle(vpiHandle obj) +{ + assert(obj); + if (obj->vpi_type->type_code == vpiRealVar) + return (struct __vpiRealVar*)obj; + else + return 0; +} + static int real_var_get(int code, vpiHandle ref) { assert(ref->vpi_type->type_code == vpiRealVar); - struct __vpiRealVar*rfp = (struct __vpiRealVar*)ref; + struct __vpiRealVar*rfp = vpip_realvar_from_handle(ref); switch (code) { case vpiArray: - return rfp->parent != 0; + return rfp->is_netarray != 0; case vpiSize: return 1; case vpiLineNo: return 0; // Not implemented for now! + + case vpiAutomatic: + return (int) vpip_scope(rfp)->is_automatic; } return 0; @@ -59,8 +71,8 @@ static char* real_var_get_str(int code, vpiHandle ref) } char *nm, *ixs; - if (rfp->parent) { - nm = strdup(vpi_get_str(vpiName, rfp->parent)); + if (rfp->is_netarray) { + nm = strdup(vpi_get_str(vpiName, rfp->within.parent)); s_vpi_value vp; vp.format = vpiDecStrVal; vpi_get_value(rfp->id.index, &vp); @@ -70,7 +82,7 @@ static char* real_var_get_str(int code, vpiHandle ref) ixs = NULL; } - char *rbuf = generic_get_str(code, &rfp->scope->base, nm, ixs); + char *rbuf = generic_get_str(code, &(vpip_scope(rfp)->base), nm, ixs); free(nm); return rbuf; } @@ -84,10 +96,13 @@ static vpiHandle real_var_get_handle(int code, vpiHandle ref) switch (code) { case vpiParent: - return rfp->parent; + return rfp->is_netarray ? rfp->within.parent : 0; case vpiIndex: - return rfp->parent ? rfp->id.index : 0; + return rfp->is_netarray ? rfp->id.index : 0; + + case vpiScope: + return &(vpip_scope(rfp)->base); } return 0; @@ -100,8 +115,8 @@ static vpiHandle real_var_iterate(int code, vpiHandle ref) struct __vpiRealVar*rfp = (struct __vpiRealVar*)ref; if (code == vpiIndex) { - return rfp->parent ? 
(rfp->id.index->vpi_type->iterate_) - (code, rfp->id.index) : 0; + return rfp->is_netarray ? (rfp->id.index->vpi_type->iterate_) + (code, rfp->id.index) : 0; } return 0; @@ -131,11 +146,13 @@ static vpiHandle real_var_put_value(vpiHandle ref, p_vpi_value vp, int) switch (vp->format) { case vpiRealVal: - vvp_send_real(destination, vp->value.real); + vvp_send_real(destination, vp->value.real, + vthread_get_wt_context()); break; case vpiIntVal: - vvp_send_real(destination, (double)vp->value.integer); + vvp_send_real(destination, (double)vp->value.integer, + vthread_get_wt_context()); break; default: @@ -174,22 +191,17 @@ void vpip_real_value_change(struct __vpiCallback*cbh, fun->add_vpi_callback(cbh); } -/* - * Since reals do not currently support arrays none of the array code - * has been tested! Though it should work since it is a copy of the - * signal code. - */ vpiHandle vpip_make_real_var(const char*name, vvp_net_t*net) { struct __vpiRealVar*obj = (struct __vpiRealVar*) malloc(sizeof(struct __vpiRealVar)); obj->base.vpi_type = &vpip_real_var_rt; - obj->parent = 0; - obj->id.name = vpip_name_string(name); + obj->id.name = name ? vpip_name_string(name) : 0; + obj->is_netarray = 0; obj->net = net; - obj->scope = vpip_peek_current_scope(); + obj->within.scope = vpip_peek_current_scope(); return &obj->base; } diff --git a/vvp/vpi_scope.cc b/vvp/vpi_scope.cc index 1a18e0f56..f12103753 100644 --- a/vvp/vpi_scope.cc +++ b/vvp/vpi_scope.cc @@ -316,26 +316,6 @@ static void attach_to_scope_(struct __vpiScope*scope, vpiHandle obj) scope->intern[idx] = obj; } -static void add_item_to_scope_(struct __vpiScope*scope, automatic_hooks_s*item) -{ - assert(scope); - - // there is no need to record items for static scopes - if (!scope->is_automatic) return; - - unsigned idx = scope->nitem++; - item->context_idx = 1 + idx; - - if (scope->item == 0) - scope->item = (automatic_hooks_s**) - malloc(sizeof(automatic_hooks_s*)); - else - scope->item = (automatic_hooks_s**) - realloc(scope->item, sizeof(automatic_hooks_s*)*scope->nitem); - - scope->item[idx] = item; -} - /* * When the compiler encounters a scope declaration, this function * creates and initializes a __vpiScope object with the requested name @@ -392,7 +372,8 @@ compile_scope_decl(char*label, char*type, char*name, const char*tname, scope->nintern = 0; scope->item = 0; scope->nitem = 0; - scope->free_context = 0; + scope->live_contexts = 0; + scope->free_contexts = 0; scope->threads = 0; current_scope = scope; @@ -415,6 +396,10 @@ compile_scope_decl(char*label, char*type, char*name, const char*tname, scope->time_units = sp->time_units; scope->time_precision = sp->time_precision; + /* Scopes within automatic scopes are themselves automatic. */ + if (sp->is_automatic) + scope->is_automatic = true; + } else { scope->scope = 0x0; @@ -458,7 +443,38 @@ void vpip_attach_to_current_scope(vpiHandle obj) attach_to_scope_(current_scope, obj); } -void vpip_add_item_to_current_scope(automatic_hooks_s*item) +struct __vpiScope* vpip_peek_context_scope(void) { - add_item_to_scope_(current_scope, item); + struct __vpiScope*scope = current_scope; + + /* A context is allocated for each automatic task or function. + Storage for nested scopes (named blocks) is allocated in + the parent context. 
*/ + while (scope->scope && scope->scope->is_automatic) + scope = scope->scope; + + return scope; } + +unsigned vpip_add_item_to_context(automatic_hooks_s*item, + struct __vpiScope*scope) +{ + assert(scope); + assert(scope->is_automatic); + + unsigned idx = scope->nitem++; + + if (scope->item == 0) + scope->item = (automatic_hooks_s**) + malloc(sizeof(automatic_hooks_s*)); + else + scope->item = (automatic_hooks_s**) + realloc(scope->item, sizeof(automatic_hooks_s*)*scope->nitem); + + scope->item[idx] = item; + + /* Offset the context index by 2 to leave space for the list links. */ + return 2 + idx; +} + + diff --git a/vvp/vpi_signal.cc b/vvp/vpi_signal.cc index 02cd3dab0..575372434 100644 --- a/vvp/vpi_signal.cc +++ b/vvp/vpi_signal.cc @@ -531,6 +531,8 @@ static int signal_get(int code, vpiHandle ref) case vpiLeftRange: return rfp->msb; case vpiRightRange: return rfp->lsb; + case vpiAutomatic: return rfp->is_automatic; + case _vpiNexusId: if (rfp->msb == rfp->lsb) return (int) (unsigned long) rfp->node; @@ -765,7 +767,7 @@ static vpiHandle signal_put_value(vpiHandle ref, s_vpi_value*vp, int flags) port-0. This is the port where signals receive input. */ vvp_net_ptr_t destination (rfp->node, dest_port); - vvp_send_vec4(destination, val); + vvp_send_vec4(destination, val, vthread_get_wt_context()); return ref; } @@ -861,6 +863,7 @@ vpiHandle vpip_make_int(const char*name, int msb, int lsb, vvp_net_t*vec) struct __vpiSignal*rfp = (struct __vpiSignal*)obj; obj->vpi_type = &vpip_reg_rt; rfp->isint_ = true; + rfp->is_automatic = vpip_peek_current_scope()->is_automatic; return obj; } @@ -871,7 +874,9 @@ vpiHandle vpip_make_reg(const char*name, int msb, int lsb, bool signed_flag, vvp_net_t*vec) { vpiHandle obj = vpip_make_net(name, msb,lsb, signed_flag, vec); + struct __vpiSignal*rfp = (struct __vpiSignal*)obj; obj->vpi_type = &vpip_reg_rt; + rfp->is_automatic = vpip_peek_current_scope()->is_automatic; return obj; } @@ -910,6 +915,7 @@ vpiHandle vpip_make_net(const char*name, int msb, int lsb, obj->signed_flag = signed_flag? 1 : 0; obj->isint_ = 0; obj->is_netarray = 0; + obj->is_automatic = vpip_peek_current_scope()->is_automatic; obj->node = node; // Place this object within a scope. If this object is @@ -1100,9 +1106,10 @@ static vpiHandle PV_put_value(vpiHandle ref, p_vpi_value vp, int) vvp_net_ptr_t dest(rfp->net, 0); if (full_sig) { - vvp_send_vec4(dest, val); + vvp_send_vec4(dest, val, vthread_get_wt_context()); } else { - vvp_send_vec4_pv(dest, val, base, width, sig_size); + vvp_send_vec4_pv(dest, val, base, width, sig_size, + vthread_get_wt_context()); } return 0; diff --git a/vvp/vpi_tasks.cc b/vvp/vpi_tasks.cc index 403125406..6e6fd2638 100644 --- a/vvp/vpi_tasks.cc +++ b/vvp/vpi_tasks.cc @@ -403,7 +403,7 @@ static vpiHandle sysfunc_put_4net_value(vpiHandle ref, p_vpi_value vp, int) assert(0); } - vvp_send_vec4(rfp->fnet->out, val); + vvp_send_vec4(rfp->fnet->out, val, vthread_get_wt_context()); return 0; } @@ -427,7 +427,7 @@ static vpiHandle sysfunc_put_rnet_value(vpiHandle ref, p_vpi_value vp, int) assert(0); } - vvp_send_real(rfp->fnet->out, val); + vvp_send_real(rfp->fnet->out, val, vthread_get_wt_context()); return 0; } diff --git a/vvp/vthread.cc b/vvp/vthread.cc index 4a0b89249..b8e6871f3 100644 --- a/vvp/vthread.cc +++ b/vvp/vthread.cc @@ -303,15 +303,16 @@ static void multiply_array_imm(unsigned long*res, unsigned long*val, /* * Allocate a context for use by a child thread. By preference, use - * the last freed context. If none available, create a new one. 
+ * the last freed context. If none available, create a new one. Add + * it to the list of live contexts in that scope. */ -static vvp_context_t vthread_alloc_context(__vpiScope*scope) +static vvp_context_t vthread_alloc_context(struct __vpiScope*scope) { assert(scope->is_automatic); - vvp_context_t context = scope->free_context; + vvp_context_t context = scope->free_contexts; if (context) { - scope->free_context = vvp_get_next_context(context); + scope->free_contexts = vvp_get_next_context(context); for (unsigned idx = 0 ; idx < scope->nitem ; idx += 1) { scope->item[idx]->reset_instance(context); } @@ -322,20 +323,35 @@ static vvp_context_t vthread_alloc_context(__vpiScope*scope) } } + vvp_set_next_context(context, scope->live_contexts); + scope->live_contexts = context; + return context; } /* * Free a context previously allocated to a child thread by pushing it - * onto the freed context stack. + * onto the freed context stack. Remove it from the list of live contexts + * in that scope. */ -static void vthread_free_context(vvp_context_t context, __vpiScope*scope) +static void vthread_free_context(vvp_context_t context, struct __vpiScope*scope) { assert(scope->is_automatic); assert(context); - vvp_set_next_context(context, scope->free_context); - scope->free_context = context; + if (context == scope->live_contexts) { + scope->live_contexts = vvp_get_next_context(context); + } else { + vvp_context_t tmp = scope->live_contexts; + while (context != vvp_get_next_context(tmp)) { + assert(tmp); + tmp = vvp_get_next_context(tmp); + } + vvp_set_next_context(tmp, vvp_get_next_context(context)); + } + + vvp_set_next_context(context, scope->free_contexts); + scope->free_contexts = context; } /* @@ -530,6 +546,22 @@ void vthread_schedule_list(vthread_t thr) schedule_vthread(thr, 0); } +vvp_context_t vthread_get_wt_context() +{ + if (running_thread) + return running_thread->wt_context; + else + return 0; +} + +vvp_context_t vthread_get_rd_context() +{ + if (running_thread) + return running_thread->rd_context; + else + return 0; +} + vvp_context_item_t vthread_get_wt_context_item(unsigned context_idx) { assert(running_thread && running_thread->wt_context); @@ -557,7 +589,7 @@ bool of_ALLOC(vthread_t thr, vvp_code_t cp) vvp_context_t child_context = vthread_alloc_context(cp->scope); /* Push the allocated context onto the write context stack. */ - vvp_set_next_context(child_context, thr->wt_context); + vvp_set_stacked_context(child_context, thr->wt_context); thr->wt_context = child_context; return true; @@ -1205,7 +1237,7 @@ bool of_CASSIGN_V(vthread_t thr, vvp_code_t cp) /* set the value into port 1 of the destination. */ vvp_net_ptr_t ptr (net, 1); - vvp_send_vec4(ptr, value); + vvp_send_vec4(ptr, value, 0); return true; } @@ -1217,7 +1249,7 @@ bool of_CASSIGN_WR(vthread_t thr, vvp_code_t cp) /* Set the value into port 1 of the destination. */ vvp_net_ptr_t ptr (net, 1); - vvp_send_real(ptr, value); + vvp_send_real(ptr, value, 0); return true; } @@ -1251,7 +1283,7 @@ bool of_CASSIGN_X0(vthread_t thr, vvp_code_t cp) vvp_vector4_t vector = vthread_bits_to_vector(thr, base, wid); vvp_net_ptr_t ptr (net, 1); - vvp_send_vec4_pv(ptr, vector, index, wid, sig->size()); + vvp_send_vec4_pv(ptr, vector, index, wid, sig->size(), 0); return true; } @@ -2315,7 +2347,7 @@ bool of_FORCE_V(vthread_t thr, vvp_code_t cp) /* Set the value into port 2 of the destination. 
*/ vvp_net_ptr_t ptr (net, 2); - vvp_send_vec4(ptr, value); + vvp_send_vec4(ptr, value, 0); return true; } @@ -2327,7 +2359,7 @@ bool of_FORCE_WR(vthread_t thr, vvp_code_t cp) /* Set the value into port 2 of the destination. */ vvp_net_ptr_t ptr (net, 2); - vvp_send_real(ptr, value); + vvp_send_real(ptr, value, 0); return true; } @@ -2362,7 +2394,7 @@ bool of_FORCE_X0(vthread_t thr, vvp_code_t cp) vvp_vector4_t vector = vthread_bits_to_vector(thr, base, wid); vvp_net_ptr_t ptr (net, 2); - vvp_send_vec4_pv(ptr, vector, index, wid, sig->size()); + vvp_send_vec4_pv(ptr, vector, index, wid, sig->size(), 0); return true; } @@ -2410,7 +2442,7 @@ bool of_FREE(vthread_t thr, vvp_code_t cp) { /* Pop the child context from the read context stack. */ vvp_context_t child_context = thr->rd_context; - thr->rd_context = vvp_get_next_context(child_context); + thr->rd_context = vvp_get_stacked_context(child_context); /* Free the context. */ vthread_free_context(child_context, cp->scope); @@ -2701,10 +2733,10 @@ bool of_JOIN(vthread_t thr, vvp_code_t cp) if (thr->wt_context != thr->rd_context) { /* Pop the child context from the write context stack. */ vvp_context_t child_context = thr->wt_context; - thr->wt_context = vvp_get_next_context(child_context); + thr->wt_context = vvp_get_stacked_context(child_context); /* Push the child context onto the read context stack */ - vvp_set_next_context(child_context, thr->rd_context); + vvp_set_stacked_context(child_context, thr->rd_context); thr->rd_context = child_context; } @@ -2720,6 +2752,20 @@ bool of_JOIN(vthread_t thr, vvp_code_t cp) return false; } +/* + * %load/ar , , ; +*/ +bool of_LOAD_AR(vthread_t thr, vvp_code_t cp) +{ + unsigned bit = cp->bit_idx[0]; + unsigned idx = cp->bit_idx[1]; + unsigned adr = thr->words[idx].w_int; + + double word = array_get_word_r(cp->array, adr); + thr->words[bit].w_real = word; + return true; +} + /* * %load/av , , ; * @@ -3904,6 +3950,24 @@ bool of_RELEASE_WR(vthread_t thr, vvp_code_t cp) return true; } +/* + * %set/av