Support for automatic tasks and functions.
This patch adds support for automatic tasks and functions. Refer to the overview in vvp/README.txt for details.
This commit is contained in:
parent 46bf03bba4
commit 7ebcc6b357
@@ -718,6 +718,11 @@ void NetProcTop::dump(ostream&o, unsigned ind) const
statement_->dump(o, ind+2);
}

void NetAlloc::dump(ostream&o, unsigned ind) const
{
o << setw(ind) << "// allocate storage : " << scope_path(scope_) << endl;
}

void NetAssign_::dump_lval(ostream&o) const
{
if (sig_) {

@@ -950,6 +955,11 @@ void NetForever::dump(ostream&o, unsigned ind) const
statement_->dump(o, ind+2);
}

void NetFree::dump(ostream&o, unsigned ind) const
{
o << setw(ind) << "// free storage : " << scope_path(scope_) << endl;
}

void NetFuncDef::dump(ostream&o, unsigned ind) const
{
o << setw(ind) << "" << "function definition for " << scope_path(scope_) << endl;
elaborate.cc | 14
@@ -2408,6 +2408,13 @@ NetProc* PCallTask::elaborate_usr(Design*des, NetScope*scope) const
return block;
}

/* If this is an automatic task, generate a statement to
allocate the local storage. */
if (task->is_auto()) {
NetAlloc*ap = new NetAlloc(task);
block->append(ap);
}

/* Generate assignment statement statements for the input and
INOUT ports of the task. These are managed by writing
assignments with the task port the l-value and the passed

@@ -2487,6 +2494,13 @@ NetProc* PCallTask::elaborate_usr(Design*des, NetScope*scope) const
block->append(ass);
}

/* If this is an automatic task, generate a statement to free
the local storage. */
if (task->is_auto()) {
NetFree*fp = new NetFree(task);
block->append(fp);
}

return block;
}
emit.cc | 12
@@ -196,6 +196,12 @@ bool NetProc::emit_proc(struct target_t*tgt) const
return false;
}

bool NetAlloc::emit_proc(struct target_t*tgt) const
{
tgt->proc_alloc(this);
return true;
}

bool NetAssign::emit_proc(struct target_t*tgt) const
{
return tgt->proc_assign(this);

@@ -249,6 +255,12 @@ bool NetForever::emit_proc(struct target_t*tgt) const
return true;
}

bool NetFree::emit_proc(struct target_t*tgt) const
{
tgt->proc_free(this);
return true;
}

bool NetPDelay::emit_proc(struct target_t*tgt) const
{
return tgt->proc_delay(this);
@@ -343,6 +343,7 @@ typedef enum ivl_signal_type_e {
typedef enum ivl_statement_type_e {
IVL_ST_NONE = 0,
IVL_ST_NOOP = 1,
IVL_ST_ALLOC = 25,
IVL_ST_ASSIGN = 2,
IVL_ST_ASSIGN_NB = 3,
IVL_ST_BLOCK = 4,

@@ -359,6 +360,7 @@ typedef enum ivl_statement_type_e {
IVL_ST_FORCE = 14,
IVL_ST_FOREVER = 15,
IVL_ST_FORK = 16,
IVL_ST_FREE = 26,
IVL_ST_RELEASE = 17,
IVL_ST_REPEAT = 18,
IVL_ST_STASK = 19,
netlist.cc | 38
@@ -1973,6 +1973,44 @@ const NetScope* NetUTask::task() const
return task_;
}

NetAlloc::NetAlloc(NetScope*scope)
: scope_(scope)
{
}

NetAlloc::~NetAlloc()
{
}
#if 0
const string NetAlloc::name() const
{
return scope_->name();
}
#endif
const NetScope* NetAlloc::scope() const
{
return scope_;
}

NetFree::NetFree(NetScope*scope)
: scope_(scope)
{
}

NetFree::~NetFree()
{
}
#if 0
const string NetFree::name() const
{
return scope_->name();
}
#endif
const NetScope* NetFree::scope() const
{
return scope_;
}

NetExpr::NetExpr(unsigned w)
: width_(w), signed_flag_(false)
{
netlist.h | 34
@@ -2086,6 +2086,23 @@ class NetProc : public virtual LineInfo {
NetProc& operator= (const NetProc&);
};

class NetAlloc : public NetProc {

public:
NetAlloc(NetScope*);
~NetAlloc();

const string name() const;

const NetScope* scope() const;

virtual bool emit_proc(struct target_t*) const;
virtual void dump(ostream&, unsigned ind) const;

private:
NetScope*scope_;
};

/*
* Procedural assignment is broken into a suite of classes. These
* classes represent the various aspects of the assignment statement

@@ -2705,6 +2722,23 @@ class NetForever : public NetProc {
NetProc*statement_;
};

class NetFree : public NetProc {

public:
NetFree(NetScope*);
~NetFree();

const string name() const;

const NetScope* scope() const;

virtual bool emit_proc(struct target_t*) const;
virtual void dump(ostream&, unsigned ind) const;

private:
NetScope*scope_;
};

/*
* A function definition is elaborated just like a task, though by now
* it is certain that the first parameter (a phantom parameter) is the
parse.y | 8
@@ -459,10 +459,10 @@ block_item_decl
with real value. Note that real and realtime are interchangeable
in this context. */

| attribute_list_opt K_real real_variable_list ';'
{ delete $3; }
| attribute_list_opt K_realtime real_variable_list ';'
{ delete $3; }
| attribute_list_opt K_real real_variable_list ';'
{ delete $3; }
| attribute_list_opt K_realtime real_variable_list ';'
{ delete $3; }

| K_event list_of_identifiers ';'
{ pform_make_events($2, @1.text, @1.first_line);
@@ -1913,9 +1913,15 @@ extern "C" ivl_statement_t ivl_stmt_block_stmt(ivl_statement_t net,
extern "C" ivl_scope_t ivl_stmt_call(ivl_statement_t net)
{
switch (net->type_) {
case IVL_ST_ALLOC:
return net->u_.alloc_.scope;

case IVL_ST_DISABLE:
return net->u_.disable_.scope;

case IVL_ST_FREE:
return net->u_.free_.scope;

case IVL_ST_UTASK:
return net->u_.utask_.def;
default:
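For loadable code generators, the new statement types are reached through the ordinary ivl_target.h entry points: ivl_statement_type() identifies the statement, and ivl_stmt_call() (extended above) returns the scope whose storage is to be allocated or freed. The following is a minimal illustrative sketch, not part of the patch; the helper name and the printed text are invented, and it assumes only the public ivl_target.h API plus <stdio.h>:

# include  <stdio.h>
# include  "ivl_target.h"

/* Hypothetical helper: report the automatic-storage statements that
   this patch adds. The stub and vvp targets changed below do much the
   same thing inside their own statement switches. */
static void show_auto_storage(ivl_statement_t stmt)
{
      ivl_scope_t scope = ivl_stmt_call(stmt);

      switch (ivl_statement_type(stmt)) {
          case IVL_ST_ALLOC:
            printf("allocate automatic storage for %s\n", ivl_scope_name(scope));
            break;
          case IVL_ST_FREE:
            printf("free automatic storage for %s\n", ivl_scope_name(scope));
            break;
          default:
            break;
      }
}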
@@ -188,6 +188,16 @@ void dll_target::make_assign_lvals_(const NetAssignBase*net)
}
}

void dll_target::proc_alloc(const NetAlloc*net)
{
assert(stmt_cur_);
assert(stmt_cur_->type_ == IVL_ST_NONE);
FILE_NAME(stmt_cur_, net);

stmt_cur_->type_ = IVL_ST_ALLOC;
stmt_cur_->u_.alloc_.scope = lookup_scope_(net->scope());
}

/*
*/
bool dll_target::proc_assign(const NetAssign*net)

@@ -629,6 +639,16 @@ void dll_target::proc_forever(const NetForever*net)
stmt_cur_ = save_cur_;
}

void dll_target::proc_free(const NetFree*net)
{
assert(stmt_cur_);
assert(stmt_cur_->type_ == IVL_ST_NONE);
FILE_NAME(stmt_cur_, net);

stmt_cur_->type_ = IVL_ST_FREE;
stmt_cur_->u_.free_.scope = lookup_scope_(net->scope());
}

bool dll_target::proc_release(const NetRelease*net)
{
assert(stmt_cur_);
t-dll.h | 10
@@ -115,6 +115,7 @@ struct dll_target : public target_t, public expr_scan_t {
/* These methods and members are used for forming the
statements of a thread. */
struct ivl_statement_s*stmt_cur_;
void proc_alloc(const NetAlloc*);
bool proc_assign(const NetAssign*);
void proc_assign_nb(const NetAssignNB*);
bool proc_block(const NetBlock*);

@@ -126,6 +127,7 @@ struct dll_target : public target_t, public expr_scan_t {
bool proc_disable(const NetDisable*);
bool proc_force(const NetForce*);
void proc_forever(const NetForever*);
void proc_free(const NetFree*);
bool proc_release(const NetRelease*);
void proc_repeat(const NetRepeat*);
void proc_stask(const NetSTask*);

@@ -665,6 +667,10 @@ struct ivl_statement_s {
unsigned lineno;

union {
struct { /* IVL_ST_ALLOC */
ivl_scope_t scope;
} alloc_;

struct { /* IVL_ST_ASSIGN IVL_ST_ASSIGN_NB
IVL_ST_CASSIGN, IVL_ST_DEASSIGN */
unsigned lvals_;

@@ -718,6 +724,10 @@ struct ivl_statement_s {
ivl_statement_t stmt_;
} forever_;

struct { /* IVL_ST_FREE */
ivl_scope_t scope;
} free_;

struct { /* IVL_ST_STASK */
const char*name_;
unsigned nparm_;
target.cc | 12
@@ -243,6 +243,12 @@ bool target_t::process(const NetProcTop*top)
return top->statement()->emit_proc(this);
}

void target_t::proc_alloc(const NetAlloc*)
{
cerr << "target (" << typeid(*this).name() << "): "
"Unhandled proc_alloc." << endl;
}

bool target_t::proc_assign(const NetAssign*)
{
cerr << "target (" << typeid(*this).name() << "): "

@@ -322,6 +328,12 @@ void target_t::proc_forever(const NetForever*)
"Unhandled proc_forever." << endl;
}

void target_t::proc_free(const NetFree*)
{
cerr << "target (" << typeid(*this).name() << "): "
"Unhandled proc_free." << endl;
}

bool target_t::proc_release(const NetRelease*dev)
{
cerr << dev->get_fileline() << ": internal error: "
target.h | 2
@@ -105,6 +105,7 @@ struct target_t {
virtual bool process(const NetProcTop*);

/* Various kinds of process nodes are dispatched through these. */
virtual void proc_alloc(const NetAlloc*);
virtual bool proc_assign(const NetAssign*);
virtual void proc_assign_nb(const NetAssignNB*);
virtual bool proc_block(const NetBlock*);

@@ -116,6 +117,7 @@ struct target_t {
virtual bool proc_disable(const NetDisable*);
virtual bool proc_force(const NetForce*);
virtual void proc_forever(const NetForever*);
virtual void proc_free(const NetFree*);
virtual bool proc_release(const NetRelease*);
virtual void proc_repeat(const NetRepeat*);
virtual bool proc_trigger(const NetEvTrig*);
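The three hunks above (emit.cc, target.cc, target.h) form one double-dispatch pattern: NetAlloc::emit_proc() and NetFree::emit_proc() call back into the matching target_t virtual, and target.cc supplies a complaining default so existing backends still link. A self-contained toy sketch of that shape, using invented stand-in classes rather than the real NetProc/target_t types:

# include  <iostream>

struct ToyAlloc;                 // stand-in for NetAlloc
struct ToyFree;                  // stand-in for NetFree

struct toy_target {              // stand-in for target_t
      virtual ~toy_target() { }
      virtual void proc_alloc(const ToyAlloc*)
      { std::cerr << "Unhandled proc_alloc." << std::endl; }
      virtual void proc_free(const ToyFree*)
      { std::cerr << "Unhandled proc_free." << std::endl; }
};

struct ToyStmt {                 // stand-in for NetProc
      virtual ~ToyStmt() { }
      virtual bool emit_proc(toy_target*tgt) const = 0;
};

struct ToyAlloc : ToyStmt {
      bool emit_proc(toy_target*tgt) const { tgt->proc_alloc(this); return true; }
};

struct ToyFree : ToyStmt {
      bool emit_proc(toy_target*tgt) const { tgt->proc_free(this); return true; }
};

int main()
{
      toy_target tgt;              // a backend that overrides nothing
      ToyAlloc alloc_stmt;
      ToyFree free_stmt;
      alloc_stmt.emit_proc(&tgt);  // prints the "Unhandled" default, as target.cc does
      free_stmt.emit_proc(&tgt);
      return 0;
}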
@@ -183,6 +183,10 @@ void show_statement(ivl_statement_t net, unsigned ind)

switch (code) {

case IVL_ST_ALLOC:
fprintf(out, "%*sallocate automatic storage ...\n", ind, "");
break;

case IVL_ST_ASSIGN:
fprintf(out, "%*sASSIGN <lwidth=%u>\n", ind, "",
ivl_stmt_lwidth(net));

@@ -316,6 +320,10 @@ void show_statement(ivl_statement_t net, unsigned ind)
break;
}

case IVL_ST_FREE:
fprintf(out, "%*sfree automatic storage ...\n", ind, "");
break;

case IVL_ST_NOOP:
fprintf(out, "%*s/* noop */;\n", ind, "");
break;

@@ -356,4 +364,3 @@ void show_statement(ivl_statement_t net, unsigned ind)
fprintf(out, "%*sunknown statement type (%u)\n", ind, "", code);
}
}
@@ -88,6 +88,11 @@ struct vector_info draw_ufunc_expr(ivl_expr_t exp, unsigned wid)
ivl_signal_t retval = ivl_scope_port(def, 0);
struct vector_info res;

/* If this is an automatic function, allocate the local storage. */
if (ivl_scope_is_auto(def)) {
fprintf(vvp_out, " %%alloc S_%p;\n", def);
}

/* evaluate the expressions and send the results to the
function ports. */

@@ -134,6 +139,11 @@ struct vector_info draw_ufunc_expr(ivl_expr_t exp, unsigned wid)
if (load_wid < wid)
pad_expr_in_place(exp, res, swid);

/* If this is an automatic function, free the local storage. */
if (ivl_scope_is_auto(def)) {
fprintf(vvp_out, " %%free S_%p;\n", def);
}

return res;
}

@@ -144,6 +154,11 @@ int draw_ufunc_real(ivl_expr_t exp)
int res = 0;
int idx;

/* If this is an automatic function, allocate the local storage. */
if (ivl_scope_is_auto(def)) {
fprintf(vvp_out, " %%alloc S_%p;\n", def);
}

assert(ivl_expr_parms(exp) == (ivl_scope_ports(def)-1));
for (idx = 0 ; idx < ivl_expr_parms(exp) ; idx += 1) {
ivl_signal_t port = ivl_scope_port(def, idx+1);

@@ -163,6 +178,10 @@ int draw_ufunc_real(ivl_expr_t exp)
res = allocate_word();
fprintf(vvp_out, " %%load/wr %d, v%p_0;\n", res, retval);

/* If this is an automatic function, free the local storage. */
if (ivl_scope_is_auto(def)) {
fprintf(vvp_out, " %%free S_%p;\n", def);
}

return res;
}
@@ -428,6 +428,14 @@ static void set_vec_to_lval(ivl_statement_t net, struct vector_info res)
}
}

static int show_stmt_alloc(ivl_statement_t net)
{
ivl_scope_t scope = ivl_stmt_call(net);

fprintf(vvp_out, " %%alloc S_%p;\n", scope);
return 0;
}

static int show_stmt_assign_vector(ivl_statement_t net)
{
ivl_expr_t rval = ivl_stmt_rval(net);

@@ -1398,6 +1406,14 @@ static int show_stmt_fork(ivl_statement_t net, ivl_scope_t sscope)
return rc;
}

static int show_stmt_free(ivl_statement_t net)
{
ivl_scope_t scope = ivl_stmt_call(net);

fprintf(vvp_out, " %%free S_%p;\n", scope);
return 0;
}

/*
* noop statements are implemented by doing nothing.
*/

@@ -1645,6 +1661,10 @@ static int show_statement(ivl_statement_t net, ivl_scope_t sscope)

switch (code) {

case IVL_ST_ALLOC:
rc += show_stmt_alloc(net);
break;

case IVL_ST_ASSIGN:
rc += show_stmt_assign(net);
break;

@@ -1706,6 +1726,10 @@ static int show_statement(ivl_statement_t net, ivl_scope_t sscope)
rc += show_stmt_fork(net, sscope);
break;

case IVL_ST_FREE:
rc += show_stmt_free(net);
break;

case IVL_ST_NOOP:
rc += show_stmt_noop(net);
break;
@@ -1581,7 +1581,7 @@ static void draw_lpm_ufunc(ivl_lpm_t net)

fprintf(vvp_out, ")");

/* Finally, print the reference to the signal from which the
/* Now print the reference to the signal from which the
result is collected. */
{ ivl_signal_t psig = ivl_scope_port(def, 0);
assert(ivl_lpm_width(net) == ivl_signal_width(psig));

@@ -1590,7 +1590,8 @@ static void draw_lpm_ufunc(ivl_lpm_t net)
fprintf(vvp_out, " v%p_0", psig);
}

fprintf(vvp_out, ";\n");
/* Finally, print the scope identifier. */
fprintf(vvp_out, " S_%p;\n", def);
}

/*

@@ -1774,13 +1775,9 @@ int draw_scope(ivl_scope_t net, ivl_scope_t parent)
{
unsigned idx;
const char *type;
/* For now we do not support automatic tasks or functions. */
if (ivl_scope_is_auto(net)) {
fprintf(stderr, "%s:%u: vvp-tgt sorry: automatic tasks/functions "
"are not supported!\n",
ivl_scope_def_file(net), ivl_scope_def_lineno(net));
exit(1);
}

const char*prefix = ivl_scope_is_auto(net) ? "auto" : "";

switch (ivl_scope_type(net)) {
case IVL_SCT_MODULE: type = "module"; break;
case IVL_SCT_FUNCTION: type = "function"; break;

@@ -1791,8 +1788,8 @@ int draw_scope(ivl_scope_t net, ivl_scope_t parent)
default: type = "?"; assert(0);
}

fprintf(vvp_out, "S_%p .scope %s, \"%s\" \"%s\" %d %d",
net, type, vvp_mangle_name(ivl_scope_basename(net)),
fprintf(vvp_out, "S_%p .scope %s%s, \"%s\" \"%s\" %d %d",
net, prefix, type, vvp_mangle_name(ivl_scope_basename(net)),
ivl_scope_tname(net), ivl_file_table_index(ivl_scope_file(net)),
ivl_scope_lineno(net));
@@ -96,8 +96,8 @@ The syntax of a scope statement is:

<label> .scope <type>, <instance>, <declaration>, <parent> ;

The <type> is the general type of the scope: module, function, task,
begin, fork or generate.
The <type> is the general type of the scope: module, autofunction,
function, autotask, task, begin, fork or generate.

The <instance> is a string that is the base name of the instance. For
modules, this is the instance name. For tasks, this is the task name.

@@ -656,7 +656,7 @@ STRUCTURAL FUNCTION CALLS:

The .ufunc statement defines a call to a user defined function.

<label> .ufunc <flabel>, <wid>, <isymbols> ( <psymbols> ) <rsymbol> ;
<label> .ufunc <flabel>, <wid>, <isymbols> ( <psymbols> ) <rsymbol> <ssymbol>;

The <flabel> is the code label for the first instruction of the
function implementation. This is code that the simulator will branch

@@ -673,9 +673,11 @@ list. The <psymbols> are variables that represent the input ports for
the function. The ufunc performs an assignment to these variables
before calling the function.

Finally, the <rsymbol> is the variable within the function where the
result will be found when the function code ends. This value is picked
up and propagated to the output of the functor.
The <rsymbol> is the variable within the function where the result
will be found when the function code ends. This value is picked up
and propagated to the output of the functor.

The <ssymbol> is the function scope name.

THREAD STATEMENTS:

@@ -1035,6 +1037,60 @@ This creates a functor and makes it into a mode-2 functor. Then the
trigger statement, "-> a", cause a ``%set a, 0;'' statement be
generated. This is sufficient to trigger the event.


AUTOMATICALLY ALLOCATED SCOPES

If a .scope statement has a <type> of autofunction or autotask, the
scope is flagged as being an automatically allocated scope. The functor
for each variable or event declared in that scope is added to a list
of items that need to be automatically allocated for each dynamic
instance of that scope.

Before copying the input parameters of an automatic function or task
into the scope variables, a new scope instance needs to be allocated.
For function or task calls in procedural code, this is handled by the
%alloc instruction. For structural function calls, this is handled
by the phantom code generated by the .ufunc statement. In both cases,
VVP attempts to use a previously freed scope instance - only if none
are available is a new instance created.

After copying the result of an automatic function or the output
parameters of an automatic task, the scope instance can be freed.
For function or task calls in procedural code, this is handled by the
%free instruction. For structural function calls, this is handled
by the phantom code generated by the .ufunc statement. In both cases,
VVP adds the instance to a list of freed instances for that scope,
which allows the storage to be reused the next time a new instance
is required.

For each automatically allocated scope instance, VVP creates an array
of items, referred to as the scope context. Each item in this array is
a pointer to the allocated storage for holding the state of one scope
variable or event. The index into this array for any given variable
or event, referred to as the context index, is stored in the functor
associated with that variable or event.

Each VVP thread keeps track of its current write context and current
read context. For threads executing in a static scope, these are both
initialised to null values. For threads executing in an automatically
allocated scope, these are both initialised to refer to the context
allocated to that scope.

Before starting the copying of the input parameters of an automatic
function or task, the current write context of the caller thread is
set to the context allocated for that function/task call. After the
thread that executed the function/task body has been rejoined and
before starting the copying of the result or output parameters, the
current write context is reset to its previous value and the current
read context is set to the context allocated for the function/task
call. After finishing the copying of the result or output parameters,
the current read context is reset to its previous value.

When reading or writing the state of an automatically allocated
variable or event, the associated functor indirects through the
current read or write context of the running thread, using its
stored context index.
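The allocate-or-reuse policy described here is implemented later in this patch by vthread_alloc_context()/vthread_free_context() in vvp/vthread.cc. Below is a self-contained sketch of the same free-list idea, with invented names and a deliberately simplified context record, purely to illustrate the reuse policy:

# include  <vector>

/* Illustrative only: a "context" is a block of per-item state slots plus
   a link so that freed contexts can be chained for reuse. */
struct demo_context {
      demo_context*next;
      std::vector<void*> items;
      explicit demo_context(unsigned nitems) : next(0), items(nitems, (void*)0) { }
};

struct demo_scope {
      unsigned nitem;
      demo_context*free_list;
      explicit demo_scope(unsigned n) : nitem(n), free_list(0) { }
};

/* Prefer a previously freed context; only when none is available is a
   new one created. (The real code also calls reset_instance() or
   alloc_instance() on every item registered with the scope.) */
static demo_context* demo_alloc_context(demo_scope&scope)
{
      demo_context*ctx = scope.free_list;
      if (ctx)
            scope.free_list = ctx->next;
      else
            ctx = new demo_context(scope.nitem);
      return ctx;
}

/* Freeing just pushes the context back onto the scope's free list, so
   the storage is reused by the next call into that scope. */
static void demo_free_context(demo_scope&scope, demo_context*ctx)
{
      ctx->next = scope.free_list;
      scope.free_list = ctx;
}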
/*
* Copyright (c) 2001 Stephen Williams (steve@icarus.com)
*
@@ -852,7 +852,9 @@ void compile_var_array(char*label, char*name, int last, int first,

/* Make the words. */
arr->vals_width = labs(msb-lsb) + 1;
arr->vals = new vvp_vector4array_t(arr->vals_width, arr->array_count);
arr->vals = new vvp_vector4array_t(arr->vals_width, arr->array_count,
vpip_peek_current_scope()->is_automatic);
vpip_add_item_to_current_scope(arr->vals);
vpip_make_dec_const(&arr->msb, msb);
vpip_make_dec_const(&arr->lsb, lsb);
@@ -36,6 +36,7 @@ extern bool of_ABS_WR(vthread_t thr, vvp_code_t code);
extern bool of_ADD(vthread_t thr, vvp_code_t code);
extern bool of_ADD_WR(vthread_t thr, vvp_code_t code);
extern bool of_ADDI(vthread_t thr, vvp_code_t code);
extern bool of_ALLOC(vthread_t thr, vvp_code_t code);
extern bool of_AND(vthread_t thr, vvp_code_t code);
extern bool of_ANDI(vthread_t thr, vvp_code_t code);
extern bool of_ANDR(vthread_t thr, vvp_code_t code);

@@ -91,6 +92,7 @@ extern bool of_FORCE_V(vthread_t thr, vvp_code_t code);
extern bool of_FORCE_WR(vthread_t thr, vvp_code_t code);
extern bool of_FORCE_X0(vthread_t thr, vvp_code_t code);
extern bool of_FORK(vthread_t thr, vvp_code_t code);
extern bool of_FREE(vthread_t thr, vvp_code_t code);
extern bool of_INV(vthread_t thr, vvp_code_t code);
extern bool of_IX_ADD(vthread_t thr, vvp_code_t code);
extern bool of_IX_GET(vthread_t thr, vvp_code_t code);

@@ -156,8 +158,7 @@ extern bool of_XORR(vthread_t thr, vvp_code_t code);

extern bool of_ZOMBIE(vthread_t thr, vvp_code_t code);

extern bool of_FORK_UFUNC(vthread_t thr, vvp_code_t code);
extern bool of_JOIN_UFUNC(vthread_t thr, vvp_code_t code);
extern bool of_EXEC_UFUNC(vthread_t thr, vvp_code_t code);

extern bool of_CHUNK_LINK(vthread_t thr, vvp_code_t code);
@@ -1687,6 +1687,34 @@ void compile_fork(char*label, struct symb_s dest, struct symb_s scope)
compile_vpi_lookup(&code->handle, scope.text);
}

void compile_alloc(char*label, struct symb_s scope)
{
if (label)
compile_codelabel(label);

/* Fill in the basics of the %alloc in the instruction. */
vvp_code_t code = codespace_allocate();
code->opcode = of_ALLOC;

/* Figure out the target SCOPE. */
compile_vpi_lookup(&code->handle, scope.text);
}

void compile_free(char*label, struct symb_s scope)
{
if (label)
compile_codelabel(label);

/* Fill in the basics of the %free in the instruction. */
vvp_code_t code = codespace_allocate();
code->opcode = of_FREE;

/* Figure out the target SCOPE. */
compile_vpi_lookup(&code->handle, scope.text);
}

void compile_vpi_call(char*label, char*name,
long file_idx, long lineno,
unsigned argc, vpiHandle*argv)
@@ -347,7 +347,7 @@ extern void compile_array_cleanup(void);
extern void compile_ufunc(char*label, char*code, unsigned wid,
unsigned argc, struct symb_s*argv,
unsigned portc, struct symb_s*portv,
struct symb_s retv);
struct symb_s retv, struct symb_s scope);

/*
* The compile_event function takes the parts of the event statement

@@ -406,6 +406,9 @@ extern void compile_fork(char*label, struct symb_s targ_s,
struct symb_s scope_s);
extern void compile_codelabel(char*label);

extern void compile_alloc(char*label, struct symb_s scope_s);
extern void compile_free(char*label, struct symb_s scope_s);

/*
* The parser uses these functions to compile .scope statements.
* The implementations of these live in the vpi_scope.cc file.
vvp/event.cc | 153
@@ -32,7 +32,7 @@

# include <iostream>

void waitable_hooks_s::run_waiting_threads_()
void waitable_hooks_s::run_waiting_threads_(unsigned context_idx)
{
// Run the non-blocking event controls.
last = &event_ctls;

@@ -48,12 +48,17 @@ void waitable_hooks_s::run_waiting_threads_()
}
}

if (threads == 0)
return;

vthread_t tmp = threads;
threads = 0;
vthread_schedule_list(tmp);
vthread_t tmp;
if (context_idx) {
waitable_state_s*state = static_cast<waitable_state_s*>
(vthread_get_wt_context_item(context_idx));
tmp = state->threads;
state->threads = 0;
} else {
tmp = threads;
threads = 0;
}
if (tmp) vthread_schedule_list(tmp);
}

evctl::evctl(unsigned long ecount)

@@ -180,6 +185,12 @@ const vvp_fun_edge::edge_t vvp_edge_negedge

const vvp_fun_edge::edge_t vvp_edge_none = 0;

struct vvp_fun_edge_state_s : public waitable_state_s {
vvp_fun_edge_state_s() : bit(BIT4_X) {}

vvp_bit4_t bit;
};

vvp_fun_edge::vvp_fun_edge(edge_t e, bool debug_flag)
: edge_(e), debug_(debug_flag)
{

@@ -193,16 +204,38 @@ vvp_fun_edge::~vvp_fun_edge()
{
}

void vvp_fun_edge::alloc_instance(vvp_context_t context)
{
vvp_set_context_item(context, context_idx, new vvp_fun_edge_state_s);
}

void vvp_fun_edge::reset_instance(vvp_context_t context)
{
vvp_fun_edge_state_s*state = static_cast<vvp_fun_edge_state_s*>
(vvp_get_context_item(context, context_idx));
state->threads = 0;
state->bit = BIT4_X;
}

void vvp_fun_edge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
{
vvp_bit4_t*old_bit;
if (context_idx) {
vvp_fun_edge_state_s*state = static_cast<vvp_fun_edge_state_s*>
(vthread_get_wt_context_item(context_idx));
old_bit = &state->bit;
} else {
old_bit = &bits_[port.port()];
}

/* See what kind of edge this represents. */
edge_t mask = VVP_EDGE(bits_[port.port()], bit.value(0));
edge_t mask = VVP_EDGE(*old_bit, bit.value(0));

/* Save the current input for the next time around. */
bits_[port.port()] = bit.value(0);
*old_bit = bit.value(0);

if ((edge_ == vvp_edge_none) || (edge_ & mask)) {
run_waiting_threads_();
run_waiting_threads_(context_idx);

vvp_net_t*net = port.ptr();
vvp_send_vec4(net->out, bit);

@@ -210,6 +243,13 @@ void vvp_fun_edge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
}

struct vvp_fun_anyedge_state_s : public waitable_state_s {
vvp_fun_anyedge_state_s() : bitsr(0.0) {}

vvp_vector4_t bits;
double bitsr;
};

vvp_fun_anyedge::vvp_fun_anyedge(bool debug_flag)
: debug_(debug_flag)
{

@@ -221,17 +261,39 @@ vvp_fun_anyedge::~vvp_fun_anyedge()
{
}

void vvp_fun_anyedge::alloc_instance(vvp_context_t context)
{
vvp_set_context_item(context, context_idx, new vvp_fun_anyedge_state_s);
}

void vvp_fun_anyedge::reset_instance(vvp_context_t context)
{
vvp_fun_anyedge_state_s*state = static_cast<vvp_fun_anyedge_state_s*>
(vvp_get_context_item(context, context_idx));
state->threads = 0;
state->bits.set_to_x();
state->bitsr = 0.0;
}

void vvp_fun_anyedge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
{
unsigned pdx = port.port();
bool flag = false;

if (bits_[pdx].size() != bit.size()) {
vvp_vector4_t*old_bits;
if (context_idx) {
vvp_fun_anyedge_state_s*state = static_cast<vvp_fun_anyedge_state_s*>
(vthread_get_wt_context_item(context_idx));
old_bits = &state->bits;
} else {
old_bits = &bits_[port.port()];
}

if (old_bits->size() != bit.size()) {
flag = true;

} else {
for (unsigned idx = 0 ; idx < bit.size() ; idx += 1) {
if (bits_[pdx].value(idx) != bit.value(idx)) {
if (old_bits->value(idx) != bit.value(idx)) {
flag = true;
break;
}

@@ -239,8 +301,8 @@ void vvp_fun_anyedge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
}

if (flag) {
bits_[pdx] = bit;
run_waiting_threads_();
*old_bits = bit;
run_waiting_threads_(context_idx);
vvp_net_t*net = port.ptr();
vvp_send_vec4(net->out, bit);
}

@@ -248,16 +310,18 @@ void vvp_fun_anyedge::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)

void vvp_fun_anyedge::recv_real(vvp_net_ptr_t port, double bit)
{
unsigned pdx = port.port();
bool flag = false;

if (bitsr_[pdx] != bit) {
flag = true;
bitsr_[pdx] = bit;
double*old_bits;
if (context_idx) {
vvp_fun_anyedge_state_s*state = static_cast<vvp_fun_anyedge_state_s*>
(vthread_get_wt_context_item(context_idx));
old_bits = &state->bitsr;
} else {
old_bits = &bitsr_[port.port()];
}

if (flag) {
run_waiting_threads_();
if (*old_bits != bit) {
*old_bits = bit;
run_waiting_threads_(context_idx);
vvp_net_t*net = port.ptr();
vvp_send_vec4(net->out, vvp_vector4_t());
}

@@ -271,9 +335,21 @@ vvp_fun_event_or::~vvp_fun_event_or()
{
}

void vvp_fun_event_or::alloc_instance(vvp_context_t context)
{
vvp_set_context_item(context, context_idx, new waitable_state_s);
}

void vvp_fun_event_or::reset_instance(vvp_context_t context)
{
waitable_state_s*state = static_cast<waitable_state_s*>
(vvp_get_context_item(context, context_idx));
state->threads = 0;
}

void vvp_fun_event_or::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
{
run_waiting_threads_();
run_waiting_threads_(context_idx);
vvp_net_t*net = port.ptr();
vvp_send_vec4(net->out, bit);
}

@@ -287,9 +363,21 @@ vvp_named_event::~vvp_named_event()
{
}

void vvp_named_event::alloc_instance(vvp_context_t context)
{
vvp_set_context_item(context, context_idx, new waitable_state_s);
}

void vvp_named_event::reset_instance(vvp_context_t context)
{
waitable_state_s*state = static_cast<waitable_state_s*>
(vvp_get_context_item(context, context_idx));
state->threads = 0;
}

void vvp_named_event::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
{
run_waiting_threads_();
run_waiting_threads_(context_idx);
vvp_net_t*net = port.ptr();
vvp_send_vec4(net->out, bit);

@@ -320,7 +408,9 @@ void compile_event(char*label, char*type,
if (strcmp(type,"edge") == 0) {

free(type);
fun = new vvp_fun_anyedge(debug_flag);
vvp_fun_anyedge*event_fun = new vvp_fun_anyedge(debug_flag);
vpip_add_item_to_current_scope(event_fun);
fun = event_fun;

} else {

@@ -334,7 +424,9 @@ void compile_event(char*label, char*type,
assert(argc <= 4);
free(type);

fun = new vvp_fun_edge(edge, debug_flag);
vvp_fun_edge*event_fun = new vvp_fun_edge(edge, debug_flag);
vpip_add_item_to_current_scope(event_fun);
fun = event_fun;
}

vvp_net_t* ptr = new vvp_net_t;

@@ -348,10 +440,11 @@ void compile_event(char*label, char*type,

static void compile_event_or(char*label, unsigned argc, struct symb_s*argv)
{
vvp_net_fun_t*fun = new vvp_fun_event_or;
vvp_fun_event_or*fun = new vvp_fun_event_or;
vvp_net_t* ptr = new vvp_net_t;
ptr->fun = fun;

vpip_add_item_to_current_scope(fun);
define_functor_symbol(label, ptr);
free(label);

@@ -373,8 +466,10 @@ void compile_named_event(char*label, char*name)
vvp_net_t*ptr = new vvp_net_t;

vpiHandle obj = vpip_make_named_event(name, ptr);
ptr->fun = new vvp_named_event(obj);
vvp_named_event*fun = new vvp_named_event(obj);
ptr->fun = fun;

vpip_add_item_to_current_scope(fun);
define_functor_symbol(label, ptr);
compile_vpi_symbol(label, obj);
vpip_attach_to_current_scope(obj);
vvp/event.h | 29
@@ -108,7 +108,17 @@ struct waitable_hooks_s {
evctl**last;

protected:
void run_waiting_threads_();
void run_waiting_threads_(unsigned context_idx);
};

/*
* This is the base object for storing state information for each instance
* of an automatically allocated event. In the general case, all that is
* needed is the list of threads waiting on that instance.
*/
struct waitable_state_s {
waitable_state_s() : threads(0) { }
vthread_t threads;
};

/*

@@ -121,9 +131,11 @@ class vvp_fun_edge : public vvp_net_fun_t, public waitable_hooks_s {
public:
typedef unsigned short edge_t;
explicit vvp_fun_edge(edge_t e, bool debug_flag);

virtual ~vvp_fun_edge();

void alloc_instance(vvp_context_t context);
void reset_instance(vvp_context_t context);

void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit);

private:

@@ -143,8 +155,8 @@ extern const vvp_fun_edge::edge_t vvp_edge_none;
* functor looks at the entire input vector for any change.
*
* The anyedge is also different in that it can receive real
* values. in this case, any detectable change in the real value leads
* to an even trigger.
* values. In this case, any detectable change in the real value leads
* to an event trigger.
*/
class vvp_fun_anyedge : public vvp_net_fun_t, public waitable_hooks_s {

@@ -152,6 +164,9 @@ class vvp_fun_anyedge : public vvp_net_fun_t, public waitable_hooks_s {
explicit vvp_fun_anyedge(bool debug_flag);
virtual ~vvp_fun_anyedge();

void alloc_instance(vvp_context_t context);
void reset_instance(vvp_context_t context);

void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit);
void recv_real(vvp_net_ptr_t port, double bit);

@@ -172,6 +187,9 @@ class vvp_fun_event_or : public vvp_net_fun_t, public waitable_hooks_s {
explicit vvp_fun_event_or();
~vvp_fun_event_or();

void alloc_instance(vvp_context_t context);
void reset_instance(vvp_context_t context);

void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit);

private:

@@ -188,6 +206,9 @@ class vvp_named_event : public vvp_net_fun_t, public waitable_hooks_s {
explicit vvp_named_event(struct __vpiHandle*eh);
~vvp_named_event();

void alloc_instance(vvp_context_t context);
void reset_instance(vvp_context_t context);

void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit);

private:
@@ -183,6 +183,8 @@
"%vpi_func/r" { return K_vpi_func_r; }
"%disable" { return K_disable; }
"%fork" { return K_fork; }
"%alloc" { return K_alloc; }
"%free" { return K_free; }

/* Handle the specialized variable access functions. */
|
@ -49,6 +49,11 @@ This instruction adds the immediate value (no x or z bits) into the
|
|||
left vector. The imm value is limited to 16 significant bits, but it
|
||||
is zero extended to match any width.
|
||||
|
||||
* %alloc <scope-label>
|
||||
|
||||
This instruction allocates the storage for a new instance of an
|
||||
automatically allocated scope.
|
||||
|
||||
* %and <bit-l>, <bit-r>, <wid>
|
||||
|
||||
Perform the bitwise AND of the two vectors, and store the result in
|
||||
|
|
@ -362,6 +367,11 @@ The %fork instruction has no effect other than to push a child thread.
|
|||
|
||||
See also %join.
|
||||
|
||||
* %free <scope-label>
|
||||
|
||||
This instruction de-allocates the storage for a previously allocated
|
||||
instance of as automatically allocated scope.
|
||||
|
||||
|
||||
* %inv <bit>, <wid>
|
||||
|
||||
|
|
|
vvp/parse.y | 11
@@ -85,7 +85,7 @@ static struct __vpiModPath*modpath_dst = 0;
%token K_THREAD K_TIMESCALE K_TRAN K_TRANIF0 K_TRANIF1 K_TRANVP K_UFUNC
%token K_UDP K_UDP_C K_UDP_S
%token K_VAR K_VAR_S K_VAR_I K_VAR_R K_vpi_call K_vpi_func K_vpi_func_r
%token K_disable K_fork
%token K_disable K_fork K_alloc K_free
%token K_vpi_module K_vpi_time_precision K_file_names

%token <text> T_INSTR

@@ -215,11 +215,11 @@ statement
bits in the symbols list change. */

| T_LABEL K_UFUNC T_SYMBOL ',' T_NUMBER ',' symbols
'(' symbols ')' symbol ';'
'(' symbols ')' symbol symbol ';'
{ compile_ufunc($1, $3, $5,
$7.cnt, $7.vect,
$9.cnt, $9.vect,
$11); }
$11, $12); }

/* Resolver statements are very much like functors. They are
compiled to functors of a different mode. */

@@ -533,6 +533,11 @@ statement
| label_opt K_fork symbol ',' symbol ';'
{ compile_fork($1, $3, $5); }

| label_opt K_alloc symbol ';'
{ compile_alloc($1, $3); }

| label_opt K_free symbol ';'
{ compile_free($1, $3); }

/* Scope statements come in two forms. There are the scope
declaration and the scope recall. The declarations create the
vvp/ufunc.cc | 52
@@ -37,17 +37,19 @@

ufunc_core::ufunc_core(unsigned owid, vvp_net_t*ptr,
unsigned nports, vvp_net_t**ports,
vvp_code_t sa, struct __vpiScope*run_scope,
char*result_label)
vvp_code_t sa, struct __vpiScope*call_scope,
char*result_label, char*scope_label)
: vvp_wide_fun_core(ptr, nports)
{
owid_ = owid;
ports_ = ports;
code_ = sa;
thread_ = 0;
scope_ = run_scope;
call_scope_ = call_scope;

functor_ref_lookup(&result_, result_label);

compile_vpi_lookup((vpiHandle*)(&func_scope_), scope_label);
}

ufunc_core::~ufunc_core()

@@ -55,7 +57,7 @@ ufunc_core::~ufunc_core()
}

/*
* This method is called by the %fork_ufunc function to prepare the
* This method is called by the %exec_ufunc function to prepare the
* input variables of the function for execution. The method copies
* the input values collected by the core to the variables.
*/

@@ -72,7 +74,7 @@ void ufunc_core::assign_bits_to_ports(void)
}

/*
* This method is called by the %join_ufunc instruction to copy the
* This method is called by the %exec_ufunc instruction to copy the
* result from the return code variable and deliver it to the output
* of the functor, back into the netlist.
*/

@@ -105,7 +107,7 @@ void ufunc_core::recv_real_from_inputs(unsigned port)
void ufunc_core::invoke_thread_()
{
if (thread_ == 0) {
thread_ = vthread_new(code_, scope_);
thread_ = vthread_new(code_, call_scope_);
schedule_vthread(thread_, 0);
}
}

@@ -123,15 +125,15 @@ void ufunc_core::invoke_thread_()
void compile_ufunc(char*label, char*code, unsigned wid,
unsigned argc, struct symb_s*argv,
unsigned portc, struct symb_s*portv,
struct symb_s retv)
struct symb_s retv, struct symb_s scope)
{
/* The input argument list and port list must have the same
sizes, since internally we will be mapping the inputs list
to the ports list. */
assert(argc == portc);

struct __vpiScope*run_scope = vpip_peek_current_scope();
assert(run_scope);
struct __vpiScope*call_scope = vpip_peek_current_scope();
assert(call_scope);

/* Construct some phantom code that is the thread of the
function call. The first instruction, at the start_address

@@ -139,32 +141,19 @@ void compile_ufunc(char*label, char*code, unsigned wid,
The last instruction is the usual %end. So the thread looks
like this:

%fork_ufunc <core>;
%join;
%join_ufunc;
%exec_ufunc <core>;
%end;

The %fork_ufunc starts the user defined function by copying
the input values into local regs, forking a thread and
pushing that thread. The %join waits on that thread. The
$join_ufunc then copies the output values to the
destination net functors. */
The %exec_ufunc copies the input values into local regs,
runs the function code, then copies the output values to
the destination net functors. */

vvp_code_t start_code = codespace_allocate();
start_code->opcode = of_FORK_UFUNC;
start_code->opcode = of_EXEC_UFUNC;
code_label_lookup(start_code, code);

{ vvp_code_t codep = codespace_allocate();
codep->opcode = &of_JOIN;
}

vvp_code_t ujoin_code;
ujoin_code = codespace_allocate();
ujoin_code->opcode = &of_JOIN_UFUNC;

{ vvp_code_t codep = codespace_allocate();
codep->opcode = &of_END;
}
vvp_code_t end_code = codespace_allocate();
end_code->opcode = &of_END;

/* Run through the function ports (which are related to but
not the same as the input ports) and arrange for their

@@ -179,14 +168,13 @@ void compile_ufunc(char*label, char*code, unsigned wid,
that will contain the execution. */
vvp_net_t*ptr = new vvp_net_t;
ufunc_core*fcore = new ufunc_core(wid, ptr, portc, ports,
start_code, run_scope,
retv.text);
start_code, call_scope,
retv.text, scope.text);
ptr->fun = fcore;
define_functor_symbol(label, ptr);
free(label);

start_code->ufunc_core_ptr = fcore;
ujoin_code->ufunc_core_ptr = fcore;

wide_inputs_connect(fcore, argc, argv);
vvp/ufunc.h | 11
@@ -53,11 +53,13 @@ class ufunc_core : public vvp_wide_fun_core {
ufunc_core(unsigned ow, vvp_net_t*ptr,
unsigned nports, vvp_net_t**ports,
vvp_code_t start_address,
struct __vpiScope*run_scope,
char*result_label);
struct __vpiScope*call_scope,
char*result_label,
char*scope_label);
~ufunc_core();

struct __vpiScope*scope() { return scope_; }
struct __vpiScope*call_scope() { return call_scope_; }
struct __vpiScope*func_scope() { return func_scope_; }

void assign_bits_to_ports(void);
void finish_thread(vthread_t thr);

@@ -80,7 +82,8 @@ class ufunc_core : public vvp_wide_fun_core {
// This is a thread to execute the behavioral portion of the
// function.
vthread_t thread_;
struct __vpiScope*scope_;
struct __vpiScope*call_scope_;
struct __vpiScope*func_scope_;
vvp_code_t code_;

// Where the result will be.
@@ -173,12 +173,18 @@ struct __vpiScope {
unsigned lineno;
unsigned def_file_idx;
unsigned def_lineno;
bool is_automatic;
/* The scope has a system time of its own. */
struct __vpiSystemTime scoped_time;
struct __vpiSystemTime scoped_realtime;
/* Keep an array of internal scope items. */
struct __vpiHandle**intern;
unsigned nintern;
/* Keep an array of items to be automatically allocated */
struct automatic_hooks_s**item;
unsigned nitem;
/* Keep a list of freed contexts. */
vvp_context_t free_context;
/* Keep a list of threads in the scope. */
vthread_t threads;
signed int time_units :8;

@@ -187,6 +193,7 @@ struct __vpiScope {

extern struct __vpiScope* vpip_peek_current_scope(void);
extern void vpip_attach_to_current_scope(vpiHandle obj);
extern void vpip_add_item_to_current_scope(automatic_hooks_s*item);
extern vpiHandle vpip_make_root_iterator(void);
extern void vpip_make_root_iterator(struct __vpiHandle**&table,
unsigned&ntable);
@@ -316,6 +316,26 @@ static void attach_to_scope_(struct __vpiScope*scope, vpiHandle obj)
scope->intern[idx] = obj;
}

static void add_item_to_scope_(struct __vpiScope*scope, automatic_hooks_s*item)
{
assert(scope);

// there is no need to record items for static scopes
if (!scope->is_automatic) return;

unsigned idx = scope->nitem++;
item->context_idx = 1 + idx;

if (scope->item == 0)
scope->item = (automatic_hooks_s**)
malloc(sizeof(automatic_hooks_s*));
else
scope->item = (automatic_hooks_s**)
realloc(scope->item, sizeof(automatic_hooks_s*)*scope->nitem);

scope->item[idx] = item;
}

/*
* When the compiler encounters a scope declaration, this function
* creates and initializes a __vpiScope object with the requested name

@@ -330,20 +350,33 @@ compile_scope_decl(char*label, char*type, char*name, const char*tname,
struct __vpiScope*scope = new struct __vpiScope;
count_vpi_scopes += 1;

if (strcmp(type,"module") == 0)
if (strcmp(type,"module") == 0) {
scope->base.vpi_type = &vpip_scope_module_rt;
else if (strcmp(type,"function") == 0)
scope->is_automatic = false;
} else if (strcmp(type,"autofunction") == 0) {
scope->base.vpi_type = &vpip_scope_function_rt;
else if (strcmp(type,"task") == 0)
scope->is_automatic = true;
} else if (strcmp(type,"function") == 0) {
scope->base.vpi_type = &vpip_scope_function_rt;
scope->is_automatic = false;
} else if (strcmp(type,"autotask") == 0) {
scope->base.vpi_type = &vpip_scope_task_rt;
else if (strcmp(type,"fork") == 0)
scope->is_automatic = true;
} else if (strcmp(type,"task") == 0) {
scope->base.vpi_type = &vpip_scope_task_rt;
scope->is_automatic = false;
} else if (strcmp(type,"fork") == 0) {
scope->base.vpi_type = &vpip_scope_fork_rt;
else if (strcmp(type,"begin") == 0)
scope->is_automatic = false;
} else if (strcmp(type,"begin") == 0) {
scope->base.vpi_type = &vpip_scope_begin_rt;
else if (strcmp(type,"generate") == 0)
scope->is_automatic = false;
} else if (strcmp(type,"generate") == 0) {
scope->base.vpi_type = &vpip_scope_begin_rt;
else {
scope->is_automatic = false;
} else {
scope->base.vpi_type = &vpip_scope_module_rt;
scope->is_automatic = false;
assert(0);
}

@@ -357,6 +390,9 @@ compile_scope_decl(char*label, char*type, char*name, const char*tname,
scope->def_lineno = (unsigned) def_lineno;
scope->intern = 0;
scope->nintern = 0;
scope->item = 0;
scope->nitem = 0;
scope->free_context = 0;
scope->threads = 0;

current_scope = scope;

@@ -421,3 +457,8 @@ void vpip_attach_to_current_scope(vpiHandle obj)
{
attach_to_scope_(current_scope, obj);
}

void vpip_add_item_to_current_scope(automatic_hooks_s*item)
{
add_item_to_scope_(current_scope, item);
}
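The 1-based context_idx handed out above leaves index 0 free, which is consistent with a context being a plain array of item pointers whose first slot chains freed contexts together (the free_context list initialised in this same file). The vvp_context_t helpers themselves are not part of this extract, so the following layout is only a plausible sketch with invented names, not the patch's actual definition:

/* Illustrative layout only: slot 0 links freed contexts, slots 1..nitem
   hold the per-item state that alloc_instance()/reset_instance() manage. */
typedef void** demo_ctx_t;

static demo_ctx_t demo_allocate_context(unsigned nitem)
{
      demo_ctx_t ctx = new void*[1 + nitem];
      for (unsigned idx = 0 ; idx < 1 + nitem ; idx += 1)
            ctx[idx] = 0;
      return ctx;
}

static void demo_set_context_item(demo_ctx_t ctx, unsigned idx, void*item)
{
      ctx[idx] = item;              // idx is the 1-based context_idx
}

static void* demo_get_context_item(demo_ctx_t ctx, unsigned idx)
{
      return ctx[idx];
}

static void demo_set_next_context(demo_ctx_t ctx, demo_ctx_t next)
{
      ctx[0] = next;                // slot 0 is reserved for the free list
}

static demo_ctx_t demo_get_next_context(demo_ctx_t ctx)
{
      return static_cast<demo_ctx_t>(ctx[0]);
}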
vvp/vthread.cc | 189
@ -43,7 +43,7 @@
|
|||
# define TOP_BIT (1UL << (CPU_WORD_BITS-1))
|
||||
|
||||
/*
|
||||
* This vhtread_s structure describes all there is to know about a
|
||||
* This vthread_s structure describes all there is to know about a
|
||||
* thread, including its program counter, all the private bits it
|
||||
* holds, and its place in other lists.
|
||||
*
|
||||
|
|
@ -117,12 +117,15 @@ struct vthread_s {
|
|||
struct vthread_s*wait_next;
|
||||
/* These are used to keep the thread in a scope. */
|
||||
struct vthread_s*scope_next, *scope_prev;
|
||||
|
||||
/* These are used to access automatically allocated items. */
|
||||
vvp_context_t wt_context, rd_context;
|
||||
/* These are used to pass non-blocking event control information. */
|
||||
vvp_net_t*event;
|
||||
uint64_t ecount;
|
||||
};
|
||||
|
||||
struct vthread_s*running_thread = 0;
|
||||
|
||||
// this table maps the thread special index bit addresses to
|
||||
// vvp_bit4_t bit values.
|
||||
static vvp_bit4_t thr_index_to_bit4[4] = { BIT4_0, BIT4_1, BIT4_X, BIT4_Z };
|
||||
|
|
@ -298,6 +301,43 @@ static void multiply_array_imm(unsigned long*res, unsigned long*val,
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate a context for use by a child thread. By preference, use
|
||||
* the last freed context. If none available, create a new one.
|
||||
*/
|
||||
static vvp_context_t vthread_alloc_context(__vpiScope*scope)
|
||||
{
|
||||
assert(scope->is_automatic);
|
||||
|
||||
vvp_context_t context = scope->free_context;
|
||||
if (context) {
|
||||
scope->free_context = vvp_get_next_context(context);
|
||||
for (unsigned idx = 0 ; idx < scope->nitem ; idx += 1) {
|
||||
scope->item[idx]->reset_instance(context);
|
||||
}
|
||||
} else {
|
||||
context = vvp_allocate_context(scope->nitem);
|
||||
for (unsigned idx = 0 ; idx < scope->nitem ; idx += 1) {
|
||||
scope->item[idx]->alloc_instance(context);
|
||||
}
|
||||
}
|
||||
|
||||
return context;
|
||||
}
|
||||
|
||||
/*
|
||||
* Free a context previously allocated to a child thread by pushing it
|
||||
* onto the freed context stack.
|
||||
*/
|
||||
static void vthread_free_context(vvp_context_t context, __vpiScope*scope)
|
||||
{
|
||||
assert(scope->is_automatic);
|
||||
assert(context);
|
||||
|
||||
vvp_set_next_context(context, scope->free_context);
|
||||
scope->free_context = context;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a new thread with the given start address.
|
||||
*/
|
||||
|
|
@ -309,6 +349,8 @@ vthread_t vthread_new(vvp_code_t pc, struct __vpiScope*scope)
|
|||
thr->child = 0;
|
||||
thr->parent = 0;
|
||||
thr->wait_next = 0;
|
||||
thr->wt_context = 0;
|
||||
thr->rd_context = 0;
|
||||
|
||||
/* If the target scope never held a thread, then create a
|
||||
header cell for it. This is a stub to make circular lists
|
||||
|
|
@ -334,7 +376,6 @@ vthread_t vthread_new(vvp_code_t pc, struct __vpiScope*scope)
|
|||
thr->is_scheduled = 0;
|
||||
thr->i_have_ended = 0;
|
||||
thr->waiting_for_event = 0;
|
||||
thr->is_scheduled = 0;
|
||||
thr->fork_count = 0;
|
||||
thr->event = 0;
|
||||
thr->ecount = 0;
|
||||
|
|
@ -409,6 +450,8 @@ void vthread_run(vthread_t thr)
|
|||
assert(thr->is_scheduled);
|
||||
thr->is_scheduled = 0;
|
||||
|
||||
running_thread = thr;
|
||||
|
||||
for (;;) {
|
||||
vvp_code_t cp = thr->pc;
|
||||
thr->pc += 1;
|
||||
|
|
@ -423,6 +466,7 @@ void vthread_run(vthread_t thr)
|
|||
|
||||
thr = tmp;
|
||||
}
|
||||
running_thread = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -486,6 +530,18 @@ void vthread_schedule_list(vthread_t thr)
|
|||
schedule_vthread(thr, 0);
|
||||
}
|
||||
|
||||
vvp_context_item_t vthread_get_wt_context_item(unsigned context_idx)
|
||||
{
|
||||
assert(running_thread && running_thread->wt_context);
|
||||
return vvp_get_context_item(running_thread->wt_context, context_idx);
|
||||
}
|
||||
|
||||
vvp_context_item_t vthread_get_rd_context_item(unsigned context_idx)
|
||||
{
|
||||
assert(running_thread && running_thread->rd_context);
|
||||
return vvp_get_context_item(running_thread->rd_context, context_idx);
|
||||
}
|
||||
|
||||
bool of_ABS_WR(vthread_t thr, vvp_code_t cp)
|
||||
{
|
||||
unsigned dst = cp->bit_idx[0];
|
||||
|
|
@ -495,6 +551,18 @@ bool of_ABS_WR(vthread_t thr, vvp_code_t cp)
|
|||
return true;
|
||||
}
|
||||
|
||||
bool of_ALLOC(vthread_t thr, vvp_code_t cp)
|
||||
{
|
||||
/* Allocate a context. */
|
||||
vvp_context_t child_context = vthread_alloc_context(cp->scope);
|
||||
|
||||
/* Push the allocated context onto the write context stack. */
|
||||
vvp_set_next_context(child_context, thr->wt_context);
|
||||
thr->wt_context = child_context;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool of_AND_wide(vthread_t thr, vvp_code_t cp)
|
||||
{
|
||||
unsigned idx1 = cp->bit_idx[0];
|
||||
|
|
@ -2308,6 +2376,12 @@ bool of_FORCE_X0(vthread_t thr, vvp_code_t cp)
|
|||
bool of_FORK(vthread_t thr, vvp_code_t cp)
|
||||
{
|
||||
vthread_t child = vthread_new(cp->cptr2, cp->scope);
|
||||
if (cp->scope->is_automatic) {
|
||||
/* The context allocated for this child is the top entry
|
||||
on the write context stack. */
|
||||
child->wt_context = thr->wt_context;
|
||||
child->rd_context = thr->wt_context;
|
||||
}
|
||||
|
||||
child->child = thr->child;
|
||||
child->parent = thr;
|
||||
|
|
@ -2324,9 +2398,23 @@ bool of_FORK(vthread_t thr, vvp_code_t cp)
|
|||
if (cp->scope->base.vpi_type->type_code == vpiFunction) {
|
||||
child->is_scheduled = 1;
|
||||
vthread_run(child);
|
||||
running_thread = thr;
|
||||
} else {
|
||||
schedule_vthread(child, 0, true);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool of_FREE(vthread_t thr, vvp_code_t cp)
|
||||
{
|
||||
/* Pop the child context from the read context stack. */
|
||||
vvp_context_t child_context = thr->rd_context;
|
||||
thr->rd_context = vvp_get_next_context(child_context);
|
||||
|
||||
/* Free the context. */
|
||||
vthread_free_context(child_context, cp->scope);
|
||||
|
||||
return true;
|
||||
}

@@ -2610,6 +2698,15 @@ bool of_JOIN(vthread_t thr, vvp_code_t cp)

      assert(thr->fork_count > 0);

      if (thr->wt_context != thr->rd_context) {
            /* Pop the child context from the write context stack. */
            vvp_context_t child_context = thr->wt_context;
            thr->wt_context = vvp_get_next_context(child_context);

            /* Push the child context onto the read context stack */
            vvp_set_next_context(child_context, thr->rd_context);
            thr->rd_context = child_context;
      }

      /* If the child has already ended, reap it now. */
      if (thr->child->i_have_ended) {
@@ -4080,7 +4177,6 @@ bool of_SUBI(vthread_t thr, vvp_code_t cp)

bool of_VPI_CALL(vthread_t thr, vvp_code_t cp)
{
      // printf("thread %p: %%vpi_call\n", thr);
      vpip_execute_vpi_call(thr, cp->handle);

      if (schedule_stopped()) {
@@ -4104,13 +4200,19 @@ bool of_WAIT(vthread_t thr, vvp_code_t cp)
      assert(! thr->waiting_for_event);
      thr->waiting_for_event = 1;

      vvp_net_t*net = cp->net;
      /* Get the functor as a waitable_hooks_s object. */
      waitable_hooks_s*ep = dynamic_cast<waitable_hooks_s*> (net->fun);
      assert(ep);
      /* Add this thread to the list in the event. */
      thr->wait_next = ep->threads;
      ep->threads = thr;
      vvp_net_fun_t*fun = cp->net->fun;
      if (fun->context_idx) {
            waitable_state_s*es = static_cast<waitable_state_s*>
                  (vthread_get_wt_context_item(fun->context_idx));
            thr->wait_next = es->threads;
            es->threads = thr;
      } else {
            waitable_hooks_s*ep = dynamic_cast<waitable_hooks_s*> (fun);
            assert(ep);
            thr->wait_next = ep->threads;
            ep->threads = thr;
      }
      /* Return false to suspend this thread. */
      return false;
}
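
The new %wait path selects the per-instance wait list through the functor's context_idx. The definition of waitable_state_s is not shown in this hunk; judging only from its use above, it presumably holds little more than the per-instance head of the waiting-thread list, roughly along these lines (an illustrative guess, not the patch's actual declaration):

struct waitable_state_s {
      vthread_t threads;      /* threads blocked in %wait on this instance */

      waitable_state_s() : threads(0) { }
};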

@@ -4185,46 +4287,57 @@ bool of_ZOMBIE(vthread_t thr, vvp_code_t)
}

/*
 * These are phantom opcode used to call user defined functions.
 * They are used in code generated by the .ufunc statement. They
 * contain a pointer to executable code of the function, and to a
 * ufunc_core object that has all the port information about the
 * This is a phantom opcode used to call user defined functions. It
 * is used in code generated by the .ufunc statement. It contains a
 * pointer to the executable code of the function and a pointer to
 * a ufunc_core object that has all the port information about the
 * function.
 */
bool of_FORK_UFUNC(vthread_t thr, vvp_code_t cp)
bool of_EXEC_UFUNC(vthread_t thr, vvp_code_t cp)
{
      struct __vpiScope*child_scope = cp->ufunc_core_ptr->func_scope();
      assert(child_scope);

      assert(thr->child == 0);
      assert(thr->fork_count == 0);

      /* We can take a number of shortcuts because we know that a
         continuous assignment can only occur in a static scope. */
      assert(thr->wt_context == 0);
      assert(thr->rd_context == 0);

      /* If an automatic function, allocate a context for this call. */
      vvp_context_t child_context = 0;
      if (child_scope->is_automatic) {
            child_context = vthread_alloc_context(child_scope);
            thr->wt_context = child_context;
            thr->rd_context = child_context;
      }
      /* Copy all the inputs to the ufunc object to the port
         variables of the function. This copies all the values
         atomically. */
      cp->ufunc_core_ptr->assign_bits_to_ports();

      assert(thr->child == 0);
      assert(thr->fork_count == 0);
      /* Create a temporary thread and run it immediately. A function
         may not contain any blocking statements, so vthread_run() can
         only return when the %end opcode is reached. */
      vthread_t child = vthread_new(cp->cptr, child_scope);
      child->wt_context = child_context;
      child->rd_context = child_context;
      child->is_scheduled = 1;
      vthread_run(child);
      running_thread = thr;

      /* Create a temporary thread, and push its execution. This is
         done so that the assign_bits_to_ports above is atomic with
         this startup. */
      vthread_t child = vthread_new(cp->cptr, cp->ufunc_core_ptr->scope());

      child->child = 0;
      child->parent = thr;
      thr->child = child;

      thr->fork_count += 1;
      schedule_vthread(child, 0, true);

      /* After this function, the .ufunc code has placed an of_JOIN
         to pause this thread. Since the child was pushed by the
         flag to schedule_vthread, the called function starts up
         immediately. */
      return true;
}

bool of_JOIN_UFUNC(vthread_t thr, vvp_code_t cp)
{
      /* Now copy the output from the result variable to the output
         ports of the .ufunc device. */
      cp->ufunc_core_ptr->finish_thread(thr);

      /* If an automatic function, free the context for this call. */
      if (child_scope->is_automatic) {
            vthread_free_context(child_context, child_scope);
            thr->wt_context = 0;
            thr->rd_context = 0;
      }

      return true;
}


@@ -62,6 +62,29 @@ extern void vthread_run(vthread_t thr);
 */
extern void vthread_schedule_list(vthread_t thr);

/*
 * This function returns a handle to an item in the writable context
 * of the currently running thread. Normally the writable context is
 * the context allocated to the scope associated with that thread.
 * However, between executing a %alloc instruction and executing the
 * associated %fork instruction, the writable context changes to the
 * newly allocated context, thus allowing the input parameters of an
 * automatic task or function to be written to the task/function local
 * variables.
 */
extern vvp_context_item_t vthread_get_wt_context_item(unsigned context_idx);

/*
 * This function returns a handle to an item in the readable context
 * of the currently running thread. Normally the readable context is
 * the context allocated to the scope associated with that thread.
 * However, between executing a %join instruction and executing the
 * associated %free instruction, the readable context changes to the
 * context allocated to the newly joined thread, thus allowing the
 * output parameters of an automatic task or function to be read from
 * the task/function local variables.
 */
extern vvp_context_item_t vthread_get_rd_context_item(unsigned context_idx);
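
A typical caller of these accessors is a functor derived from automatic_hooks_s (declared in vvp_net.h by this patch): when its context_idx is non-zero it dereferences the per-instance item, otherwise it falls back to its statically allocated member. A condensed sketch of that pattern follows; the payload type and the include path are illustrative assumptions, not code from the patch:

# include "vthread.h"                /* assumed name of this header */

struct my_state { double value; };   /* stand-in for a functor's per-instance data */

static double read_state(unsigned context_idx, const my_state&static_copy)
{
      if (context_idx) {
            my_state*s = static_cast<my_state*>
                  (vthread_get_rd_context_item(context_idx));
            return s->value;          /* instance owned by the running thread */
      }
      return static_copy.value;       /* statically allocated item */
}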

/*
 * Return a bit from the thread's bit space. These are used, for

vvp/vvp_net.cc

@@ -990,6 +990,20 @@ void vvp_vector4_t::change_z2x()
      }
}

void vvp_vector4_t::set_to_x()
{
      if (size_ <= BITS_PER_WORD) {
            abits_val_ = vvp_vector4_t::WORD_X_ABITS;
            bbits_val_ = vvp_vector4_t::WORD_X_BBITS;
      } else {
            unsigned words = (size_+BITS_PER_WORD-1) / BITS_PER_WORD;
            for (unsigned idx = 0 ; idx < words ; idx += 1) {
                  abits_ptr_[idx] = vvp_vector4_t::WORD_X_ABITS;
                  bbits_ptr_[idx] = vvp_vector4_t::WORD_X_BBITS;
            }
      }
}

char* vvp_vector4_t::as_string(char*buf, size_t buf_len)
{
      char*res = buf;
@@ -1284,9 +1298,12 @@ bool vector4_to_value(const vvp_vector4_t&vec, double&val, bool signed_flag)
      return flag;
}

vvp_vector4array_t::vvp_vector4array_t(unsigned width, unsigned words)
: width_(width), words_(words)
vvp_vector4array_t::vvp_vector4array_t(unsigned width, unsigned words,
                                       bool is_automatic)
: width_(width), words_(words), array_(0)
{
      if (is_automatic) return;

      array_ = new v4cell[words_];

      if (width_ <= vvp_vector4_t::BITS_PER_WORD) {
@@ -1314,30 +1331,79 @@ vvp_vector4array_t::~vvp_vector4array_t()
      }
}

void vvp_vector4array_t::alloc_instance(vvp_context_t context)
{
      v4cell*array = new v4cell[words_];

      if (width_ <= vvp_vector4_t::BITS_PER_WORD) {
            for (unsigned idx = 0 ; idx < words_ ; idx += 1) {
                  array[idx].abits_val_ = vvp_vector4_t::WORD_X_ABITS;
                  array[idx].bbits_val_ = vvp_vector4_t::WORD_X_BBITS;
            }
      } else {
            for (unsigned idx = 0 ; idx < words_ ; idx += 1) {
                  array[idx].abits_ptr_ = 0;
                  array[idx].bbits_ptr_ = 0;
            }
      }

      vvp_set_context_item(context, context_idx, array);
}

void vvp_vector4array_t::reset_instance(vvp_context_t context)
{
      v4cell*cell = static_cast<v4cell*>
            (vvp_get_context_item(context, context_idx));

      if (width_ <= vvp_vector4_t::BITS_PER_WORD) {
            for (unsigned idx = 0 ; idx < words_ ; idx += 1) {
                  cell->abits_val_ = vvp_vector4_t::WORD_X_ABITS;
                  cell->bbits_val_ = vvp_vector4_t::WORD_X_BBITS;
                  cell++;
            }
      } else {
            unsigned cnt = (width_ + vvp_vector4_t::BITS_PER_WORD-1)/vvp_vector4_t::BITS_PER_WORD;
            for (unsigned idx = 0 ; idx < words_ ; idx += 1) {
                  if (cell->abits_ptr_) {
                        for (unsigned n = 0 ; n < cnt ; n += 1) {
                              cell->abits_ptr_[n] = vvp_vector4_t::WORD_X_ABITS;
                              cell->bbits_ptr_[n] = vvp_vector4_t::WORD_X_BBITS;
                        }
                  }
                  cell++;
            }
      }
}

void vvp_vector4array_t::set_word(unsigned index, const vvp_vector4_t&that)
{
      assert(index < words_);
      assert(that.size_ == width_);

      v4cell&cell = array_[index];
      v4cell*cell;
      if (context_idx)
            cell = static_cast<v4cell*>
                  (vthread_get_wt_context_item(context_idx)) + index;
      else
            cell = &(array_[index]);

      if (width_ <= vvp_vector4_t::BITS_PER_WORD) {
            cell.abits_val_ = that.abits_val_;
            cell.bbits_val_ = that.bbits_val_;
            cell->abits_val_ = that.abits_val_;
            cell->bbits_val_ = that.bbits_val_;
            return;
      }

      unsigned cnt = (width_ + vvp_vector4_t::BITS_PER_WORD-1)/vvp_vector4_t::BITS_PER_WORD;

      if (cell.abits_ptr_ == 0) {
            cell.abits_ptr_ = new unsigned long[2*cnt];
            cell.bbits_ptr_ = cell.abits_ptr_ + cnt;
      if (cell->abits_ptr_ == 0) {
            cell->abits_ptr_ = new unsigned long[2*cnt];
            cell->bbits_ptr_ = cell->abits_ptr_ + cnt;
      }

      for (unsigned idx = 0 ; idx < cnt ; idx += 1)
            cell.abits_ptr_[idx] = that.abits_ptr_[idx];
            cell->abits_ptr_[idx] = that.abits_ptr_[idx];
      for (unsigned idx = 0 ; idx < cnt ; idx += 1)
            cell.bbits_ptr_[idx] = that.bbits_ptr_[idx];
            cell->bbits_ptr_[idx] = that.bbits_ptr_[idx];
}

vvp_vector4_t vvp_vector4array_t::get_word(unsigned index) const
@@ -1347,26 +1413,31 @@ vvp_vector4_t vvp_vector4array_t::get_word(unsigned index) const

      assert(index < words_);

      v4cell&cell = array_[index];
      v4cell*cell;
      if (context_idx)
            cell = static_cast<v4cell*>
                  (vthread_get_rd_context_item(context_idx)) + index;
      else
            cell = &(array_[index]);

      if (width_ <= vvp_vector4_t::BITS_PER_WORD) {
            vvp_vector4_t res;
            res.size_ = width_;
            res.abits_val_ = cell.abits_val_;
            res.bbits_val_ = cell.bbits_val_;
            res.abits_val_ = cell->abits_val_;
            res.bbits_val_ = cell->bbits_val_;
            return res;
      }

      vvp_vector4_t res (width_, BIT4_X);
      if (cell.abits_ptr_ == 0)
      if (cell->abits_ptr_ == 0)
            return res;

      unsigned cnt = (width_ + vvp_vector4_t::BITS_PER_WORD-1)/vvp_vector4_t::BITS_PER_WORD;

      for (unsigned idx = 0 ; idx < cnt ; idx += 1)
            res.abits_ptr_[idx] = cell.abits_ptr_[idx];
            res.abits_ptr_[idx] = cell->abits_ptr_[idx];
      for (unsigned idx = 0 ; idx < cnt ; idx += 1)
            res.bbits_ptr_[idx] = cell.bbits_ptr_[idx];
            res.bbits_ptr_[idx] = cell->bbits_ptr_[idx];

      return res;
@@ -2350,6 +2421,21 @@ vvp_fun_signal::vvp_fun_signal(unsigned wid, vvp_bit4_t init)
{
}

void vvp_fun_signal::alloc_instance(vvp_context_t context)
{
      unsigned wid = bits4_.size();

      vvp_set_context_item(context, context_idx, new vvp_vector4_t(wid));
}

void vvp_fun_signal::reset_instance(vvp_context_t context)
{
      vvp_vector4_t*bits = static_cast<vvp_vector4_t*>
            (vvp_get_context_item(context, context_idx));

      bits->set_to_x();
}

/*
 * Nets simply reflect their input to their output.
 *
@@ -2361,6 +2447,10 @@ vvp_fun_signal::vvp_fun_signal(unsigned wid, vvp_bit4_t init)
 * herein is to keep a "needs_init_" flag that is turned false after
 * the first propagation, and forces the first propagation to happen
 * even if it matches the initial value.
 *
 * Continuous and forced assignments are not permitted on automatic
 * variables. So we only need incur the overhead of checking for an
 * automatic variable when we are doing a normal unmasked assign.
 */
void vvp_fun_signal::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
@@ -2370,8 +2460,15 @@ void vvp_fun_signal::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
         copy the bits, otherwise we need to see if there are
         any holes in the mask so we can set those bits. */
      if (assign_mask_.size() == 0) {
            if (needs_init_ || !bits4_.eeq(bit)) {
                  bits4_ = bit;
            vvp_vector4_t*bits4;
            if (context_idx) {
                  bits4 = static_cast<vvp_vector4_t*>
                        (vthread_get_wt_context_item(context_idx));
            } else {
                  bits4 = &bits4_;
            }
            if (needs_init_ || !bits4->eeq(bit)) {
                  *bits4 = bit;
                  needs_init_ = false;
                  calculate_output_(ptr);
            }
@@ -2426,9 +2523,16 @@ void vvp_fun_signal::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit,
      switch (ptr.port()) {
          case 0: // Normal input
            if (assign_mask_.size() == 0) {
                  vvp_vector4_t*bits4;
                  if (context_idx) {
                        bits4 = static_cast<vvp_vector4_t*>
                              (vthread_get_wt_context_item(context_idx));
                  } else {
                        bits4 = &bits4_;
                  }
                  for (unsigned idx = 0 ; idx < wid ; idx += 1) {
                        if (base+idx >= bits4_.size()) break;
                        bits4_.set_bit(base+idx, bit.value(idx));
                        if (base+idx >= bits4->size()) break;
                        bits4->set_bit(base+idx, bit.value(idx));
                  }
                  needs_init_ = false;
                  calculate_output_(ptr);
@@ -2499,9 +2603,12 @@ void vvp_fun_signal::calculate_output_(vvp_net_ptr_t ptr)
                  bits.set_bit(idx, force_.value(idx));
            }
            vvp_send_vec4(ptr.ptr()->out, bits);

      } else if (context_idx) {
            vvp_vector4_t*bits4 = static_cast<vvp_vector4_t*>
                  (vthread_get_wt_context_item(context_idx));
            vvp_send_vec4(ptr.ptr()->out, *bits4);
      } else {
            vvp_send_vec4(ptr.ptr()->out, bits4_);
            vvp_send_vec4(ptr.ptr()->out, bits4_);
      }

      run_vpi_callbacks();
@@ -2548,18 +2655,28 @@ unsigned vvp_fun_signal::size() const

vvp_bit4_t vvp_fun_signal::value(unsigned idx) const
{
      if (force_mask_.size() && force_mask_.value(idx))
      if (force_mask_.size() && force_mask_.value(idx)) {
            return force_.value(idx);
      else
            return bits4_.value(idx);
      } else if (context_idx) {
            vvp_vector4_t*bits4 = static_cast<vvp_vector4_t*>
                  (vthread_get_rd_context_item(context_idx));
            return bits4->value(idx);
      } else {
            return bits4_.value(idx);
      }
}

vvp_scalar_t vvp_fun_signal::scalar_value(unsigned idx) const
{
      if (force_mask_.size() && force_mask_.value(idx))
      if (force_mask_.size() && force_mask_.value(idx)) {
            return vvp_scalar_t(force_.value(idx), 6, 6);
      else
            return vvp_scalar_t(bits4_.value(idx), 6, 6);
      } else if (context_idx) {
            vvp_vector4_t*bits4 = static_cast<vvp_vector4_t*>
                  (vthread_get_rd_context_item(context_idx));
            return vvp_scalar_t(bits4->value(idx), 6, 6);
      } else {
            return vvp_scalar_t(bits4_.value(idx), 6, 6);
      }
}

vvp_vector4_t vvp_fun_signal::vec4_value() const
@@ -2573,9 +2690,12 @@ vvp_vector4_t vvp_fun_signal::vec4_value() const
                  bits.set_bit(idx, force_.value(idx));
            }
            return bits;

      } else if (context_idx) {
            vvp_vector4_t*bits4 = static_cast<vvp_vector4_t*>
                  (vthread_get_rd_context_item(context_idx));
            return *bits4;
      } else {
            return bits4_;
            return bits4_;
      }
}

@@ -2764,12 +2884,30 @@ vvp_fun_signal_real::vvp_fun_signal_real()
      bits_ = 0.0;
}

void vvp_fun_signal_real::alloc_instance(vvp_context_t context)
{
      vvp_set_context_item(context, context_idx, new double);
}

void vvp_fun_signal_real::reset_instance(vvp_context_t context)
{
      double*bits = static_cast<double*>
            (vvp_get_context_item(context, context_idx));

      *bits = 0.0;
}

double vvp_fun_signal_real::real_value() const
{
      if (force_mask_.size())
      if (force_mask_.size()) {
            return force_;
      else
      } else if (context_idx) {
            double*bits = static_cast<double*>
                  (vthread_get_rd_context_item(context_idx));
            return *bits;
      } else {
            return bits_;
      }
}

/*
@@ -2787,8 +2925,15 @@ void vvp_fun_signal_real::recv_real(vvp_net_ptr_t ptr, double bit)
      switch (ptr.port()) {
          case 0:
            if (!continuous_assign_active_) {
                  if (needs_init_ || !bits_equal(bits_,bit)) {
                        bits_ = bit;
                  double*bits;
                  if (context_idx) {
                        bits = static_cast<double*>
                              (vthread_get_wt_context_item(context_idx));
                  } else {
                        bits = &bits_;
                  }
                  if (needs_init_ || !bits_equal(*bits,bit)) {
                        *bits = bit;
                        needs_init_ = false;
                        vvp_send_real(ptr.ptr()->out, bit);
                        run_vpi_callbacks();
@@ -22,6 +22,7 @@
# include "config.h"
# include "vpi_user.h"
# include <stddef.h>
# include <stdlib.h>
# include <string.h>
# include <new>
# include <assert.h>
@@ -49,6 +50,58 @@ class vvp_fun_part;

class vvp_delay_t;

/*
 * Storage for items declared in automatically allocated scopes (i.e. automatic
 * tasks and functions).
 */
typedef void**vvp_context_t;

typedef void*vvp_context_item_t;

inline vvp_context_t vvp_allocate_context(unsigned nitem)
{
      return (vvp_context_t)malloc((1 + nitem) * sizeof(void*));
}

inline vvp_context_t vvp_get_next_context(vvp_context_t context)
{
      return (vvp_context_t)context[0];
}

inline void vvp_set_next_context(vvp_context_t context, vvp_context_t next)
{
      context[0] = next;
}

inline vvp_context_item_t vvp_get_context_item(vvp_context_t context,
                                               unsigned item_idx)
{
      return (vvp_context_item_t)context[item_idx];
}

inline void vvp_set_context_item(vvp_context_t context, unsigned item_idx,
                                 vvp_context_item_t item)
{
      context[item_idx] = item;
}
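
As a quick illustration (not part of the patch), the helpers above can be exercised as follows; item slots start at index 1 because slot 0 is reserved for the chain pointer:

# include <cassert>
# include <cstdlib>

static void context_demo(void)
{
      vvp_context_t parent = vvp_allocate_context(2);
      vvp_context_t child  = vvp_allocate_context(2);

      vvp_set_context_item(parent, 1, (vvp_context_item_t)"per-instance state");
      vvp_set_next_context(child, parent);             /* chain child -> parent */

      assert(vvp_get_next_context(child) == parent);
      assert(vvp_get_context_item(parent, 1) != 0);

      free(child);      /* contexts come from malloc(), so free() releases them */
      free(parent);
}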

/*
 * An "automatic" functor is one which may be associated with an automatically
 * allocated scope item. This provides the infrastructure needed to allocate
 * and access the state information for individual instances of the item. A
 * context_idx value of 0 indicates a statically allocated item.
 */
struct automatic_hooks_s {

      automatic_hooks_s() : context_idx(0) {}
      virtual ~automatic_hooks_s() {}

      virtual void alloc_instance(vvp_context_t context) {}
      virtual void reset_instance(vvp_context_t context) {}

      unsigned context_idx;
};
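
For reference, a minimal hypothetical subclass (mirroring what vvp_fun_signal_real does later in this patch) allocates one payload per instance in its assigned slot and restores it to the reset value on demand:

/* Illustrative only; not part of the patch. */
struct example_auto_item : public automatic_hooks_s {

      explicit example_auto_item(double init) : static_value_(init) { }

      void alloc_instance(vvp_context_t context)
      { vvp_set_context_item(context, context_idx, new double(static_value_)); }

      void reset_instance(vvp_context_t context)
      {
            double*val = static_cast<double*>
                  (vvp_get_context_item(context, context_idx));
            *val = static_value_;
      }

    private:
      double static_value_;    /* value used for the statically allocated case */
};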

/*
 * This is the set of Verilog 4-value bit values. Scalars have this
 * value along with strength, vectors are a collection of these
@@ -169,6 +222,9 @@ class vvp_vector4_t {
      // Change all Z bits to X bits.
      void change_z2x();

      // Change all bits to X bits.
      void set_to_x();

      // Display the value into the buf as a string.
      char*as_string(char*buf, size_t buf_len);

@@ -402,12 +458,15 @@ extern bool vector4_to_value(const vvp_vector4_t&a, double&val, bool is_signed);
/*
 * vvp_vector4array_t
 */
class vvp_vector4array_t {
class vvp_vector4array_t : public automatic_hooks_s {

    public:
      vvp_vector4array_t(unsigned width, unsigned words);
      vvp_vector4array_t(unsigned width, unsigned words, bool is_automatic);
      ~vvp_vector4array_t();

      void alloc_instance(vvp_context_t context);
      void reset_instance(vvp_context_t context);

      unsigned width() const { return width_; }
      unsigned words() const { return words_; }

@@ -866,7 +925,7 @@ struct vvp_net_t {
 * operand to a vvp_vector4_t and pass it on to the recv_vec4 or
 * recv_vec4_pv method.
 */
class vvp_net_fun_t {
class vvp_net_fun_t : public automatic_hooks_s {

    public:
      vvp_net_fun_t();

@@ -1120,6 +1179,9 @@ class vvp_fun_signal : public vvp_fun_signal_vec {
    public:
      explicit vvp_fun_signal(unsigned wid, vvp_bit4_t init=BIT4_X);

      void alloc_instance(vvp_context_t context);
      void reset_instance(vvp_context_t context);

      void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit);
      void recv_vec8(vvp_net_ptr_t port, const vvp_vector8_t&bit);

@@ -1188,6 +1250,9 @@ class vvp_fun_signal_real : public vvp_fun_signal_base {
    public:
      explicit vvp_fun_signal_real();

      void alloc_instance(vvp_context_t context);
      void reset_instance(vvp_context_t context);

      //void recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit);
      void recv_real(vvp_net_ptr_t port, double bit);


@@ -37,6 +37,7 @@ static void __compile_var_real(char*label, char*name,
      vvp_fun_signal_real*fun = new vvp_fun_signal_real;
      vvp_net_t*net = new vvp_net_t;
      net->fun = fun;
      vpip_add_item_to_current_scope(fun);
      define_functor_symbol(label, net);

      vpiHandle obj = vpip_make_real_var(name, net);

@@ -45,7 +46,8 @@ static void __compile_var_real(char*label, char*name,
      if (name) {
            assert(!array);
            vpip_attach_to_current_scope(obj);
            schedule_init_vector(vvp_net_ptr_t(net,0), fun->real_value());
            if (!vpip_peek_current_scope()->is_automatic)
                  schedule_init_vector(vvp_net_ptr_t(net,0), fun->real_value());
      }
      if (array) {
            assert(!name);

@@ -81,6 +83,7 @@ static void __compile_var(char*label, char*name,
      vvp_net_t*node = new vvp_net_t;

      node->fun = vsig;
      vpip_add_item_to_current_scope(vsig);
      define_functor_symbol(label, node);

      vpiHandle obj = 0;

@@ -96,7 +99,8 @@ static void __compile_var(char*label, char*name,
      if (name) {
            assert(!array);
            if (obj) vpip_attach_to_current_scope(obj);
            schedule_init_vector(vvp_net_ptr_t(node,0), vsig->vec4_value());
            if (!vpip_peek_current_scope()->is_automatic)
                  schedule_init_vector(vvp_net_ptr_t(node,0), vsig->vec4_value());
      }
      // If this is an array word, then it does not have a name, and
      // it is attached to the addressed array.