Rework user function calls to use specialized opcodes.

Create The %callf/* opcodes to invoke user defined functions in a
more specialized way. This allows for some sanity checking on the
way, and also is a step towards keeping return values on stacks.
This commit is contained in:
Stephen Williams 2015-12-27 20:29:10 -08:00
parent 1b33bf8615
commit 63ad15ee2e
6 changed files with 179 additions and 30 deletions

View File

@ -130,10 +130,36 @@ static void draw_ufunc_preamble(ivl_expr_t expr)
}
/* Call the function */
fprintf(vvp_out, " %%fork TD_%s", vvp_mangle_id(ivl_scope_name(def)));
fprintf(vvp_out, ", S_%p;\n", def);
fprintf(vvp_out, " %%join;\n");
switch (ivl_expr_value(expr)) {
case IVL_VT_VOID:
fprintf(vvp_out, " %%callf/void TD_%s", vvp_mangle_id(ivl_scope_name(def)));
fprintf(vvp_out, ", S_%p;\n", def);
break;
case IVL_VT_REAL:
fprintf(vvp_out, " %%callf/real TD_%s", vvp_mangle_id(ivl_scope_name(def)));
fprintf(vvp_out, ", S_%p;\n", def);
break;
case IVL_VT_BOOL:
case IVL_VT_LOGIC:
fprintf(vvp_out, " %%callf/vec4 TD_%s", vvp_mangle_id(ivl_scope_name(def)));
fprintf(vvp_out, ", S_%p;\n", def);
break;
case IVL_VT_STRING:
fprintf(vvp_out, " %%callf/str TD_%s", vvp_mangle_id(ivl_scope_name(def)));
fprintf(vvp_out, ", S_%p;\n", def);
break;
case IVL_VT_CLASS:
case IVL_VT_DARRAY:
case IVL_VT_QUEUE:
fprintf(vvp_out, " %%callf/obj TD_%s", vvp_mangle_id(ivl_scope_name(def)));
fprintf(vvp_out, ", S_%p;\n", def);
break;
default:
fprintf(vvp_out, " %%fork TD_%s", vvp_mangle_id(ivl_scope_name(def)));
fprintf(vvp_out, ", S_%p;\n", def);
fprintf(vvp_out, " %%join;\n");
break;
}
}
static void draw_ufunc_epilogue(ivl_expr_t expr)

View File

@ -1621,10 +1621,18 @@ static int show_stmt_utask(ivl_statement_t net)
show_stmt_file_line(net, "User task call.");
fprintf(vvp_out, " %%fork TD_%s",
vvp_mangle_id(ivl_scope_name(task)));
fprintf(vvp_out, ", S_%p;\n", task);
fprintf(vvp_out, " %%join;\n");
if (ivl_scope_type(task) == IVL_SCT_FUNCTION) {
// A function called as a task is (presumably) a void function.
// Use the %callf/void instruction to call it.
fprintf(vvp_out, " %%callf/void TD_%s",
vvp_mangle_id(ivl_scope_name(task)));
fprintf(vvp_out, ", S_%p;\n", task);
} else {
fprintf(vvp_out, " %%fork TD_%s",
vvp_mangle_id(ivl_scope_name(task)));
fprintf(vvp_out, ", S_%p;\n", task);
fprintf(vvp_out, " %%join;\n");
}
return 0;
}

View File

@ -56,6 +56,11 @@ extern bool of_ASSIGN_WRE(vthread_t thr, vvp_code_t code);
extern bool of_BLEND(vthread_t thr, vvp_code_t code);
extern bool of_BLEND_WR(vthread_t thr, vvp_code_t code);
extern bool of_BREAKPOINT(vthread_t thr, vvp_code_t code);
extern bool of_CALLF_OBJ(vthread_t thr, vvp_code_t code);
extern bool of_CALLF_REAL(vthread_t thr, vvp_code_t code);
extern bool of_CALLF_STR(vthread_t thr, vvp_code_t code);
extern bool of_CALLF_VEC4(vthread_t thr, vvp_code_t code);
extern bool of_CALLF_VOID(vthread_t thr, vvp_code_t code);
extern bool of_CASSIGN_LINK(vthread_t thr, vvp_code_t code);
extern bool of_CASSIGN_VEC4(vthread_t thr, vvp_code_t code);
extern bool of_CASSIGN_VEC4_OFF(vthread_t thr, vvp_code_t code);
@ -259,7 +264,7 @@ struct vvp_code_s {
vvp_code_t cptr;
vvp_array_t array;
class __vpiHandle*handle;
struct __vpiScope*scope;
__vpiScope*scope;
const char*text;
};

View File

@ -108,6 +108,11 @@ static const struct opcode_table_s opcode_table[] = {
{ "%blend", of_BLEND, 0, {OA_NONE, OA_NONE, OA_NONE} },
{ "%blend/wr", of_BLEND_WR,0, {OA_NONE, OA_NONE, OA_NONE} },
{ "%breakpoint", of_BREAKPOINT, 0, {OA_NONE, OA_NONE, OA_NONE} },
{ "%callf/obj", of_CALLF_OBJ, 2,{OA_CODE_PTR2,OA_VPI_PTR, OA_NONE} },
{ "%callf/real", of_CALLF_REAL, 2,{OA_CODE_PTR2,OA_VPI_PTR, OA_NONE} },
{ "%callf/str", of_CALLF_STR, 2,{OA_CODE_PTR2,OA_VPI_PTR, OA_NONE} },
{ "%callf/vec4", of_CALLF_VEC4, 2,{OA_CODE_PTR2,OA_VPI_PTR, OA_NONE} },
{ "%callf/void", of_CALLF_VOID, 2,{OA_CODE_PTR2,OA_VPI_PTR, OA_NONE} },
{ "%cassign/link", of_CASSIGN_LINK, 2,{OA_FUNC_PTR,OA_FUNC_PTR2,OA_NONE} },
{ "%cassign/vec4", of_CASSIGN_VEC4, 1,{OA_FUNC_PTR,OA_NONE, OA_NONE} },
{ "%cassign/vec4/off",of_CASSIGN_VEC4_OFF,2,{OA_FUNC_PTR,OA_BIT1, OA_NONE} },

View File

@ -218,6 +218,22 @@ debugger commands.
This may not work on all platforms. If run-time debugging is compiled
out, then this function is a no-op.
* %callf/obj <code-label>, <scope-label>
* %callf/real <code-label>, <scope-label>
* %callf/str <code-label>, <scope-label>
* %callf/vec4 <code-label>, <scope-label>
* %callf/void <code-label>, <scope-label>
More directly implement function calling. This subsumes the %fork and
%join of the more general task and block calling, but is optimized for
functions, which are threads of a special, constrained sort.
The different variants reflect the different return types for the
called function. For example, if the function returns a string, the
%callf/str opcode is used, and will push the string return value into
the caller's string stack. The %callf/void function is special in that
it pushes no value onto any stack.
* %cassign/vec4 <var-label>
* %cassign/vec4/off <var-label>, <off-index>

View File

@ -248,6 +248,7 @@ struct vthread_s {
unsigned i_am_joining :1;
unsigned i_am_detached :1;
unsigned i_am_waiting :1;
unsigned i_am_in_function :1; // True if running function code
unsigned i_have_ended :1;
unsigned i_was_disabled :1;
unsigned waiting_for_event :1;
@ -262,7 +263,7 @@ struct vthread_s {
/* This points to my parent, if I have one. */
struct vthread_s*parent;
/* This points to the containing scope. */
struct __vpiScope*parent_scope;
__vpiScope*parent_scope;
/* This is used for keeping wait queues. */
struct vthread_s*wait_next;
/* These are used to access automatically allocated items. */
@ -309,7 +310,7 @@ void vthread_s::debug_dump(ostream&fd, const char*label)
static bool test_joinable(vthread_t thr, vthread_t child);
static void do_join(vthread_t thr, vthread_t child);
struct __vpiScope* vthread_scope(struct vthread_s*thr)
__vpiScope* vthread_scope(struct vthread_s*thr)
{
return thr->parent_scope;
}
@ -357,6 +358,9 @@ const vvp_vector4_t& vthread_get_vec4_stack(struct vthread_s*thr, unsigned depth
return thr->peek_vec4(depth);
}
/*
* Some thread management functions
*/
/*
* This is a function to get a vvp_queue handle from the variable
referenced by "net". If the queue is nil, then allocate it and
@ -423,7 +427,7 @@ static void multiply_array_imm(unsigned long*res, unsigned long*val,
* the last freed context. If none available, create a new one. Add
* it to the list of live contexts in that scope.
*/
static vvp_context_t vthread_alloc_context(struct __vpiScope*scope)
static vvp_context_t vthread_alloc_context(__vpiScope*scope)
{
assert(scope->is_automatic());
@ -451,7 +455,7 @@ static vvp_context_t vthread_alloc_context(struct __vpiScope*scope)
* onto the freed context stack. Remove it from the list of live contexts
* in that scope.
*/
static void vthread_free_context(vvp_context_t context, struct __vpiScope*scope)
static void vthread_free_context(vvp_context_t context, __vpiScope*scope)
{
assert(scope->is_automatic());
assert(context);
@ -491,7 +495,7 @@ void contexts_delete(struct __vpiScope*scope)
/*
* Create a new thread with the given start address.
*/
vthread_t vthread_new(vvp_code_t pc, struct __vpiScope*scope)
vthread_t vthread_new(vvp_code_t pc, __vpiScope*scope)
{
vthread_t thr = new struct vthread_s;
thr->pc = pc;
@ -505,6 +509,7 @@ vthread_t vthread_new(vvp_code_t pc, struct __vpiScope*scope)
thr->i_am_joining = 0;
thr->i_am_detached = 0;
thr->i_am_waiting = 0;
thr->i_am_in_function = 0;
thr->is_scheduled = 0;
thr->i_have_ended = 0;
thr->i_was_disabled = 0;
@ -1261,6 +1266,81 @@ bool of_BREAKPOINT(vthread_t, vvp_code_t)
return true;
}
/*
* %callf/void <code-label>, <scope-label>
* Combine the %fork and %join steps for invoking a function.
*/
static bool do_callf_void(vthread_t thr, vvp_code_t cp)
{
// Common implementation for the %callf/* opcodes: create a child
// thread for the called function and run it synchronously, folding
// the %fork and %join steps into a single operation.
// Returns true if the function completed (caller continues), false
// if the caller must suspend and wait for the child to end.
vthread_t child = vthread_new(cp->cptr2, cp->scope);
if (cp->scope->is_automatic()) {
/* The context allocated for this child is the top entry
on the write context stack */
child->wt_context = thr->wt_context;
child->rd_context = thr->wt_context;
}
// Mark the function thread as a direct child of the current thread.
child->parent = thr;
thr->children.insert(child);
// This should be the only child
assert(thr->children.size()==1);
// Execute the function. This SHOULD run the function to completion,
// but there are some exceptional situations where it won't.
assert(cp->scope->get_type_code() == vpiFunction);
thr->task_func_children.insert(child);
child->is_scheduled = 1;
child->i_am_in_function = 1;
// Run the function body directly (no scheduler round trip), then
// restore this thread as the running thread.
vthread_run(child);
running_thread = thr;
assert(test_joinable(thr, child));
if (child->i_have_ended) {
// Common case: the function ran to completion. Reap it now and
// let the caller continue in-line.
do_join(thr, child);
return true;
} else {
// The function did not complete in one run (an exceptional
// situation -- presumably it spawned work that has not finished;
// TODO confirm which cases reach here). Wait for it as %join would.
thr->i_am_joining = 1;
return false;
}
}
bool of_CALLF_OBJ(vthread_t thr, vvp_code_t cp)
{
// %callf/obj <code-label>, <scope-label>
// Call a function that returns a class/darray/queue object.
// XXXX NOT IMPLEMENTED: the object return value is not yet pushed
// onto the caller's stack; for now this behaves like %callf/void.
return do_callf_void(thr, cp);
}
bool of_CALLF_REAL(vthread_t thr, vvp_code_t cp)
{
// %callf/real <code-label>, <scope-label>
// Call a function that returns a real value.
// XXXX Here, I should arrange for a reference to the destination variable
// XXXX as a place in my stack. The function will write to that place in
// XXXX my stack for me.
return do_callf_void(thr, cp);
}
bool of_CALLF_STR(vthread_t thr, vvp_code_t cp)
{
// %callf/str <code-label>, <scope-label>
// Call a function that returns a string.
// XXXX NOT IMPLEMENTED: the string return value is not yet pushed
// onto the caller's string stack; for now this behaves like %callf/void.
return do_callf_void(thr, cp);
}
bool of_CALLF_VEC4(vthread_t thr, vvp_code_t cp)
{
// %callf/vec4 <code-label>, <scope-label>
// Call a function that returns a bool/logic vector.
// XXXX NOT IMPLEMENTED: the vec4 return value is not yet pushed
// onto the caller's vec4 stack; for now this behaves like %callf/void.
return do_callf_void(thr, cp);
}
bool of_CALLF_VOID(vthread_t thr, vvp_code_t cp)
{
// %callf/void <code-label>, <scope-label>
// Call a void function: no return value is pushed on any stack,
// so the shared helper is the complete implementation.
return do_callf_void(thr, cp);
}
/*
* The %cassign/link instruction connects a source node to a
* destination node. The destination node must be a signal, as it is
@ -2227,7 +2307,7 @@ static bool do_disable(vthread_t thr, vthread_t match)
*/
bool of_DISABLE(vthread_t thr, vvp_code_t cp)
{
struct __vpiScope*scope = (struct __vpiScope*)cp->handle;
__vpiScope*scope = (__vpiScope*)cp->handle;
bool disabled_myself_flag = false;
@ -2897,24 +2977,26 @@ bool of_FORK(vthread_t thr, vvp_code_t cp)
child->parent = thr;
thr->children.insert(child);
/* If the child scope is not the same as the current scope,
infer that this is a task or function call. */
switch (cp->scope->get_type_code()) {
case vpiFunction:
case vpiFunction:
// Functions should be started by the %callf opcodes, and
// NOT by the %fork instruction
assert(0);
case vpiTask:
thr->task_func_children.insert(child);
child->is_scheduled = 1;
vthread_run(child);
running_thread = thr;
break;
case vpiTask:
thr->task_func_children.insert(child);
schedule_vthread(child, 0, true);
break;
default:
schedule_vthread(child, 0, true);
default:
break;
}
if (thr->i_am_in_function) {
child->is_scheduled = 1;
child->i_am_in_function = 1;
vthread_run(child);
running_thread = thr;
} else {
schedule_vthread(child, 0, true);
}
return true;
}
@ -3260,7 +3342,7 @@ static void do_join(vthread_t thr, vthread_t child)
vthread_reap(child);
}
bool of_JOIN(vthread_t thr, vvp_code_t)
static bool do_join_opcode(vthread_t thr)
{
assert( !thr->i_am_joining );
assert( !thr->children.empty());
@ -3287,6 +3369,11 @@ bool of_JOIN(vthread_t thr, vvp_code_t)
return false;
}
bool of_JOIN(vthread_t thr, vvp_code_t)
{
// %join: wait for (or immediately reap) a child created by %fork.
// The work is shared with other join-style opcodes via do_join_opcode.
return do_join_opcode(thr);
}
/*
* This %join/detach <n> instruction causes the thread to detach
* threads that were created by an earlier %fork.
@ -5692,6 +5779,7 @@ bool of_VPI_CALL(vthread_t thr, vvp_code_t cp)
*/
bool of_WAIT(vthread_t thr, vvp_code_t cp)
{
assert(! thr->i_am_in_function);
assert(! thr->waiting_for_event);
thr->waiting_for_event = 1;
@ -5712,6 +5800,7 @@ bool of_WAIT_FORK(vthread_t thr, vvp_code_t)
{
/* If a %wait/fork is being executed then the parent thread
* cannot be waiting in a join or already waiting. */
assert(! thr->i_am_in_function);
assert(! thr->i_am_joining);
assert(! thr->i_am_waiting);
@ -5790,7 +5879,7 @@ bool of_ZOMBIE(vthread_t thr, vvp_code_t)
*/
bool of_EXEC_UFUNC(vthread_t thr, vvp_code_t cp)
{
struct __vpiScope*child_scope = cp->ufunc_core_ptr->func_scope();
__vpiScope*child_scope = cp->ufunc_core_ptr->func_scope();
assert(child_scope);
assert(thr->children.empty());
@ -5837,7 +5926,7 @@ bool of_EXEC_UFUNC(vthread_t thr, vvp_code_t cp)
*/
bool of_REAP_UFUNC(vthread_t thr, vvp_code_t cp)
{
struct __vpiScope*child_scope = cp->ufunc_core_ptr->func_scope();
__vpiScope*child_scope = cp->ufunc_core_ptr->func_scope();
assert(child_scope);
/* Copy the output from the result variable to the output