Fix `#0` delays to trigger in the inactive region and add a T0 trigger for always_comb/always_latch

This commit is contained in:
Cary R 2017-12-03 20:13:24 -08:00
parent a5b945f8f5
commit db1ea05452
5 changed files with 88 additions and 24 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001-2016 Stephen Williams (steve@icarus.com)
* Copyright (c) 2001-2017 Stephen Williams (steve@icarus.com)
*
* This source code is free software; you can redistribute it
* and/or modify it in source code form under the terms of the GNU
@ -1643,6 +1643,7 @@ static int show_stmt_utask(ivl_statement_t net)
static int show_stmt_wait(ivl_statement_t net, ivl_scope_t sscope)
{
static unsigned int cascade_counter = 0;
/* Look to see if this is a SystemVerilog wait fork. */
if ((ivl_stmt_nevent(net) == 1) && (ivl_stmt_events(net, 0) == 0)) {
assert(ivl_statement_type(ivl_stmt_sub_stmt(net)) == IVL_ST_NOOP);
@ -1655,11 +1656,17 @@ static int show_stmt_wait(ivl_statement_t net, ivl_scope_t sscope)
if (ivl_stmt_nevent(net) == 1) {
ivl_event_t ev = ivl_stmt_events(net, 0);
fprintf(vvp_out, " %%wait E_%p;\n", ev);
if (ivl_stmt_needs_t0_trigger(net)) {
fprintf(vvp_out, "Ewait_%u .event/or E_%p, E_0x0;\n",
cascade_counter, ev);
fprintf(vvp_out, " %%wait Ewait_%u;\n", cascade_counter);
cascade_counter += 1;
} else {
fprintf(vvp_out, " %%wait E_%p;\n", ev);
}
} else {
unsigned idx;
static unsigned int cascade_counter = 0;
ivl_event_t ev = ivl_stmt_events(net, 0);
fprintf(vvp_out, "Ewait_%u .event/or E_%p", cascade_counter, ev);
@ -1667,6 +1674,7 @@ static int show_stmt_wait(ivl_statement_t net, ivl_scope_t sscope)
ev = ivl_stmt_events(net, idx);
fprintf(vvp_out, ", E_%p", ev);
}
assert(ivl_stmt_needs_t0_trigger(net) == 0);
fprintf(vvp_out, ";\n %%wait Ewait_%u;\n", cascade_counter);
cascade_counter += 1;
}

View File

@ -478,6 +478,7 @@ struct vvp_net_resolv_list_s: public resolv_list_s {
bool vvp_net_resolv_list_s::resolve(bool mes)
{
static bool t0_trigger_generated = false;
vvp_net_t*tmp = vvp_net_lookup(label());
if (tmp) {
@ -486,6 +487,25 @@ bool vvp_net_resolv_list_s::resolve(bool mes)
return true;
}
// This is a special label used to create a T0 trigger for the
// always_comb/latch processes.
if (! t0_trigger_generated && (strcmp(label(), "E_0x0") == 0)) {
// This should never happen, but if it does then the E_0x0
// event generation may need to be explicitly generated in
// the compiler output instead of implicitly in this code.
assert(! vpip_peek_current_scope()->is_automatic());
t0_trigger_generated = true;
// Create an event with no name for the T0 trigger
compile_named_event(strdup(label()), strcpy(new char [1],""));
tmp = vvp_net_lookup(label());
assert(tmp);
tmp->link(port);
// Create a trigger for the T0 event.
vvp_net_ptr_t ptr (tmp, 0);
schedule_t0_trigger(ptr);
return true;
}
if (mes)
fprintf(stderr, "unresolved vvp_net reference: %s\n", label());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001-2016 Stephen Williams (steve@icarus.com)
* Copyright (c) 2001-2017 Stephen Williams (steve@icarus.com)
*
* This source code is free software; you can redistribute it
* and/or modify it in source code form under the terms of the GNU
@ -76,6 +76,7 @@ struct event_time_s {
count_time_events += 1;
start = 0;
active = 0;
inactive = 0;
nbassign = 0;
rwsync = 0;
rosync = 0;
@ -86,6 +87,7 @@ struct event_time_s {
struct event_s*start;
struct event_s*active;
struct event_s*inactive;
struct event_s*nbassign;
struct event_s*rwsync;
struct event_s*rosync;
@ -661,14 +663,13 @@ static void schedule_final_event(struct event_s*cur)
* itself, and the structure is placed in the right place in the
* queue.
*/
typedef enum event_queue_e { SEQ_START, SEQ_ACTIVE, SEQ_NBASSIGN,
typedef enum event_queue_e { SEQ_START, SEQ_ACTIVE, SEQ_INACTIVE, SEQ_NBASSIGN,
SEQ_RWSYNC, SEQ_ROSYNC, DEL_THREAD } event_queue_t;
static void schedule_event_(struct event_s*cur, vvp_time64_t delay,
event_queue_t select_queue)
{
cur->next = cur;
struct event_time_s*ctim = sched_list;
if (sched_list == 0) {
@ -736,6 +737,11 @@ static void schedule_event_(struct event_s*cur, vvp_time64_t delay,
q = &ctim->active;
break;
case SEQ_INACTIVE:
assert(delay == 0);
q = &ctim->inactive;
break;
case SEQ_NBASSIGN:
q = &ctim->nbassign;
break;
@ -801,6 +807,23 @@ void schedule_vthread(vthread_t thr, vvp_time64_t delay, bool push_flag)
}
}
/*
 * Schedule the T0 trigger event for always_comb/always_latch
 * processes. A single-bit 'x' vector4 assignment event is queued to
 * the given net pointer with zero delay on the SEQ_INACTIVE queue, so
 * it fires in the first inactive region of simulation time 0.
 */
void schedule_t0_trigger(vvp_net_ptr_t ptr)
{
vvp_vector4_t bit (1, BIT4_X);
/* The event carries the 'x' value and the destination net port. */
struct assign_vector4_event_s*cur = new struct assign_vector4_event_s(bit);
cur->ptr = ptr;
schedule_event_(cur, 0, SEQ_INACTIVE);
}
/*
 * Schedule a thread to resume in the inactive region of the current
 * simulation time (used by %delay when the delay is zero, i.e. an
 * explicit #0). The thread is marked as scheduled and queued with
 * zero delay on the SEQ_INACTIVE queue.
 */
void schedule_inactive(vthread_t thr)
{
struct vthread_event_s*cur = new vthread_event_s;
cur->thr = thr;
/* Mark before queueing so the thread is not scheduled twice. */
vthread_mark_scheduled(thr);
schedule_event_(cur, 0, SEQ_INACTIVE);
}
void schedule_init_vthread(vthread_t thr)
{
struct vthread_event_s*cur = new vthread_event_s;
@ -1044,7 +1067,7 @@ static void run_rosync(struct event_time_s*ctim)
delete cur;
}
if (ctim->active || ctim->nbassign || ctim->rwsync) {
if (ctim->active || ctim->inactive || ctim->nbassign || ctim->rwsync) {
cerr << "SCHEDULER ERROR: read-only sync events "
<< "created RW events!" << endl;
}
@ -1145,21 +1168,26 @@ void schedule_simulate(void)
queues. If there are not events at all, then release
the event_time object. */
if (ctim->active == 0) {
ctim->active = ctim->nbassign;
ctim->nbassign = 0;
ctim->active = ctim->inactive;
ctim->inactive = 0;
if (ctim->active == 0) {
ctim->active = ctim->rwsync;
ctim->rwsync = 0;
ctim->active = ctim->nbassign;
ctim->nbassign = 0;
/* If out of rw events, then run the rosync
events and delete this time step. This also
deletes threads as needed. */
if (ctim->active == 0) {
run_rosync(ctim);
sched_list = ctim->next;
delete ctim;
continue;
ctim->active = ctim->rwsync;
ctim->rwsync = 0;
/* If out of rw events, then run the rosync
events and delete this time step. This also
deletes threads as needed. */
if (ctim->active == 0) {
run_rosync(ctim);
sched_list = ctim->next;
delete ctim;
continue;
}
}
}
}

View File

@ -1,7 +1,7 @@
#ifndef IVL_schedule_H
#define IVL_schedule_H
/*
* Copyright (c) 2001-2016 Stephen Williams (steve@icarus.com)
* Copyright (c) 2001-2017 Stephen Williams (steve@icarus.com)
*
* This source code is free software; you can redistribute it
* and/or modify it in source code form under the terms of the GNU
@ -35,6 +35,8 @@
extern void schedule_vthread(vthread_t thr, vvp_time64_t delay,
bool push_flag =false);
extern void schedule_inactive(vthread_t thr);
extern void schedule_init_vthread(vthread_t thr);
extern void schedule_final_vthread(vthread_t thr);
@ -86,6 +88,12 @@ extern void schedule_set_vector(vvp_net_ptr_t ptr, const vvp_vector4_t&val);
extern void schedule_set_vector(vvp_net_ptr_t ptr, vvp_vector8_t val);
extern void schedule_set_vector(vvp_net_ptr_t ptr, double val);
/*
* Create a T0 event for always_comb/latch processes. This is the first
* event in the first inactive region.
*/
extern void schedule_t0_trigger(vvp_net_ptr_t ptr);
/*
* The schedule_init_vector function assigns an initial value to a
* functor. The assignment is put into a pre-simulation queue that is

View File

@ -2328,11 +2328,10 @@ bool of_DELAY(vthread_t thr, vvp_code_t cp)
vvp_time64_t low = cp->bit_idx[0];
vvp_time64_t hig = cp->bit_idx[1];
vvp_time64_t res = 32;
res = hig << res;
res += low;
vvp_time64_t delay = (hig << 32) | low;
schedule_vthread(thr, res);
if (delay == 0) schedule_inactive(thr);
else schedule_vthread(thr, delay);
return false;
}
@ -2342,7 +2341,8 @@ bool of_DELAYX(vthread_t thr, vvp_code_t cp)
assert(cp->number < vthread_s::WORDS_COUNT);
delay = thr->words[cp->number].w_uint;
schedule_vthread(thr, delay);
if (delay == 0) schedule_inactive(thr);
else schedule_vthread(thr, delay);
return false;
}