iverilog/vvp/schedule.cc

/*
* Copyright (c) 2001-2003 Stephen Williams (steve@icarus.com)
*
* This source code is free software; you can redistribute it
* and/or modify it in source code form under the terms of the GNU
* General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#ifdef HAVE_CVS_IDENT
#ident "$Id: schedule.cc,v 1.31 2005/02/12 03:26:14 steve Exp $"
#endif
# include "schedule.h"
# include "functor.h"
# include "memory.h"
# include "vthread.h"
#ifdef HAVE_MALLOC_H
# include <malloc.h>
#endif
# include <signal.h>
# include <stdlib.h>
# include <assert.h>
# include <stdio.h>

unsigned long count_assign_events = 0;
unsigned long count_gen_events = 0;
unsigned long count_prop_events = 0;
unsigned long count_thread_events = 0;
unsigned long count_event_pool = 0;
unsigned long count_time_pool = 0;
/*
* The event_s and event_time_s structures implement the Verilog
* stratified event queue.
*
* The event_time_s objects are one per time step. Each time step in
* turn contains a list of event_s objects that are the actual events.
*
* The event_s structure is the base class for the more specific
* sorts of events.
*/
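/*
* Note on the representation: the delay in each event_time_s is
* relative to the event_time_s before it in the list, not absolute.
* Events at absolute times 10, 15 and 22, for example, are stored as
* three nodes with delays 10, 5 and 7, so advancing simulation time
* only ever needs to look at the head of the list.
*
* The active, nbassign and rosync lists are each kept as a circular,
* singly linked list of event_s objects where the stored pointer
* refers to the *last* event. That makes appending at the tail and
* pulling from the head (via last->next) both O(1) operations.
*/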
struct event_s {
struct event_s*next;
virtual ~event_s() { }
virtual void run_run(void) =0;
};
struct event_time_s {
vvp_time64_t delay;
struct event_s*active;
struct event_s*nbassign;
struct event_s*rosync;
struct event_time_s*next;
void* operator new (size_t);
void operator delete(void*obj, size_t s);
};
/*
* Derived event types
*/
struct vthread_event_s : public event_s {
vthread_t thr;
void run_run(void);
};
void vthread_event_s::run_run(void)
{
count_thread_events += 1;
vthread_run(thr);
}
struct assign_vector4_event_s : public event_s {
vvp_net_ptr_t ptr;
vvp_vector4_t val;
void run_run(void);
};
void assign_vector4_event_s::run_run(void)
{
count_assign_events += 1;
vvp_send_vec4(ptr, val);
}
struct assign_vector8_event_s : public event_s {
vvp_net_ptr_t ptr;
vvp_vector8_t val;
void run_run(void);
};
void assign_vector8_event_s::run_run(void)
{
count_assign_events += 1;
vvp_send_vec8(ptr, val);
}
struct generic_event_s : public event_s {
vvp_gen_event_t obj;
unsigned char val;
void run_run(void);
};
void generic_event_s::run_run(void)
{
count_gen_events += 1;
if (obj && obj->run)
obj->run(obj, val);
}
/*
** These event_time_s objects are needed in large numbers, at high
** frequency. Once allocated, we never free them; instead they are
** stashed on a free list and reused.
*/
static struct event_time_s* time_free_list = 0;
static const unsigned TIME_CHUNK_COUNT = 8192 / sizeof(struct event_time_s);
inline void* event_time_s::operator new (size_t size)
{
assert(size == sizeof(struct event_time_s));
struct event_time_s* cur = time_free_list;
if (!cur) {
cur = (struct event_time_s*)
malloc(TIME_CHUNK_COUNT * sizeof(struct event_time_s));
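/* Hand element [0] of the new chunk back to the caller
(below) and thread elements [1] through
[TIME_CHUNK_COUNT-1] onto the free list. */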
for (unsigned idx = 1 ; idx < TIME_CHUNK_COUNT ; idx += 1) {
cur[idx].next = time_free_list;
time_free_list = cur + idx;
}
count_time_pool += TIME_CHUNK_COUNT;
} else {
time_free_list = cur->next;
}
return cur;
}
inline void event_time_s::operator delete(void*obj, size_t size)
{
struct event_time_s*cur = reinterpret_cast<event_time_s*>(obj);
cur->next = time_free_list;
time_free_list = cur;
}
/*
* This is the head of the list of pending events. This includes all
* the events that have not been executed yet, and reaches into the
* future.
*/
static struct event_time_s* sched_list = 0;
/*
* At the current time, events that are marked as synch events are put
* into this list and held off until the time step is about to
* advance. Then the events in this list are run and the clock is
* allowed to advance.
*/
static struct event_s* synch_list = 0;
/*
* This flag is true until a VPI task or function finishes the
* simulation.
*/
static bool schedule_runnable = true;
static bool schedule_stopped_flag = false;
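/*
* The main loop in schedule_simulate() tests schedule_runnable at
* the top of every iteration, so a $finish takes effect before the
* next event is pulled. The schedule_stopped_flag is polled at the
* same point to enter the interactive stop handler.
*/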
void schedule_finish(int)
{
schedule_runnable = false;
}
void schedule_stop(int)
{
schedule_stopped_flag = true;
}
bool schedule_finished(void)
{
return !schedule_runnable;
}
bool schedule_stopped(void)
{
return schedule_stopped_flag;
}
/*
* This is the signal handling infrastructure. The SIGINT signal
* leads to an implicit $stop.
*/
static void signals_handler(int)
{
schedule_stopped_flag = true;
}
static void signals_capture(void)
{
signal(SIGINT, &signals_handler);
}
static void signals_revert(void)
{
signal(SIGINT, SIG_DFL);
}
/*
* This function does all the hard work of putting an event into the
* event queue. The event delay is taken from the event structure
* itself, and the structure is placed in the right place in the
* queue.
*/
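/* These selectors correspond (roughly) to regions of the Verilog
stratified event queue: SEQ_ACTIVE for active events, SEQ_NBASSIGN
for nonblocking assignment updates, and SEQ_ROSYNC for read-only
synchronization callbacks that run at the end of a time step. */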
typedef enum event_queue_e { SEQ_ACTIVE, SEQ_NBASSIGN, SEQ_ROSYNC } event_queue_t;
static void schedule_event_(struct event_s*cur, vvp_time64_t delay,
event_queue_t select_queue)
{
cur->next = cur;  /* start cur as a circular list of one */
struct event_time_s*ctim = sched_list;
if (sched_list == 0) {
/* Is the event_time list completely empty? Create the
first event_time object. */
ctim = new struct event_time_s;
ctim->active = 0;
ctim->nbassign = 0;
ctim->rosync = 0;
ctim->delay = delay;
ctim->next = 0;
sched_list = ctim;
} else if (sched_list->delay > delay) {
/* Am I looking for an event before the first event_time?
If so, create a new event_time to go in front. */
struct event_time_s*tmp = new struct event_time_s;
tmp->active = 0;
tmp->nbassign = 0;
tmp->rosync = 0;
tmp->delay = delay;
tmp->next = ctim;
ctim->delay -= delay;
ctim = tmp;
sched_list = ctim;
} else {
struct event_time_s*prev = 0;
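/* Walk down the list, reducing the requested delay by the
delta of every node passed, until the insertion point is
reached. */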
while (ctim->next && (ctim->delay < delay)) {
delay -= ctim->delay;
prev = ctim;
ctim = ctim->next;
}
if (ctim->delay > delay) {
struct event_time_s*tmp = new struct event_time_s;
tmp->active = 0;
tmp->nbassign = 0;
tmp->rosync = 0;
tmp->delay = delay;
tmp->next = prev->next;
prev->next = tmp;
/* The displaced node's delta is now relative to the new
node, so shrink it accordingly. */
tmp->next->delay -= delay;
ctim = tmp;
} else if (ctim->delay == delay) {
/* Exact match: the event joins this existing node. */
} else {
assert(ctim->next == 0);
struct event_time_s*tmp = new struct event_time_s;
tmp->active = 0;
tmp->nbassign = 0;
tmp->rosync = 0;
tmp->delay = delay - ctim->delay;
tmp->next = 0;
ctim->next = tmp;
ctim = tmp;
}
}
/* By this point, ctim is the event_time structure that is to
receive the event at hand. Put the event into the
appropriate list for the kind of event we have at hand. */
switch (select_queue) {
case SEQ_ACTIVE:
if (ctim->active == 0) {
ctim->active = cur;
} else {
/* Put the cur event on the end of the active list. */
cur->next = ctim->active->next;
ctim->active->next = cur;
ctim->active = cur;
}
break;
case SEQ_NBASSIGN:
if (ctim->nbassign == 0) {
ctim->nbassign = cur;
} else {
/* Put the cur event on the end of the nbassign list. */
cur->next = ctim->nbassign->next;
ctim->nbassign->next = cur;
ctim->nbassign = cur;
}
break;
case SEQ_ROSYNC:
if (ctim->rosync == 0) {
ctim->rosync = cur;
} else {
/* Put the cur event on the end of the rosync list. */
cur->next = ctim->rosync->next;
ctim->rosync->next = cur;
ctim->rosync = cur;
}
break;
}
}
static void schedule_event_push_(struct event_s*cur)
{
if ((sched_list == 0) || (sched_list->delay > 0)) {
schedule_event_(cur, 0, SEQ_ACTIVE);
return;
}
struct event_time_s*ctim = sched_list;
if (ctim->active == 0) {
cur->next = cur;
ctim->active = cur;
return;
}
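/* Insert cur at the head of the circular active list. The
tail pointer (ctim->active) is deliberately left alone, so
cur becomes the next event to be pulled. */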
cur->next = ctim->active->next;
ctim->active->next = cur;
}
/*
* Pull the next event off the synch (ReadOnlySync) list, or return 0
* if there are no pending synch events.
*/
static struct event_s* pull_sync_event(void)
{
if (synch_list == 0)
return 0;
struct event_s*cur = synch_list->next;
if (cur->next == cur) {
synch_list = 0;
} else {
synch_list->next = cur->next;
}
return cur;
}
void schedule_vthread(vthread_t thr, vvp_time64_t delay, bool push_flag)
{
struct vthread_event_s*cur = new vthread_event_s;
cur->thr = thr;
vthread_mark_scheduled(thr);
if (push_flag && (delay == 0)) {
/* Special case: If the delay is 0, the push_flag means
I can push this event in front of everything. This is
used by the %fork statement, for example, to perform
task calls. */
schedule_event_push_(cur);
} else {
schedule_event_(cur, delay, SEQ_ACTIVE);
}
}
void functor_s::schedule(vvp_time64_t delay, bool nba_flag)
{
#if 0
struct event_s*cur = new event_s;
cur->funp = this;
cur->type = TYPE_PROP;
schedule_event_(cur, delay, nba_flag? SEQ_NBASSIGN:SEQ_ACTIVE);
#else
fprintf(stderr, "XXXX I forgot how to schedule functors.\n");
#endif
}
void schedule_assign_vector(vvp_net_ptr_t ptr,
vvp_vector4_t bit,
vvp_time64_t delay)
{
struct assign_vector4_event_s*cur = new struct assign_vector4_event_s;
cur->ptr = ptr;
cur->val = bit;
schedule_event_(cur, delay, SEQ_NBASSIGN);
}
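/*
* As a sketch of the intended use (not a normative description of
* the vvp opcodes): a Verilog nonblocking assignment such as
*
*       q <= #5 d;
*
* evaluates d immediately and then calls schedule_assign_vector()
* with the destination port, the computed value, and a delay of 5,
* so the update lands in the SEQ_NBASSIGN queue of the time step
* five units in the future.
*/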
void schedule_set_vector(vvp_net_ptr_t ptr, vvp_vector4_t bit)
{
struct assign_vector4_event_s*cur = new struct assign_vector4_event_s;
cur->ptr = ptr;
cur->val = bit;
schedule_event_(cur, 0, SEQ_ACTIVE);
}
void schedule_set_vector(vvp_net_ptr_t ptr, vvp_vector8_t bit)
{
struct assign_vector8_event_s*cur = new struct assign_vector8_event_s;
cur->ptr = ptr;
cur->val = bit;
schedule_event_(cur, 0, SEQ_ACTIVE);
}
void schedule_generic(vvp_gen_event_t obj, unsigned char val,
vvp_time64_t delay, bool sync_flag)
{
struct generic_event_s*cur = new generic_event_s;
cur->obj = obj;
cur->val = val;
schedule_event_(cur, delay, sync_flag? SEQ_ROSYNC : SEQ_ACTIVE);
}
static vvp_time64_t schedule_time;
vvp_time64_t schedule_simtime(void)
{ return schedule_time; }
extern void vpiPresim();
extern void vpiPostsim();
extern void vpiNextSimTime(void);
void schedule_simulate(void)
{
schedule_time = 0;
// Execute pre-simulation callbacks
vpiPresim();
signals_capture();
while (schedule_runnable && sched_list) {
if (schedule_stopped_flag) {
schedule_stopped_flag = false;
stop_handler(0);
continue;
}
/* ctim is the current time step. */
struct event_time_s* ctim = sched_list;
/* If the time is advancing, then first run the
postponed sync events. Run them all. */
if (ctim->delay > 0) {
struct event_s*sync_cur;
while ( (sync_cur = pull_sync_event()) ) {
sync_cur->run_run();
delete sync_cur;
}
schedule_time += ctim->delay;
ctim->delay = 0;
vpiNextSimTime();
}
/* If there are no more active events, promote the nonblocking
assignments into the active queue. If there are no events at
all, then hand the rosync events to the synch list and
release the event_time object. */
if (ctim->active == 0) {
ctim->active = ctim->nbassign;
ctim->nbassign = 0;
if (ctim->active == 0) {
sched_list = ctim->next;
synch_list = ctim->rosync;
delete ctim;
continue;
}
}
/* Pull the first item off the list. If this is the last
cell in the list, then clear the list. Execute that
event type, and delete it. */
struct event_s*cur = ctim->active->next;
if (cur->next == cur) {
ctim->active = 0;
} else {
ctim->active->next = cur->next;
}
cur->run_run();
#if 0
switch (cur->type) {
case TYPE_THREAD:
count_thread_events += 1;
vthread_run(cur->thr);
break;
case TYPE_PROP:
//printf("Propagate %p\n", cur->fun);
count_prop_events += 1;
cur->funp->propagate(false);
break;
case TYPE_ASSIGN:
count_assign_events += 1;
{ static const unsigned val_table[4] = {St0, St1, StX, HiZ};
functor_set(cur->fun,
cur->val,
val_table[cur->val],
false);
}
break;
case TYPE_GEN:
count_gen_events += 1;
if (cur->obj && cur->obj->run)
cur->obj->run(cur->obj, cur->val);
break;
}
#endif
delete (cur);
}
/* Clean up lingering ReadOnlySync events. It is safe to do
that out here because ReadOnlySync events are not allowed
to create new events. */
for (struct event_s*sync_cur = pull_sync_event()
; sync_cur ; sync_cur = pull_sync_event()) {
sync_cur->run_run();
delete (sync_cur);
}
signals_revert();
// Execute post-simulation callbacks
vpiPostsim();
}
/*
* $Log: schedule.cc,v $
* Revision 1.31 2005/02/12 03:26:14 steve
* Support scheduling vvp_vector8_t objects.
*
* Revision 1.30 2005/01/29 17:53:25 steve
* Use scheduler to initialize constant functor inputs.
*
* Revision 1.29 2004/12/11 02:31:30 steve
* Rework of internals to carry vectors through nexus instead
* of single bits. Make the ivl, tgt-vvp and vvp initial changes
* down this path.
*
* Revision 1.28 2004/10/04 01:10:59 steve
* Clean up spurious trailing white space.
*
* Revision 1.27 2003/09/26 02:15:15 steve
* Slight performance tweaks of scheduler.
*
* Revision 1.26 2003/09/09 00:56:45 steve
* Reimplement scheduler to divide nonblocking assign queue out.
*
* Revision 1.25 2003/04/19 23:32:57 steve
* Add support for cbNextSimTime.
*
* Revision 1.24 2003/02/22 02:52:06 steve
* Check for stopped flag in certain strategic points.
*/