2001-03-11 01:29:38 +01:00
|
|
|
/*
|
2011-02-28 04:20:16 +01:00
|
|
|
* Copyright (c) 2001-2011 Stephen Williams (steve@icarus.com)
|
2001-03-11 01:29:38 +01:00
|
|
|
*
|
|
|
|
|
* This source code is free software; you can redistribute it
|
|
|
|
|
* and/or modify it in source code form under the terms of the GNU
|
|
|
|
|
* General Public License as published by the Free Software
|
|
|
|
|
* Foundation; either version 2 of the License, or (at your option)
|
|
|
|
|
* any later version.
|
|
|
|
|
*
|
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
|
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
# include "schedule.h"
|
|
|
|
|
# include "vthread.h"
|
2010-01-07 01:08:20 +01:00
|
|
|
# include "vpi_priv.h"
|
2008-06-13 04:55:53 +02:00
|
|
|
# include "slab.h"
|
2010-01-07 01:08:20 +01:00
|
|
|
# include "compile.h"
|
2008-06-13 04:55:53 +02:00
|
|
|
# include <new>
|
2010-01-07 06:38:45 +01:00
|
|
|
# include <typeinfo>
|
2010-05-31 22:12:06 +02:00
|
|
|
# include <csignal>
|
|
|
|
|
# include <cstdlib>
|
|
|
|
|
# include <cassert>
|
2001-03-11 01:29:38 +01:00
|
|
|
|
2010-01-07 01:08:20 +01:00
|
|
|
# include <iostream>
|
2003-01-07 00:57:26 +01:00
|
|
|
|
|
|
|
|
/* Performance counters maintained by the scheduler and reported by
   the runtime: how many events of each class have been run (or, for
   time events, created) during the simulation. */
unsigned long count_assign_events = 0;
unsigned long count_gen_events = 0;
unsigned long count_thread_events = 0;
// Count the time events (A time cell created)
unsigned long count_time_events = 0;
|
2008-06-13 04:55:53 +02:00
|
|
|
|
2003-09-09 02:56:45 +02:00
|
|
|
|
2003-01-07 00:57:26 +01:00
|
|
|
|
2001-05-06 01:51:49 +02:00
|
|
|
/*
 * The event_s and event_time_s structures implement the Verilog
 * stratified event queue.
 *
 * The event_time_s objects are one per time step. Each time step in
 * turn contains a list of event_s objects that are the actual events.
 *
 * The event_s objects are base classes for the more specific sort of
 * event.
 */
struct event_s {
	// Link to the next event in the same queue. The queues are
	// circular singly-linked lists, so while an event is queued
	// this is never null (a lone event points at itself).
      struct event_s*next;
      virtual ~event_s() { }
	// Execute the event. Implemented by each derived event type.
      virtual void run_run(void) =0;

	// Write something about the event to stderr
      virtual void single_step_display(void);

	// Fallback new/delete
      static void*operator new (size_t size) { return ::new char[size]; }
      static void operator delete(void*ptr) { ::delete[]( (char*)ptr ); }
};

// Default single-step trace: print the dynamic type name of the event.
void event_s::single_step_display(void)
{
      std::cerr << "event_s: Step into event " << typeid(*this).name() << std::endl;
}

struct event_time_s {
	// Start with every queue empty, and count the new time cell.
      event_time_s() {
	    count_time_events += 1;
	    start = 0;
	    active = 0;
	    nbassign = 0;
	    rwsync = 0;
	    rosync = 0;
	    del_thr = 0;
	    next = NULL;
      }
	// Delay from the previous time cell (the list is delta-encoded).
      vvp_time64_t delay;

	// One circular event list per stratified-queue region. Each
	// pointer holds the TAIL of its list; tail->next is the head.
      struct event_s*start;
      struct event_s*active;
      struct event_s*nbassign;
      struct event_s*rwsync;
      struct event_s*rosync;
      struct event_s*del_thr;

	// Next time cell, further into the future.
      struct event_time_s*next;

	// Allocation comes from a dedicated slab heap (defined below).
      static void* operator new (size_t);
      static void operator delete(void*obj, size_t s);
};
|
|
|
|
|
|
2005-09-20 20:34:01 +02:00
|
|
|
// Out-of-line (empty) virtual destructor anchors the vtable for
// vvp_gen_event_s in this translation unit.
vvp_gen_event_s::~vvp_gen_event_s()
{
}

// Default single-step trace for generic payloads: print the dynamic
// type name of the payload object.
void vvp_gen_event_s::single_step_display(void)
{
      cerr << "vvp_gen_event_s: Step into event " << typeid(*this).name() << endl;
}
|
|
|
|
|
|
2001-05-01 03:09:39 +02:00
|
|
|
/*
 * Derived event types
 */

// Event that resumes a suspended vthread when it is executed.
struct vthread_event_s : public event_s {
	// Thread to resume.
      vthread_t thr;
      void run_run(void);
      void single_step_display(void);

	// Allocated from the dedicated slab heap below.
      static void* operator new(size_t);
      static void operator delete(void*);
};

void vthread_event_s::run_run(void)
{
      count_thread_events += 1;
      vthread_run(thr);
}

void vthread_event_s::single_step_display(void)
{
      struct __vpiScope*scope = vthread_scope(thr);
      cerr << "vthread_event: Resume thread"
	   << " scope=" << scope->vpi_get_str(vpiFullName)
	   << endl;
}

// Slab allocator for thread events, sized so one chunk is ~8KiB.
static const size_t VTHR_CHUNK_COUNT = 8192 / sizeof(struct vthread_event_s);
static slab_t<sizeof(vthread_event_s),VTHR_CHUNK_COUNT> vthread_event_heap;

inline void* vthread_event_s::operator new(size_t size)
{
      assert(size == sizeof(vthread_event_s));
      return vthread_event_heap.alloc_slab();
}

void vthread_event_s::operator delete(void*dptr)
{
      vthread_event_heap.free_slab(dptr);
}
|
|
|
|
|
|
2008-03-21 03:01:20 +01:00
|
|
|
// Event that reaps (deletes) a completed thread. Note that this type
// uses the event_s fallback new/delete rather than a slab heap.
struct del_thr_event_s : public event_s {
	// Thread to delete.
      vthread_t thr;
      void run_run(void);
      void single_step_display(void);
};

void del_thr_event_s::run_run(void)
{
      vthread_delete(thr);
}

void del_thr_event_s::single_step_display(void)
{
      struct __vpiScope*scope = vthread_scope(thr);
      cerr << "del_thr_event: Reap completed thread"
	   << " scope=" << scope->vpi_get_str(vpiFullName) << endl;
}
|
|
|
|
|
|
2004-12-11 03:31:25 +01:00
|
|
|
// Non-blocking (or immediate) assignment of a vvp_vector4_t value to
// a net input.
struct assign_vector4_event_s : public event_s {
	/* The default constructor. */
      assign_vector4_event_s(const vvp_vector4_t&that) : val(that) {
	    base = 0;
	    vwid = 0;
      }
	/* A constructor that makes the val directly. */
      assign_vector4_event_s(const vvp_vector4_t&that, unsigned adr, unsigned wid)
      : val(that,adr,wid) {
	    base = 0;
	    vwid = 0;
      }

	/* Where to do the assign. */
      vvp_net_ptr_t ptr;
	/* Value to assign. */
      vvp_vector4_t val;
	/* Offset of the part into the destination. */
      unsigned base;
	/* Width of the destination vector. vwid==0 means a full
	   assign; vwid>0 means a part-select assign of val into a
	   vwid-wide destination at offset base. */
      unsigned vwid;
      void run_run(void);
      void single_step_display(void);

	// Allocated from the dedicated slab heap below.
      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_vector4_event_s::run_run(void)
{
      count_assign_events += 1;
	// vwid>0 selects the part-assign path; otherwise send the
	// whole vector.
      if (vwid > 0)
	    vvp_send_vec4_pv(ptr, val, base, val.size(), vwid, 0);
      else
	    vvp_send_vec4(ptr, val, 0);
}

void assign_vector4_event_s::single_step_display(void)
{
      cerr << "assign_vector4_event: Propagate val=" << val
	   << ", vwid=" << vwid << ", base=" << base << endl;
}

// This is by far the most common event type, so it gets the largest
// slab chunk (~512KiB).
static const size_t ASSIGN4_CHUNK_COUNT = 524288 / sizeof(struct assign_vector4_event_s);
static slab_t<sizeof(assign_vector4_event_s),ASSIGN4_CHUNK_COUNT> assign4_heap;

inline void* assign_vector4_event_s::operator new(size_t size)
{
      assert(size == sizeof(assign_vector4_event_s));
      return assign4_heap.alloc_slab();
}

void assign_vector4_event_s::operator delete(void*dptr)
{
      assign4_heap.free_slab(dptr);
}

// Report the slab pool size for runtime statistics.
unsigned long count_assign4_pool(void) { return assign4_heap.pool; }
|
|
|
|
|
|
2005-02-12 04:26:14 +01:00
|
|
|
// Assignment of a strength-aware (vector8) value to a net input.
struct assign_vector8_event_s : public event_s {
	/* Where to do the assign. */
      vvp_net_ptr_t ptr;
	/* Value to assign. */
      vvp_vector8_t val;
      void run_run(void);
      void single_step_display(void);

	// Allocated from the dedicated slab heap below.
      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_vector8_event_s::run_run(void)
{
      count_assign_events += 1;
      vvp_send_vec8(ptr, val);
}

void assign_vector8_event_s::single_step_display(void)
{
      cerr << "assign_vector8_event: Propagate val=" << val << endl;
}

static const size_t ASSIGN8_CHUNK_COUNT = 8192 / sizeof(struct assign_vector8_event_s);
static slab_t<sizeof(assign_vector8_event_s),ASSIGN8_CHUNK_COUNT> assign8_heap;

inline void* assign_vector8_event_s::operator new(size_t size)
{
      assert(size == sizeof(assign_vector8_event_s));
      return assign8_heap.alloc_slab();
}

void assign_vector8_event_s::operator delete(void*dptr)
{
      assign8_heap.free_slab(dptr);
}

// Report the slab pool size for runtime statistics.
unsigned long count_assign8_pool() { return assign8_heap.pool; }
|
|
|
|
|
|
2005-07-06 06:29:25 +02:00
|
|
|
// Assignment of a real (double) value to a net input.
struct assign_real_event_s : public event_s {
	/* Where to do the assign. */
      vvp_net_ptr_t ptr;
	/* Value to assign. */
      double val;
      void run_run(void);
      void single_step_display(void);

	// Allocated from the dedicated slab heap below.
      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_real_event_s::run_run(void)
{
      count_assign_events += 1;
      vvp_send_real(ptr, val, 0);
}

void assign_real_event_s::single_step_display(void)
{
      cerr << "assign_real_event: Propagate val=" << val << endl;
}

static const size_t ASSIGNR_CHUNK_COUNT = 8192 / sizeof(struct assign_real_event_s);
static slab_t<sizeof(assign_real_event_s),ASSIGNR_CHUNK_COUNT> assignr_heap;

inline void* assign_real_event_s::operator new (size_t size)
{
      assert(size == sizeof(assign_real_event_s));
      return assignr_heap.alloc_slab();
}

void assign_real_event_s::operator delete(void*dptr)
{
      assignr_heap.free_slab(dptr);
}

// Report the slab pool size for runtime statistics.
unsigned long count_assign_real_pool(void) { return assignr_heap.pool; }
|
|
|
|
|
|
2007-01-16 06:44:14 +01:00
|
|
|
// Non-blocking assignment of a vector4 value into one word of an
// array, with a bit offset within the word.
struct assign_array_word_s : public event_s {
	// Target array.
      vvp_array_t mem;
	// Word address within the array.
      unsigned adr;
	// Value to write.
      vvp_vector4_t val;
	// Bit offset of val within the addressed word.
      unsigned off;
      void run_run(void);

	// Allocated from the dedicated slab heap below.
      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_array_word_s::run_run(void)
{
      count_assign_events += 1;
      array_set_word(mem, adr, off, val);
}

static const size_t ARRAY_W_CHUNK_COUNT = 8192 / sizeof(struct assign_array_word_s);
static slab_t<sizeof(assign_array_word_s),ARRAY_W_CHUNK_COUNT> array_w_heap;

inline void* assign_array_word_s::operator new (size_t size)
{
      assert(size == sizeof(assign_array_word_s));
      return array_w_heap.alloc_slab();
}

void assign_array_word_s::operator delete(void*ptr)
{
      array_w_heap.free_slab(ptr);
}

// Report the slab pool size for runtime statistics.
unsigned long count_assign_aword_pool(void) { return array_w_heap.pool; }
|
|
|
|
|
|
2009-04-07 06:47:21 +02:00
|
|
|
/*
 * This class supports the propagation of vec4 outputs from a
 * vvp_net_t object.
 */
struct propagate_vector4_event_s : public event_s {
	/* The default constructor. */
      propagate_vector4_event_s(const vvp_vector4_t&that) : val(that) {
	    net = NULL;
      }
	/* A constructor that makes the val directly. */
      propagate_vector4_event_s(const vvp_vector4_t&that, unsigned adr, unsigned wid)
      : val(that,adr,wid) {
	    net = NULL;
      }

	/* Propagate the output of this net. */
      vvp_net_t*net;
	/* value to propagate */
      vvp_vector4_t val;
	/* Action. Note: unlike the assign_* events, this does not
	   bump count_assign_events, and allocation uses the event_s
	   fallback new/delete rather than a slab heap. */
      void run_run(void);
      void single_step_display(void);
};

void propagate_vector4_event_s::run_run(void)
{
      net->send_vec4(val, 0);
}

void propagate_vector4_event_s::single_step_display(void)
{
      cerr << "propagate_vector4_event: Propagate val=" << val << endl;
}
|
|
|
|
|
|
2010-04-03 15:54:40 +02:00
|
|
|
/*
 * This class supports the propagation of real outputs from a
 * vvp_net_t object.
 */
struct propagate_real_event_s : public event_s {
	/* Propagate the output of this net. */
      vvp_net_t*net;
	/* value to propagate */
      double val;
	/* Action. Uses the event_s fallback allocator. */
      void run_run(void);
      void single_step_display(void);
};

void propagate_real_event_s::run_run(void)
{
      net->send_real(val, 0);
}

void propagate_real_event_s::single_step_display(void)
{
      cerr << "propagate_real_event: Propagate val=" << val << endl;
}
|
|
|
|
|
|
2009-09-03 05:04:34 +02:00
|
|
|
// Non-blocking assignment of a real (double) value into one word of
// a real-valued array.
struct assign_array_r_word_s : public event_s {
	// Target array.
      vvp_array_t mem;
	// Word address within the array.
      unsigned adr;
	// Value to write.
      double val;
      void run_run(void);

	// Allocated from the dedicated slab heap below.
      static void* operator new(size_t);
      static void operator delete(void*);
};

void assign_array_r_word_s::run_run(void)
{
      count_assign_events += 1;
      array_set_word(mem, adr, val);
}

static const size_t ARRAY_R_W_CHUNK_COUNT = 8192 / sizeof(struct assign_array_r_word_s);
static slab_t<sizeof(assign_array_r_word_s),ARRAY_R_W_CHUNK_COUNT> array_r_w_heap;

inline void* assign_array_r_word_s::operator new(size_t size)
{
      assert(size == sizeof(assign_array_r_word_s));
      return array_r_w_heap.alloc_slab();
}

void assign_array_r_word_s::operator delete(void*ptr)
{
      array_r_w_heap.free_slab(ptr);
}

// Report the slab pool size for runtime statistics.
unsigned long count_assign_arword_pool(void) { return array_r_w_heap.pool; }
|
|
|
|
|
|
2004-12-11 03:31:25 +01:00
|
|
|
struct generic_event_s : public event_s {
|
|
|
|
|
vvp_gen_event_t obj;
|
2009-01-15 05:15:50 +01:00
|
|
|
bool delete_obj_when_done;
|
2004-12-11 03:31:25 +01:00
|
|
|
void run_run(void);
|
2010-01-07 06:40:26 +01:00
|
|
|
void single_step_display(void);
|
2008-06-13 04:55:53 +02:00
|
|
|
|
|
|
|
|
static void* operator new(size_t);
|
|
|
|
|
static void operator delete(void*);
|
2004-12-11 03:31:25 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
void generic_event_s::run_run(void)
|
2001-05-01 03:09:39 +02:00
|
|
|
{
|
2004-12-11 03:31:25 +01:00
|
|
|
count_gen_events += 1;
|
2009-01-15 05:15:50 +01:00
|
|
|
if (obj) {
|
2005-06-02 18:02:11 +02:00
|
|
|
obj->run_run();
|
2009-01-15 05:15:50 +01:00
|
|
|
if (delete_obj_when_done)
|
|
|
|
|
delete obj;
|
|
|
|
|
}
|
2003-09-09 02:56:45 +02:00
|
|
|
}
|
|
|
|
|
|
2010-01-07 06:40:26 +01:00
|
|
|
void generic_event_s::single_step_display(void)
|
|
|
|
|
{
|
|
|
|
|
obj->single_step_display();
|
|
|
|
|
}
|
|
|
|
|
|
2008-06-13 04:55:53 +02:00
|
|
|
static const size_t GENERIC_CHUNK_COUNT = 131072 / sizeof(struct generic_event_s);
|
|
|
|
|
static slab_t<sizeof(generic_event_s),GENERIC_CHUNK_COUNT> generic_event_heap;
|
|
|
|
|
|
|
|
|
|
inline void* generic_event_s::operator new(size_t size)
|
|
|
|
|
{
|
|
|
|
|
assert(size == sizeof(generic_event_s));
|
|
|
|
|
return generic_event_heap.alloc_slab();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void generic_event_s::operator delete(void*ptr)
|
|
|
|
|
{
|
|
|
|
|
generic_event_heap.free_slab(ptr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unsigned long count_gen_pool(void) { return generic_event_heap.pool; }
|
|
|
|
|
|
2004-12-11 03:31:25 +01:00
|
|
|
/*
 ** These event_time_s will be required a lot, at high frequency.
 ** Once allocated, we never free them, but stash them away for next time.
 */

static const size_t TIME_CHUNK_COUNT = 8192 / sizeof(struct event_time_s);
static slab_t<sizeof(event_time_s),TIME_CHUNK_COUNT> event_time_heap;

inline void* event_time_s::operator new (size_t size)
{
      assert(size == sizeof(struct event_time_s));
      void*ptr = event_time_heap.alloc_slab();
      return ptr;
}

inline void event_time_s::operator delete(void*ptr, size_t)
{
      event_time_heap.free_slab(ptr);
}

// Report the slab pool size for runtime statistics.
unsigned long count_time_pool(void) { return event_time_heap.pool; }
|
|
|
|
|
|
2001-03-19 02:55:38 +01:00
|
|
|
/*
 * This is the head of the list of pending events. This includes all
 * the events that have not been executed yet, and reaches into the
 * future.
 */
static struct event_time_s* sched_list = 0;

/*
 * This is a list of initialization events. The setup puts
 * initializations in this list so that they happen before the
 * simulation as a whole starts. This prevents time-0 triggers of
 * certain events.
 */
static struct event_s* schedule_init_list = 0;

/*
 * This is the head of the list of final events.
 */
static struct event_s* schedule_final_list = 0;

/*
 * This flag is true until a VPI task or function finishes the
 * simulation.
 */
static bool schedule_runnable = true;
// Set (by $stop or SIGINT) to pause the simulation.
static bool schedule_stopped_flag  = false;
// Set to make the scheduler trace the next event before running it.
static bool schedule_single_step_flag = false;

// End the simulation ($finish). The int argument is ignored.
void schedule_finish(int)
{
      schedule_runnable = false;
}

// Pause the simulation ($stop). The int argument is ignored.
void schedule_stop(int)
{
      schedule_stopped_flag = true;
}

// Request single-step tracing of the next event. Argument ignored.
void schedule_single_step(int)
{
      schedule_single_step_flag = true;
}

bool schedule_finished(void)
{
      return !schedule_runnable;
}

bool schedule_stopped(void)
{
      return schedule_stopped_flag;
}

/*
 * These are the signal handling infrastructure. The SIGINT signal
 * leads to an implicit $stop.
 */
extern "C" void signals_handler(int)
{
	// Async-signal context: only set a flag; the scheduler polls
	// it between events.
      schedule_stopped_flag = true;
}

static void signals_capture(void)
{
      signal(SIGINT, &signals_handler);
}

static void signals_revert(void)
{
      signal(SIGINT, SIG_DFL);
}
|
|
|
|
|
|
2010-04-08 13:13:35 +02:00
|
|
|
/*
|
|
|
|
|
* This function puts an event on the end of the pre-simulation event queue.
|
|
|
|
|
*/
|
|
|
|
|
static void schedule_init_event(struct event_s*cur)
|
|
|
|
|
{
|
|
|
|
|
if (schedule_init_list == 0) {
|
|
|
|
|
cur->next = cur;
|
|
|
|
|
} else {
|
|
|
|
|
cur->next = schedule_init_list->next;
|
|
|
|
|
schedule_init_list->next = cur;
|
|
|
|
|
}
|
|
|
|
|
schedule_init_list = cur;
|
|
|
|
|
}
|
2003-02-21 04:40:35 +01:00
|
|
|
|
2011-03-30 08:42:26 +02:00
|
|
|
/*
|
|
|
|
|
* This function puts an event on the end of the post-simulation event queue.
|
|
|
|
|
*/
|
|
|
|
|
static void schedule_final_event(struct event_s*cur)
|
|
|
|
|
{
|
|
|
|
|
if (schedule_final_list == 0) {
|
|
|
|
|
cur->next = cur;
|
|
|
|
|
} else {
|
|
|
|
|
cur->next = schedule_final_list->next;
|
|
|
|
|
schedule_final_list->next = cur;
|
|
|
|
|
}
|
|
|
|
|
schedule_final_list = cur;
|
|
|
|
|
}
|
|
|
|
|
|
2001-05-06 01:51:49 +02:00
|
|
|
/*
|
|
|
|
|
* This function does all the hard work of putting an event into the
|
|
|
|
|
* event queue. The event delay is taken from the event structure
|
|
|
|
|
* itself, and the structure is placed in the right place in the
|
|
|
|
|
* queue.
|
|
|
|
|
*/
|
2009-11-01 18:26:09 +01:00
|
|
|
typedef enum event_queue_e { SEQ_START, SEQ_ACTIVE, SEQ_NBASSIGN,
|
|
|
|
|
SEQ_RWSYNC, SEQ_ROSYNC, DEL_THREAD } event_queue_t;
|
2003-09-09 02:56:45 +02:00
|
|
|
|
|
|
|
|
static void schedule_event_(struct event_s*cur, vvp_time64_t delay,
|
|
|
|
|
event_queue_t select_queue)
|
2001-03-11 01:29:38 +01:00
|
|
|
{
|
2003-09-09 02:56:45 +02:00
|
|
|
cur->next = cur;
|
|
|
|
|
|
|
|
|
|
struct event_time_s*ctim = sched_list;
|
2001-03-11 01:29:38 +01:00
|
|
|
|
2001-07-11 04:27:21 +02:00
|
|
|
if (sched_list == 0) {
|
2003-09-09 02:56:45 +02:00
|
|
|
/* Is the event_time list completely empty? Create the
|
|
|
|
|
first event_time object. */
|
|
|
|
|
ctim = new struct event_time_s;
|
|
|
|
|
ctim->delay = delay;
|
|
|
|
|
ctim->next = 0;
|
|
|
|
|
sched_list = ctim;
|
|
|
|
|
|
|
|
|
|
} else if (sched_list->delay > delay) {
|
|
|
|
|
|
|
|
|
|
/* Am I looking for an event before the first event_time?
|
|
|
|
|
If so, create a new event_time to go in front. */
|
|
|
|
|
struct event_time_s*tmp = new struct event_time_s;
|
|
|
|
|
tmp->delay = delay;
|
|
|
|
|
tmp->next = ctim;
|
|
|
|
|
ctim->delay -= delay;
|
|
|
|
|
ctim = tmp;
|
|
|
|
|
sched_list = ctim;
|
|
|
|
|
|
|
|
|
|
} else {
|
|
|
|
|
struct event_time_s*prev = 0;
|
|
|
|
|
|
|
|
|
|
while (ctim->next && (ctim->delay < delay)) {
|
|
|
|
|
delay -= ctim->delay;
|
|
|
|
|
prev = ctim;
|
|
|
|
|
ctim = ctim->next;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ctim->delay > delay) {
|
|
|
|
|
struct event_time_s*tmp = new struct event_time_s;
|
|
|
|
|
tmp->delay = delay;
|
|
|
|
|
tmp->next = prev->next;
|
|
|
|
|
prev->next = tmp;
|
|
|
|
|
|
|
|
|
|
tmp->next->delay -= delay;
|
|
|
|
|
ctim = tmp;
|
|
|
|
|
|
|
|
|
|
} else if (ctim->delay == delay) {
|
|
|
|
|
|
|
|
|
|
} else {
|
|
|
|
|
assert(ctim->next == 0);
|
|
|
|
|
struct event_time_s*tmp = new struct event_time_s;
|
|
|
|
|
tmp->delay = delay - ctim->delay;
|
|
|
|
|
tmp->next = 0;
|
|
|
|
|
ctim->next = tmp;
|
|
|
|
|
|
|
|
|
|
ctim = tmp;
|
|
|
|
|
}
|
2001-03-11 01:29:38 +01:00
|
|
|
}
|
|
|
|
|
|
2003-09-09 02:56:45 +02:00
|
|
|
/* By this point, ctim is the event_time structure that is to
|
|
|
|
|
receive the event at hand. Put the event in to the
|
|
|
|
|
appropriate list for the kind of assign we have at hand. */
|
2001-03-11 01:29:38 +01:00
|
|
|
|
2003-09-09 02:56:45 +02:00
|
|
|
switch (select_queue) {
|
2001-03-11 01:29:38 +01:00
|
|
|
|
2009-11-01 18:26:09 +01:00
|
|
|
case SEQ_START:
|
|
|
|
|
if (ctim->start == 0) {
|
|
|
|
|
ctim->start = cur;
|
|
|
|
|
} else {
|
|
|
|
|
cur->next = ctim->active->next;
|
|
|
|
|
ctim->active->next = cur;
|
|
|
|
|
ctim->active = cur;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
2003-09-09 02:56:45 +02:00
|
|
|
case SEQ_ACTIVE:
|
|
|
|
|
if (ctim->active == 0) {
|
|
|
|
|
ctim->active = cur;
|
|
|
|
|
|
|
|
|
|
} else {
|
|
|
|
|
/* Put the cur event on the end of the active list. */
|
|
|
|
|
cur->next = ctim->active->next;
|
|
|
|
|
ctim->active->next = cur;
|
|
|
|
|
ctim->active = cur;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case SEQ_NBASSIGN:
|
|
|
|
|
if (ctim->nbassign == 0) {
|
|
|
|
|
ctim->nbassign = cur;
|
|
|
|
|
|
|
|
|
|
} else {
|
|
|
|
|
/* Put the cur event on the end of the active list. */
|
|
|
|
|
cur->next = ctim->nbassign->next;
|
|
|
|
|
ctim->nbassign->next = cur;
|
|
|
|
|
ctim->nbassign = cur;
|
2001-03-11 01:29:38 +01:00
|
|
|
}
|
2003-09-09 02:56:45 +02:00
|
|
|
break;
|
2001-03-11 01:29:38 +01:00
|
|
|
|
2006-09-29 03:24:34 +02:00
|
|
|
case SEQ_RWSYNC:
|
|
|
|
|
if (ctim->rwsync == 0) {
|
|
|
|
|
ctim->rwsync = cur;
|
|
|
|
|
|
|
|
|
|
} else {
|
|
|
|
|
/* Put the cur event on the end of the active list. */
|
|
|
|
|
cur->next = ctim->rwsync->next;
|
|
|
|
|
ctim->rwsync->next = cur;
|
|
|
|
|
ctim->rwsync = cur;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
2003-09-09 02:56:45 +02:00
|
|
|
case SEQ_ROSYNC:
|
|
|
|
|
if (ctim->rosync == 0) {
|
|
|
|
|
ctim->rosync = cur;
|
2001-03-11 01:29:38 +01:00
|
|
|
|
|
|
|
|
} else {
|
2003-09-09 02:56:45 +02:00
|
|
|
/* Put the cur event on the end of the active list. */
|
|
|
|
|
cur->next = ctim->rosync->next;
|
|
|
|
|
ctim->rosync->next = cur;
|
|
|
|
|
ctim->rosync = cur;
|
2001-03-11 01:29:38 +01:00
|
|
|
}
|
2003-09-09 02:56:45 +02:00
|
|
|
break;
|
2008-03-21 03:01:20 +01:00
|
|
|
|
|
|
|
|
case DEL_THREAD:
|
|
|
|
|
if (ctim->del_thr == 0) {
|
|
|
|
|
ctim->del_thr = cur;
|
|
|
|
|
|
|
|
|
|
} else {
|
|
|
|
|
/* Put the cur event on the end of the active list. */
|
|
|
|
|
cur->next = ctim->del_thr->next;
|
|
|
|
|
ctim->del_thr->next = cur;
|
|
|
|
|
ctim->del_thr = cur;
|
|
|
|
|
}
|
|
|
|
|
break;
|
2001-03-11 01:29:38 +01:00
|
|
|
}
|
|
|
|
|
}
|
2003-09-09 02:56:45 +02:00
|
|
|
|
2002-05-13 01:44:41 +02:00
|
|
|
// Push an event into the *current* delta cycle, ahead of events that
// are merely scheduled at delay 0.
static void schedule_event_push_(struct event_s*cur)
{
      if ((sched_list == 0) || (sched_list->delay > 0)) {
	      // No current (delay-0) time cell exists yet, so a plain
	      // 0-delay schedule has the same effect.
	    schedule_event_(cur, 0, SEQ_ACTIVE);
	    return;
      }

      struct event_time_s*ctim = sched_list;

      if (ctim->active == 0) {
	    cur->next = cur;
	    ctim->active = cur;
	    return;
      }

	// Insert at the FRONT of the circular active list: the tail
	// pointer stays put, and cur becomes tail->next (the head).
      cur->next = ctim->active->next;
      ctim->active->next = cur;
}

// Schedule a thread to resume after the given delay. If push_flag is
// set and delay is 0, the thread jumps the queue (see below).
void schedule_vthread(vthread_t thr, vvp_time64_t delay, bool push_flag)
{
      struct vthread_event_s*cur = new vthread_event_s;

      cur->thr = thr;
      vthread_mark_scheduled(thr);

      if (push_flag && (delay == 0)) {
	      /* Special case: If the delay is 0, the push_flag means
		 I can push this event in front of everything. This is
		 used by the %fork statement, for example, to perform
		 task calls. */
	    schedule_event_push_(cur);

      } else {
	    schedule_event_(cur, delay, SEQ_ACTIVE);
      }
}

// Schedule a thread to run in the final (post-simulation) phase.
void schedule_final_vthread(vthread_t thr)
{
      struct vthread_event_s*cur = new vthread_event_s;

      cur->thr = thr;
      vthread_mark_scheduled(thr);

      schedule_final_event(cur);
}
|
|
|
|
|
|
2004-12-11 03:31:25 +01:00
|
|
|
// Schedule a non-blocking part-select assign of bit into the
// destination at offset base, destination width vwid.
void schedule_assign_vector(vvp_net_ptr_t ptr,
			    unsigned base, unsigned vwid,
			    const vvp_vector4_t&bit,
			    vvp_time64_t delay)
{
      struct assign_vector4_event_s*cur = new struct assign_vector4_event_s(bit);
      cur->ptr = ptr;
      cur->base = base;
      cur->vwid = vwid;
      schedule_event_(cur, delay, SEQ_NBASSIGN);
}

// Schedule a non-blocking full assign, plucking wid bits at adr out
// of src as the value (avoids building a temporary vector4 first).
void schedule_assign_plucked_vector(vvp_net_ptr_t ptr,
				    vvp_time64_t delay,
				    const vvp_vector4_t&src,
				    unsigned adr, unsigned wid)
{
      struct assign_vector4_event_s*cur
	    = new struct assign_vector4_event_s(src,adr,wid);
      cur->ptr = ptr;
	// vwid==0 selects the full (non part-select) assign path.
      cur->vwid = 0;
      cur->base = 0;
      schedule_event_(cur, delay, SEQ_NBASSIGN);
}

// Schedule propagation of a plucked value out of a net's own output,
// rather than an assign into an input port.
void schedule_propagate_plucked_vector(vvp_net_t*net,
				       vvp_time64_t delay,
				       const vvp_vector4_t&src,
				       unsigned adr, unsigned wid)
{
      struct propagate_vector4_event_s*cur
	    = new struct propagate_vector4_event_s(src,adr,wid);
      cur->net = net;
      schedule_event_(cur, delay, SEQ_NBASSIGN);
}
|
|
|
|
|
|
2007-01-16 06:44:14 +01:00
|
|
|
/*
 * Schedule a non-blocking assignment of a vector value into a word
 * of the given array. The word address, the offset within the word
 * and the value are all carried in the event.
 */
void schedule_assign_array_word(vvp_array_t mem,
				unsigned word_addr,
				unsigned off,
				vvp_vector4_t val,
				vvp_time64_t delay)
{
      assign_array_word_s*ev = new assign_array_word_s;
      ev->mem = mem;
      ev->adr = word_addr;
      ev->off = off;
      ev->val = val;

      schedule_event_(ev, delay, SEQ_NBASSIGN);
}
|
|
|
|
|
|
2009-09-03 05:04:34 +02:00
|
|
|
/*
 * Schedule a non-blocking assignment of a real value into a word of
 * the given array.
 */
void schedule_assign_array_word(vvp_array_t mem,
				unsigned word_addr,
				double val,
				vvp_time64_t delay)
{
      assign_array_r_word_s*ev = new assign_array_r_word_s;
      ev->mem = mem;
      ev->adr = word_addr;
      ev->val = val;

      schedule_event_(ev, delay, SEQ_NBASSIGN);
}
|
|
|
|
|
|
2008-06-16 22:40:20 +02:00
|
|
|
/*
 * Schedule an immediate (zero-delay, active queue) set of a vector4
 * value. No part select: base and vwid are cleared.
 */
void schedule_set_vector(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      assign_vector4_event_s*ev = new assign_vector4_event_s(bit);
      ev->ptr  = ptr;
      ev->base = 0;
      ev->vwid = 0;
      schedule_event_(ev, 0, SEQ_ACTIVE);
}
|
|
|
|
|
|
2005-02-12 04:26:14 +01:00
|
|
|
/*
 * Schedule an immediate (zero-delay, active queue) set of a
 * strength-aware vector8 value.
 */
void schedule_set_vector(vvp_net_ptr_t ptr, vvp_vector8_t bit)
{
      assign_vector8_event_s*ev = new assign_vector8_event_s;
      ev->ptr = ptr;
      ev->val = bit;
      schedule_event_(ev, 0, SEQ_ACTIVE);
}
|
|
|
|
|
|
2005-07-06 06:29:25 +02:00
|
|
|
/*
 * Schedule an immediate (zero-delay, active queue) set of a real
 * value.
 */
void schedule_set_vector(vvp_net_ptr_t ptr, double bit)
{
      assign_real_event_s*ev = new assign_real_event_s;
      ev->ptr = ptr;
      ev->val = bit;
      schedule_event_(ev, 0, SEQ_ACTIVE);
}
|
|
|
|
|
|
2007-07-12 06:31:09 +02:00
|
|
|
/*
 * Put a vector4 set onto the initialization queue, to be run before
 * simulation proper starts. No part select: base and vwid are
 * cleared.
 */
void schedule_init_vector(vvp_net_ptr_t ptr, vvp_vector4_t bit)
{
      assign_vector4_event_s*ev = new assign_vector4_event_s(bit);
      ev->ptr  = ptr;
      ev->base = 0;
      ev->vwid = 0;
      schedule_init_event(ev);
}
|
|
|
|
|
|
2009-08-29 17:53:44 +02:00
|
|
|
/*
 * Put a vector8 (strength-aware) set onto the initialization queue,
 * to be run before simulation proper starts.
 */
void schedule_init_vector(vvp_net_ptr_t ptr, vvp_vector8_t bit)
{
      assign_vector8_event_s*ev = new assign_vector8_event_s;
      ev->ptr = ptr;
      ev->val = bit;
      schedule_init_event(ev);
}
|
|
|
|
|
|
2007-07-12 06:31:09 +02:00
|
|
|
/*
 * Put a real-value set onto the initialization queue, to be run
 * before simulation proper starts.
 */
void schedule_init_vector(vvp_net_ptr_t ptr, double bit)
{
      assign_real_event_s*ev = new assign_real_event_s;
      ev->ptr = ptr;
      ev->val = bit;
      schedule_init_event(ev);
}
|
|
|
|
|
|
2010-04-03 15:54:40 +02:00
|
|
|
/*
 * Put a vector4 propagation onto the initialization queue for the
 * given net.
 */
void schedule_init_propagate(vvp_net_t*net, vvp_vector4_t bit)
{
      propagate_vector4_event_s*ev = new propagate_vector4_event_s(bit);
      ev->net = net;
      schedule_init_event(ev);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Put a real-value propagation onto the initialization queue for the
 * given net.
 */
void schedule_init_propagate(vvp_net_t*net, double bit)
{
      propagate_real_event_s*ev = new propagate_real_event_s;
      ev->net = net;
      ev->val = bit;
      schedule_init_event(ev);
}
|
|
|
|
|
|
2008-03-21 03:01:20 +01:00
|
|
|
/*
 * Queue the thread for deletion. The event goes on the DEL_THREAD
 * queue of the current time so the thread is reclaimed only after
 * the time step's other events have had their chance to run.
 */
void schedule_del_thr(vthread_t thr)
{
      del_thr_event_s*ev = new del_thr_event_s;
      ev->thr = thr;

      schedule_event_(ev, 0, DEL_THREAD);
}
|
|
|
|
|
|
2006-09-29 03:24:34 +02:00
|
|
|
/*
 * Schedule a generic callback event. With sync_flag false the event
 * goes into the active queue for the target time; with sync_flag
 * true it goes into the read-write sync queue, or the read-only sync
 * queue when ro_flag is also set. The delete_when_done flag is
 * recorded in the event for the event runner to honor.
 */
void schedule_generic(vvp_gen_event_t obj, vvp_time64_t delay,
		      bool sync_flag, bool ro_flag, bool delete_when_done)
{
      generic_event_s*ev = new generic_event_s;
      ev->obj = obj;
      ev->delete_obj_when_done = delete_when_done;

      if (sync_flag)
	    schedule_event_(ev, delay, ro_flag? SEQ_ROSYNC : SEQ_RWSYNC);
      else
	    schedule_event_(ev, delay, SEQ_ACTIVE);

	/* Sync events hold up pending thread deletion until the
	   sync callbacks have had their chance to run. */
      if (sync_flag)
	    vthread_delay_delete();
}
|
|
|
|
|
|
2010-04-08 13:13:35 +02:00
|
|
|
// Set by schedule_simulate(): false while the initialization events
// and StartOfSim callbacks are being processed, true once the main
// event loop is about to run. schedule_functor() uses this to choose
// between the init queue and the active event queue.
static bool sim_started;
|
|
|
|
|
|
|
|
|
|
/*
 * Schedule a functor callback. Before the simulation has started the
 * event is routed to the initialization queue; afterwards it is a
 * normal zero-delay active event. The event object is never deleted
 * on the caller's behalf.
 */
void schedule_functor(vvp_gen_event_t obj)
{
      generic_event_s*ev = new generic_event_s;
      ev->obj = obj;
      ev->delete_obj_when_done = false;

      if (sim_started) {
	    schedule_event_(ev, 0, SEQ_ACTIVE);
      } else {
	    schedule_init_event(ev);
      }
}
|
|
|
|
|
|
2009-11-01 18:26:09 +01:00
|
|
|
/*
 * Schedule a callback on the SEQ_START queue, delay ticks in the
 * future. SEQ_START events are run when the scheduler first advances
 * to that time (cbAtStartOfSimTime processing). The event object is
 * never deleted on the caller's behalf.
 */
void schedule_at_start_of_simtime(vvp_gen_event_t obj, vvp_time64_t delay)
{
      generic_event_s*ev = new generic_event_s;
      ev->obj = obj;
      ev->delete_obj_when_done = false;

      schedule_event_(ev, delay, SEQ_START);
}
|
|
|
|
|
|
2002-04-20 06:33:23 +02:00
|
|
|
// The current simulation time. Advanced by schedule_simulate() as it
// consumes delayed event_time_s cells from the head of the queue.
static vvp_time64_t schedule_time;

// Return the current simulation time.
vvp_time64_t schedule_simtime(void)
{ return schedule_time; }
|
2001-03-11 01:29:38 +01:00
|
|
|
|
2007-08-24 03:24:51 +02:00
|
|
|
extern void vpiEndOfCompile();
|
|
|
|
|
extern void vpiStartOfSim();
|
2002-05-04 05:03:17 +02:00
|
|
|
extern void vpiPostsim();
|
2003-04-20 01:32:57 +02:00
|
|
|
extern void vpiNextSimTime(void);
|
2002-05-04 05:03:17 +02:00
|
|
|
|
2006-09-29 18:55:04 +02:00
|
|
|
/*
|
|
|
|
|
* The scheduler uses this function to drain the rosync events of the
|
|
|
|
|
* current time. The ctim object is still in the event queue, because
|
|
|
|
|
* it is legal for a rosync callback to create other rosync
|
|
|
|
|
* callbacks. It is *not* legal for them to create any other kinds of
|
|
|
|
|
* events, and that is why the rosync is treated specially.
|
2008-03-21 03:01:20 +01:00
|
|
|
*
|
|
|
|
|
* Once all the rosync callbacks are done we can safely delete any
|
|
|
|
|
* threads that finished during this time step.
|
2006-09-29 18:55:04 +02:00
|
|
|
*/
|
|
|
|
|
static void run_rosync(struct event_time_s*ctim)
{
	/* Drain the read-only sync queue. The queue is a circular
	   list whose handle points at the last cell, so the next
	   event to run is rosync->next. The head is re-tested every
	   iteration because a rosync callback may append new rosync
	   events while we run. */
      while (ctim->rosync) {
	    struct event_s*cur = ctim->rosync->next;
	    if (cur->next == cur) {
		    // cur was the only cell left, so the queue is empty.
		  ctim->rosync = 0;
	    } else {
		    // Unlink cur from the front of the circular list.
		  ctim->rosync->next = cur->next;
	    }

	    cur->run_run();
	    delete cur;
      }

	/* Now that the rosync callbacks are done, it is safe to run
	   the deferred thread-deletion events of this time step. */
      while (ctim->del_thr) {
	    struct event_s*cur = ctim->del_thr->next;
	    if (cur->next == cur) {
		  ctim->del_thr = 0;
	    } else {
		  ctim->del_thr->next = cur->next;
	    }

	    cur->run_run();
	    delete cur;
      }

	/* Read-only callbacks must not create read-write work. If any
	   showed up in the active/nbassign/rwsync queues, report the
	   scheduler error. */
      if (ctim->active || ctim->nbassign || ctim->rwsync) {
	    cerr << "SCHEDULER ERROR: read-only sync events "
		 << "created RW events!" << endl;
      }
}
|
|
|
|
|
|
2001-03-11 01:29:38 +01:00
|
|
|
/*
 * The main scheduler loop. Run the EndOfCompile callbacks, drain the
 * initialization queue, run the StartOfSim callbacks, then consume
 * the time-ordered event queue (sched_list) until it is empty or the
 * simulation is stopped. Within a time step the queues are rotated
 * in stratified order: active, then non-blocking assign, then
 * read-write sync, and finally (via run_rosync) read-only sync and
 * thread deletion. After the loop, final events and the Postsim
 * callbacks are run.
 */
void schedule_simulate(void)
{
      sim_started = false;

      schedule_time = 0;

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...execute EndOfCompile callbacks\n");
      }

      // Execute end of compile callbacks
      vpiEndOfCompile();

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...propagate initialization events\n");
      }

	// Execute initialization events. The list is circular with
	// the handle pointing at the last cell, so pop from
	// schedule_init_list->next each time.
      while (schedule_init_list) {
	    struct event_s*cur = schedule_init_list->next;
	    if (cur->next == cur) {
		  schedule_init_list = 0;
	    } else {
		  schedule_init_list->next = cur->next;
	    }
	    cur->run_run();
	    delete cur;
      }

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...execute StartOfSim callbacks\n");
      }

      // Execute start of simulation callbacks
      vpiStartOfSim();

	// From here on, schedule_functor() events go to the active
	// queue instead of the (now drained) init queue.
      sim_started = true;

      signals_capture();

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...run scheduler\n");
      }

      if (schedule_runnable) while (sched_list) {

	      // $stop (or the debugger) requested a pause; run the
	      // interactive stop handler before continuing.
	    if (schedule_stopped_flag) {
		  schedule_stopped_flag = false;
		  stop_handler(0);
		    // You can finish from the debugger without a time change.
		  if (!schedule_runnable) break;
		  continue;
	    }

	      /* ctim is the current time step. */
	    struct event_time_s* ctim = sched_list;

	      /* If the time is advancing, then first run the
		 postponed sync events. Run them all. */
	    if (ctim->delay > 0) {

		  if (!schedule_runnable) break;
		  schedule_time += ctim->delay;
		    /* When the design is being traced (we are emitting
		     * file/line information) also print any time changes. */
		  if (show_file_line) {
			cerr << "Advancing to simulation time: "
			     << schedule_time << endl;
		  }
		    // Mark the step as current so this branch runs
		    // only once per time value.
		  ctim->delay = 0;

		  vpiNextSimTime();

		    // Process the cbAtStartOfSimTime callbacks.
		  while (ctim->start) {
			struct event_s*cur = ctim->start->next;
			if (cur->next == cur) {
			      ctim->start = 0;
			} else {
			      ctim->start->next = cur->next;
			}
			cur->run_run();
			delete (cur);
		  }
	    }


	      /* If there are no more active events, advance the event
		 queues. If there are not events at all, then release
		 the event_time object. */
	    if (ctim->active == 0) {
		    // Promote non-blocking assigns to the active queue.
		  ctim->active = ctim->nbassign;
		  ctim->nbassign = 0;

		  if (ctim->active == 0) {
			  // Then promote the read-write sync events.
			ctim->active = ctim->rwsync;
			ctim->rwsync = 0;

			  /* If out of rw events, then run the rosync
			     events and delete this time step. This also
			     deletes threads as needed. */
			if (ctim->active == 0) {
			      run_rosync(ctim);
			      sched_list = ctim->next;
			      delete ctim;
			      continue;
			}
		  }
	    }

	      /* Pull the first item off the list. If this is the last
		 cell in the list, then clear the list. Execute that
		 event type, and delete it. */
	    struct event_s*cur = ctim->active->next;
	    if (cur->next == cur) {
		  ctim->active = 0;
	    } else {
		  ctim->active->next = cur->next;
	    }

	      // Single-step mode: show the event about to run, then
	      // arrange for the stop handler to regain control.
	    if (schedule_single_step_flag) {
		  cur->single_step_display();
		  schedule_stopped_flag = true;
		  schedule_single_step_flag = false;
	    }

	    cur->run_run();

	    delete (cur);
      }

      // Execute final events.
      while (schedule_final_list) {
	    struct event_s*cur = schedule_final_list->next;
	    if (cur->next == cur) {
		  schedule_final_list = 0;
	    } else {
		  schedule_final_list->next = cur->next;
	    }
	    cur->run_run();
	    delete cur;
      }

      signals_revert();

      if (verbose_flag) {
	    vpi_mcd_printf(1, " ...execute Postsim callbacks\n");
      }

      // Execute post-simulation callbacks
      vpiPostsim();
}
|