Schedule wait lists of threads as a single event,

to save on events. Also, improve efficiency of
event_s allocation. Add some event statistics to
get an idea where performance is really going.
This commit is contained in:
steve 2003-01-06 23:57:26 +00:00
parent dedae73761
commit aa3a6dba4e
5 changed files with 136 additions and 56 deletions

View File

@ -17,7 +17,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#ifdef HAVE_CVS_IDENT
#ident "$Id: event.cc,v 1.10 2002/08/12 01:35:08 steve Exp $"
#ident "$Id: event.cc,v 1.11 2003/01/06 23:57:26 steve Exp $"
#endif
# include "event.h"
@ -80,7 +80,7 @@ void event_functor_s::set(vvp_ipoint_t ptr, bool, unsigned val, unsigned)
vthread_t tmp = threads;
threads = 0;
vthread_schedule_list(tmp);
if (out) {
functor_set(out, 0, St0, true);
}
@ -189,6 +189,12 @@ void compile_named_event(char*label, char*name)
/*
* $Log: event.cc,v $
* Revision 1.11 2003/01/06 23:57:26 steve
* Schedule wait lists of threads as a single event,
* to save on events. Also, improve efficiency of
* event_s allocation. Add some event statistics to
* get an idea where performance is really going.
*
* Revision 1.10 2002/08/12 01:35:08 steve
* conditional ident string using autoconfig.
*

View File

@ -17,7 +17,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#ifdef HAVE_CVS_IDENT
#ident "$Id: main.cc,v 1.31 2002/09/18 03:34:07 steve Exp $"
#ident "$Id: main.cc,v 1.32 2003/01/06 23:57:26 steve Exp $"
#endif
# include "config.h"
@ -273,6 +273,17 @@ int main(int argc, char*argv[])
print_rusage(stderr, cycles+2, cycles+1);
if (logfile && logfile != stderr)
print_rusage(logfile, cycles+2, cycles+1);
fprintf(stderr, "Event counts: (event pool = %lu)\n",
count_event_pool);
fprintf(stderr, " %8lu thread schedule events\n",
count_thread_events);
fprintf(stderr, " %8lu propagation events\n",
count_prop_events);
fprintf(stderr, " %8lu assign events\n",
count_assign_events);
fprintf(stderr, " %8lu other events\n",
count_gen_events);
}
return 0;
@ -280,6 +291,12 @@ int main(int argc, char*argv[])
/*
* $Log: main.cc,v $
* Revision 1.32 2003/01/06 23:57:26 steve
* Schedule wait lists of threads as a single event,
* to save on events. Also, improve efficiency of
* event_s allocation. Add some event statistics to
* get an idea where performance is really going.
*
* Revision 1.31 2002/09/18 03:34:07 steve
* printf size warning.
*

View File

@ -17,7 +17,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#ifdef HAVE_CVS_IDENT
#ident "$Id: schedule.cc,v 1.20 2002/08/12 01:35:08 steve Exp $"
#ident "$Id: schedule.cc,v 1.21 2003/01/06 23:57:26 steve Exp $"
#endif
# include "schedule.h"
@ -31,6 +31,13 @@
# include <assert.h>
# include <stdio.h>
unsigned long count_assign_events = 0;
unsigned long count_gen_events = 0;
unsigned long count_prop_events = 0;
unsigned long count_thread_events = 0;
unsigned long count_event_pool = 0;
/*
* The event queue is arranged as a skip list, with the simulation
* time the key to the list. The simulation time is stored in each
@ -60,6 +67,9 @@ struct event_s {
struct event_s*next;
struct event_s*last;
void* operator new (size_t);
void operator delete(void*obj, size_t s);
};
const unsigned TYPE_GEN = 0;
const unsigned TYPE_THREAD = 1;
@ -72,27 +82,35 @@ const unsigned TYPE_ASSIGN = 3;
*/
static struct event_s* free_list = 0;
static const unsigned CHUNK_COUNT = 8192 / sizeof(struct event_s);
inline static struct event_s* e_alloc()
inline void* event_s::operator new (size_t size)
{
struct event_s* cur = free_list;
if (!cur)
{
cur = (struct event_s*) malloc(sizeof(struct event_s));
// cur = (struct event_s*) calloc(1, sizeof(struct event_s));
}
else
{
free_list = cur->next;
// memset(cur, 0, sizeof(struct event_s));
}
return cur;
assert(size == sizeof(struct event_s));
struct event_s* cur = free_list;
if (!cur) {
cur = (struct event_s*)
malloc(CHUNK_COUNT * sizeof(struct event_s));
for (unsigned idx = 1 ; idx < CHUNK_COUNT ; idx += 1) {
cur[idx].next = free_list;
free_list = cur + idx;
}
count_event_pool += CHUNK_COUNT;
} else {
free_list = cur->next;
}
return cur;
}
inline static void e_free(struct event_s* cur)
inline void event_s::operator delete(void*obj, size_t size)
{
cur->next = free_list;
free_list = cur;
struct event_s*cur = reinterpret_cast<event_s*>(obj);
cur->next = free_list;
free_list = cur;
}
/*
@ -246,7 +264,7 @@ static struct event_s* pull_sync_event(void)
void schedule_vthread(vthread_t thr, unsigned delay, bool push_flag)
{
struct event_s*cur = e_alloc();
struct event_s*cur = new event_s;
cur->delay = delay;
cur->thr = thr;
@ -267,7 +285,7 @@ void schedule_vthread(vthread_t thr, unsigned delay, bool push_flag)
void functor_s::schedule(unsigned delay)
{
struct event_s*cur = e_alloc();
struct event_s*cur = new event_s;
cur->delay = delay;
cur->funp = this;
@ -280,7 +298,7 @@ void functor_s::schedule(unsigned delay)
void schedule_assign(vvp_ipoint_t fun, unsigned char val, unsigned delay)
{
struct event_s*cur = e_alloc();
struct event_s*cur = new event_s;
cur->delay = delay;
cur->fun = fun;
@ -292,7 +310,7 @@ void schedule_assign(vvp_ipoint_t fun, unsigned char val, unsigned delay)
void schedule_generic(vvp_gen_event_t obj, unsigned char val, unsigned delay)
{
struct event_s*cur = e_alloc();
struct event_s*cur = new event_s;
cur->delay = delay;
cur->obj = obj;
@ -339,7 +357,7 @@ void schedule_simulate(void)
assert(sync_cur->obj->sync_flag);
sync_cur->obj->run(sync_cur->obj, sync_cur->val);
}
e_free(sync_cur);
delete sync_cur;
}
@ -349,17 +367,20 @@ void schedule_simulate(void)
switch (cur->type) {
case TYPE_THREAD:
count_thread_events += 1;
vthread_run(cur->thr);
e_free(cur);
delete cur;
break;
case TYPE_PROP:
//printf("Propagate %p\n", cur->fun);
count_prop_events += 1;
cur->funp->propagate(false);
e_free(cur);
delete(cur);
break;
case TYPE_ASSIGN:
count_assign_events += 1;
switch (cur->val) {
case 0:
functor_set(cur->fun, cur->val, St0, false);
@ -374,14 +395,15 @@ void schedule_simulate(void)
functor_set(cur->fun, cur->val, HiZ, false);
break;
}
e_free(cur);
delete(cur);
break;
case TYPE_GEN:
count_gen_events += 1;
if (cur->obj && cur->obj->run) {
if (cur->obj->sync_flag == false) {
cur->obj->run(cur->obj, cur->val);
e_free(cur);
delete (cur);
} else {
postpone_sync_event(cur);
@ -405,7 +427,7 @@ void schedule_simulate(void)
sync_cur->obj->run(sync_cur->obj, sync_cur->val);
}
e_free(sync_cur);
delete (sync_cur);
}
@ -415,6 +437,12 @@ void schedule_simulate(void)
/*
* $Log: schedule.cc,v $
* Revision 1.21 2003/01/06 23:57:26 steve
* Schedule wait lists of threads as a single event,
* to save on events. Also, improve efficiency of
* event_s allocation. Add some event statistics to
* get an idea where performance is really going.
*
* Revision 1.20 2002/08/12 01:35:08 steve
* conditional ident string using autoconfig.
*

View File

@ -19,7 +19,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#ifdef HAVE_CVS_IDENT
#ident "$Id: schedule.h,v 1.11 2002/08/12 01:35:08 steve Exp $"
#ident "$Id: schedule.h,v 1.12 2003/01/06 23:57:26 steve Exp $"
#endif
# include "vthread.h"
@ -90,9 +90,23 @@ extern vvp_time64_t schedule_simtime(void);
extern void schedule_finish(int rc);
extern bool schedule_finished(void);
/*
* These are event counters for the sake of performance measurements.
*/
extern unsigned long count_assign_events;
extern unsigned long count_gen_events;
extern unsigned long count_prop_events;
extern unsigned long count_thread_events;
extern unsigned long count_event_pool;
/*
* $Log: schedule.h,v $
* Revision 1.12 2003/01/06 23:57:26 steve
* Schedule wait lists of threads as a single event,
* to save on events. Also, improve efficiency of
* event_s allocation. Add some event statistics to
* get an idea where performance is really going.
*
* Revision 1.11 2002/08/12 01:35:08 steve
* conditional ident string using autoconfig.
*

View File

@ -17,7 +17,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#ifdef HAVE_CVS_IDENT
#ident "$Id: vthread.cc,v 1.95 2002/11/22 00:01:50 steve Exp $"
#ident "$Id: vthread.cc,v 1.96 2003/01/06 23:57:26 steve Exp $"
#endif
# include "vthread.h"
@ -290,32 +290,43 @@ static void vthread_reap(vthread_t thr)
void vthread_mark_scheduled(vthread_t thr)
{
assert(thr->is_scheduled == 0);
thr->is_scheduled = 1;
while (thr != 0) {
assert(thr->is_scheduled == 0);
thr->is_scheduled = 1;
thr = thr->wait_next;
}
}
/*
* This function runs a thread by fetching an instruction,
* incrementing the PC, and executing the instruction.
* This function runs each thread by fetching an instruction,
* incrementing the PC, and executing the instruction. The thread may
* be the head of a list, so each thread is run so far as possible.
*/
void vthread_run(vthread_t thr)
{
assert(thr->is_scheduled);
thr->is_scheduled = 0;
while (thr != 0) {
vthread_t tmp = thr->wait_next;
thr->wait_next = 0;
for (;;) {
vvp_code_t cp = codespace_index(thr->pc);
thr->pc += 1;
assert(thr->is_scheduled);
thr->is_scheduled = 0;
assert(cp);
assert(cp->opcode);
for (;;) {
vvp_code_t cp = codespace_index(thr->pc);
thr->pc += 1;
/* Run the opcode implementation. If the execution of
the opcode returns false, then the thread is meant to
be paused, so break out of the loop. */
bool rc = (cp->opcode)(thr, cp);
if (rc == false)
return;
assert(cp);
assert(cp->opcode);
/* Run the opcode implementation. If the execution of
the opcode returns false, then the thread is meant to
be paused, so break out of the loop. */
bool rc = (cp->opcode)(thr, cp);
if (rc == false)
break;
}
thr = tmp;
}
}
@ -326,14 +337,12 @@ void vthread_run(vthread_t thr)
*/
void vthread_schedule_list(vthread_t thr)
{
while (thr) {
vthread_t tmp = thr;
thr = thr->wait_next;
assert(tmp->waiting_for_event);
tmp->waiting_for_event = 0;
tmp->wait_next = 0;
schedule_vthread(tmp, 0);
for (vthread_t cur = thr ; cur ; cur = cur->wait_next) {
assert(cur->waiting_for_event);
cur->waiting_for_event = 0;
}
schedule_vthread(thr, 0);
}
@ -2496,6 +2505,12 @@ bool of_CALL_UFUNC(vthread_t thr, vvp_code_t cp)
/*
* $Log: vthread.cc,v $
* Revision 1.96 2003/01/06 23:57:26 steve
* Schedule wait lists of threads as a single event,
* to save on events. Also, improve efficiency of
* event_s allocation. Add some event statistics to
* get an idea where performance is really going.
*
* Revision 1.95 2002/11/22 00:01:50 steve
* Careful of left operands to shift that are constant.
*