/*
 * Copyright (c) 2001-2009 Stephen Williams (steve@icarus.com)
 *
 * This source code is free software; you can redistribute it
 * and/or modify it in source code form under the terms of the GNU
 * General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

# include  "config.h"
# include  "vthread.h"
# include  "codes.h"
# include  "schedule.h"
# include  "ufunc.h"
# include  "event.h"
# include  "vpi_priv.h"
# include  "vvp_net_sig.h"
#ifdef CHECK_WITH_VALGRIND
# include  "vvp_cleanup.h"
#endif
#ifdef HAVE_MALLOC_H
# include  <malloc.h>
#endif
# include  <typeinfo>
# include  <limits.h>
# include  <stdlib.h>
# include  <string.h>
# include  <math.h>
# include  <assert.h>
# include  <iostream>
#include  <stdio.h>

/* This is the size of an unsigned long in bits. This is just a
   convenience macro. */
# define CPU_WORD_BITS (8*sizeof(unsigned long))
# define TOP_BIT (1UL << (CPU_WORD_BITS-1))

/*
 * This vthread_s structure describes all there is to know about a
 * thread, including its program counter, all the private bits it
 * holds, and its place in other lists.
 *
 *
 * ** Notes On The Interactions of %fork/%join/%end:
 *
 * The %fork instruction creates a new thread and pushes that onto the
 * stack of children for the thread. This new thread, then, becomes
 * the new direct descendant of the thread. This new thread is
 * therefore also the first thread to be reaped when the parent does a
 * %join.
 *
 * It is a programming error for a thread that created threads to not
 * %join as many as it created before it %ends. The linear stack for
 * tracking thread relationships will create a mess otherwise. For
 * example, if A creates B then C, the stack is:
 *
 *       A --> C --> B
 *
 * If C then %forks X, the stack is:
 *
 *       A --> C --> X --> B
 *
 * If C %ends without a join, then the stack is:
 *
 *       A --> C(zombie) --> X --> B
 *
 * If A then executes 2 %joins, it will reap C and X (when it ends)
 * leaving B in purgatory. What's worse, A will block on the schedules
 * of X and C instead of C and B, possibly creating incorrect timing.
 *
 * The schedule_parent_on_end flag is used by threads to tell their
 * children that they are waiting for it to end. It is set by a %join
 * instruction if the child is not already done. The thread that
 * executes a %join instruction sets the flag in its child.
 *
 * The i_have_ended flag, on the other hand, is used by threads to
 * tell their parents that they are already dead. A thread that
 * executes %end will set its own i_have_ended flag and let its parent
 * reap it when the parent does the %join. If a thread has its
 * schedule_parent_on_end flag set already when it %ends, then it
 * reaps itself and simply schedules its parent. If a child has its
 * i_have_ended flag set when a thread executes %join, then it is free
 * to reap the child immediately.
 */
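/*
 * As a hypothetical sketch of the rule above, a thread that forks
 * two children must execute two %join instructions before its %end
 * (the labels here are made up for illustration, not real code):
 *
 *       %fork t_child0, my_scope;
 *       %fork t_child1, my_scope;
 *        ...
 *       %join;
 *       %join;
 *       %end;
 */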
struct vthread_s {
	/* This is the program counter. */
      vvp_code_t pc;
	/* These hold the private thread bits. */
      vvp_vector4_t bits4;
	/* These are the word registers. */
      union {
	    int64_t  w_int;
	    uint64_t w_uint;
	    double   w_real;
      } words[16];

	/* My parent sets this when it wants me to wake it up. */
      unsigned schedule_parent_on_end :1;
      unsigned i_have_ended      :1;
      unsigned waiting_for_event :1;
      unsigned is_scheduled      :1;
      unsigned delay_delete      :1;
      unsigned fork_count        :8;
	/* This points to the sole child of the thread. */
      struct vthread_s*child;
	/* This points to my parent, if I have one. */
      struct vthread_s*parent;
	/* This points to the containing scope. */
      struct __vpiScope*parent_scope;
	/* This is used for keeping wait queues. */
      struct vthread_s*wait_next;
	/* These are used to access automatically allocated items. */
      vvp_context_t wt_context, rd_context;
	/* These are used to pass non-blocking event control information. */
      vvp_net_t*event;
      uint64_t ecount;
};

struct __vpiScope* vthread_scope(struct vthread_s*thr)
{
      return thr->parent_scope;
}

struct vthread_s*running_thread = 0;

// this table maps the thread special index bit addresses
// to vvp_bit4_t bit values.
static vvp_bit4_t thr_index_to_bit4[4] = { BIT4_0, BIT4_1, BIT4_X, BIT4_Z };

static inline void thr_check_addr(struct vthread_s*thr, unsigned addr)
{
      if (thr->bits4.size() <= addr)
	    thr->bits4.resize(addr+1);
}

static inline vvp_bit4_t thr_get_bit(struct vthread_s*thr, unsigned addr)
{
      assert(addr < thr->bits4.size());
      return thr->bits4.value(addr);
}

static inline void thr_put_bit(struct vthread_s*thr,
			       unsigned addr, vvp_bit4_t val)
{
      thr_check_addr(thr, addr);
      thr->bits4.set_bit(addr, val);
}

// REMOVE ME
static inline void thr_clr_bit_(struct vthread_s*thr, unsigned addr)
{
      thr->bits4.set_bit(addr, BIT4_0);
}

vvp_bit4_t vthread_get_bit(struct vthread_s*thr, unsigned addr)
{
      return thr_get_bit(thr, addr);
}

void vthread_put_bit(struct vthread_s*thr, unsigned addr, vvp_bit4_t bit)
{
      thr_put_bit(thr, addr, bit);
}

double vthread_get_real(struct vthread_s*thr, unsigned addr)
{
      return thr->words[addr].w_real;
}

void vthread_put_real(struct vthread_s*thr, unsigned addr, double val)
{
      thr->words[addr].w_real = val;
}

template <class T> T coerce_to_width(const T&that, unsigned width)
{
      if (that.size() == width)
	    return that;

      assert(that.size() > width);
      T res (width);
      for (unsigned idx = 0 ;  idx < width ;  idx += 1)
	    res.set_bit(idx, that.value(idx));

      return res;
}

static unsigned long* vector_to_array(struct vthread_s*thr,
				      unsigned addr, unsigned wid)
{
      if (addr == 0) {
	    unsigned awid = (wid + CPU_WORD_BITS - 1) / (CPU_WORD_BITS);
	    unsigned long*val = new unsigned long[awid];
	    for (unsigned idx = 0 ;  idx < awid ;  idx += 1)
		  val[idx] = 0;
	    return val;
      }
      if (addr == 1) {
	    unsigned awid = (wid + CPU_WORD_BITS - 1) / (CPU_WORD_BITS);
	    unsigned long*val = new unsigned long[awid];
	    for (unsigned idx = 0 ;  idx < awid ;  idx += 1)
		  val[idx] = -1UL;

	    wid -= (awid-1) * CPU_WORD_BITS;
	    if (wid < CPU_WORD_BITS)
		  val[awid-1] &= (-1UL) >> (CPU_WORD_BITS-wid);

	    return val;
      }

      if (addr < 4)
	    return 0;

      return thr->bits4.subarray(addr, wid);
}

/*
 * This function gets from the thread a vector of bits starting from
 * the addressed location and for the specified width.
 */
static vvp_vector4_t vthread_bits_to_vector(struct vthread_s*thr,
					    unsigned bit, unsigned wid)
{
	/* Make a vector of the desired width. */

      if (bit >= 4) {
	    return vvp_vector4_t(thr->bits4, bit, wid);
      } else {
	    return vvp_vector4_t(wid, thr_index_to_bit4[bit]);
      }
}
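/*
 * Usage sketch (illustrative only): the four special bit addresses
 * read as the constant bits 0, 1, x and z, so
 *
 *       vthread_bits_to_vector(thr, 1, 8)  ==> 8'b11111111
 *       vthread_bits_to_vector(thr, 2, 8)  ==> 8'bxxxxxxxx
 *
 * while any address >= 4 reads a wid-bit slice of the thread's
 * private bits4 vector starting at that address.
 */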
/*
 * Some of the instructions do wide addition to arrays of long. They
 * use this add_with_carry function to help.
 */
static inline unsigned long add_with_carry(unsigned long a, unsigned long b,
					   unsigned long&carry)
{
      unsigned long tmp = b + carry;
      unsigned long sum = a + tmp;
      carry = 0;
      if (tmp < b)
	    carry = 1;
      if (sum < tmp)
	    carry = 1;
      if (sum < a)
	    carry = 1;
      return sum;
}

static unsigned long multiply_with_carry(unsigned long a, unsigned long b,
					 unsigned long&carry)
{
      const unsigned long mask = (1UL << (CPU_WORD_BITS/2)) - 1;
      unsigned long a0 = a & mask;
      unsigned long a1 = (a >> (CPU_WORD_BITS/2)) & mask;
      unsigned long b0 = b & mask;
      unsigned long b1 = (b >> (CPU_WORD_BITS/2)) & mask;

      unsigned long tmp = a0 * b0;
      unsigned long r00 = tmp & mask;
      unsigned long c00 = (tmp >> (CPU_WORD_BITS/2)) & mask;

      tmp = a0 * b1;
      unsigned long r01 = tmp & mask;
      unsigned long c01 = (tmp >> (CPU_WORD_BITS/2)) & mask;

      tmp = a1 * b0;
      unsigned long r10 = tmp & mask;
      unsigned long c10 = (tmp >> (CPU_WORD_BITS/2)) & mask;

      tmp = a1 * b1;
      unsigned long r11 = tmp & mask;
      unsigned long c11 = (tmp >> (CPU_WORD_BITS/2)) & mask;

      unsigned long r1 = c00 + r01 + r10;
      unsigned long r2 = (r1 >> (CPU_WORD_BITS/2)) & mask;
      r1 &= mask;
      r2 += c01 + c10 + r11;
      unsigned long r3 = (r2 >> (CPU_WORD_BITS/2)) & mask;
      r2 &= mask;
      r3 += c11;
      r3 &= mask;

      carry = (r3 << (CPU_WORD_BITS/2)) + r2;
      return (r1 << (CPU_WORD_BITS/2)) + r00;
}
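/*
 * Worked example (illustrative; pretend CPU_WORD_BITS is 8, so the
 * half-word mask is 0x0f): multiplying a = 0x25 by b = 0x31 splits
 * the operands into a1:a0 = 2:5 and b1:b0 = 3:1. The partial
 * products are
 *
 *       a0*b0 = 0x05,  a0*b1 = 0x0f,  a1*b0 = 0x02,  a1*b1 = 0x06
 *
 * which recombine into the double-width product 0x0715: the function
 * returns the low word 0x15 and leaves the high word 0x07 in carry.
 */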
static void multiply_array_imm(unsigned long*res, unsigned long*val,
			       unsigned words, unsigned long imm)
{
      for (unsigned idx = 0 ;  idx < words ;  idx += 1)
	    res[idx] = 0;

      for (unsigned mul_idx = 0 ;  mul_idx < words ;  mul_idx += 1) {
	    unsigned long sum;
	    unsigned long tmp = multiply_with_carry(val[mul_idx], imm, sum);

	    unsigned long carry = 0;
	    res[mul_idx] = add_with_carry(res[mul_idx], tmp, carry);
	    for (unsigned add_idx = mul_idx+1 ;  add_idx < words ;  add_idx += 1) {
		  res[add_idx] = add_with_carry(res[add_idx], sum, carry);
		  sum = 0;
	    }
      }
}

/*
 * Allocate a context for use by a child thread. By preference, use
 * the last freed context. If none is available, create a new one. Add
 * it to the list of live contexts in that scope.
 */
static vvp_context_t vthread_alloc_context(struct __vpiScope*scope)
{
      assert(scope->is_automatic);

      vvp_context_t context = scope->free_contexts;
      if (context) {
	    scope->free_contexts = vvp_get_next_context(context);
	    for (unsigned idx = 0 ;  idx < scope->nitem ;  idx += 1) {
		  scope->item[idx]->reset_instance(context);
	    }
      } else {
	    context = vvp_allocate_context(scope->nitem);
	    for (unsigned idx = 0 ;  idx < scope->nitem ;  idx += 1) {
		  scope->item[idx]->alloc_instance(context);
	    }
      }

      vvp_set_next_context(context, scope->live_contexts);
      scope->live_contexts = context;

      return context;
}

/*
 * Free a context previously allocated to a child thread by pushing it
 * onto the freed context stack. Remove it from the list of live contexts
 * in that scope.
 */
static void vthread_free_context(vvp_context_t context, struct __vpiScope*scope)
{
      assert(scope->is_automatic);
      assert(context);

      if (context == scope->live_contexts) {
	    scope->live_contexts = vvp_get_next_context(context);
      } else {
	    vvp_context_t tmp = scope->live_contexts;
	    while (context != vvp_get_next_context(tmp)) {
		  assert(tmp);
		  tmp = vvp_get_next_context(tmp);
	    }
	    vvp_set_next_context(tmp, vvp_get_next_context(context));
      }

      vvp_set_next_context(context, scope->free_contexts);
      scope->free_contexts = context;
}

#ifdef CHECK_WITH_VALGRIND
void contexts_delete(struct __vpiScope*scope)
{
      vvp_context_t context = scope->free_contexts;

      while (context) {
	    scope->free_contexts = vvp_get_next_context(context);
	    for (unsigned idx = 0 ;  idx < scope->nitem ;  idx += 1) {
		  scope->item[idx]->free_instance(context);
	    }
	    free(context);
	    context = scope->free_contexts;
      }
      free(scope->item);
}
#endif

/*
 * Create a new thread with the given start address.
 */
vthread_t vthread_new(vvp_code_t pc, struct __vpiScope*scope)
{
      vthread_t thr = new struct vthread_s;
      thr->pc     = pc;
      thr->bits4  = vvp_vector4_t(32);
      thr->child  = 0;
      thr->parent = 0;
      thr->parent_scope = scope;
      thr->wait_next = 0;
      thr->wt_context = 0;
      thr->rd_context = 0;

      thr->schedule_parent_on_end = 0;
      thr->is_scheduled = 0;
      thr->i_have_ended = 0;
      thr->delay_delete = 0;
      thr->waiting_for_event = 0;
      thr->fork_count   = 0;
      thr->event  = 0;
      thr->ecount = 0;

      thr_put_bit(thr, 0, BIT4_0);
      thr_put_bit(thr, 1, BIT4_1);
      thr_put_bit(thr, 2, BIT4_X);
      thr_put_bit(thr, 3, BIT4_Z);

      scope->threads.insert(thr);
      return thr;
}

#ifdef CHECK_WITH_VALGRIND
#if 0
/*
 * These are not currently correct. If you use them you will get
 * double delete messages. There is still a leak related to a
 * waiting event that needs to be investigated.
 */
static void wait_next_delete(vthread_t base)
{
      while (base) {
	    vthread_t tmp = base->wait_next;
	    delete base;
	    base = tmp;
	    if (base->waiting_for_event == 0) break;
      }
}

static void child_delete(vthread_t base)
{
      while (base) {
	    vthread_t tmp = base->child;
	    delete base;
	    base = tmp;
      }
}
#endif

void vthreads_delete(struct __vpiScope*scope)
{
      for (std::set<vthread_t>::iterator cur = scope->threads.begin()
	   ; cur != scope->threads.end() ;  cur ++) {
	    delete *cur;
      }
      scope->threads.clear();
}
#endif

/*
 * Reaping pulls the thread out of the stack of threads. If I have a
 * child, then hand it over to my parent.
 */
static void vthread_reap(vthread_t thr)
{
      if (thr->child) {
	    assert(thr->child->parent == thr);
	    thr->child->parent = thr->parent;
      }
      if (thr->parent) {
	    assert(thr->parent->child == thr);
	    thr->parent->child = thr->child;
      }

      thr->child = 0;
      thr->parent = 0;

	// Remove myself from the containing scope.
      thr->parent_scope->threads.erase(thr);

      thr->pc = codespace_null();

	/* If this thread is not scheduled, then it is safe to delete
	   it now. Otherwise, let the schedule event (which will
	   execute the thread at of_ZOMBIE) delete the object. */
      if ((thr->is_scheduled == 0) && (thr->waiting_for_event == 0)) {
	    assert(thr->fork_count == 0);
	    assert(thr->wait_next == 0);
	    if (thr->delay_delete)
		  schedule_del_thr(thr);
	    else
		  vthread_delete(thr);
      }
}
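/*
 * A small example of the hand-over (illustrative): if the thread
 * stack is A --> C --> B and C is reaped, C's child B is handed to
 * C's parent, leaving A --> B.
 */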
void vthread_delete(vthread_t thr)
{
      thr->bits4 = vvp_vector4_t();
      delete thr;
}

void vthread_mark_scheduled(vthread_t thr)
{
      while (thr != 0) {
	    assert(thr->is_scheduled == 0);
	    thr->is_scheduled = 1;
	    thr = thr->wait_next;
      }
}

void vthread_delay_delete()
{
      if (running_thread)
	    running_thread->delay_delete = 1;
}

/*
 * This function runs each thread by fetching an instruction,
 * incrementing the PC, and executing the instruction. The thread may
 * be the head of a list, so each thread is run so far as possible.
 */
void vthread_run(vthread_t thr)
{
      while (thr != 0) {
	    vthread_t tmp = thr->wait_next;
	    thr->wait_next = 0;

	    assert(thr->is_scheduled);
	    thr->is_scheduled = 0;

	    running_thread = thr;

	    for (;;) {
		  vvp_code_t cp = thr->pc;
		  thr->pc += 1;

		    /* Run the opcode implementation. If the execution of
		       the opcode returns false, then the thread is meant
		       to be paused, so break out of the loop. */
		  bool rc = (cp->opcode)(thr, cp);
		  if (rc == false)
			break;
	    }

	    thr = tmp;
      }
      running_thread = 0;
}
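/*
 * Opcode handlers share the calling convention that vthread_run
 * relies on above: they receive the running thread and the decoded
 * instruction, and return true to keep the thread running or false
 * to suspend it. A minimal handler might look like this sketch
 * (of_EXAMPLE is hypothetical, not a real opcode):
 *
 *     bool of_EXAMPLE(vthread_t thr, vvp_code_t cp)
 *     {
 *           thr->words[cp->bit_idx[0]].w_int = 0;  // do the work
 *           return true;                           // keep running
 *     }
 */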
/*
 * The CHUNK_LINK instruction is a special next pointer for linking
 * chunks of code space. It's like a simplified %jmp.
 */
bool of_CHUNK_LINK(vthread_t thr, vvp_code_t code)
{
      assert(code->cptr);
      thr->pc = code->cptr;
      return true;
}

/*
 * This is called by an event functor to wake up all the threads on
 * its list. I in fact created that list in the %wait instruction, and
 * I also am certain that the waiting_for_event flag is set.
 */
void vthread_schedule_list(vthread_t thr)
{
      for (vthread_t cur = thr ;  cur ;  cur = cur->wait_next) {
	    assert(cur->waiting_for_event);
	    cur->waiting_for_event = 0;
      }

      schedule_vthread(thr, 0);
}

vvp_context_t vthread_get_wt_context()
{
      if (running_thread)
	    return running_thread->wt_context;
      else
	    return 0;
}

vvp_context_t vthread_get_rd_context()
{
      if (running_thread)
	    return running_thread->rd_context;
      else
	    return 0;
}

vvp_context_item_t vthread_get_wt_context_item(unsigned context_idx)
{
      assert(running_thread && running_thread->wt_context);
      return vvp_get_context_item(running_thread->wt_context, context_idx);
}

vvp_context_item_t vthread_get_rd_context_item(unsigned context_idx)
{
      assert(running_thread && running_thread->rd_context);
      return vvp_get_context_item(running_thread->rd_context, context_idx);
}

bool of_ABS_WR(vthread_t thr, vvp_code_t cp)
{
      unsigned dst = cp->bit_idx[0];
      unsigned src = cp->bit_idx[1];

      thr->words[dst].w_real = fabs(thr->words[src].w_real);
      return true;
}

bool of_ALLOC(vthread_t thr, vvp_code_t cp)
{
	/* Allocate a context. */
      vvp_context_t child_context = vthread_alloc_context(cp->scope);

	/* Push the allocated context onto the write context stack. */
      vvp_set_stacked_context(child_context, thr->wt_context);
      thr->wt_context = child_context;

      return true;
}

static bool of_AND_wide(vthread_t thr, vvp_code_t cp)
{
      unsigned idx1 = cp->bit_idx[0];
      unsigned idx2 = cp->bit_idx[1];
      unsigned wid = cp->number;

      vvp_vector4_t val = vthread_bits_to_vector(thr, idx1, wid);
      val &= vthread_bits_to_vector(thr, idx2, wid);
      thr->bits4.set_vec(idx1, val);

      return true;
}

static bool of_AND_narrow(vthread_t thr, vvp_code_t cp)
{
      unsigned idx1 = cp->bit_idx[0];
      unsigned idx2 = cp->bit_idx[1];
      unsigned wid = cp->number;

      for (unsigned idx = 0 ;  idx < wid ;  idx += 1) {
	    vvp_bit4_t lb = thr_get_bit(thr, idx1);
	    vvp_bit4_t rb = thr_get_bit(thr, idx2);
	    thr_put_bit(thr, idx1, lb&rb);
	    idx1 += 1;
	    if (idx2 >= 4)
		  idx2 += 1;
      }

      return true;
}

bool of_AND(vthread_t thr, vvp_code_t cp)
{
      assert(cp->bit_idx[0] >= 4);

      if (cp->number <= 4)
	    cp->opcode = &of_AND_narrow;
      else
	    cp->opcode = &of_AND_wide;

      return cp->opcode(thr, cp);
}

bool of_ANDI(vthread_t thr, vvp_code_t cp)
{
      unsigned idx1 = cp->bit_idx[0];
      unsigned long imm = cp->bit_idx[1];
      unsigned wid = cp->number;

      assert(idx1 >= 4);

      vvp_vector4_t val = vthread_bits_to_vector(thr, idx1, wid);

      vvp_vector4_t imv (wid, BIT4_0);
      unsigned trans = wid;
      if (trans > CPU_WORD_BITS)
	    trans = CPU_WORD_BITS;
      imv.setarray(0, trans, &imm);

      val &= imv;

      thr->bits4.set_vec(idx1, val);
      return true;
}

bool of_ADD(vthread_t thr, vvp_code_t cp)
{
      assert(cp->bit_idx[0] >= 4);

      unsigned long*lva = vector_to_array(thr, cp->bit_idx[0], cp->number);
      unsigned long*lvb = vector_to_array(thr, cp->bit_idx[1], cp->number);
      if (lva == 0 || lvb == 0)
	    goto x_out;

      unsigned long carry;
      carry = 0;
      for (unsigned idx = 0 ;  (idx*CPU_WORD_BITS) < cp->number ;  idx += 1)
	    lva[idx] = add_with_carry(lva[idx], lvb[idx], carry);

	/* We know from the vector_to_array that the address is valid
	   in the thr->bits4 vector, so just do the set bit. */

      thr->bits4.setarray(cp->bit_idx[0], cp->number, lva);

      delete[]lva;
      delete[]lvb;

      return true;

 x_out:
      delete[]lva;
      delete[]lvb;

      vvp_vector4_t tmp(cp->number, BIT4_X);
      thr->bits4.set_vec(cp->bit_idx[0], tmp);

      return true;
}

bool of_ADD_WR(vthread_t thr, vvp_code_t cp)
{
      double l = thr->words[cp->bit_idx[0]].w_real;
      double r = thr->words[cp->bit_idx[1]].w_real;
      thr->words[cp->bit_idx[0]].w_real = l + r;
      return true;
}

/*
 * This is %addi, add-immediate. The first value is a vector, the
 * second value is the immediate value in the bit_idx[1] position. The
 * immediate value can be up to 16 bits, which are then padded to the
 * width of the vector with zero.
 */
bool of_ADDI(vthread_t thr, vvp_code_t cp)
{
	// Collect arguments
      unsigned bit_addr       = cp->bit_idx[0];
      unsigned long imm_value = cp->bit_idx[1];
      unsigned bit_width      = cp->number;

      assert(bit_addr >= 4);

      unsigned word_count = (bit_width+CPU_WORD_BITS-1)/CPU_WORD_BITS;

      unsigned long*lva = vector_to_array(thr, bit_addr, bit_width);
      if (lva == 0)
	    goto x_out;

      unsigned long carry;
      carry = 0;
      for (unsigned idx = 0 ;  idx < word_count ;  idx += 1) {
	    lva[idx] = add_with_carry(lva[idx], imm_value, carry);
	    imm_value = 0;
      }

	/* We know from the vector_to_array that the address is valid
	   in the thr->bits4 vector, so just do the set bit. */

      thr->bits4.setarray(bit_addr, bit_width, lva);
      delete[]lva;

      return true;

 x_out:
      delete[]lva;

      vvp_vector4_t tmp (bit_width, BIT4_X);
      thr->bits4.set_vec(bit_addr, tmp);

      return true;
}
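/*
 * Immediate-padding example (illustrative): with a 40-bit vector on
 * a host where CPU_WORD_BITS is 32, vector_to_array() yields two
 * words. The immediate is added to word 0 only; imm_value is then
 * cleared, so word 1 receives nothing but the carry propagated by
 * add_with_carry(). This is the zero padding the comment above
 * describes.
 */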
/* %assign/ar <array>, <delay>, <bit>
 * Generate an assignment event to a real array. Index register 3
 * contains the canonical address of the word in the memory. <delay>
 * is the delay in simulation time. <bit> is the index register
 * containing the real value.
 */
bool of_ASSIGN_AR(vthread_t thr, vvp_code_t cp)
{
      long adr = thr->words[3].w_int;
      unsigned delay = cp->bit_idx[0];
      double value = thr->words[cp->bit_idx[1]].w_real;

      if (adr >= 0) {
	    schedule_assign_array_word(cp->array, adr, value, delay);
      }

      return true;
}

/* %assign/ar/d <array>, <delayx>, <bit>
 * Generate an assignment event to a real array. Index register 3
 * contains the canonical address of the word in the memory.
 * <delayx> is the integer register that contains the delay value.
 * <bit> is the index register containing the real value.
 */
bool of_ASSIGN_ARD(vthread_t thr, vvp_code_t cp)
{
      long adr = thr->words[3].w_int;
      vvp_time64_t delay = thr->words[cp->bit_idx[0]].w_uint;
      double value = thr->words[cp->bit_idx[1]].w_real;

      if (adr >= 0) {
	    schedule_assign_array_word(cp->array, adr, value, delay);
      }

      return true;
}

/* %assign/ar/e <array>, <bit>
 * Generate an assignment event to a real array. Index register 3
 * contains the canonical address of the word in the memory. <bit>
 * is the index register containing the real value. The event
 * information is contained in the thread event control registers
 * and is set with %evctl.
 */
bool of_ASSIGN_ARE(vthread_t thr, vvp_code_t cp)
{
      long adr = thr->words[3].w_int;
      double value = thr->words[cp->bit_idx[0]].w_real;

      if (adr >= 0) {
	    if (thr->ecount == 0) {
		  schedule_assign_array_word(cp->array, adr, value, 0);
	    } else {
		  schedule_evctl(cp->array, adr, value, thr->event,
				 thr->ecount);
	    }
      }

      return true;
}

/* %assign/av <array>, <delay>, <bit>
 * This generates an assignment event to an array. Index register 0
 * contains the width of the vector (and the word) and index register
 * 3 contains the canonical address of the word in memory.
 */
bool of_ASSIGN_AV(vthread_t thr, vvp_code_t cp)
{
      unsigned wid = thr->words[0].w_int;
      long off = thr->words[1].w_int;
      long adr = thr->words[3].w_int;
      unsigned delay = cp->bit_idx[0];
      unsigned bit = cp->bit_idx[1];

      if (adr < 0)
	    return true;

      long vwidth = get_array_word_size(cp->array);
	// We fell off the MSB end.
      if (off >= vwidth)
	    return true;
	// Trim the bits after the MSB
      if (off + (long)wid > vwidth) {
	    wid += vwidth - off - wid;
      } else if (off < 0 ) {
	      // We fell off the LSB end.
	    if ((unsigned)-off > wid )
		  return true;
	      // Trim the bits before the LSB
	    wid += off;
	    bit -= off;
	    off = 0;
      }

      assert(wid > 0);

      vvp_vector4_t value = vthread_bits_to_vector(thr, bit, wid);

      schedule_assign_array_word(cp->array, adr, off, value, delay);

      return true;
}
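/*
 * Trimming example (illustrative): assigning an 8-bit value at
 * offset -3 into a 16-bit array word falls off the LSB end by 3
 * bits. The code above keeps only the 5 bits that land inside the
 * word: wid becomes 8 + (-3) = 5, the source bit address advances
 * by 3, and off is clamped to 0.
 */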
/* %assign/av/d <array>, <delayx>, <bit>
 * This generates an assignment event to an array. Index register 0
 * contains the width of the vector (and the word) and index register
 * 3 contains the canonical address of the word in memory. The named
 * index register contains the delay.
 */
bool of_ASSIGN_AVD(vthread_t thr, vvp_code_t cp)
{
      unsigned wid = thr->words[0].w_int;
      long off = thr->words[1].w_int;
      long adr = thr->words[3].w_int;
      vvp_time64_t delay = thr->words[cp->bit_idx[0]].w_uint;
      unsigned bit = cp->bit_idx[1];

      if (adr < 0)
	    return true;

      long vwidth = get_array_word_size(cp->array);
	// We fell off the MSB end.
      if (off >= vwidth)
	    return true;
	// Trim the bits after the MSB
      if (off + (long)wid > vwidth) {
	    wid += vwidth - off - wid;
      } else if (off < 0 ) {
	      // We fell off the LSB end.
	    if ((unsigned)-off > wid )
		  return true;
	      // Trim the bits before the LSB
	    wid += off;
	    bit -= off;
	    off = 0;
      }

      assert(wid > 0);

      vvp_vector4_t value = vthread_bits_to_vector(thr, bit, wid);

      schedule_assign_array_word(cp->array, adr, off, value, delay);

      return true;
}

bool of_ASSIGN_AVE(vthread_t thr, vvp_code_t cp)
{
      unsigned wid = thr->words[0].w_int;
      long off = thr->words[1].w_int;
      long adr = thr->words[3].w_int;
      unsigned bit = cp->bit_idx[0];

      if (adr < 0)
	    return true;

      long vwidth = get_array_word_size(cp->array);
	// We fell off the MSB end.
      if (off >= vwidth)
	    return true;
	// Trim the bits after the MSB
      if (off + (long)wid > vwidth) {
	    wid += vwidth - off - wid;
      } else if (off < 0 ) {
	      // We fell off the LSB end.
	    if ((unsigned)-off > wid )
		  return true;
	      // Trim the bits before the LSB
	    wid += off;
	    bit -= off;
	    off = 0;
      }

      assert(wid > 0);

      vvp_vector4_t value = vthread_bits_to_vector(thr, bit, wid);

	// If the count is zero then just put the value.
      if (thr->ecount == 0) {
	    schedule_assign_array_word(cp->array, adr, off, value, 0);
      } else {
	    schedule_evctl(cp->array, adr, value, off, thr->event,
			   thr->ecount);
      }

      return true;
}

/*
 * This is %assign/v0