/*
 * Copyright (c) 2004-2007 Stephen Williams (steve@icarus.com)
 *
 * This source code is free software; you can redistribute it
 * and/or modify it in source code form under the terms of the GNU
 * General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

# include  "config.h"
# include  "vvp_net.h"
# include  "schedule.h"
# include  "statistics.h"
// NOTE(review): the header names below were garbled in the source;
// reconstructed from what the code actually uses: fprintf (<cstdio>),
// strspn (<cstring>), exit (<cstdlib>), pow (<cmath>), ULONG_MAX
// (<climits>), cerr/endl (<iostream>), typeid (<typeinfo>), and
// assert (<cassert>).
# include  <cstdio>
# include  <cstring>
# include  <cstdlib>
# include  <cmath>
# include  <climits>
# include  <iostream>
# include  <typeinfo>
# include  <cassert>

/* *** BIT operations *** */

/*
 * One-bit full add of 4-state values. The carry is passed in and
 * returned through "c". Any X or Z on an input poisons both the sum
 * and the carry to X.
 */
vvp_bit4_t add_with_carry(vvp_bit4_t a, vvp_bit4_t b, vvp_bit4_t&c)
{
      if (bit4_is_xz(a) || bit4_is_xz(b) || bit4_is_xz(c)) {
	    c = BIT4_X;
	    return BIT4_X;
      }

	// NOTE: This relies on the facts that XZ values have been
	// weeded out, and that BIT4_1 is 1 and BIT4_0 is 0.
int sum = (int)a + (int)b + (int)c; switch (sum) { case 0: return BIT4_0; case 1: c = BIT4_0; return BIT4_1; case 2: c = BIT4_1; return BIT4_0; case 3: c = BIT4_1; return BIT4_1; default: fprintf(stderr, "Incorrect result %d.\n", sum); assert(0); } } vvp_bit4_t operator & (vvp_bit4_t a, vvp_bit4_t b) { if (a == BIT4_0) return BIT4_0; if (b == BIT4_0) return BIT4_0; if (bit4_is_xz(a)) return BIT4_X; if (bit4_is_xz(b)) return BIT4_X; return BIT4_1; } vvp_bit4_t operator | (vvp_bit4_t a, vvp_bit4_t b) { if (a == BIT4_1) return BIT4_1; if (b == BIT4_1) return BIT4_1; if (bit4_is_xz(a)) return BIT4_X; if (bit4_is_xz(b)) return BIT4_X; return BIT4_0; } vvp_bit4_t operator ^ (vvp_bit4_t a, vvp_bit4_t b) { if (bit4_is_xz(a)) return BIT4_X; if (bit4_is_xz(b)) return BIT4_X; if (a == BIT4_0) return b; if (b == BIT4_0) return a; return BIT4_0; } vvp_bit4_t operator ~ (vvp_bit4_t a) { switch (a) { case BIT4_0: return BIT4_1; case BIT4_1: return BIT4_0; default: return BIT4_X; } } ostream& operator<<(ostream&out, vvp_bit4_t bit) { switch (bit) { case BIT4_0: out << "0"; break; case BIT4_1: out << "1"; break; case BIT4_X: out << "X"; break; case BIT4_Z: out << "Z"; break; default: out << "?"; break; } return out; } typedef unsigned short edge_t; inline edge_t VVP_EDGE(vvp_bit4_t from, vvp_bit4_t to) { return 1 << ((from << 2) | to); } const edge_t vvp_edge_posedge = VVP_EDGE(BIT4_0,BIT4_1) | VVP_EDGE(BIT4_0,BIT4_X) | VVP_EDGE(BIT4_0,BIT4_Z) | VVP_EDGE(BIT4_X,BIT4_1) | VVP_EDGE(BIT4_Z,BIT4_1) ; const edge_t vvp_edge_negedge = VVP_EDGE(BIT4_1,BIT4_0) | VVP_EDGE(BIT4_1,BIT4_X) | VVP_EDGE(BIT4_1,BIT4_Z) | VVP_EDGE(BIT4_X,BIT4_0) | VVP_EDGE(BIT4_Z,BIT4_0) ; int edge(vvp_bit4_t from, vvp_bit4_t to) { edge_t mask = VVP_EDGE(from, to); if (mask & vvp_edge_posedge) return 1; if (mask & vvp_edge_negedge) return -1; return 0; } void vvp_send_vec8(vvp_net_ptr_t ptr, vvp_vector8_t val) { while (struct vvp_net_t*cur = ptr.ptr()) { vvp_net_ptr_t next = cur->port[ptr.port()]; if (cur->fun) 
cur->fun->recv_vec8(ptr, val); ptr = next; } } void vvp_send_real(vvp_net_ptr_t ptr, double val) { while (struct vvp_net_t*cur = ptr.ptr()) { vvp_net_ptr_t next = cur->port[ptr.port()]; if (cur->fun) cur->fun->recv_real(ptr, val); ptr = next; } } void vvp_send_long(vvp_net_ptr_t ptr, long val) { while (struct vvp_net_t*cur = ptr.ptr()) { vvp_net_ptr_t next = cur->port[ptr.port()]; if (cur->fun) cur->fun->recv_long(ptr, val); ptr = next; } } void vvp_vector4_t::copy_from_(const vvp_vector4_t&that) { size_ = that.size_; if (size_ > BITS_PER_WORD) { unsigned words = (size_+BITS_PER_WORD-1) / BITS_PER_WORD; bits_ptr_ = new unsigned long[words]; for (unsigned idx = 0 ; idx < words ; idx += 1) bits_ptr_[idx] = that.bits_ptr_[idx]; } else { bits_val_ = that.bits_val_; } } void vvp_vector4_t::allocate_words_(unsigned wid, unsigned long init) { if (size_ > BITS_PER_WORD) { unsigned cnt = (size_ + BITS_PER_WORD - 1) / BITS_PER_WORD; bits_ptr_ = new unsigned long[cnt]; for (unsigned idx = 0 ; idx < cnt ; idx += 1) bits_ptr_[idx] = init; } else { bits_val_ = init; } } vvp_vector4_t::vvp_vector4_t(const vvp_vector4_t&that, unsigned adr, unsigned wid) { size_ = wid; assert((adr + wid) <= that.size_); allocate_words_(wid, WORD_X_BITS); if (wid > BITS_PER_WORD) { /* In this case, the subvector and the source vector are long. Do the transfer reasonably efficiently. */ unsigned ptr = adr / BITS_PER_WORD; unsigned off = adr % BITS_PER_WORD; unsigned noff = BITS_PER_WORD - off; unsigned long lmask = (1UL << 2UL*off) - 1UL; unsigned trans = 0; unsigned dst = 0; while (trans < wid) { // The low bits of the result. bits_ptr_[dst] = (that.bits_ptr_[ptr] & ~lmask) >> 2UL*off; trans += noff; if (trans >= wid) break; ptr += 1; // The high bits of the result. Skip this if the // source and destination are perfectly aligned. 
		  if (noff != BITS_PER_WORD) {
			bits_ptr_[dst] |= (that.bits_ptr_[ptr]&lmask) << 2*noff;
			trans += off;
		  }
		  dst += 1;
	    }

      } else {
	      // Short destination: move it one 4-state bit at a time.
	    for (unsigned idx = 0 ; idx < wid ; idx += 1) {
		  set_bit(idx, that.value(adr+idx));
	    }
      }
}

/*
 * Change the size of the vvp_vector4_t vector to the new size. Copy
 * the old values, as many as well fit, into the new vector.
 * New high bits (if the vector grows) are filled with X.
 */
void vvp_vector4_t::resize(unsigned newsize)
{
      if (size_ == newsize)
	    return;

      unsigned cnt = (size_ + BITS_PER_WORD - 1) / BITS_PER_WORD;

      if (newsize > BITS_PER_WORD) {
	      // New size needs external word storage.
	    unsigned newcnt = (newsize + BITS_PER_WORD - 1) / BITS_PER_WORD;
	    unsigned long*newbits = new unsigned long[newcnt];
	    if (cnt > 1) {
		    // Old value was also array-based; copy the words
		    // that still fit, then release the old array.
		  unsigned trans = cnt;
		  if (trans > newcnt)
			trans = newcnt;
		  for (unsigned idx = 0 ; idx < trans ; idx += 1)
			newbits[idx] = bits_ptr_[idx];

		  delete[]bits_ptr_;

	    } else {
		    // Old value was inline; it becomes word 0.
		  newbits[0] = bits_val_;
	    }

	      // Any words beyond the old contents start out as X.
	    for (unsigned idx = cnt ; idx < newcnt ; idx += 1)
		  newbits[idx] = WORD_X_BITS;

	    size_ = newsize;
	    bits_ptr_ = newbits;

      } else {
	      // New size fits inline. If the old value was an array,
	      // keep word 0 and drop the rest.
	    unsigned long newval;
	    if (cnt > 1) {
		  newval = bits_ptr_[0];
		  delete[]bits_ptr_;
		  bits_val_ = newval;
	    }

	    size_ = newsize;
      }
}

/*
 * Extract "wid" bits starting at "adr" as a packed 2-state array
 * (one binary bit per vector bit). Returns a heap array the caller
 * must delete[], or 0 (nil) if any bit in the range is X or Z.
 */
unsigned long* vvp_vector4_t::subarray(unsigned adr, unsigned wid) const
{
	// Number of 2-state bits that fit in one result word.
      const unsigned BIT2_PER_WORD = 8*sizeof(unsigned long);
      unsigned awid = (wid + BIT2_PER_WORD - 1) / (BIT2_PER_WORD);
      unsigned long*val = new unsigned long[awid];
      for (unsigned idx = 0 ; idx < awid ; idx += 1)
	    val[idx] = 0;

      if (size_ <= BITS_PER_WORD) {

	      /* Handle the special case that the array is small. The
		 entire value of the vector4 is within the bits_val_
		 so we know that the result is a single word, the
		 source is a single word, and we just have to loop
		 through that word. */
	    unsigned long tmp = bits_val_ >> 2UL*adr;
	    tmp &= (1UL << 2*wid) - 1;
	    if (tmp & WORD_X_BITS)
		  goto x_out;

	      // Compress 2-bit pairs down to single bits.
	    unsigned long mask1 = 1;
	    for (unsigned idx = 0 ; idx < wid ; idx += 1) {
		  if (tmp & 1)
			val[0] |= mask1;

		  mask1 <<= 1UL;
		  tmp >>= 2UL;
	    }

	    return val;

      } else {

	      /* Get the first word we are scanning. We may in fact
		 be somewhere in the middle of that word.
	      */
	    unsigned long tmp = bits_ptr_[adr/BITS_PER_WORD];
	    unsigned long off = adr%BITS_PER_WORD;
	    tmp >>= 2UL * off;

	      // Test for X bits but not beyond the desired wid.
	    if (wid < (BITS_PER_WORD-off))
		  tmp &= ~(-1UL << 2*wid);
	    if (tmp & WORD_X_BITS)
		  goto x_out;

	      // Where in the target array to write the next bit.
	    unsigned long mask1 = 1;
	    const unsigned long mask1_last = 1UL << (BIT2_PER_WORD-1);
	    unsigned long*val_ptr = val;

	      // Track where the source bit is in the source word.
	    unsigned adr_bit = adr%BITS_PER_WORD;

	      // Scan...
	    for (unsigned idx = 0 ; idx < wid ; idx += 1) {

		    /* Starting a new word? */
		  if (adr_bit == BITS_PER_WORD) {
			tmp = bits_ptr_[adr/BITS_PER_WORD];
			  // If this is the last word, then only test
			  // for X in the valid bits.
			if ((wid-idx) < BITS_PER_WORD)
			      tmp &= ~(WORD_Z_BITS<<2*(wid-idx));
			if (tmp & WORD_X_BITS)
			      goto x_out;
			adr_bit = 0;
		  }

		    // Copy one 4-state bit to one 2-state bit.
		  if (tmp&1)
			*val_ptr |= mask1;

		  adr += 1;
		  adr_bit += 1;
		  tmp >>= 2UL;

		    // Advance the destination cursor, rolling over to
		    // the next result word when the mask wraps.
		  if (mask1 == mask1_last) {
			val_ptr += 1;
			mask1 = 1;
		  } else {
			mask1 <<= 1;
		  }
	    }
      }

      return val;

	// Any X/Z anywhere in the range makes the whole result nil.
x_out:
      delete[]val;
      return 0;
}

/*
 * Set the bits of that vector, which must be a subset of this vector,
 * into the addressed part of this vector. Use bit masking and word
 * copies to go as fast as reasonably possible.
 */
void vvp_vector4_t::set_vec(unsigned adr, const vvp_vector4_t&that)
{
      assert(adr+that.size_ <= size_);

      if (size_ <= BITS_PER_WORD) {

	      /* The destination vector (me!) is within a bits_val_
		 word, so the subvector is certainly within a
		 bits_val_ word. Therefore, the entire operation is a
		 matter of writing the bits of that into the addressed
		 bits of this. The mask below is calculated to be 1
		 for all the bits that are to come from that. Do the
		 job by some shifting, masking and OR.
	      */
	    unsigned long lmask = (1UL << 2UL*adr) - 1;
	    unsigned long hmask;
	    unsigned long hshift = adr+that.size_;
	      // Guard the shift: 2*(adr+size) would overflow the word
	      // when the span reaches the top, so saturate the mask.
	    if (hshift >= BITS_PER_WORD)
		  hmask = -1UL;
	    else
		  hmask = (1UL << 2UL*(adr+that.size_)) - 1;
	    unsigned long mask = hmask & ~lmask;

	    bits_val_ = (bits_val_ & ~mask) | ((that.bits_val_<<2UL*adr) & mask);

      } else if (that.size_ <= BITS_PER_WORD) {

	      /* This vector is more than a word, but that vector is
		 still small. Write into the destination, possibly
		 spanning two destination works, depending on whether
		 the source vector spans a word transition. */
	    unsigned long dptr = adr / BITS_PER_WORD;
	    unsigned long doff = adr % BITS_PER_WORD;

	    unsigned long lmask = (1UL << 2UL*doff) - 1;
	    unsigned long hshift = doff+that.size_;
	    unsigned long hmask;
	    if (hshift >= BITS_PER_WORD)
		  hmask = -1UL;
	    else
		  hmask = (1UL << 2*hshift) - 1UL;

	    unsigned long mask = hmask & ~lmask;

	      // Low (or only) destination word.
	    bits_ptr_[dptr] = (bits_ptr_[dptr] & ~mask)
		  | ((that.bits_val_ << 2UL*doff) & mask);

	      // If the source spilled over a word boundary, write the
	      // remaining high bits into the next destination word.
	    if ((doff + that.size_) > BITS_PER_WORD) {
		  unsigned tail = doff + that.size_ - BITS_PER_WORD;
		  mask = (1UL << 2UL*tail) - 1;

		  dptr += 1;
		  bits_ptr_[dptr] = (bits_ptr_[dptr] & ~mask)
			| ((that.bits_val_ >> 2UL*(that.size_-tail)) & mask);
	    }

      } else if (adr%BITS_PER_WORD == 0) {

	      /* In this case, both vectors are long, but the
		 destination is neatly aligned. That means all but the
		 last word can be simply copied with no masking. */
	    unsigned remain = that.size_;
	    unsigned sptr = 0;
	    unsigned dptr = adr / BITS_PER_WORD;
	    while (remain >= BITS_PER_WORD) {
		  bits_ptr_[dptr++] = that.bits_ptr_[sptr++];
		  remain -= BITS_PER_WORD;
	    }

	      // Partial last word: mask-merge the leftover bits.
	    if (remain > 0) {
		  unsigned long mask = (1UL << 2UL*remain) - 1;
		  bits_ptr_[dptr] = (bits_ptr_[dptr] & ~mask)
			| (that.bits_ptr_[sptr] & mask);
	    }

      } else {

	      /* We know that there are two long vectors, and we know
		 that the destination is definitely NOT aligned.
	      */
	    unsigned remain = that.size_;
	    unsigned sptr = 0;
	    unsigned dptr = adr / BITS_PER_WORD;
	    unsigned doff = adr % BITS_PER_WORD;
	    unsigned long lmask = (1UL << 2UL*doff) - 1;
	    unsigned ndoff = BITS_PER_WORD - doff;

	      // Each full source word is split across two destination
	      // words: low part into the top of dptr, high part into
	      // the bottom of dptr+1.
	    while (remain >= BITS_PER_WORD) {

		  bits_ptr_[dptr] = (bits_ptr_[dptr] & lmask)
			| ((that.bits_ptr_[sptr] << 2UL*doff) & ~lmask);

		  dptr += 1;

		  bits_ptr_[dptr] = (bits_ptr_[dptr] & ~lmask)
			| ((that.bits_ptr_[sptr] >> 2UL*ndoff) & lmask);

		  remain -= BITS_PER_WORD;
		  sptr += 1;
	    }

	      // Merge the final partial source word, again possibly
	      // spanning two destination words.
	    unsigned long hshift = doff+remain;
	    unsigned long hmask;
	    if (hshift >= BITS_PER_WORD)
		  hmask = -1UL;
	    else
		  hmask = (1UL << 2UL*(doff+remain)) - 1;

	    unsigned long mask = hmask & ~lmask;

	    bits_ptr_[dptr] = (bits_ptr_[dptr] & ~mask)
		  | ((that.bits_ptr_[sptr] << 2UL*doff) & mask);

	    if ((doff + remain) > BITS_PER_WORD) {
		  unsigned tail = doff + remain - BITS_PER_WORD;
		  if (tail >= BITS_PER_WORD)
			mask = -1UL;
		  else
			mask = (1UL << 2UL*tail) - 1;

		  dptr += 1;
		  bits_ptr_[dptr] = (bits_ptr_[dptr] & ~mask)
			| ((that.bits_ptr_[sptr] >> 2UL*(remain-tail))&mask);
	    }
      }
}

/*
 * Exact (case) equality: same size and every 4-state bit identical.
 */
bool vvp_vector4_t::eeq(const vvp_vector4_t&that) const
{
      if (size_ != that.size_)
	    return false;

      if (size_ < BITS_PER_WORD) {
	      // Compare only the bits inside the vector width.
	    unsigned long mask = (1UL << 2UL * size_) - 1;
	    return (bits_val_&mask) == (that.bits_val_&mask);
      }

      if (size_ == BITS_PER_WORD) {
	    return bits_val_ == that.bits_val_;
      }

	// Long form: compare whole words, then the masked tail.
      unsigned words = size_ / BITS_PER_WORD;
      for (unsigned idx = 0 ; idx < words ; idx += 1) {
	    if (bits_ptr_[idx] != that.bits_ptr_[idx])
		  return false;
      }

      unsigned long mask = size_%BITS_PER_WORD;
      if (mask > 0) {
	    mask = (1UL << 2UL*mask) - 1;
	    return (bits_ptr_[words]&mask) == (that.bits_ptr_[words]&mask);
      }

      return true;
}

/*
 * Return true if any bit of the vector is X or Z. WORD_X_BITS marks
 * the high bit of each 2-bit pair, which is set for both X and Z.
 */
bool vvp_vector4_t::has_xz() const
{
      if (size_ < BITS_PER_WORD) {
	    unsigned long mask = WORD_X_BITS >> 2*(BITS_PER_WORD - size_);
	    return 0 != (bits_val_&mask);
      }

      if (size_ == BITS_PER_WORD) {
	    return 0 != (bits_val_&WORD_X_BITS);
      }

      unsigned words = size_ / BITS_PER_WORD;
      for (unsigned idx = 0 ; idx < words ; idx += 1) {
	    if (bits_ptr_[idx] & WORD_X_BITS)
return true; } unsigned long mask = size_%BITS_PER_WORD; if (mask > 0) { mask = WORD_X_BITS >> 2*(BITS_PER_WORD - mask); return 0 != (bits_ptr_[words]&mask); } return false; } void vvp_vector4_t::change_z2x() { assert(BIT4_Z == 3 && BIT4_X == 2); # define Z2X(val) do{ (val) = (val) & ~(((val)&WORD_X_BITS) >> 1UL); }while(0) if (size_ <= BITS_PER_WORD) { Z2X(bits_val_); } else { unsigned words = (size_+BITS_PER_WORD-1) / BITS_PER_WORD; for (unsigned idx = 0 ; idx < words ; idx += 1) Z2X(bits_ptr_[idx]); } } char* vvp_vector4_t::as_string(char*buf, size_t buf_len) { char*res = buf; *buf++ = 'C'; *buf++ = '4'; *buf++ = '<'; buf_len -= 3; for (unsigned idx = 0 ; idx < size() && buf_len >= 2 ; idx += 1) { switch (value(size()-idx-1)) { case BIT4_0: *buf++ = '0'; break; case BIT4_1: *buf++ = '1'; break; case BIT4_X: *buf++ = 'x'; break; case BIT4_Z: *buf++ = 'z'; } buf_len -= 1; } *buf++ = '>'; *buf++ = 0; return res; } /* * Add an integer to the vvp_vector4_t in place, bit by bit so that * there is no size limitations. */ vvp_vector4_t& vvp_vector4_t::operator += (int64_t that) { vvp_bit4_t carry = BIT4_0; unsigned idx; if (has_xz()) { vvp_vector4_t xxx (size(), BIT4_X); *this = xxx; return *this; } for (idx = 0 ; idx < size() ; idx += 1) { if (that == 0 && carry==BIT4_0) break; vvp_bit4_t that_bit = (that&1)? 
BIT4_1 : BIT4_0; that >>= 1; if (that_bit==BIT4_0 && carry==BIT4_0) continue; vvp_bit4_t bit = value(idx); bit = add_with_carry(bit, that_bit, carry); set_bit(idx, bit); } return *this; } ostream& operator<< (ostream&out, const vvp_vector4_t&that) { out << that.size() << "'b"; for (unsigned idx = 0 ; idx < that.size() ; idx += 1) out << that.value(that.size()-idx-1); return out; } bool vector4_to_value(const vvp_vector4_t&vec, unsigned long&val) { unsigned long res = 0; unsigned long msk = 1; for (unsigned idx = 0 ; idx < vec.size() ; idx += 1) { switch (vec.value(idx)) { case BIT4_0: break; case BIT4_1: res |= msk; break; default: return false; } msk <<= 1UL; } val = res; return true; } bool vector4_to_value(const vvp_vector4_t&vec, double&val, bool signed_flag) { if (vec.size() == 0) { val = 0.0; return true; } bool flag = true; if (vec.value(vec.size()-1) != BIT4_1) { signed_flag = false; } double res = 0.0; if (signed_flag) { vvp_bit4_t carry = BIT4_1; for (unsigned idx = 0 ; idx < vec.size() ; idx += 1) { vvp_bit4_t a = ~vec.value(idx); vvp_bit4_t x = add_with_carry(a, BIT4_0, carry); switch (x) { case BIT4_0: break; case BIT4_1: res += pow(2.0, (int)idx); break; default: flag = false; } } res *= -1.0; } else { for (unsigned idx = 0 ; idx < vec.size() ; idx += 1) { switch (vec.value(idx)) { case BIT4_0: break; case BIT4_1: res += pow(2.0, (int)idx); break; default: flag = false; } } } val = res; return flag; } template T coerce_to_width(const T&that, unsigned width) { if (that.size() == width) return that; assert(that.size() > width); T res (width); for (unsigned idx = 0 ; idx < width ; idx += 1) res.set_bit(idx, that.value(idx)); return res; } vvp_vector2_t::vvp_vector2_t() { vec_ = 0; wid_ = 0; } vvp_vector2_t::vvp_vector2_t(unsigned long v, unsigned wid) { wid_ = wid; const unsigned bits_per_word = 8 * sizeof(vec_[0]); const unsigned words = (wid_ + bits_per_word-1) / bits_per_word; vec_ = new unsigned long[words]; vec_[0] = v; for (unsigned idx = 1 ; idx < 
words ; idx += 1) vec_[idx] = 0; } vvp_vector2_t::vvp_vector2_t(vvp_vector2_t::fill_t fill, unsigned wid) { wid_ = wid; const unsigned bits_per_word = 8 * sizeof(vec_[0]); const unsigned words = (wid_ + bits_per_word-1) / bits_per_word; vec_ = new unsigned long[words]; for (unsigned idx = 0 ; idx < words ; idx += 1) vec_[idx] = fill? -1 : 0; } vvp_vector2_t::vvp_vector2_t(const vvp_vector4_t&that) { wid_ = that.size(); const unsigned words = (that.size() + BITS_PER_WORD-1) / BITS_PER_WORD; if (words == 0) { vec_ = 0; wid_ = 0; return; } vec_ = new unsigned long[words]; for (unsigned idx = 0 ; idx < words ; idx += 1) vec_[idx] = 0; for (unsigned idx = 0 ; idx < that.size() ; idx += 1) { unsigned addr = idx / BITS_PER_WORD; unsigned shift = idx % BITS_PER_WORD; switch (that.value(idx)) { case BIT4_0: break; case BIT4_1: vec_[addr] |= 1UL << shift; break; default: delete[]vec_; vec_ = 0; wid_ = 0; return; } } } void vvp_vector2_t::copy_from_that_(const vvp_vector2_t&that) { wid_ = that.wid_; const unsigned words = (wid_ + BITS_PER_WORD-1) / BITS_PER_WORD; if (words == 0) { vec_ = 0; wid_ = 0; return; } vec_ = new unsigned long[words]; for (unsigned idx = 0 ; idx < words ; idx += 1) vec_[idx] = that.vec_[idx]; } vvp_vector2_t::vvp_vector2_t(const vvp_vector2_t&that) { copy_from_that_(that); } vvp_vector2_t::vvp_vector2_t(const vvp_vector2_t&that, unsigned newsize) { wid_ = newsize; if (newsize == 0) { vec_ = 0; return; } const unsigned words = (wid_ + BITS_PER_WORD-1) / BITS_PER_WORD; const unsigned twords = (that.wid_ + BITS_PER_WORD-1) / BITS_PER_WORD; vec_ = new unsigned long[words]; for (unsigned idx = 0 ; idx < words ; idx += 1) { if (idx < twords) vec_[idx] = that.vec_[idx]; else vec_[idx] = 0; } } vvp_vector2_t& vvp_vector2_t::operator= (const vvp_vector2_t&that) { if (this == &that) return *this; if (vec_) { delete[]vec_; vec_ = 0; } copy_from_that_(that); return *this; } vvp_vector2_t& vvp_vector2_t::operator <<= (unsigned int shift) { if (wid_ == 0) return 
*this; const unsigned words = (wid_ + BITS_PER_WORD-1) / BITS_PER_WORD; // Number of words to shift const unsigned wshift = shift / BITS_PER_WORD; // bits to shift within each word. const unsigned long oshift = shift % BITS_PER_WORD; // If shifting the entire value away, then return zeros. if (wshift >= words) { for (unsigned idx = 0 ; idx < words ; idx += 1) vec_[idx] = 0; return *this; } // Do the word shift first. if (wshift > 0) { for (unsigned idx = 0 ; idx < words-wshift ; idx += 1) { unsigned sel = words - idx - 1; vec_[sel] = vec_[sel-wshift]; } for (unsigned idx = 0 ; idx < wshift ; idx += 1) vec_[idx] = 0; } // Do the fine shift. if (oshift != 0) { unsigned long pad = 0; for (unsigned idx = 0 ; idx < words ; idx += 1) { unsigned long next_pad = vec_[idx] >> (BITS_PER_WORD-oshift); vec_[idx] = (vec_[idx] << oshift) | pad; pad = next_pad; } // Cleanup the tail bits. unsigned long mask = -1UL >> (BITS_PER_WORD - wid_%BITS_PER_WORD); vec_[words-1] &= mask; } return *this; } vvp_vector2_t& vvp_vector2_t::operator >>= (unsigned shift) { if (wid_ == 0) return *this; const unsigned words = (wid_ + BITS_PER_WORD-1) / BITS_PER_WORD; // Number of words to shift const unsigned wshift = shift / BITS_PER_WORD; // bits to shift within each word. const unsigned long oshift = shift % BITS_PER_WORD; // If shifting the entire value away, then return zeros. if (wshift >= words) { for (unsigned idx = 0 ; idx < words ; idx += 1) vec_[idx] = 0; return *this; } if (wshift > 0) { for (unsigned idx = 0 ; idx < words-wshift ; idx += 1) vec_[idx] = vec_[idx+wshift]; for (unsigned idx = words-wshift ; idx < words ; idx += 1) vec_[idx] = 0; } if (oshift > 0) { unsigned long pad = 0; for (unsigned idx = words ; idx > 0 ; idx -= 1) { unsigned long new_pad = vec_[idx-1] <<(BITS_PER_WORD-oshift); vec_[idx-1] = pad | (vec_[idx-1] >> oshift); pad = new_pad; } // Cleanup the tail bits. 
unsigned long mask = -1UL >> (BITS_PER_WORD - wid_%BITS_PER_WORD); vec_[words-1] &= mask; } return *this; } static unsigned long add_carry(unsigned long a, unsigned long b, unsigned long&carry) { unsigned long out = carry; carry = 0; if ((ULONG_MAX - out) < a) carry += 1; out += a; if ((ULONG_MAX - out) < b) carry += 1; out += b; return out; } vvp_vector2_t& vvp_vector2_t::operator += (const vvp_vector2_t&that) { assert(wid_ == that.wid_); if (wid_ == 0) return *this; const unsigned words = (wid_ + BITS_PER_WORD-1) / BITS_PER_WORD; unsigned long carry = 0; for (unsigned idx = 0 ; idx < words ; idx += 1) { vec_[idx] = add_carry(vec_[idx], that.vec_[idx], carry); } // Cleanup the tail bits. unsigned long mask = -1UL >> (BITS_PER_WORD - wid_%BITS_PER_WORD); vec_[words-1] &= mask; return *this; } vvp_vector2_t& vvp_vector2_t::operator -= (const vvp_vector2_t&that) { assert(wid_ == that.wid_); if (wid_ == 0) return *this; const unsigned words = (wid_ + BITS_PER_WORD-1) / BITS_PER_WORD; unsigned long carry = 1; for (unsigned idx = 0 ; idx < words ; idx += 1) { vec_[idx] = add_carry(vec_[idx], ~that.vec_[idx], carry); } return *this; } vvp_vector2_t::~vvp_vector2_t() { if (vec_) delete[]vec_; } unsigned vvp_vector2_t::size() const { return wid_; } int vvp_vector2_t::value(unsigned idx) const { if (idx >= wid_) return 0; const unsigned bits_per_word = 8 * sizeof(vec_[0]); unsigned addr = idx/bits_per_word; unsigned mask = idx%bits_per_word; if (vec_[addr] & (1UL<> 4UL*sizeof(unsigned long); res[0] &= word_mask; tmpa = (a >> 4UL*sizeof(unsigned long)) & word_mask; tmpb = b & word_mask; res[1] += tmpa * tmpb; res[2] = res[1] >> 4UL*sizeof(unsigned long); res[1] &= word_mask; tmpa = a & word_mask; tmpb = (b >> 4UL*sizeof(unsigned long)) & word_mask; res[1] += tmpa * tmpb; res[2] += res[1] >> 4UL*sizeof(unsigned long); res[3] = res[2] >> 4UL*sizeof(unsigned long); res[1] &= word_mask; res[2] &= word_mask; tmpa = (a >> 4UL*sizeof(unsigned long)) & word_mask; tmpb = (b >> 
4UL*sizeof(unsigned long)) & word_mask; res[2] += tmpa * tmpb; res[3] += res[2] >> 4UL*sizeof(unsigned long); res[2] &= word_mask; high = (res[3] << 4UL*sizeof(unsigned long)) | res[2]; low = (res[1] << 4UL*sizeof(unsigned long)) | res[0]; } /* * Multiplication of two vector2 vectors returns a product as wide as * the sum of the widths of the input vectors. */ vvp_vector2_t operator * (const vvp_vector2_t&a, const vvp_vector2_t&b) { const unsigned bits_per_word = 8 * sizeof(a.vec_[0]); vvp_vector2_t r (0, a.size() + b.size()); unsigned awords = (a.wid_ + bits_per_word - 1) / bits_per_word; unsigned bwords = (b.wid_ + bits_per_word - 1) / bits_per_word; unsigned rwords = (r.wid_ + bits_per_word - 1) / bits_per_word; for (unsigned bdx = 0 ; bdx < bwords ; bdx += 1) { unsigned long tmpb = b.vec_[bdx]; if (tmpb == 0) continue; for (unsigned adx = 0 ; adx < awords ; adx += 1) { unsigned long tmpa = a.vec_[adx]; if (tmpa == 0) continue; unsigned long low, hig; multiply_long(tmpa, tmpb, low, hig); unsigned long carry = 0; for (unsigned sdx = 0 ; (adx+bdx+sdx) < rwords ; sdx += 1) { r.vec_[adx+bdx+sdx] = add_carry(r.vec_[adx+bdx+sdx], low, carry); low = hig; hig = 0; } } } return r; } static void div_mod (vvp_vector2_t dividend, vvp_vector2_t divisor, vvp_vector2_t"ient, vvp_vector2_t&remainder) { quotient = vvp_vector2_t(0, dividend.size()); if (divisor == quotient) { cerr << "ERROR: division by zero, exiting." << endl; exit(255); } if (dividend < divisor) { remainder = dividend; return; } vvp_vector2_t mask (1, dividend.size()); // Make the dividend 1 bit larger to prevent overflow of // divtmp in startup. 
dividend = vvp_vector2_t(dividend, dividend.size()+1); vvp_vector2_t divtmp (divisor, dividend.size()); while (divtmp < dividend) { divtmp <<= 1; mask <<= 1; } while (dividend >= divisor) { if (divtmp <= dividend) { dividend -= divtmp; quotient += mask; } divtmp >>= 1; mask >>= 1; } remainder = dividend; } vvp_vector2_t operator / (const vvp_vector2_t÷nd, const vvp_vector2_t&divisor) { vvp_vector2_t quot, rem; div_mod(dividend, divisor, quot, rem); return quot; } vvp_vector2_t operator % (const vvp_vector2_t÷nd, const vvp_vector2_t&divisor) { vvp_vector2_t quot, rem; div_mod(dividend, divisor, quot, rem); return rem; } bool operator > (const vvp_vector2_t&a, const vvp_vector2_t&b) { const unsigned awords = (a.wid_ + vvp_vector2_t::BITS_PER_WORD-1) / vvp_vector2_t::BITS_PER_WORD; const unsigned bwords = (b.wid_ + vvp_vector2_t::BITS_PER_WORD-1) / vvp_vector2_t::BITS_PER_WORD; const unsigned words = awords > bwords? awords : bwords; for (unsigned idx = words ; idx > 0 ; idx -= 1) { unsigned long aw = (idx <= awords)? a.vec_[idx-1] : 0; unsigned long bw = (idx <= bwords)? b.vec_[idx-1] : 0; if (aw > bw) return true; if (aw < bw) return false; } // If the above loop finishes, then the vectors are equal. return false; } bool operator >= (const vvp_vector2_t&a, const vvp_vector2_t&b) { const unsigned awords = (a.wid_ + vvp_vector2_t::BITS_PER_WORD-1) / vvp_vector2_t::BITS_PER_WORD; const unsigned bwords = (b.wid_ + vvp_vector2_t::BITS_PER_WORD-1) / vvp_vector2_t::BITS_PER_WORD; const unsigned words = awords > bwords? awords : bwords; for (unsigned idx = words ; idx > 0 ; idx -= 1) { unsigned long aw = (idx <= awords)? a.vec_[idx-1] : 0; unsigned long bw = (idx <= bwords)? b.vec_[idx-1] : 0; if (aw > bw) return true; if (aw < bw) return false; } // If the above loop finishes, then the vectors are equal. 
return true; } bool operator < (const vvp_vector2_t&a, const vvp_vector2_t&b) { const unsigned awords = (a.wid_ + vvp_vector2_t::BITS_PER_WORD-1) / vvp_vector2_t::BITS_PER_WORD; const unsigned bwords = (b.wid_ + vvp_vector2_t::BITS_PER_WORD-1) / vvp_vector2_t::BITS_PER_WORD; unsigned words = awords; if (bwords > words) words = bwords; for (unsigned idx = words ; idx > 0 ; idx -= 1) { unsigned long aw = (idx <= awords)? a.vec_[idx-1] : 0; unsigned long bw = (idx <= bwords)? b.vec_[idx-1] : 0; if (aw < bw) return true; if (aw > bw) return false; } // If the above loop finishes, then the vectors are equal. return false; } bool operator <= (const vvp_vector2_t&a, const vvp_vector2_t&b) { // XXXX For now, only support equal width vectors. assert(a.wid_ == b.wid_); const unsigned awords = (a.wid_ + vvp_vector2_t::BITS_PER_WORD-1) / vvp_vector2_t::BITS_PER_WORD; for (unsigned idx = awords ; idx > 0 ; idx -= 1) { if (a.vec_[idx-1] < b.vec_[idx-1]) return true; if (a.vec_[idx-1] > b.vec_[idx-1]) return false; } // If the above loop finishes, then the vectors are equal. return true; } bool operator == (const vvp_vector2_t&a, const vvp_vector2_t&b) { const unsigned awords = (a.wid_ + vvp_vector2_t::BITS_PER_WORD-1) / vvp_vector2_t::BITS_PER_WORD; const unsigned bwords = (b.wid_ + vvp_vector2_t::BITS_PER_WORD-1) / vvp_vector2_t::BITS_PER_WORD; const unsigned words = awords > bwords? awords : bwords; for (unsigned idx = words ; idx > 0 ; idx -= 1) { unsigned long aw = (idx <= awords)? a.vec_[idx-1] : 0; unsigned long bw = (idx <= bwords)? b.vec_[idx-1] : 0; if (aw > bw) return false; if (aw < bw) return false; } // If the above loop finishes, then the vectors are equal. 
return true; } vvp_vector4_t vector2_to_vector4(const vvp_vector2_t&that, unsigned wid) { vvp_vector4_t res (wid); for (unsigned idx = 0 ; idx < res.size() ; idx += 1) { vvp_bit4_t bit = BIT4_0; if (that.value(idx)) bit = BIT4_1; res.set_bit(idx, bit); } return res; } vvp_vector4_t c4string_to_vector4(const char*str) { assert((str[0]=='C') && (str[1]=='4') && (str[2]=='<')); str += 3; const char*tp = str + strspn(str,"01xz"); assert(tp[0] == '>'); vvp_vector4_t tmp (tp-str); for (unsigned idx = 0 ; idx < tmp.size() ; idx += 1) { vvp_bit4_t bit; switch (str[idx]) { case '0': bit = BIT4_0; break; case '1': bit = BIT4_1; break; case 'x': bit = BIT4_X; break; case 'z': bit = BIT4_Z; break; default: fprintf(stderr, "Unsupported bit value %c(%d).\n", str[idx], str[idx]); assert(0); bit = BIT4_0; break; } tmp.set_bit(tmp.size()-idx-1, bit); } return tmp; } ostream& operator<< (ostream&out, const vvp_vector2_t&that) { if (that.is_NaN()) { out << "NaN"; } else { out << vector2_to_vector4(that, that.size()); } return out; } vvp_vector8_t::vvp_vector8_t(const vvp_vector8_t&that) { size_ = that.size_; bits_ = new vvp_scalar_t[size_]; for (unsigned idx = 0 ; idx < size_ ; idx += 1) bits_[idx] = that.bits_[idx]; } vvp_vector8_t::vvp_vector8_t(unsigned size) : size_(size) { if (size_ == 0) { bits_ = 0; return; } bits_ = new vvp_scalar_t[size_]; } vvp_vector8_t::vvp_vector8_t(const vvp_vector4_t&that, unsigned str) : size_(that.size()) { if (size_ == 0) { bits_ = 0; return; } bits_ = new vvp_scalar_t[size_]; for (unsigned idx = 0 ; idx < size_ ; idx += 1) bits_[idx] = vvp_scalar_t (that.value(idx), str); } vvp_vector8_t::vvp_vector8_t(const vvp_vector4_t&that, unsigned str0, unsigned str1) : size_(that.size()) { if (size_ == 0) { bits_ = 0; return; } bits_ = new vvp_scalar_t[size_]; for (unsigned idx = 0 ; idx < size_ ; idx += 1) bits_[idx] = vvp_scalar_t (that.value(idx), str0, str1); } vvp_vector8_t& vvp_vector8_t::operator= (const vvp_vector8_t&that) { if (size_ != that.size_) 
{ if (size_ > 0) delete[]bits_; size_ = 0; } if (that.size_ == 0) { assert(size_ == 0); return *this; } if (size_ == 0) { size_ = that.size_; bits_ = new vvp_scalar_t[size_]; } for (unsigned idx = 0 ; idx < size_ ; idx += 1) bits_[idx] = that.bits_[idx]; return *this; } bool vvp_vector8_t::eeq(const vvp_vector8_t&that) const { if (size_ != that.size_) return false; if (size_ == 0) return true; for (unsigned idx = 0 ; idx < size_ ; idx += 1) { if (! bits_[idx] .eeq( that.bits_[idx] )) return false; } return true; } ostream& operator<<(ostream&out, const vvp_vector8_t&that) { out << "C8<"; for (unsigned idx = 0 ; idx < that.size() ; idx += 1) out << that.value(that.size()-idx-1); out << ">"; return out; } vvp_net_fun_t::vvp_net_fun_t() { count_functors += 1; } vvp_net_fun_t::~vvp_net_fun_t() { } void vvp_net_fun_t::recv_vec4(vvp_net_ptr_t, const vvp_vector4_t&) { fprintf(stderr, "internal error: %s: recv_vec4 not implemented\n", typeid(*this).name()); assert(0); } void vvp_net_fun_t::recv_vec4_pv(vvp_net_ptr_t, const vvp_vector4_t&bits, unsigned base, unsigned wid, unsigned vwid) { cerr << "internal error: " << typeid(*this).name() << ": " << "recv_vect_pv(" << bits << ", " << base << ", " << wid << ", " << vwid << ") not implemented" << endl; assert(0); } void vvp_net_fun_t::recv_vec8(vvp_net_ptr_t port, vvp_vector8_t bit) { recv_vec4(port, reduce4(bit)); } void vvp_net_fun_t::recv_real(vvp_net_ptr_t, double bit) { fprintf(stderr, "internal error: %s: recv_real(%f) not implemented\n", typeid(*this).name(), bit); assert(0); } void vvp_net_fun_t::recv_long(vvp_net_ptr_t, long) { fprintf(stderr, "internal error: %s: recv_long not implemented\n", typeid(*this).name()); assert(0); } /* **** vvp_fun_drive methods **** */ vvp_fun_drive::vvp_fun_drive(vvp_bit4_t init, unsigned str0, unsigned str1) { assert(str0 < 8); assert(str1 < 8); drive0_ = str0; drive1_ = str1; } vvp_fun_drive::~vvp_fun_drive() { } void vvp_fun_drive::recv_vec4(vvp_net_ptr_t port, const 
vvp_vector4_t&bit) { assert(port.port() == 0); vvp_send_vec8(port.ptr()->out, vvp_vector8_t(bit, drive0_, drive1_)); } /* **** vvp_fun_signal methods **** */ vvp_fun_signal_base::vvp_fun_signal_base() { needs_init_ = true; continuous_assign_active_ = false; force_link = 0; cassign_link = 0; count_functors_sig += 1; } void vvp_fun_signal_base::deassign() { continuous_assign_active_ = false; } /* * The signal functor takes commands as long values to port-3. This * method interprets those commands. */ void vvp_fun_signal_base::recv_long(vvp_net_ptr_t ptr, long bit) { switch (ptr.port()) { case 3: // Command port switch (bit) { case 1: // deassign command deassign(); break; case 2: // release/net release(ptr, true); break; case 3: // release/reg release(ptr, false); break; default: fprintf(stderr, "Unsupported command %ld.\n", bit); assert(0); break; } break; default: // Other ports are errors. fprintf(stderr, "Unsupported port type %d.\n", ptr.port()); assert(0); break; } } vvp_fun_signal::vvp_fun_signal(unsigned wid, vvp_bit4_t init) : bits4_(wid, init) { } /* * Nets simply reflect their input to their output. * * NOTE: It is a quirk of vvp_fun_signal that it has an initial value * that needs to be propagated, but after that it only needs to * propagate if the value changes. Elimitating duplicate propagations * should improve performance, but has the quirk that an input that * matches the initial value might not be propagated. The hack used * herein is to keep a "needs_init_" flag that is turned false after * the first propagation, and forces the first propagation to happen * even if it matches the initial value. */ void vvp_fun_signal::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit) { switch (ptr.port()) { case 0: // Normal input (feed from net, or set from process) /* If continuous assign is active, then this is a var and the continuous assigned values overrides any normal input. So process input only if continuous assignment is not active. 
	      */
	    if (!continuous_assign_active_) {
		    // Propagate only on a change (or the very first
		    // delivery -- see needs_init_ note above).
		  if (needs_init_ || !bits4_.eeq(bit)) {
			bits4_ = bit;
			needs_init_ = false;
			calculate_output_(ptr);
		  }
	    }
	    break;

	  case 1: // Continuous assign value
	    continuous_assign_active_ = true;
	    bits4_ = bit;
	    calculate_output_(ptr);
	    break;

	  case 2: // Force value
	      // Force from a node may not have been sized completely
	      // by the source, so coerce the size here.
	    if (bit.size() != size())
		  force_ = coerce_to_width(bit, size());
	    else
		  force_ = bit;

	      // Mark every bit as forced.
	    force_mask_ = vvp_vector2_t(vvp_vector2_t::FILL1, size());
	    calculate_output_(ptr);
	    break;

	  default:
	    fprintf(stderr, "Unsupported port type %d.\n", ptr.port());
	    assert(0);
	    break;
      }
}

/*
 * Part-select delivery: write wid bits of the input, starting at
 * offset base within this signal's vwid-bit value.
 */
void vvp_fun_signal::recv_vec4_pv(vvp_net_ptr_t ptr, const vvp_vector4_t&bit,
				  unsigned base, unsigned wid, unsigned vwid)
{
      assert(bit.size() == wid);
      assert(bits4_.size() == vwid);

      switch (ptr.port()) {
	  case 0: // Normal input
	    if (! continuous_assign_active_) {
		  for (unsigned idx = 0 ; idx < wid ; idx += 1) {
			  // Ignore bits that fall off the top of the signal.
			if (base+idx >= bits4_.size())
			      break;
			bits4_.set_bit(base+idx, bit.value(idx));
		  }
		  needs_init_ = false;
		  calculate_output_(ptr);
	    }
	    break;

	  case 2: // Force value
	      // Lazily create the force mask/value on the first
	      // partial force; untouched bits stay unforced (mask 0).
	    if (force_mask_.size() == 0)
		  force_mask_ = vvp_vector2_t(vvp_vector2_t::FILL0, size());
	    if (force_.size() == 0)
		  force_ = vvp_vector4_t(vwid, BIT4_Z);

	    for (unsigned idx = 0 ; idx < wid ; idx += 1) {
		  force_mask_.set_bit(base+idx, 1);
		  force_.set_bit(base+idx, bit.value(idx));
	    }

	    calculate_output_(ptr);
	    break;

	  default:
	    fprintf(stderr, "Unsupported port type %d.\n", ptr.port());
	    assert(0);
	    break;
      }
}

/*
 * Send the current value out the output port. Bits covered by an
 * active force mask take the forced value instead of bits4_. Either
 * way, VPI callbacks get a chance to run.
 */
void vvp_fun_signal::calculate_output_(vvp_net_ptr_t ptr)
{
      if (force_mask_.size()) {
	    assert(bits4_.size() == force_mask_.size());
	    assert(bits4_.size() == force_.size());
	    vvp_vector4_t bits (bits4_);
	    for (unsigned idx = 0 ; idx < bits.size() ; idx += 1) {
		  if (force_mask_.value(idx))
			bits.set_bit(idx, force_.value(idx));
	    }
	    vvp_send_vec4(ptr.ptr()->out, bits);

      } else {
	    vvp_send_vec4(ptr.ptr()->out, bits4_);
      }

      run_vpi_callbacks();
}

void vvp_fun_signal::recv_vec8(vvp_net_ptr_t ptr,
vvp_vector8_t bit)
{
	// A plain (non-strength-aware) signal drops strength
	// information and handles the value as a vec4.
      recv_vec4(ptr, reduce4(bit));
}

/*
 * Remove the force on this signal. For a net (net==true) the signal
 * reverts to the driven value, which is re-propagated; for a reg the
 * forced value is simply left in place as the new variable value.
 */
void vvp_fun_signal::release(vvp_net_ptr_t ptr, bool net)
{
      force_mask_ = vvp_vector2_t();
      if (net) {
	    vvp_send_vec4(ptr.ptr()->out, bits4_);
	    run_vpi_callbacks();
      } else {
	    bits4_ = force_;
      }
}

unsigned vvp_fun_signal::size() const
{
      if (force_mask_.size())
	    return force_.size();
      else
	    return bits4_.size();
}

// Current value of one bit, honoring any active force.
vvp_bit4_t vvp_fun_signal::value(unsigned idx) const
{
      if (force_mask_.size() && force_mask_.value(idx))
	    return force_.value(idx);
      else
	    return bits4_.value(idx);
}

// Strength-aware view of one bit. Strength 6/6 is STRONG drive, per
// the strength table documented with the vvp_scalar_t methods.
vvp_scalar_t vvp_fun_signal::scalar_value(unsigned idx) const
{
      if (force_mask_.size() && force_mask_.value(idx))
	    return vvp_scalar_t(force_.value(idx), 6, 6);
      else
	    return vvp_scalar_t(bits4_.value(idx), 6, 6);
}

// Whole-vector value, with forced bits substituted in.
vvp_vector4_t vvp_fun_signal::vec4_value() const
{
      if (force_mask_.size()) {
	    assert(bits4_.size() == force_mask_.size());
	    assert(bits4_.size() == force_.size());
	    vvp_vector4_t bits (bits4_);
	    for (unsigned idx = 0 ; idx < bits.size() ; idx += 1) {
		  if (force_mask_.value(idx))
			bits.set_bit(idx, force_.value(idx));
	    }
	    return bits;

      } else {
	    return bits4_;
      }
}

vvp_fun_signal8::vvp_fun_signal8(unsigned wid)
: bits8_(wid)
{
}

// A strength-aware signal promotes incoming vec4 values to vector8.
void vvp_fun_signal8::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      recv_vec8(ptr, bit);
}

/*
 * Strength-aware counterpart of vvp_fun_signal::recv_vec4; the port
 * semantics (0=normal, 1=cassign, 2=force) are the same.
 */
void vvp_fun_signal8::recv_vec8(vvp_net_ptr_t ptr, vvp_vector8_t bit)
{
      switch (ptr.port()) {
	  case 0: // Normal input (feed from net, or set from process)
	    if (!continuous_assign_active_) {
		  if (needs_init_ || !bits8_.eeq(bit)) {
			bits8_ = bit;
			needs_init_ = false;
			calculate_output_(ptr);
		  }
	    }
	    break;

	  case 1: // Continuous assign value
	    continuous_assign_active_ = true;
	    bits8_ = bit;
	    calculate_output_(ptr);
	    break;

	  case 2: // Force value
	      // Force from a node may not have been sized completely
	      // by the source, so coerce the size here.
	    if (bit.size() != size())
		  force_ = coerce_to_width(bit, size());
	    else
		  force_ = bit;

	      // Mark every bit as forced.
	    force_mask_ = vvp_vector2_t(vvp_vector2_t::FILL1, size());
	    calculate_output_(ptr);
	    break;

	  default:
	    fprintf(stderr, "Unsupported port type %d.\n", ptr.port());
	    assert(0);
	    break;
      }
}

/*
 * Send the current strength-aware value out the output port, with any
 * forced bits substituted, then run VPI callbacks.
 */
void vvp_fun_signal8::calculate_output_(vvp_net_ptr_t ptr)
{
      if (force_mask_.size()) {
	    assert(bits8_.size() == force_mask_.size());
	    assert(bits8_.size() == force_.size());
	    vvp_vector8_t bits (bits8_);
	    for (unsigned idx = 0 ; idx < bits.size() ; idx += 1) {
		  if (force_mask_.value(idx))
			bits.set_bit(idx, force_.value(idx));
	    }
	    vvp_send_vec8(ptr.ptr()->out, bits);

      } else {
	    vvp_send_vec8(ptr.ptr()->out, bits8_);
      }

      run_vpi_callbacks();
}

/*
 * Remove the force. Net: revert to the driven value and re-propagate.
 * Reg: keep the forced value as the new variable value.
 */
void vvp_fun_signal8::release(vvp_net_ptr_t ptr, bool net)
{
      force_mask_ = vvp_vector2_t();
      if (net) {
	    vvp_send_vec8(ptr.ptr()->out, bits8_);
	    run_vpi_callbacks();
      } else {
	    bits8_ = force_;
      }
}

unsigned vvp_fun_signal8::size() const
{
      if (force_mask_.size())
	    return force_.size();
      else
	    return bits8_.size();
}

// 4-value view of one bit (strength dropped), honoring any force.
vvp_bit4_t vvp_fun_signal8::value(unsigned idx) const
{
      if (force_mask_.size() && force_mask_.value(idx))
	    return force_.value(idx).value();
      else
	    return bits8_.value(idx).value();
}

vvp_vector4_t vvp_fun_signal8::vec4_value() const
{
      if (force_mask_.size())
	    return reduce4(force_);
      else
	    return reduce4(bits8_);
}

// Strength-aware view of one bit, honoring any force.
vvp_scalar_t vvp_fun_signal8::scalar_value(unsigned idx) const
{
      if (force_mask_.size() && force_mask_.value(idx))
	    return force_.value(idx);
      else
	    return bits8_.value(idx);
}

vvp_fun_signal_real::vvp_fun_signal_real()
{
}

// Current real value, honoring any active force.
double vvp_fun_signal_real::real_value() const
{
      if (force_mask_.size())
	    return force_;
      else
	    return bits_;
}

/*
 * Testing for equality, we want a bitwise test instead of an
 * arithmetic test because we want to treat for example -0 different
 * from +0.
 */
bool bits_equal(double a, double b)
{
	// Bitwise compare of the double representations; distinguishes
	// -0.0 from +0.0 (see note above) and treats identical NaN bit
	// patterns as equal.
      return memcmp(&a, &b, sizeof a) == 0;
}

/*
 * Real-valued counterpart of vvp_fun_signal::recv_vec4; same port
 * semantics (0=normal, 1=cassign, 2=force).
 */
void vvp_fun_signal_real::recv_real(vvp_net_ptr_t ptr, double bit)
{
      switch (ptr.port()) {
	  case 0:
	    if (!continuous_assign_active_) {
		  if (needs_init_ || !bits_equal(bits_,bit)) {
			bits_ = bit;
			needs_init_ = false;
			vvp_send_real(ptr.ptr()->out, bit);
			run_vpi_callbacks();
		  }
	    }
	    break;

	  case 1: // Continuous assign value
	    continuous_assign_active_ = true;
	    bits_ = bit;
	    vvp_send_real(ptr.ptr()->out, bit);
	    run_vpi_callbacks();
	    break;

	  case 2: // Force value
	      // A real is a single "bit", so a 1-bit mask marks the
	      // whole value as forced.
	    force_mask_ = vvp_vector2_t(1, 1);
	    force_ = bit;
	    vvp_send_real(ptr.ptr()->out, bit);
	    run_vpi_callbacks();
	    break;

	  default:
	    fprintf(stderr, "Unsupported port type %d.\n", ptr.port());
	    assert(0);
	    break;
      }
}

/*
 * Remove the force. Net: revert to the driven value and re-propagate.
 * Reg: keep the forced value as the new variable value.
 */
void vvp_fun_signal_real::release(vvp_net_ptr_t ptr, bool net)
{
      force_mask_ = vvp_vector2_t();
      if (net) {
	    vvp_send_real(ptr.ptr()->out, bits_);
	    run_vpi_callbacks();
      } else {
	    bits_ = force_;
      }
}

/* **** vvp_wide_fun_* methods **** */

/*
 * The wide-functor core collects values from its input functors
 * (vvp_wide_fun_t, below) into per-port arrays that are allocated
 * lazily on first delivery.
 */
vvp_wide_fun_core::vvp_wide_fun_core(vvp_net_t*net, unsigned nports)
{
      ptr_ = net;
      nports_ = nports;
      port_values_ = 0;
      port_rvalues_ = 0;
}

vvp_wide_fun_core::~vvp_wide_fun_core()
{
      if (port_values_)
	    delete[]port_values_;
      if (port_rvalues_)
	    delete[]port_rvalues_;
}

/*
 * Propagate a result from the core, optionally scheduled after a
 * delay instead of being sent immediately.
 */
void vvp_wide_fun_core::propagate_vec4(const vvp_vector4_t&bit,
				       vvp_time64_t delay)
{
      if (delay)
	    schedule_assign_vector(ptr_->out, bit, delay);
      else
	    vvp_send_vec4(ptr_->out, bit);
}

unsigned vvp_wide_fun_core::port_count() const
{
      return nports_;
}

// Reference to the last vec4 delivered to port idx. The port array
// must exist, i.e. at least one vec4 must have been dispatched.
vvp_vector4_t& vvp_wide_fun_core::value(unsigned idx)
{
      assert(idx < nports_);
      assert(port_values_);
      return port_values_[idx];
}

// Last real value delivered to port idx. Real storage is lazily
// allocated, so report 0.0 if no real was ever dispatched.
double vvp_wide_fun_core::value_r(unsigned idx)
{
      assert(idx < nports_);
      return port_rvalues_?
port_rvalues_[idx] : 0.0;
}

// Default: this core does not accept real inputs. Cores that do are
// presumably expected to override this -- NOTE(review): confirm with
// the class declaration, which is not visible here.
void vvp_wide_fun_core::recv_real_from_inputs(unsigned p)
{
      assert(0);
}

/*
 * Called by an input functor to deliver a vec4 to logical port
 * "port". Stores the value (lazily allocating the array) and notifies
 * the derived class.
 */
void vvp_wide_fun_core::dispatch_vec4_from_input_(unsigned port,
						  vvp_vector4_t bit)
{
      assert(port < nports_);

      if (port_values_ == 0)
	    port_values_ = new vvp_vector4_t [nports_];

      port_values_[port] = bit;
      recv_vec4_from_inputs(port);
}

/*
 * Same as dispatch_vec4_from_input_, for real values.
 */
void vvp_wide_fun_core::dispatch_real_from_input_(unsigned port,
						  double bit)
{
      assert(port < nports_);

      if (port_rvalues_ == 0)
	    port_rvalues_ = new double[nports_];

      port_rvalues_[port] = bit;
      recv_real_from_inputs(port);
}

/*
 * An input functor forwards up to 4 ports to the shared core; base is
 * the index of this functor's first logical port within the core.
 */
vvp_wide_fun_t::vvp_wide_fun_t(vvp_wide_fun_core*c, unsigned base)
: core_(c), port_base_(base)
{
}

vvp_wide_fun_t::~vvp_wide_fun_t()
{
}

void vvp_wide_fun_t::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
{
      unsigned pidx = port_base_ + port.port();
      core_->dispatch_vec4_from_input_(pidx, bit);
}

void vvp_wide_fun_t::recv_real(vvp_net_ptr_t port, double bit)
{
      unsigned pidx = port_base_ + port.port();
      core_->dispatch_real_from_input_(pidx, bit);
}

/* **** vvp_scalar_t methods **** */

/*
 * DRIVE STRENGTHS:
 *
 * The normal functor is not aware of strengths. It generates strength
 * simply by virtue of having strength specifications. The drive
 * strength specification includes a drive0 and drive1 strength, each
 * with 8 possible values (that can be represented in 3 bits) as given
 * in this table:
 *
 *    HiZ    = 0,
 *    SMALL  = 1,
 *    MEDIUM = 2,
 *    WEAK   = 3,
 *    LARGE  = 4,
 *    PULL   = 5,
 *    STRONG = 6,
 *    SUPPLY = 7
 *
 * The vvp_scalar_t value, however, is a combination of value and
 * strength, used in strength-aware contexts.
 *
 * OUTPUT STRENGTHS:
 *
 * The strength-aware values are specified as an 8 bit value, that is
 * two 4 bit numbers. The value is encoded with two drive strengths (0-7)
 * and two drive values (0 or 1). Each nibble contains three bits of
 * strength and one bit of value, like so: VSSS. The high nibble has
 * the strength-value closest to supply1, and the low nibble has the
 * strength-value closest to supply0.
 */

/*
 * A signal value is unambiguous if the top 4 bits and the bottom 4
 * bits are identical. This means that the VSSSvsss bits of the 8bit
 * value have V==v and SSS==sss.
 */
# define UNAMBIG(v)  (((v) & 0x0f) == (((v) >> 4) & 0x0f))

#if 0
# define STREN1(v) ( ((v)&0x80)? ((v)&0xf0) : (0x70 - ((v)&0xf0)) )
# define STREN0(v) ( ((v)&0x08)? ((v)&0x0f) : (0x07 - ((v)&0x0f)) )
#else
	// Extract the 3-bit strength from the high (1-side) or low
	// (0-side) nibble.
# define STREN1(v) (((v)&0x70) >> 4)
# define STREN0(v) ((v)&0x07)
#endif

/*
 * Build a scalar from a 4-value bit and drive strengths, using the
 * VSSSvsss encoding described above. A zero/zero strength or a Z
 * value both canonicalize to 0x00 (HiZ).
 */
vvp_scalar_t::vvp_scalar_t(vvp_bit4_t val, unsigned str0, unsigned str1)
{
      assert(str0 <= 7);
      assert(str1 <= 7);

      if (str0 == 0 && str1 == 0) {
	    value_ = 0x00;
      } else switch (val) {
	  case BIT4_0:
	    value_ = str0 | (str0<<4);
	    break;
	  case BIT4_1:
	    value_ = str1 | (str1<<4) | 0x88;
	    break;
	  case BIT4_X:
	      // Ambiguous: 0-side strength with value 0, 1-side
	      // strength with value 1.
	    value_ = str0 | (str1<<4) | 0x80;
	    break;
	  case BIT4_Z:
	    value_ = 0x00;
	    break;
      }
}

/*
 * Recover the 4-value bit: both value bits clear is 0, both set is 1,
 * mixed is X, and the canonical 0x00 encoding is Z.
 */
vvp_bit4_t vvp_scalar_t::value() const
{
      if (value_ == 0) {
	    return BIT4_Z;
      } else switch (value_ & 0x88) {
	  case 0x00:
	    return BIT4_0;
	  case 0x88:
	    return BIT4_1;
	  default:
	    return BIT4_X;
      }
}

unsigned vvp_scalar_t::strength0() const
{
      return STREN0(value_);
}

unsigned vvp_scalar_t::strength1() const
{
      return STREN1(value_);
}

// Print as <str0><str1><value-char>, e.g. "660" for a strong 0.
ostream& operator <<(ostream&out, vvp_scalar_t a)
{
      out << a.strength0() << a.strength1();
      switch (a.value()) {
	  case BIT4_0:
	    out << "0";
	    break;
	  case BIT4_1:
	    out << "1";
	    break;
	  case BIT4_X:
	    out << "X";
	    break;
	  case BIT4_Z:
	    out << "Z";
	    break;
      }
      return out;
}

/*
 * Resolve two drivers of the same node into a single strength-aware
 * value, per the Verilog strength resolution rules.
 */
vvp_scalar_t resolve(vvp_scalar_t a, vvp_scalar_t b)
{
	// If the value is 0, that is the same as HiZ. In that case,
	// resolution is simply a matter of returning the *other* value.
      if (a.value_ == 0)
	    return b;
      if (b.value_ == 0)
	    return a;

      vvp_scalar_t res = a;

      if (UNAMBIG(a.value_) && UNAMBIG(b.value_)) {

	      /* If both signals are unambiguous, simply choose
		 the stronger. If they have the same strength
		 but different values, then this becomes
		 ambiguous. */

	    if (a.value_ == b.value_) {

		    /* values are equal. do nothing.
		     */

	    } else if ((b.value_&0x07) > (res.value_&0x07)) {

		    /* New value is stronger. Take it. */
		  res.value_ = b.value_;

	    } else if ((b.value_&0x77) == (res.value_&0x77)) {

		    /* Strengths are the same. Make value ambiguous. */
		  res.value_ = (res.value_&0x70) | (b.value_&0x07) | 0x80;

	    } else {

		    /* Must be res is the stronger one. */
	    }

      } else if (UNAMBIG(res.value_)) {

	      /* res is unambiguous, b is ambiguous: take the stronger
		 nibble on each side. */
	    unsigned tmp = 0;

	    if ((res.value_&0x70) > (b.value_&0x70))
		  tmp |= res.value_&0xf0;
	    else
		  tmp |= b.value_&0xf0;

	    if ((res.value_&0x07) > (b.value_&0x07))
		  tmp |= res.value_&0x0f;
	    else
		  tmp |= b.value_&0x0f;

	    res.value_ = tmp;

      } else if (UNAMBIG(b.value_)) {

	      /* If one of the signals is unambiguous, then it
		 will sweep up the weaker parts of the ambiguous
		 signal. The result may be ambiguous, or maybe not. */
	    unsigned tmp = 0;

	    if ((b.value_&0x70) > (res.value_&0x70))
		  tmp |= b.value_&0xf0;
	    else
		  tmp |= res.value_&0xf0;

	    if ((b.value_&0x07) > (res.value_&0x07))
		  tmp |= b.value_&0x0f;
	    else
		  tmp |= res.value_&0x0f;

	    res.value_ = tmp;

      } else {

	      /* If both signals are ambiguous, then the result
		 has an even wider ambiguity. */

	      // Map each side to a signed strength: positive for a
	      // 1-value nibble, negative for a 0-value nibble. Then
	      // the result spans from the maximum to the minimum of
	      // the four signed strengths.
	    unsigned tmp = 0;
	    int sv1a = a.value_&0x80 ? STREN1(a.value_) : - STREN1(a.value_);
	    int sv0a = a.value_&0x08 ? STREN0(a.value_) : - STREN0(a.value_);
	    int sv1b = b.value_&0x80 ? STREN1(b.value_) : - STREN1(b.value_);
	    int sv0b = b.value_&0x08 ? STREN0(b.value_) : - STREN0(b.value_);

	    int sv1 = sv1a;
	    int sv0 = sv0a;

	    if (sv0a > sv1)
		  sv1 = sv0a;
	    if (sv1b > sv1)
		  sv1 = sv1b;
	    if (sv0b > sv1)
		  sv1 = sv0b;

	    if (sv1a < sv0)
		  sv0 = sv1a;
	    if (sv1b < sv0)
		  sv0 = sv1b;
	    if (sv0b < sv0)
		  sv0 = sv0b;

	    if (sv1 > 0) {
		  tmp |= 0x80;
		  tmp |= sv1 << 4;
	    } else {
		    /* Set the MSB when both arguments MSBs are set. This
		       can only happen if both one strengths are zero. */
		  tmp |= (a.value_&b.value_)&0x80;
		  tmp |= (-sv1) << 4;
	    }

	    if (sv0 > 0) {
		  tmp |= 0x08;
		  tmp |= sv0;
	    } else {
		  tmp |= (-sv0);
	    }

	    res.value_ = tmp;
      }

	/* Canonicalize the HiZ value.
	 */
      if ((res.value_&0x77) == 0)
	    res.value_ = 0;

      return res;
}

/*
 * Resolve two strength-aware vectors bit by bit. The widths must
 * match.
 */
vvp_vector8_t resolve(const vvp_vector8_t&a, const vvp_vector8_t&b)
{
      assert(a.size() == b.size());
      vvp_vector8_t out (a.size());

      for (unsigned idx = 0 ; idx < out.size() ; idx += 1) {
	    out.set_bit(idx, resolve(a.value(idx), b.value(idx)));
      }

      return out;
}

/*
 * Model passage through a resistive device: each bit keeps its value
 * but its strengths are reduced per the table below.
 */
vvp_vector8_t resistive_reduction(const vvp_vector8_t&that)
{
      static unsigned rstr[8] = {
	    0, /* Hi-Z --> Hi-Z */
	    1, /* Small capacitance --> Small capacitance */
	    1, /* Medium capacitance --> Small capacitance */
	    2, /* Weak drive --> Medium capacitance */
	    2, /* Large capacitance --> Medium capacitance */
	    3, /* Pull drive --> Weak drive */
	    5, /* Strong drive --> Pull drive */
	    5  /* Supply drive --> Pull drive */
      };

      vvp_vector8_t res (that.size());

      for (unsigned idx = 0 ; idx < res.size() ; idx += 1) {
	    vvp_scalar_t bit = that.value(idx);
	    bit = vvp_scalar_t(bit.value(), rstr[bit.strength0()],
			       rstr[bit.strength1()]);
	    res.set_bit(idx, bit);
      }

      return res;
}

/*
 * Strip the strength information from a vector8, leaving the 4-value
 * bits.
 */
vvp_vector4_t reduce4(const vvp_vector8_t&that)
{
      vvp_vector4_t out (that.size());
      for (unsigned idx = 0 ; idx < out.size() ; idx += 1)
	    out.set_bit(idx, that.value(idx).value());

      return out;
}

/*
 * Unsigned magnitude compare of two (possibly different width)
 * vectors. Returns BIT4_1 if lef > rig, BIT4_0 if lef < rig, and
 * out_if_equal if they are equal (callers pass BIT4_1 for >= and
 * BIT4_0 for >).
 */
vvp_bit4_t compare_gtge(const vvp_vector4_t&lef, const vvp_vector4_t&rig,
			vvp_bit4_t out_if_equal)
{
      unsigned min_size = lef.size();
      if (rig.size() < min_size)
	    min_size = rig.size();

	// If one of the inputs is nil, treat it as all X values, and
	// that makes the result BIT4_X.
      if (min_size == 0)
	    return BIT4_X;

	// As per the IEEE1364 definition of >, >=, < and <=, if there
	// are any X or Z values in either of the operand vectors,
	// then the result of the compare is BIT4_X.
// Check for X/Z in the left operand for (unsigned idx = 0 ; idx < lef.size() ; idx += 1) { vvp_bit4_t bit = lef.value(idx); if (bit == BIT4_X) return BIT4_X; if (bit == BIT4_Z) return BIT4_X; } // Check for X/Z in the right operand for (unsigned idx = 0 ; idx < rig.size() ; idx += 1) { vvp_bit4_t bit = rig.value(idx); if (bit == BIT4_X) return BIT4_X; if (bit == BIT4_Z) return BIT4_X; } for (unsigned idx = lef.size() ; idx > rig.size() ; idx -= 1) { if (lef.value(idx-1) == BIT4_1) return BIT4_1; } for (unsigned idx = rig.size() ; idx > lef.size() ; idx -= 1) { if (rig.value(idx-1) == BIT4_1) return BIT4_0; } for (unsigned idx = min_size ; idx > 0 ; idx -= 1) { vvp_bit4_t lv = lef.value(idx-1); vvp_bit4_t rv = rig.value(idx-1); if (lv == rv) continue; if (lv == BIT4_1) return BIT4_1; else return BIT4_0; } return out_if_equal; } vvp_vector4_t operator ~ (const vvp_vector4_t&that) { vvp_vector4_t res (that.size()); for (unsigned idx = 0 ; idx < res.size() ; idx += 1) res.set_bit(idx, ~ that.value(idx)); return res; } vvp_bit4_t compare_gtge_signed(const vvp_vector4_t&a, const vvp_vector4_t&b, vvp_bit4_t out_if_equal) { assert(a.size() == b.size()); unsigned sign_idx = a.size()-1; vvp_bit4_t a_sign = a.value(sign_idx); vvp_bit4_t b_sign = b.value(sign_idx); if (a_sign == BIT4_X) return BIT4_X; if (a_sign == BIT4_Z) return BIT4_X; if (b_sign == BIT4_X) return BIT4_X; if (b_sign == BIT4_Z) return BIT4_Z; if (a_sign == b_sign) return compare_gtge(a, b, out_if_equal); for (unsigned idx = 0 ; idx < sign_idx ; idx += 1) { vvp_bit4_t a_bit = a.value(idx); vvp_bit4_t b_bit = a.value(idx); if (a_bit == BIT4_X) return BIT4_X; if (a_bit == BIT4_Z) return BIT4_X; if (b_bit == BIT4_X) return BIT4_X; if (b_bit == BIT4_Z) return BIT4_Z; } if(a_sign == BIT4_0) return BIT4_1; else return BIT4_0; }