Remove unneeded templates.

This commit is contained in:
steve 2000-03-13 00:02:34 +00:00
parent 01c5147079
commit 2e05f7f7ec
4 changed files with 475 additions and 524 deletions
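Summary of the change: the width-templated operator functions in vvm_func.h are replaced by ordinary out-of-line functions, collected in the new vvm_func.cc, that work through the vvm_bits_t abstract interface instead of instantiating one copy per bit width. A minimal sketch of the pattern, using vvm_unop_and from the diff below (and assuming, as the call sites require, that vvm_bitset_t&lt;WIDTH&gt; derives from vvm_bits_t):

// Before: the compiler emits one instantiation per distinct WIDTH.
template <unsigned WIDTH>
vvm_bitset_t<1> vvm_unop_and(const vvm_bitset_t<WIDTH>&r);

// After: a single compiled function; any width is examined at run
// time through the virtual get_width()/get_bit() interface.
extern vvm_bitset_t<1> vvm_unop_and(const vvm_bits_t&r);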

vvm/Makefile.in

@@ -18,7 +18,7 @@
# 59 Temple Place - Suite 330
# Boston, MA 02111-1307, USA
#
#ident "$Id: Makefile.in,v 1.19 2000/02/13 19:18:28 steve Exp $"
#ident "$Id: Makefile.in,v 1.20 2000/03/13 00:02:34 steve Exp $"
#
#
SHELL = /bin/sh
@@ -58,7 +58,7 @@ all: libvvm.a
$(CC) -Wall $(CPPFLAGS) $(CFLAGS) -MD -c $< -o $*.o
mv $*.d dep
O = vvm_bit.o vvm_calltf.o vvm_event.o vvm_gates.o vvm_mult.o \
O = vvm_bit.o vvm_calltf.o vvm_event.o vvm_func.o vvm_gates.o vvm_mult.o \
vvm_pevent.o vvm_thread.o vpip.o
P = vpi_callback.o \

vvm/vvm.h

@@ -19,7 +19,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#if !defined(WINNT) && !defined(macintosh)
#ident "$Id: vvm.h,v 1.31 2000/02/23 04:43:43 steve Exp $"
#ident "$Id: vvm.h,v 1.32 2000/03/13 00:02:34 steve Exp $"
#endif
# include <cassert>
@@ -276,6 +276,9 @@ class vvm_memory_t : public __vpiMemory {
/*
* $Log: vvm.h,v $
* Revision 1.32 2000/03/13 00:02:34 steve
* Remove unneeded templates.
*
* Revision 1.31 2000/02/23 04:43:43 steve
* Some compilers do not accept the not symbol.
*
@@ -284,101 +287,5 @@ class vvm_memory_t : public __vpiMemory {
*
* Revision 1.29 2000/01/08 03:09:14 steve
* Non-blocking memory writes.
*
* Revision 1.28 2000/01/06 05:56:02 steve
* Add memory address range check.
*
* Revision 1.27 1999/12/12 19:47:54 steve
* Remove the useless vvm_simulation class.
*
* Revision 1.26 1999/12/09 06:00:19 steve
* Fix const/non-const errors.
*
* Revision 1.25 1999/12/05 02:24:09 steve
* Synthesize LPM_RAM_DQ for writes into memories.
*
* Revision 1.24 1999/12/02 03:36:01 steve
* shiftl and shiftr take unsized second parameter.
*
* Revision 1.23 1999/11/22 00:30:52 steve
* Detemplate some and, or and nor methods.
*
* Revision 1.22 1999/11/21 00:13:09 steve
* Support memories in continuous assignments.
*
* Revision 1.21 1999/11/10 02:52:24 steve
* Create the vpiMemory handle type.
*
* Revision 1.20 1999/11/01 02:07:41 steve
* Add the synth functor to do generic synthesis
* and add the LPM_FF device to handle rows of
* flip-flops.
*
* Revision 1.19 1999/10/31 04:11:28 steve
* Add to netlist links pin name and instance number,
* and arrange in vvm for pin connections by name
* and instance number.
*
* Revision 1.18 1999/10/29 03:37:22 steve
* Support vpiValueChance callbacks.
*
* Revision 1.17 1999/10/28 21:36:00 steve
* Get rid of monitor_t and fold __vpiSignal into signal.
*
* Revision 1.16 1999/10/28 00:47:25 steve
* Rewrite vvm VPI support to make objects more
* persistent, rewrite the simulation scheduler
* in C (to interface with VPI) and add VPI support
* for callbacks.
*
* Revision 1.15 1999/10/13 03:15:51 steve
* Remove useless operator=.
*
* Revision 1.14 1999/10/06 01:28:18 steve
* The $finish task should work immediately.
*
* Revision 1.13 1999/10/05 04:02:10 steve
* Relaxed width handling for <= assignment.
*
* Revision 1.12 1999/09/29 18:36:04 steve
* Full case support
*
* Revision 1.11 1999/09/28 01:13:15 steve
* Support in vvm > and >= behavioral operators.
*
* Revision 1.10 1999/08/15 01:23:56 steve
* Convert vvm to implement system tasks with vpi.
*
* Revision 1.9 1999/06/21 01:02:34 steve
* Add init to vvm_signal_t.
*
* Revision 1.8 1999/06/07 03:40:22 steve
* Implement the < binary operator.
*
* Revision 1.7 1999/05/03 01:51:29 steve
* Restore support for wait event control.
*
* Revision 1.6 1999/04/22 04:56:58 steve
* Add to vvm proceedural memory references.
*
* Revision 1.5 1999/03/16 04:43:46 steve
* Add some logical operators.
*
* Revision 1.4 1999/02/08 03:55:55 steve
* Do not generate code for signals,
* instead use the NetESignal node to
* generate gate-like signal devices.
*
* Revision 1.3 1998/12/17 23:54:58 steve
* VVM support for small sequential UDP objects.
*
* Revision 1.2 1998/11/10 00:48:31 steve
* Add support it vvm target for level-sensitive
* triggers (i.e. the Verilog wait).
* Fix display of $time is format strings.
*
* Revision 1.1 1998/11/09 23:44:10 steve
* Add vvm library.
*
*/
#endif

vvm/vvm_func.cc (new file)

@@ -0,0 +1,446 @@
/*
* Copyright (c) 2000 Stephen Williams (steve@icarus.com)
*
* This source code is free software; you can redistribute it
* and/or modify it in source code form under the terms of the GNU
* General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#if !defined(WINNT) && !defined(macintosh)
#ident "$Id: vvm_func.cc,v 1.1 2000/03/13 00:02:34 steve Exp $"
#endif
# include "vvm_func.h"
vvm_bitset_t<1> vvm_unop_and(const vvm_bits_t&r)
{
vvm_bitset_t<1> res;
res[0] = r.get_bit(0);
for (unsigned idx = 1 ; idx < r.get_width() ; idx += 1)
res[0] = res[0] & r.get_bit(idx);
return res;
}
vvm_bitset_t<1> vvm_unop_nand(const vvm_bits_t&r)
{
vvm_bitset_t<1>res = vvm_unop_and(r);
res[0] = v_not(res[0]);
return res;
}
vvm_bitset_t<1> vvm_unop_or(const vvm_bits_t&r)
{
vvm_bitset_t<1> res;
res[0] = V1;
for (unsigned idx = 0 ; idx < r.get_width() ; idx += 1) {
if (r.get_bit(idx) == V1)
return res;
}
res[0] = V0;
return res;
}
vvm_bitset_t<1> vvm_unop_nor(const vvm_bits_t&r)
{
vvm_bitset_t<1>res = vvm_unop_or(r);
res[0] = v_not(res[0]);
return res;
}
vvm_bitset_t<1> vvm_unop_xor(const vvm_bits_t&r)
{
vvm_bitset_t<1> res;
res[0] = V0;
for (unsigned idx = 0 ; idx < r.get_width() ; idx += 1) {
if (r.get_bit(idx) == V1)
res[0] = v_not(res[0]);
}
return res;
}
vvm_bitset_t<1> vvm_unop_xnor(const vvm_bits_t&r)
{
vvm_bitset_t<1>res = vvm_unop_xor(r);
res[0] = v_not(res[0]);
return res;
}
vvm_bitset_t<1> vvm_binop_eq(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
const unsigned lwid = l.get_width();
const unsigned rwid = r.get_width();
if (lwid <= rwid) {
for (unsigned idx = 0 ; idx < lwid ; idx += 1) {
if ((l.get_bit(idx) == Vx) || (l.get_bit(idx) == Vz)) {
result[0] = Vx;
return result;
}
if ((r.get_bit(idx) == Vx) || (r.get_bit(idx) == Vz)) {
result[0] = Vx;
return result;
}
if (l.get_bit(idx) != r.get_bit(idx)) {
result[0] = V0;
return result;
}
}
for (unsigned idx = lwid ; idx < rwid ; idx += 1)
switch (r.get_bit(idx)) {
case V0:
break;
case V1:
result[0] = V0;
return result;
case Vx:
case Vz:
result[0] = Vx;
return result;
}
return result;
} else {
for (unsigned idx = 0 ; idx < rwid ; idx += 1) {
if ((l.get_bit(idx) == Vx) || (l.get_bit(idx) == Vz)) {
result[0] = Vx;
return result;
}
if ((r.get_bit(idx) == Vx) || (r.get_bit(idx) == Vz)) {
result[0] = Vx;
return result;
}
if (l.get_bit(idx) != r.get_bit(idx)) {
result[0] = V0;
return result;
}
}
for (unsigned idx = rwid ; idx < lwid ; idx += 1)
switch (l.get_bit(idx)) {
case V0:
break;
case V1:
result[0] = V0;
return result;
case Vx:
case Vz:
result[0] = Vx;
return result;
}
return result;
}
}
vvm_bitset_t<1> vvm_binop_ne(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result = vvm_binop_eq(l,r);
result[0] = v_not(result[0]);
return result;
}
vvm_bitset_t<1> vvm_binop_eeq(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
const unsigned lwid = l.get_width();
const unsigned rwid = r.get_width();
if (lwid <= rwid) {
for (unsigned idx = 0 ; idx < lwid ; idx += 1) {
if (l.get_bit(idx) != r.get_bit(idx)) {
result[0] = V0;
return result;
}
}
for (unsigned idx = lwid ; idx < rwid ; idx += 1)
if (r.get_bit(idx) != V0) {
result[0] = V0;
return result;
}
} else {
for (unsigned idx = 0 ; idx < rwid ; idx += 1) {
if (l.get_bit(idx) != r.get_bit(idx)) {
result[0] = V0;
return result;
}
}
for (unsigned idx = rwid ; idx < lwid ; idx += 1)
if (l.get_bit(idx) != V0) {
result[0] = V0;
return result;
}
}
return result;
}
vvm_bitset_t<1> vvm_binop_nee(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result = vvm_binop_eeq(l,r);
result[0] = v_not(result[0]);
return result;
}
vvm_bitset_t<1> vvm_binop_xeq(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
const unsigned lwid = l.get_width();
const unsigned rwid = r.get_width();
if (lwid <= rwid) {
for (unsigned idx = 0 ; idx < lwid ; idx += 1) {
if ((l.get_bit(idx) == Vz) || (r.get_bit(idx) == Vz))
continue;
if ((l.get_bit(idx) == Vx) || (r.get_bit(idx) == Vx))
continue;
if (l.get_bit(idx) != r.get_bit(idx)) {
result[0] = V0;
return result;
}
}
for (unsigned idx = lwid ; idx < rwid ; idx += 1) {
if ((r.get_bit(idx) == Vx) || (r.get_bit(idx) == Vz))
continue;
if (r.get_bit(idx) != V0) {
result[0] = V0;
return result;
}
}
} else {
for (unsigned idx = 0 ; idx < rwid ; idx += 1) {
if ((l.get_bit(idx) == Vz) || (r.get_bit(idx) == Vz))
continue;
if ((l.get_bit(idx) == Vx) || (r.get_bit(idx) == Vx))
continue;
if (l.get_bit(idx) != r.get_bit(idx)) {
result[0] = V0;
return result;
}
}
for (unsigned idx = rwid ; idx < lwid ; idx += 1) {
if ((l.get_bit(idx) == Vx) || (l.get_bit(idx) == Vz))
continue;
if (l.get_bit(idx) != V0) {
result[0] = V0;
return result;
}
}
}
return result;
}
vvm_bitset_t<1> vvm_binop_zeq(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
const unsigned lwid = l.get_width();
const unsigned rwid = r.get_width();
if (lwid <= rwid) {
for (unsigned idx = 0 ; idx < lwid ; idx += 1) {
if ((l.get_bit(idx) == Vz) || (r.get_bit(idx) == Vz))
continue;
if (l.get_bit(idx) != r.get_bit(idx)) {
result[0] = V0;
return result;
}
}
for (unsigned idx = lwid ; idx < rwid ; idx += 1) {
if (r.get_bit(idx) == Vz)
continue;
if (r.get_bit(idx) != V0) {
result[0] = V0;
return result;
}
}
} else {
for (unsigned idx = 0 ; idx < rwid ; idx += 1) {
if ((l.get_bit(idx) == Vz) || (r.get_bit(idx) == Vz))
continue;
if (l.get_bit(idx) != r.get_bit(idx)) {
result[0] = V0;
return result;
}
}
for (unsigned idx = rwid ; idx < lwid ; idx += 1) {
if (l.get_bit(idx) == Vz)
continue;
if (l.get_bit(idx) != V0) {
result[0] = V0;
return result;
}
}
}
return result;
}
vvm_bitset_t<1> vvm_binop_lt(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result;
result[0] = V0;
const unsigned lwid = l.get_width();
const unsigned rwid = r.get_width();
const unsigned common = (lwid < rwid)? lwid : rwid;
for (unsigned idx = 0 ; idx < common ; idx += 1)
result[0] = less_with_cascade(l.get_bit(idx),
r.get_bit(idx),
result[0]);
if (lwid > rwid) {
for (unsigned idx = rwid ; idx < lwid ; idx += 1)
result[0] = less_with_cascade(l.get_bit(idx),
V0,
result[0]);
} else {
for (unsigned idx = lwid ; idx < rwid ; idx += 1)
result[0] = less_with_cascade(V0,
r.get_bit(idx),
result[0]);
}
return result;
}
vvm_bitset_t<1> vvm_binop_le(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
const unsigned lwid = l.get_width();
const unsigned rwid = r.get_width();
const unsigned common = (lwid < rwid)? lwid : rwid;
for (unsigned idx = 0 ; idx < common ; idx += 1)
result[0] = less_with_cascade(l.get_bit(idx),
r.get_bit(idx),
result[0]);
if (lwid > rwid) {
for (unsigned idx = rwid ; idx < lwid ; idx += 1)
result[0] = less_with_cascade(l.get_bit(idx),
V0,
result[0]);
} else {
for (unsigned idx = lwid ; idx < rwid ; idx += 1)
result[0] = less_with_cascade(V0,
r.get_bit(idx),
result[0]);
}
return result;
}
vvm_bitset_t<1> vvm_binop_gt(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result;
result[0] = V0;
const unsigned lwid = l.get_width();
const unsigned rwid = r.get_width();
const unsigned common = (lwid < rwid)? lwid : rwid;
for (unsigned idx = 0 ; idx < common ; idx += 1)
result[0] = greater_with_cascade(l.get_bit(idx),
r.get_bit(idx),
result[0]);
if (lwid > rwid) {
for (unsigned idx = rwid ; idx < lwid ; idx += 1)
result[0] = greater_with_cascade(l.get_bit(idx),
V0,
result[0]);
} else {
for (unsigned idx = lwid ; idx < rwid ; idx += 1)
result[0] = greater_with_cascade(V0,
r.get_bit(idx),
result[0]);
}
return result;
}
vvm_bitset_t<1> vvm_binop_ge(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
const unsigned lwid = l.get_width();
const unsigned rwid = r.get_width();
const unsigned common = (lwid < rwid)? lwid : rwid;
for (unsigned idx = 0 ; idx < common ; idx += 1)
result[0] = greater_with_cascade(l.get_bit(idx),
r.get_bit(idx),
result[0]);
if (lwid > rwid) {
for (unsigned idx = rwid ; idx < lwid ; idx += 1)
result[0] = greater_with_cascade(l.get_bit(idx),
V0,
result[0]);
} else {
for (unsigned idx = lwid ; idx < rwid ; idx += 1)
result[0] = greater_with_cascade(V0,
r.get_bit(idx),
result[0]);
}
return result;
}
vvm_bitset_t<1> vvm_binop_land(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> res1 = vvm_unop_or(l);
vvm_bitset_t<1> res2 = vvm_unop_or(r);
res1[0] = res1[0] & res2[0];
return res1;
}
vvm_bitset_t<1> vvm_binop_lor(const vvm_bits_t&l, const vvm_bits_t&r)
{
vvm_bitset_t<1> res1 = vvm_unop_or(l);
vvm_bitset_t<1> res2 = vvm_unop_or(r);
res1[0] = res1[0] | res2[0];
return res1;
}
vvm_bitset_t<1> vvm_unop_lnot(const vvm_bits_t&r)
{
vvm_bitset_t<1> res = vvm_unop_or(r);
return vvm_unop_not(res);
}
/*
* $Log: vvm_func.cc,v $
* Revision 1.1 2000/03/13 00:02:34 steve
* Remove unneeded templates.
*
*/
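For orientation, a hypothetical call site for the new out-of-line functions; the widths and variable names are illustrative only, and vvm_bitset_t&lt;WIDTH&gt; is assumed to derive from vvm_bits_t so it binds to the reference parameters above:

vvm_bitset_t<4> a, b;                      // values filled in by generated code
vvm_bitset_t<1> r1 = vvm_unop_and(a);      // reduction AND of all four bits
vvm_bitset_t<1> r2 = vvm_binop_le(a, b);   // V1 when a <= b, otherwise V0
vvm_bitset_t<1> r3 = vvm_binop_eq(a, b);   // Vx when any bit of a or b is x/z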

vvm/vvm_func.h

@@ -19,7 +19,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#if !defined(WINNT) && !defined(macintosh)
#ident "$Id: vvm_func.h,v 1.20 2000/02/23 04:43:43 steve Exp $"
#ident "$Id: vvm_func.h,v 1.21 2000/03/13 00:02:34 steve Exp $"
#endif
# include "vvm.h"
@@ -48,72 +48,21 @@ vvm_bitset_t<WIDTH> vvm_unop_not(const vvm_bitset_t<WIDTH>&p)
/*
* The unary AND is the reduction AND. It returns a single bit.
*/
template <unsigned WIDTH>
vvm_bitset_t<1> vvm_unop_and(const vvm_bitset_t<WIDTH>&r)
{
vvm_bitset_t<1> res;
res[0] = r[0];
for (unsigned idx = 1 ; idx < WIDTH ; idx += 1) {
res[0] = res[0] & r[idx];
}
return res;
}
extern vvm_bitset_t<1> vvm_unop_and(const vvm_bits_t&r);
extern vvm_bitset_t<1> vvm_unop_nand(const vvm_bits_t&r);
/*
* The unary OR is the reduction OR. It returns a single bit.
*/
template <unsigned WIDTH>
vvm_bitset_t<1> vvm_unop_or(const vvm_bitset_t<WIDTH>&r)
{
vvm_bitset_t<1> res;
res[0] = V1;
for (unsigned idx = 0 ; idx < WIDTH ; idx += 1) {
if (r[idx] == V1)
return res;
}
res[0] = V0;
return res;
}
extern vvm_bitset_t<1> vvm_unop_or(const vvm_bits_t&r);
extern vvm_bitset_t<1> vvm_unop_nor(const vvm_bits_t&r);
template <unsigned WIDTH>
vvm_bitset_t<1> vvm_unop_nor(const vvm_bitset_t<WIDTH>&r)
{
vvm_bitset_t<1>res = vvm_unop_or(r);
return vvm_unop_not(res);
}
template <unsigned WIDTH>
vvm_bitset_t<1> vvm_unop_lnot(const vvm_bitset_t<WIDTH>&r)
{
vvm_bitset_t<1> res = vvm_unop_or(r);
return vvm_unop_not(res);
}
/*
* The unary XOR is the reduction XOR. It returns a single bit.
*/
template <unsigned WIDTH>
vvm_bitset_t<1> vvm_unop_xor(const vvm_bitset_t<WIDTH>&r)
{
vvm_bitset_t<1> res;
res[0] = V0;
for (unsigned idx = 0 ; idx < WIDTH ; idx += 1) {
if (r[idx] == V1)
res[0] = v_not(res[0]);
}
return res;
}
template <unsigned WIDTH>
vvm_bitset_t<1> vvm_unop_xnor(const vvm_bitset_t<WIDTH>&r)
{
return v_not(vvm_unop_xor(r));
}
extern vvm_bitset_t<1> vvm_unop_xor(const vvm_bits_t&r);
extern vvm_bitset_t<1> vvm_unop_xnor(const vvm_bits_t&r);
//
// simple-minded unary minus operator (two's complement)
@@ -290,347 +239,47 @@ vvm_bitset_t<WIDTH> vvm_binop_shiftr(const vvm_bitset_t<WIDTH>&l,
* extended with zeros. Also, if there is Vx or Vz anywhere in either
* vector, the result is Vx.
*/
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_eq(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
if (LW <= RW) {
for (unsigned idx = 0 ; idx < LW ; idx += 1) {
if ((l[idx] == Vx) || (l[idx] == Vz)) {
result[0] = Vx;
return result;
}
if ((r[idx] == Vx) || (r[idx] == Vz)) {
result[0] = Vx;
return result;
}
if (l[idx] != r[idx]) {
result[0] = V0;
return result;
}
}
for (unsigned idx = LW ; idx < RW ; idx += 1)
switch (r[idx]) {
case V0:
break;
case V1:
result[0] = V0;
return result;
case Vx:
case Vz:
result[0] = Vx;
return result;
}
return result;
} else {
for (unsigned idx = 0 ; idx < RW ; idx += 1) {
if ((l[idx] == Vx) || (l[idx] == Vz)) {
result[0] = Vx;
return result;
}
if ((r[idx] == Vx) || (r[idx] == Vz)) {
result[0] = Vx;
return result;
}
if (l[idx] != r[idx]) {
result[0] = V0;
return result;
}
}
for (unsigned idx = RW ; idx < LW ; idx += 1)
switch (l[idx]) {
case V0:
break;
case V1:
result[0] = V0;
return result;
case Vx:
case Vz:
result[0] = Vx;
return result;
}
return result;
}
}
extern vvm_bitset_t<1> vvm_binop_eq(const vvm_bits_t&l, const vvm_bits_t&r);
extern vvm_bitset_t<1> vvm_binop_ne(const vvm_bits_t&l, const vvm_bits_t&r);
/*
* This function returns true if all the bits are the same. Even x and
* z bits are compared for equality.
*/
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_eeq(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
if (LW <= RW) {
for (unsigned idx = 0 ; idx < LW ; idx += 1) {
if (l[idx] != r[idx]) {
result[0] = V0;
return result;
}
}
for (unsigned idx = LW ; idx < RW ; idx += 1)
if (r[idx] != V0) {
result[0] = V0;
return result;
}
} else {
for (unsigned idx = 0 ; idx < RW ; idx += 1) {
if (l[idx] != r[idx]) {
result[0] = V0;
return result;
}
}
for (unsigned idx = RW ; idx < LW ; idx += 1)
if (l[idx] != V0) {
result[0] = V0;
return result;
}
}
return result;
}
extern vvm_bitset_t<1> vvm_binop_eeq(const vvm_bits_t&l, const vvm_bits_t&r);
extern vvm_bitset_t<1> vvm_binop_nee(const vvm_bits_t&l, const vvm_bits_t&r);
/*
* This function returns true if all the bits are the same. The x and z
* bits are don't-care, so they don't make the result false.
*/
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_xeq(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
if (LW <= RW) {
for (unsigned idx = 0 ; idx < LW ; idx += 1) {
if ((l[idx] == Vz) || (r[idx] == Vz))
continue;
if ((l[idx] == Vx) || (r[idx] == Vx))
continue;
if (l[idx] != r[idx]) {
result[0] = V0;
return result;
}
}
for (unsigned idx = LW ; idx < RW ; idx += 1) {
if ((r[idx] == Vx) || (r[idx] == Vz))
continue;
if (r[idx] != V0) {
result[0] = V0;
return result;
}
}
} else {
for (unsigned idx = 0 ; idx < RW ; idx += 1) {
if ((l[idx] == Vz) || (r[idx] == Vz))
continue;
if ((l[idx] == Vx) || (r[idx] == Vx))
continue;
if (l[idx] != r[idx]) {
result[0] = V0;
return result;
}
}
for (unsigned idx = RW ; idx < LW ; idx += 1) {
if ((l[idx] == Vx) || (l[idx] == Vz))
continue;
if (l[idx] != V0) {
result[0] = V0;
return result;
}
}
}
return result;
}
extern vvm_bitset_t<1> vvm_binop_xeq(const vvm_bits_t&l, const vvm_bits_t&r);
/*
* This function returns true if all the bits are the same. The z
* bits are don't-care, so they don't make the result false.
*/
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_zeq(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
if (LW <= RW) {
for (unsigned idx = 0 ; idx < LW ; idx += 1) {
if ((l[idx] == Vz) || (r[idx] == Vz))
continue;
if (l[idx] != r[idx]) {
result[0] = V0;
return result;
}
}
for (unsigned idx = LW ; idx < RW ; idx += 1) {
if (r[idx] == Vz)
continue;
if (r[idx] != V0) {
result[0] = V0;
return result;
}
}
} else {
for (unsigned idx = 0 ; idx < RW ; idx += 1) {
if ((l[idx] == Vz) || (r[idx] == Vz))
continue;
if (l[idx] != r[idx]) {
result[0] = V0;
return result;
}
}
for (unsigned idx = RW ; idx < LW ; idx += 1) {
if (l[idx] == Vz)
continue;
if (l[idx] != V0) {
result[0] = V0;
return result;
}
}
}
return result;
}
extern vvm_bitset_t<1> vvm_binop_zeq(const vvm_bits_t&l, const vvm_bits_t&r);
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_ne(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result = vvm_binop_eq(l,r);
result[0] = v_not(result[0]);
return result;
}
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_nee(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result = vvm_binop_eeq(l,r);
result[0] = v_not(result[0]);
return result;
}
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_lt(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result;
result[0] = V0;
const unsigned common = (LW < RW)? LW : RW;
for (unsigned idx = 0 ; idx < common ; idx += 1)
result[0] = less_with_cascade(l[idx], r[idx], result[0]);
if (LW > RW) {
for (unsigned idx = RW ; idx < LW ; idx += 1)
result[0] = less_with_cascade(l[idx], V0, result[0]);
} else {
for (unsigned idx = LW ; idx < RW ; idx += 1)
result[0] = less_with_cascade(V0, r[idx], result[0]);
}
return result;
}
extern vvm_bitset_t<1> vvm_binop_lt(const vvm_bits_t&l, const vvm_bits_t&r);
/*
* The <= operator takes operands of natural width and returns a
* single bit. The result is V1 if l <= r, otherwise V0.
*/
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_le(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
const unsigned common = (LW < RW)? LW : RW;
for (unsigned idx = 0 ; idx < common ; idx += 1)
result[0] = less_with_cascade(l[idx], r[idx], result[0]);
if (LW > RW) {
for (unsigned idx = RW ; idx < LW ; idx += 1)
result[0] = less_with_cascade(l[idx], V0, result[0]);
} else {
for (unsigned idx = LW ; idx < RW ; idx += 1)
result[0] = less_with_cascade(V0, r[idx], result[0]);
}
return result;
}
extern vvm_bitset_t<1> vvm_binop_le(const vvm_bits_t&l, const vvm_bits_t&r);
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_gt(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result;
result[0] = V0;
const unsigned common = (LW < RW)? LW : RW;
for (unsigned idx = 0 ; idx < common ; idx += 1)
result[0] = greater_with_cascade(l[idx], r[idx], result[0]);
if (LW > RW) {
for (unsigned idx = RW ; idx < LW ; idx += 1)
result[0] = greater_with_cascade(l[idx], V0, result[0]);
} else {
for (unsigned idx = LW ; idx < RW ; idx += 1)
result[0] = greater_with_cascade(V0, r[idx], result[0]);
}
return result;
}
extern vvm_bitset_t<1> vvm_binop_gt(const vvm_bits_t&l, const vvm_bits_t&r);
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_ge(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> result;
result[0] = V1;
const unsigned common = (LW < RW)? LW : RW;
for (unsigned idx = 0 ; idx < common ; idx += 1)
result[0] = greater_with_cascade(l[idx], r[idx], result[0]);
if (LW > RW) {
for (unsigned idx = RW ; idx < LW ; idx += 1)
result[0] = greater_with_cascade(l[idx], V0, result[0]);
} else {
for (unsigned idx = LW ; idx < RW ; idx += 1)
result[0] = greater_with_cascade(V0, r[idx], result[0]);
}
return result;
}
extern vvm_bitset_t<1> vvm_binop_ge(const vvm_bits_t&l, const vvm_bits_t&r);
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_land(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> res1 = vvm_unop_or(l);
vvm_bitset_t<1> res2 = vvm_unop_or(r);
res1[0] = res1[0] & res2[0];
return res1;
}
extern vvm_bitset_t<1> vvm_binop_land(const vvm_bits_t&l, const vvm_bits_t&r);
template <unsigned LW, unsigned RW>
vvm_bitset_t<1> vvm_binop_lor(const vvm_bitset_t<LW>&l,
const vvm_bitset_t<RW>&r)
{
vvm_bitset_t<1> res1 = vvm_unop_or(l);
vvm_bitset_t<1> res2 = vvm_unop_or(r);
res1[0] = res1[0] | res2[0];
return res1;
}
extern vvm_bitset_t<1> vvm_binop_lor(const vvm_bits_t&l, const vvm_bits_t&r);
extern vvm_bitset_t<1> vvm_unop_lnot(const vvm_bits_t&r);
template <unsigned W>
vvm_bitset_t<W> vvm_ternary(vpip_bit_t c, const vvm_bitset_t<W>&t,
@@ -648,6 +297,9 @@ vvm_bitset_t<W> vvm_ternary(vpip_bit_t c, const vvm_bitset_t<W>&t,
/*
* $Log: vvm_func.h,v $
* Revision 1.21 2000/03/13 00:02:34 steve
* Remove unneeded templates.
*
* Revision 1.20 2000/02/23 04:43:43 steve
* Some compilers do not accept the not symbol.
*
@@ -662,59 +314,5 @@ vvm_bitset_t<W> vvm_ternary(vpip_bit_t c, const vvm_bitset_t<W>&t,
*
* Revision 1.16 1999/12/02 03:36:01 steve
* shiftl and shiftr take unsized second parameter.
*
* Revision 1.15 1999/10/28 00:47:25 steve
* Rewrite vvm VPI support to make objects more
* persistent, rewrite the simulation scheduler
* in C (to interface with VPI) and add VPI support
* for callbacks.
*
* Revision 1.14 1999/10/05 06:19:47 steve
* Add support for reduction NOR.
*
* Revision 1.13 1999/10/01 15:26:29 steve
* Add some vvm operators from Eric Aardoom.
*
* Revision 1.12 1999/09/29 22:57:26 steve
* LT supports different width objects.
*
* Revision 1.11 1999/09/29 18:36:04 steve
* Full case support
*
* Revision 1.10 1999/09/28 01:13:16 steve
* Support in vvm > and >= behavioral operators.
*
* Revision 1.9 1999/09/23 04:39:52 steve
* The <= operator takes different width operands.
*
* Revision 1.8 1999/09/11 04:43:17 steve
* Support ternary and <= operators in vvm.
*
* Revision 1.7 1999/06/24 04:20:47 steve
* Add !== and === operators.
*
* Revision 1.6 1999/06/07 03:40:22 steve
* Implement the < binary operator.
*
* Revision 1.5 1999/06/07 02:23:31 steve
* Support non-blocking assignment down to vvm.
*
* Revision 1.4 1999/05/01 20:43:55 steve
* Handle wide events, such as @(a) where a has
* many bits in it.
*
* Add to vvm the binary ^ and unary & operators.
*
* Dump events a bit more completely.
*
* Revision 1.3 1999/03/16 04:43:46 steve
* Add some logical operators.
*
* Revision 1.2 1999/03/15 02:42:44 steve
* Add the AND and OR bitwise operators.
*
* Revision 1.1 1998/11/09 23:44:11 steve
* Add vvm library.
*
*/
#endif
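As a worked illustration of how the four equality flavors declared above differ, take hypothetical two-bit operands l = {V1, Vx} and r = {V1, V0}; the results follow directly from the vvm_func.cc bodies in this commit:

// vvm_binop_eq(l, r)  -> Vx  (an x or z bit makes logical == unknown)
// vvm_binop_eeq(l, r) -> V0  (=== compares x and z literally; Vx != V0)
// vvm_binop_xeq(l, r) -> V1  (casex match: x bits are don't-care)
// vvm_binop_zeq(l, r) -> V0  (casez match: only z bits are don't-care)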