iverilog/vvp/arith.cc

855 lines
19 KiB
C++
Raw Normal View History

2001-06-05 05:05:41 +02:00
/*
* Copyright (c) 2001-2005 Stephen Williams (steve@icarus.com)
2001-06-05 05:05:41 +02:00
*
* This source code is free software; you can redistribute it
* and/or modify it in source code form under the terms of the GNU
* General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#ifdef HAVE_CVS_IDENT
2007-01-20 03:09:54 +01:00
#ident "$Id: arith.cc,v 1.50 2007/01/20 02:09:54 steve Exp $"
2001-06-05 05:05:41 +02:00
#endif
# include "arith.h"
# include "schedule.h"
# include <limits.h>
2005-09-16 00:54:04 +02:00
# include <iostream>
2001-06-05 05:05:41 +02:00
# include <assert.h>
2001-11-04 06:03:21 +01:00
# include <stdlib.h>
#ifdef HAVE_MALLOC_H
# include <malloc.h>
2001-11-04 06:03:21 +01:00
#endif
2001-06-05 05:05:41 +02:00
/*
 * Common base for the two-operand arithmetic functors. The output is
 * wid bits wide, and x_val_ caches an all-X vector of that width that
 * the derived nodes emit whenever a result cannot be computed.
 */
vvp_arith_::vvp_arith_(unsigned wid)
: wid_(wid), x_val_(wid)
{
      for (unsigned bdx = 0 ; bdx < wid ; bdx += 1)
	    x_val_.set_bit(bdx, BIT4_X);

	// Until real values arrive, both operands are fully undefined.
      op_a_ = x_val_;
      op_b_ = x_val_;
}

/*
 * Latch an incoming vector into the operand selected by the port the
 * value arrived on: port 0 is the A operand, port 1 is B.
 */
void vvp_arith_::dispatch_operand_(vvp_net_ptr_t ptr, vvp_vector4_t bit)
{
      switch (ptr.port()) {
	  case 0:
	    op_a_ = bit;
	    break;
	  case 1:
	    op_b_ = bit;
	    break;
	  default:
	    assert(0);
      }
}
2001-10-16 04:47:37 +02:00
// Division
2001-10-16 04:47:37 +02:00
2005-02-19 02:32:52 +01:00
// Arithmetic divide node; signed_flag_ selects signed vs. unsigned divide.
vvp_arith_div::vvp_arith_div(unsigned wid, bool signed_flag)
: vvp_arith_(wid), signed_flag_(signed_flag)
{
}
// Nothing of its own to release; operands are value members of the base.
vvp_arith_div::~vvp_arith_div()
{
}
2006-01-03 07:19:31 +01:00
/*
 * Divide for vectors wider than an unsigned long: convert the operands
 * to vvp_vector2_t and use its divide. If either operand contains X/Z
 * bits the conversion yields NaN and the result is all-X.
 *
 * NOTE(review): this path applies no signed_flag_ adjustment, and
 * relies on vvp_vector2_t::operator/ for the divide-by-zero case --
 * confirm both against the vector2 implementation.
 */
void vvp_arith_div::wide4_(vvp_net_ptr_t ptr)
{
      vvp_vector2_t a2 (op_a_);
      vvp_vector2_t b2 (op_b_);

      if (a2.is_NaN() || b2.is_NaN()) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

      vvp_vector2_t res2 = a2 / b2;
      vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res2, wid_));
}
2001-10-16 04:47:37 +02:00
/*
 * Receive an operand on port 0 (A) or port 1 (B) and emit the quotient
 * A/B. Operands with X/Z bits, or a zero divisor, produce an all-X
 * result of the node width.
 */
void vvp_arith_div::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      dispatch_operand_(ptr, bit);

	// Results wider than an unsigned long go through the
	// vector2-based wide path.
      if (wid_ > 8 * sizeof(unsigned long)) {
	    wide4_(ptr);
	    return ;
      }

      unsigned long a;
      if (! vector4_to_value(op_a_, a)) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

      unsigned long b;
      if (! vector4_to_value(op_b_, b)) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

      bool negate = false;
	/* If we are doing signed divide, then take the sign out of
	   the operands for now, and remember to put the sign back
	   later. */
      if (signed_flag_) {
	    if (op_a_.value(op_a_.size()-1)) {
		    // NOTE(review): a shift of op_a_.size() ==
		    // 8*sizeof(unsigned long) here is undefined --
		    // confirm operand widths are always narrower.
		  a = (-a) & ~ (-1UL << op_a_.size());
		  negate = !negate;
	    }
	    if (op_b_.value(op_b_.size()-1)) {
		  b = (-b) & ~ (-1UL << op_b_.size());
		  negate = ! negate;
	    }
      }

	/* Division by zero yields an all-X result, the same as the
	   modulus node. Without this guard the a/b below traps
	   (SIGFPE) on most hosts. */
      if (b == 0) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

      unsigned long val = a / b;
      if (negate)
	    val = -val;

      assert(wid_ <= 8*sizeof(val));

	// Unpack the result, LSB first, into a vec4 of the node width.
      vvp_vector4_t vval (wid_);
      for (unsigned idx = 0 ; idx < wid_ ; idx += 1) {
	    if (val & 1)
		  vval.set_bit(idx, BIT4_1);
	    else
		  vval.set_bit(idx, BIT4_0);

	    val >>= 1;
      }

      vvp_send_vec4(ptr.ptr()->out, vval);
}
2005-03-12 07:42:28 +01:00
// Arithmetic modulus node; sf selects signed vs. unsigned operands.
vvp_arith_mod::vvp_arith_mod(unsigned wid, bool sf)
: vvp_arith_(wid), signed_flag_(sf)
{
}

// Nothing of its own to release.
vvp_arith_mod::~vvp_arith_mod()
{
}
2005-03-12 07:42:28 +01:00
/*
 * Modulus for operands wider than an unsigned long, via vvp_vector2_t.
 * X/Z bits in either operand make the conversion NaN and force an
 * all-X result.
 */
void vvp_arith_mod::wide_(vvp_net_ptr_t ptr)
{
      vvp_vector2_t a2 (op_a_);
      if (a2.is_NaN()) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

      vvp_vector2_t b2 (op_b_);
      if (b2.is_NaN()) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

      vvp_vector2_t res = a2 % b2;
	/* Emit the result at the node width (wid_), consistent with
	   every other output path of this node and with the divide
	   node's wide path. res.size() tracks the operand width, which
	   need not match the node width. */
      vvp_send_vec4(ptr.ptr()->out, vector2_to_vector4(res, wid_));
}
2004-06-30 04:15:57 +02:00
/*
 * Receive an operand on port 0 (A) or port 1 (B) and emit A % B.
 * Operands with X/Z bits, or a zero divisor, produce an all-X result.
 */
void vvp_arith_mod::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      dispatch_operand_(ptr, bit);

	// Wide results go through the vector2-based path.
      if (wid_ > 8 * sizeof(unsigned long)) {
	    wide_(ptr);
	    return ;
      }

	// Either operand failing to convert (X/Z bits) forces all-X.
      unsigned long a, b;
      if (! vector4_to_value(op_a_, a) || ! vector4_to_value(op_b_, b)) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

      bool negate = false;
	/* If we are doing signed divide, then take the sign out of
	   the operands for now, and remember to put the sign back
	   later. */
      if (signed_flag_) {
	    if (op_a_.value(op_a_.size()-1)) {
		    // NOTE(review): shift of op_a_.size() ==
		    // 8*sizeof(unsigned long) would be undefined here.
		  a = (-a) & ~ (-1UL << op_a_.size());
		  negate = !negate;
	    }
	    if (op_b_.value(op_b_.size()-1)) {
		  b = (-b) & ~ (-1UL << op_b_.size());
		  negate = ! negate;
	    }
      }

	// x % 0 is undefined; emit the cached all-X vector.
      if (b == 0) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

      unsigned long val = a % b;
      if (negate)
	    val = -val;

      assert(wid_ <= 8*sizeof(val));

	// Unpack the result, LSB first.
      vvp_vector4_t vval (wid_);
      for (unsigned idx = 0 ; idx < wid_ ; idx += 1) {
	    vval.set_bit(idx, (val&1)? BIT4_1 : BIT4_0);
	    val >>= 1;
      }

      vvp_send_vec4(ptr.ptr()->out, vval);
}
2005-03-12 07:42:28 +01:00
// Multiplication
// Multiplier node.
vvp_arith_mult::vvp_arith_mult(unsigned wid)
: vvp_arith_(wid)
{
}
// Nothing of its own to release.
vvp_arith_mult::~vvp_arith_mult()
{
}
/*
 * Multiply for vectors wider than an unsigned long: do the work in
 * vvp_vector2_t. X/Z bits in either operand make the conversion NaN
 * and force an all-X result.
 */
void vvp_arith_mult::wide_(vvp_net_ptr_t ptr)
{
      vvp_vector2_t a2 (op_a_);
      vvp_vector2_t b2 (op_b_);

      if (a2.is_NaN() || b2.is_NaN()) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

	// Truncate/pad the product to the node width for output.
      vvp_vector2_t result = a2 * b2;
      vvp_vector4_t res4 = vector2_to_vector4(result, wid_);
      vvp_send_vec4(ptr.ptr()->out, res4);
}
/*
 * Receive an operand on port 0 (A) or port 1 (B) and emit the product
 * A*B, truncated to the node width. Operands with X/Z bits produce an
 * all-X result.
 */
void vvp_arith_mult::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      dispatch_operand_(ptr, bit);

	// Wide products are handled by the vector2-based path.
      if (wid_ > 8 * sizeof(unsigned long)) {
	    wide_(ptr);
	    return ;
      }

	// Either operand failing to convert (X/Z bits) forces all-X.
      unsigned long a, b;
      if (! vector4_to_value(op_a_, a) || ! vector4_to_value(op_b_, b)) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

	// Unsigned multiply wraps; the result is truncated to wid_
	// bits below anyhow.
      unsigned long val = a * b;
      assert(wid_ <= 8*sizeof(val));

      vvp_vector4_t vval (wid_);
      for (unsigned idx = 0 ; idx < wid_ ; idx += 1) {
	    vval.set_bit(idx, (val&1)? BIT4_1 : BIT4_0);
	    val >>= 1;
      }

      vvp_send_vec4(ptr.ptr()->out, vval);
}
/*
 * NOTE(review): the two routines below are remnants of the older
 * vvp_ipoint_t/functor-table implementation of the multiplier. They
 * are compiled out with #if 0 and kept only for reference.
 */
#if 0
void vvp_arith_mult::set(vvp_ipoint_t i, bool push, unsigned val, unsigned)
{
put(i, val);
vvp_ipoint_t base = ipoint_make(i,0);
if(wid_ > 8*sizeof(unsigned long)) {
wide(base, push);
return;
}
unsigned long a = 0, b = 0;
for (unsigned idx = 0 ; idx < wid_ ; idx += 1) {
vvp_ipoint_t ptr = ipoint_index(base,idx);
functor_t obj = functor_index(ptr);
unsigned val = obj->ival;
if (val & 0xaa) {
output_x_(base, push);
return;
}
if (val & 0x01)
a += 1UL << idx;
if (val & 0x04)
b += 1UL << idx;
}
output_val_(base, push, a*b);
}
#endif
#if 0
void vvp_arith_mult::wide(vvp_ipoint_t base, bool push)
{
unsigned char *a, *b, *sum;
a = new unsigned char[wid_];
b = new unsigned char[wid_];
sum = new unsigned char[wid_];
unsigned mxa = 0;
unsigned mxb = 0;
for (unsigned idx = 0 ; idx < wid_ ; idx += 1) {
vvp_ipoint_t ptr = ipoint_index(base, idx);
functor_t obj = functor_index(ptr);
unsigned ival = obj->ival;
if (ival & 0xaa) {
output_x_(base, push);
delete[]sum;
delete[]b;
delete[]a;
return;
}
if((a[idx] = ((ival & 0x01) != 0))) mxa=idx+1;
if((b[idx] = ((ival & 0x04) != 0))) mxb=idx;
sum[idx] = 0;
}
/* do the a*b multiply using the long method we learned in
grade school. We know at this point that there are no X or
Z values in the a or b vectors. */
for(unsigned i=0 ; i<=mxb ; i += 1) {
if(b[i]) {
unsigned char carry=0;
unsigned char temp;
for(unsigned j=0 ; j<=mxa ; j += 1) {
if((i+j) >= wid_)
break;
temp=sum[i+j] + a[j] + carry;
sum[i+j]=(temp&1);
carry=(temp>>1);
}
}
}
for (unsigned idx = 0 ; idx < wid_ ; idx += 1) {
vvp_ipoint_t ptr = ipoint_index(base,idx);
functor_t obj = functor_index(ptr);
unsigned val = sum[idx];
obj->put_oval(val, push);
}
delete[]sum;
delete[]b;
delete[]a;
}
#endif
// Addition
// Adder node.
vvp_arith_sum::vvp_arith_sum(unsigned wid)
: vvp_arith_(wid)
{
}
// Nothing of its own to release.
vvp_arith_sum::~vvp_arith_sum()
{
}
2001-06-07 05:09:03 +02:00
/*
 * Receive an operand and emit A+B, computed bit-serially with a ripple
 * carry. Short operands are zero-extended to the output width. Any X
 * sum bit makes the entire result all-X.
 */
void vvp_arith_sum::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      dispatch_operand_(ptr, bit);

      vvp_net_t*net = ptr.ptr();
      vvp_vector4_t value (wid_);

	/* Pad input vectors with this value to widen to the desired
	   output width; the initial carry in is zero. */
      const vvp_bit4_t pad = BIT4_0;
      vvp_bit4_t carry = BIT4_0;

      for (unsigned idx = 0 ; idx < wid_ ; idx += 1) {
	    vvp_bit4_t a = (idx < op_a_.size())? op_a_.value(idx) : pad;
	    vvp_bit4_t b = (idx < op_b_.size())? op_b_.value(idx) : pad;
	    vvp_bit4_t cur = add_with_carry(a, b, carry);

	      // One X bit poisons the whole sum.
	    if (cur == BIT4_X) {
		  vvp_send_vec4(net->out, x_val_);
		  return;
	    }

	    value.set_bit(idx, cur);
      }

      vvp_send_vec4(net->out, value);
}
// Subtractor node.
vvp_arith_sub::vvp_arith_sub(unsigned wid)
: vvp_arith_(wid)
{
}
// Nothing of its own to release.
vvp_arith_sub::~vvp_arith_sub()
{
}
2001-06-07 05:09:03 +02:00
/*
 * Subtraction works by adding the 2s complement of the B input to the
 * A input. The 2s complement is the 1s complement plus one, so we add
 * the bitwise-inverted B bits and start with a carry in of 1.
 *
 * NOTE(review): both operands are padded with BIT4_1 when shorter than
 * the output width. That is the correct inverted zero-extension for B,
 * but for A it looks like sign-magic worth confirming against the
 * compiler's operand widths.
 */
void vvp_arith_sub::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      dispatch_operand_(ptr, bit);

      vvp_net_t*net = ptr.ptr();
      vvp_vector4_t value (wid_);

      const vvp_bit4_t pad = BIT4_1;
      vvp_bit4_t carry = BIT4_1;

      for (unsigned idx = 0 ; idx < wid_ ; idx += 1) {
	    vvp_bit4_t a = (idx < op_a_.size())? op_a_.value(idx) : pad;
	    vvp_bit4_t b = (idx < op_b_.size())? ~op_b_.value(idx) : pad;
	    vvp_bit4_t cur = add_with_carry(a, b, carry);

	      // One X bit poisons the whole difference.
	    if (cur == BIT4_X) {
		  vvp_send_vec4(net->out, x_val_);
		  return;
	    }

	    value.set_bit(idx, cur);
      }

      vvp_send_vec4(net->out, value);
}
2005-01-22 02:06:20 +01:00
// Case-equality (===) comparator.
vvp_cmp_eeq::vvp_cmp_eeq(unsigned wid)
: vvp_arith_(wid)
{
}

/*
 * Case equality compares X and Z bits literally, so the output is 1
 * exactly when every bit position matches, 0 otherwise -- never X.
 */
void vvp_cmp_eeq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      dispatch_operand_(ptr, bit);

      assert(op_a_.size() == op_b_.size());

      vvp_bit4_t out = BIT4_1;
      for (unsigned idx = 0 ; idx < op_a_.size() ; idx += 1)
	    if (op_a_.value(idx) != op_b_.value(idx)) {
		  out = BIT4_0;
		  break;
	    }

      vvp_vector4_t eeq (1);
      eeq.set_bit(0, out);

      vvp_net_t*net = ptr.ptr();
      vvp_send_vec4(net->out, eeq);
}
// Case-inequality (!==) comparator.
vvp_cmp_nee::vvp_cmp_nee(unsigned wid)
: vvp_arith_(wid)
{
}

/*
 * Case inequality compares X and Z bits literally, so the output is 1
 * exactly when some bit position differs, 0 otherwise -- never X.
 */
void vvp_cmp_nee::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      dispatch_operand_(ptr, bit);

      assert(op_a_.size() == op_b_.size());

      vvp_bit4_t out = BIT4_0;
      for (unsigned idx = 0 ; idx < op_a_.size() ; idx += 1)
	    if (op_a_.value(idx) != op_b_.value(idx)) {
		  out = BIT4_1;
		  break;
	    }

      vvp_vector4_t eeq (1);
      eeq.set_bit(0, out);

      vvp_net_t*net = ptr.ptr();
      vvp_send_vec4(net->out, eeq);
}
2004-06-16 18:33:25 +02:00
vvp_cmp_eq::vvp_cmp_eq(unsigned wid)
: vvp_arith_(wid)
{
}
2005-01-22 17:21:11 +01:00
/*
* Compare Vector a and Vector b. If in any bit position the a and b
* bits are known and different, then the result is 0. Otherwise, if
* there are X/Z bits anywhere in A or B, the result is X. Finally,
* the result is 1.
*/
void vvp_cmp_eq::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
2004-06-16 18:33:25 +02:00
{
2005-01-22 17:21:11 +01:00
dispatch_operand_(ptr, bit);
2004-06-16 18:33:25 +02:00
2007-01-20 03:09:54 +01:00
if (op_a_.size() != op_b_.size()) {
cerr << "COMPARISON size mismatch. "
<< "a=" << op_a_ << ", b=" << op_b_ << endl;
assert(0);
}
2004-06-16 18:33:25 +02:00
2005-01-22 17:21:11 +01:00
vvp_vector4_t res (1);
res.set_bit(0, BIT4_1);
for (unsigned idx = 0 ; idx < op_a_.size() ; idx += 1) {
vvp_bit4_t a = op_a_.value(idx);
vvp_bit4_t b = op_b_.value(idx);
if (a == BIT4_X)
res.set_bit(0, BIT4_X);
else if (a == BIT4_Z)
res.set_bit(0, BIT4_X);
else if (b == BIT4_X)
res.set_bit(0, BIT4_X);
else if (b == BIT4_Z)
res.set_bit(0, BIT4_X);
else if (a != b) {
res.set_bit(0, BIT4_0);
2004-06-16 18:33:25 +02:00
break;
}
}
2005-01-22 17:21:11 +01:00
vvp_net_t*net = ptr.ptr();
vvp_send_vec4(net->out, res);
2004-06-16 18:33:25 +02:00
}
2005-01-22 17:21:11 +01:00
2004-06-16 18:33:25 +02:00
vvp_cmp_ne::vvp_cmp_ne(unsigned wid)
: vvp_arith_(wid)
{
}
2005-01-22 17:21:11 +01:00
/*
* Compare Vector a and Vector b. If in any bit position the a and b
* bits are known and different, then the result is 1. Otherwise, if
* there are X/Z bits anywhere in A or B, the result is X. Finally,
* the result is 0.
*/
void vvp_cmp_ne::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
2004-06-16 18:33:25 +02:00
{
2005-01-22 17:21:11 +01:00
dispatch_operand_(ptr, bit);
2004-06-16 18:33:25 +02:00
2005-01-22 17:21:11 +01:00
assert(op_a_.size() == op_b_.size());
2004-06-16 18:33:25 +02:00
2005-01-22 17:21:11 +01:00
vvp_vector4_t res (1);
res.set_bit(0, BIT4_0);
for (unsigned idx = 0 ; idx < op_a_.size() ; idx += 1) {
vvp_bit4_t a = op_a_.value(idx);
vvp_bit4_t b = op_b_.value(idx);
if (a == BIT4_X)
res.set_bit(0, BIT4_X);
else if (a == BIT4_Z)
res.set_bit(0, BIT4_X);
else if (b == BIT4_X)
res.set_bit(0, BIT4_X);
else if (b == BIT4_Z)
res.set_bit(0, BIT4_X);
else if (a != b) {
res.set_bit(0, BIT4_1);
2004-06-16 18:33:25 +02:00
break;
}
}
2005-01-22 17:21:11 +01:00
vvp_net_t*net = ptr.ptr();
vvp_send_vec4(net->out, res);
2004-06-16 18:33:25 +02:00
}
2005-01-22 17:21:11 +01:00
2004-06-16 18:33:25 +02:00
2004-09-22 18:44:07 +02:00
// Common base for the > and >= comparators; flag selects signed
// magnitude compare.
vvp_cmp_gtge_base_::vvp_cmp_gtge_base_(unsigned wid, bool flag)
: vvp_arith_(wid), signed_flag_(flag)
{
}

/*
 * Shared implementation for > and >=. The two comparators differ only
 * in the bit to emit when the operands compare equal, which the
 * derived class supplies as out_if_equal.
 */
void vvp_cmp_gtge_base_::recv_vec4_base_(vvp_net_ptr_t ptr,
					 vvp_vector4_t bit,
					 vvp_bit4_t out_if_equal)
{
      dispatch_operand_(ptr, bit);

      vvp_bit4_t out = signed_flag_
	    ? compare_gtge_signed(op_a_, op_b_, out_if_equal)
	    : compare_gtge(op_a_, op_b_, out_if_equal);

      vvp_vector4_t val (1);
      val.set_bit(0, out);
      vvp_send_vec4(ptr.ptr()->out, val);
}
2001-07-06 06:46:44 +02:00
2004-09-22 18:44:07 +02:00
vvp_cmp_ge::vvp_cmp_ge(unsigned wid, bool flag)
: vvp_cmp_gtge_base_(wid, flag)
{
}

// >= emits 1 when the operands are equal.
void vvp_cmp_ge::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      recv_vec4_base_(ptr, bit, BIT4_1);
}

vvp_cmp_gt::vvp_cmp_gt(unsigned wid, bool flag)
: vvp_cmp_gtge_base_(wid, flag)
{
}

// > emits 0 when the operands are equal.
void vvp_cmp_gt::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      recv_vec4_base_(ptr, bit, BIT4_0);
}
2004-09-22 18:44:07 +02:00
2005-03-19 07:23:49 +01:00
// Left-shift node.
vvp_shiftl::vvp_shiftl(unsigned wid)
: vvp_arith_(wid)
{
}

vvp_shiftl::~vvp_shiftl()
{
}

/*
 * Shift the A operand left by the unsigned value of B, filling the
 * vacated low bits with 0. X/Z bits in the shift amount make the
 * whole result all-X.
 */
void vvp_shiftl::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
{
      dispatch_operand_(ptr, bit);

      unsigned long shift;
      if (! vector4_to_value(op_b_, shift)) {
	    vvp_send_vec4(ptr.ptr()->out, x_val_);
	    return;
      }

      vvp_vector4_t out (op_a_.size());

	// A shift of the full width or more clears the whole vector.
      if (shift > out.size())
	    shift = out.size();

	// Zero fill in the low bits ...
      for (unsigned idx = 0 ; idx < shift ; idx += 1)
	    out.set_bit(idx, BIT4_0);

	// ... and the surviving A bits moved up.
      for (unsigned idx = shift ; idx < out.size() ; idx += 1)
	    out.set_bit(idx, op_a_.value(idx-shift));

      vvp_send_vec4(ptr.ptr()->out, out);
}
2006-07-30 04:51:35 +02:00
vvp_shiftr::vvp_shiftr(unsigned wid, bool signed_flag)
: vvp_arith_(wid), signed_flag_(signed_flag)
2001-07-07 04:57:33 +02:00
{
2005-03-19 07:23:49 +01:00
}
2001-07-07 04:57:33 +02:00
2005-03-19 07:23:49 +01:00
vvp_shiftr::~vvp_shiftr()
{
}
2001-07-07 04:57:33 +02:00
void vvp_shiftr::recv_vec4(vvp_net_ptr_t ptr, const vvp_vector4_t&bit)
2005-03-19 07:23:49 +01:00
{
dispatch_operand_(ptr, bit);
2001-07-07 04:57:33 +02:00
2005-03-19 07:23:49 +01:00
vvp_vector4_t out (op_a_.size());
2001-07-07 04:57:33 +02:00
2005-03-19 07:23:49 +01:00
unsigned long shift;
if (! vector4_to_value(op_b_, shift)) {
vvp_send_vec4(ptr.ptr()->out, x_val_);
return;
2005-03-19 07:23:49 +01:00
}
2001-07-07 04:57:33 +02:00
2005-03-19 07:23:49 +01:00
if (shift > out.size())
shift = out.size();
2001-07-07 04:57:33 +02:00
2005-03-19 07:23:49 +01:00
for (unsigned idx = shift ; idx < out.size() ; idx += 1)
out.set_bit(idx-shift, op_a_.value(idx));
2001-07-07 04:57:33 +02:00
2006-07-30 04:51:35 +02:00
vvp_bit4_t pad = BIT4_0;
if (signed_flag_ && op_a_.size() > 0)
pad = op_a_.value(op_a_.size()-1);
2005-03-19 07:23:49 +01:00
for (unsigned idx = 0 ; idx < shift ; idx += 1)
2006-07-30 04:51:35 +02:00
out.set_bit(idx+out.size()-shift, pad);
2001-07-07 04:57:33 +02:00
2005-03-19 07:23:49 +01:00
vvp_send_vec4(ptr.ptr()->out, out);
2001-07-07 04:57:33 +02:00
}
2005-03-19 07:23:49 +01:00
2001-07-07 04:57:33 +02:00
/*
 * Common base for the two-operand real-valued arithmetic nodes.
 * Initialize both operands to 0.0 so that a computation triggered by
 * the first arriving operand does not read an indeterminate value.
 * NOTE(review): the class declaration is not visible here -- if the
 * members already have initializers this is merely redundant.
 */
vvp_arith_real_::vvp_arith_real_()
{
      op_a_ = 0.0;
      op_b_ = 0.0;
}

// Latch a real operand by input port: port 0 is A, port 1 is B.
void vvp_arith_real_::dispatch_operand_(vvp_net_ptr_t ptr, double bit)
{
      switch (ptr.port()) {
	  case 0:
	    op_a_ = bit;
	    break;
	  case 1:
	    op_b_ = bit;
	    break;
	  default:
	    assert(0);
      }
}
// Real-valued divide node.
vvp_arith_div_real::vvp_arith_div_real()
{
}

vvp_arith_div_real::~vvp_arith_div_real()
{
}

/*
 * Emit A/B for real operands. IEEE arithmetic applies, so a zero
 * divisor yields inf or NaN rather than trapping.
 */
void vvp_arith_div_real::recv_real(vvp_net_ptr_t ptr, double bit)
{
      dispatch_operand_(ptr, bit);
      vvp_send_real(ptr.ptr()->out, op_a_ / op_b_);
}
// Real-valued subtract node.
vvp_arith_sub_real::vvp_arith_sub_real()
{
}

vvp_arith_sub_real::~vvp_arith_sub_real()
{
}

// Emit A-B for real operands.
void vvp_arith_sub_real::recv_real(vvp_net_ptr_t ptr, double bit)
{
      dispatch_operand_(ptr, bit);
      vvp_send_real(ptr.ptr()->out, op_a_ - op_b_);
}
2001-06-05 05:05:41 +02:00
/*
* $Log: arith.cc,v $
2007-01-20 03:09:54 +01:00
* Revision 1.50 2007/01/20 02:09:54 steve
* Better size error details.
*
2006-07-30 04:51:35 +02:00
* Revision 1.49 2006/07/30 02:51:36 steve
* Fix/implement signed right shift.
*
2006-01-03 07:19:31 +01:00
* Revision 1.48 2006/01/03 06:19:31 steve
* Support wide divide nodes.
*
* Revision 1.47 2005/11/10 13:27:16 steve
* Handle very wide % and / operations using expanded vector2 support.
*
2005-09-16 00:54:04 +02:00
* Revision 1.46 2005/09/15 22:54:04 steve
* Use iostream instead of stdio.
*
* Revision 1.45 2005/07/06 04:29:25 steve
* Implement real valued signals and arith nodes.
*
* Revision 1.44 2005/06/22 00:04:48 steve
* Reduce vvp_vector4 copies by using const references.
*
2005-03-19 07:23:49 +01:00
* Revision 1.43 2005/03/19 06:23:49 steve
* Handle LPM shifts.
*
2005-03-12 07:42:28 +01:00
* Revision 1.42 2005/03/12 06:42:28 steve
* Implement .arith/mod.
*
2005-03-09 06:52:03 +01:00
* Revision 1.41 2005/03/09 05:52:04 steve
* Handle case inequality in netlists.
*
2005-02-19 03:41:23 +01:00
* Revision 1.40 2005/02/19 02:41:23 steve
* Handle signed divide.
*
2005-02-19 02:32:52 +01:00
* Revision 1.39 2005/02/19 01:32:52 steve
* Implement .arith/div.
*
* Revision 1.38 2005/02/04 05:13:02 steve
* Add wide .arith/mult, and vvp_vector2_t vectors.
*
2005-01-30 06:06:49 +01:00
* Revision 1.37 2005/01/30 05:06:49 steve
* Get .arith/sub working.
*
* Revision 1.36 2005/01/28 05:34:25 steve
* Add vector4 implementation of .arith/mult.
*
* Revision 1.35 2005/01/22 17:36:15 steve
* .cmp/x supports signed magnitude compare.
*
2005-01-22 17:21:11 +01:00
* Revision 1.34 2005/01/22 16:21:11 steve
* Implement vectored CMP_EQ and NE
*
2005-01-22 02:06:20 +01:00
* Revision 1.33 2005/01/22 01:06:20 steve
* Implement the .cmp/eeq LPM node.
*
* Revision 1.32 2005/01/16 04:19:08 steve
* Reimplement comparators as vvp_vector4_t nodes.
*
* Revision 1.31 2004/12/11 02:31:29 steve
* Rework of internals to carry vectors through nexus instead
* of single bits. Make the ivl, tgt-vvp and vvp initial changes
* down this path.
*
2001-06-05 05:05:41 +02:00
*/