Merge pull request #1106 from daglem/part-select-address-overflow
Guard against overflow / wrap around of internal bit address
This commit is contained in:
commit
ce7b26e3f9
36
elab_expr.cc
36
elab_expr.cc
|
|
@ -5990,6 +5990,24 @@ NetExpr* PEIdent::elaborate_expr_net_idx_up_(Design*des, NetScope*scope,
|
|||
if (base_c->value().is_defined()) {
|
||||
long lsv = base_c->value().as_long();
|
||||
long rel_base = 0;
|
||||
|
||||
// Check whether an unsigned base fits in a 32 bit int.
|
||||
// This ensures correct results for the vlog95 target, and
|
||||
// for the vvp target on LLP64 platforms (Microsoft Windows).
|
||||
if (!base_c->has_sign() && (int32_t)lsv < 0) {
|
||||
// Return 'bx for a wrapped around base.
|
||||
ex = new NetEConst(verinum(verinum::Vx, wid, true));
|
||||
ex->set_line(*this);
|
||||
delete base;
|
||||
if (warn_ob_select) {
|
||||
cerr << get_fileline() << ": warning: " << net->name();
|
||||
if (net->word_index()) cerr << "[]";
|
||||
cerr << "[" << (unsigned long)lsv << "+:" << wid
|
||||
<< "] is always outside vector." << endl;
|
||||
}
|
||||
return ex;
|
||||
}
|
||||
|
||||
// Get the signal range.
|
||||
const netranges_t&packed = net->sig()->packed_dims();
|
||||
if (prefix_indices.size()+1 < net->sig()->packed_dims().size()) {
|
||||
|
|
@ -6139,6 +6157,24 @@ NetExpr* PEIdent::elaborate_expr_net_idx_do_(Design*des, NetScope*scope,
|
|||
if (base_c->value().is_defined()) {
|
||||
long lsv = base_c->value().as_long();
|
||||
long rel_base = 0;
|
||||
|
||||
// Check whether an unsigned base fits in a 32 bit int.
|
||||
// This ensures correct results for the vlog95 target, and
|
||||
// for the vvp target on LLP64 platforms (Microsoft Windows).
|
||||
if (!base_c->has_sign() && (int32_t)lsv < 0) {
|
||||
// Return 'bx for a wrapped around base.
|
||||
ex = new NetEConst(verinum(verinum::Vx, wid, true));
|
||||
ex->set_line(*this);
|
||||
delete base;
|
||||
if (warn_ob_select) {
|
||||
cerr << get_fileline() << ": warning: " << net->name();
|
||||
if (net->word_index()) cerr << "[]";
|
||||
cerr << "[" << (unsigned long)lsv << "-:" << wid
|
||||
<< "] is always outside vector." << endl;
|
||||
}
|
||||
return ex;
|
||||
}
|
||||
|
||||
// Get the signal range.
|
||||
const netranges_t&packed = net->sig()->packed_dims();
|
||||
if (prefix_indices.size()+1 < net->sig()->packed_dims().size()) {
|
||||
|
|
|
|||
17
elab_lval.cc
17
elab_lval.cc
|
|
@ -907,6 +907,23 @@ bool PEIdent::elaborate_lval_net_idx_(Design*des,
|
|||
if (base_c->value().is_defined()) {
|
||||
long lsv = base_c->value().as_long();
|
||||
long rel_base = 0;
|
||||
|
||||
// Check whether an unsigned base fits in a 32 bit int.
|
||||
// This ensures correct results for the vlog95 target, and
|
||||
// for the vvp target on LLP64 platforms (Microsoft Windows).
|
||||
if (!base_c->has_sign() && (int32_t)lsv < 0) {
|
||||
// The base is wrapped around.
|
||||
delete base;
|
||||
if (warn_ob_select) {
|
||||
cerr << get_fileline() << ": warning: " << lv->name();
|
||||
if (lv->word()) cerr << "[]";
|
||||
cerr << "[" << (unsigned long)lsv
|
||||
<< (index_tail.sel == index_component_t::SEL_IDX_UP ? "+:" : "-:")
|
||||
<< wid << "] is always outside vector." << endl;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Get the signal range.
|
||||
const netranges_t&packed = reg->packed_dims();
|
||||
if (prefix_indices.size()+1 < reg->packed_dims().size()) {
|
||||
|
|
|
|||
18
elab_net.cc
18
elab_net.cc
|
|
@ -250,6 +250,7 @@ bool PEIdent::eval_part_select_(Design*des, NetScope*scope, NetNet*sig,
|
|||
|
||||
/* We have an undefined index and that is out of range. */
|
||||
if (! tmp->value().is_defined()) {
|
||||
delete tmp_ex;
|
||||
if (warn_ob_select) {
|
||||
cerr << get_fileline() << ": warning: "
|
||||
<< sig->name();
|
||||
|
|
@ -268,7 +269,24 @@ bool PEIdent::eval_part_select_(Design*des, NetScope*scope, NetNet*sig,
|
|||
}
|
||||
|
||||
long midx_val = tmp->value().as_long();
|
||||
|
||||
// Check whether an unsigned base fits in a 32 bit int.
|
||||
// This ensures correct results for the vlog95 target, and
|
||||
// for the vvp target on LLP64 platforms (Microsoft Windows).
|
||||
if (!tmp->has_sign() && (int32_t)midx_val < 0) {
|
||||
// The base is wrapped around.
|
||||
delete tmp_ex;
|
||||
if (warn_ob_select) {
|
||||
cerr << get_fileline() << ": warning: " << sig->name();
|
||||
cerr << "[" << (unsigned long)midx_val
|
||||
<< (index_tail.sel == index_component_t::SEL_IDX_UP ? "+:" : "-:")
|
||||
<< wid << "] is always outside vector." << endl;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
delete tmp_ex;
|
||||
|
||||
if (prefix_indices.size()+1 < sig->packed_dims().size()) {
|
||||
// Here we are selecting one or more sub-arrays.
|
||||
// Make this work by finding the indexed sub-arrays and
|
||||
|
|
|
|||
|
|
@ -0,0 +1,24 @@
|
|||
/*
 * partsel_outside_const
 * Verify that base_const ± bit_number in a part-select cannot wrap around
 * inside the internal 32 bit (signed) vector address space.
 * A wrap-around here could make a part-select with an unsized literal
 * constant base expression read bits it should never reach, so the
 * whole select must evaluate to 'x instead.
 */

module main;

   // Two-bit vector with a known value; the select base -'d1 is an
   // unsized literal that wraps to a huge unsigned address, so the
   // +: part-select must land entirely outside the vector.
   reg [1:0] vec = 1;
   wire [1:0] outside_const = vec[-'d1 +: 2];

   initial begin
      // An always-outside constant part-select must read as all-x.
      if (outside_const === 'x) begin
	 $display("PASSED");
      end else begin
	 $display("FAILED -- const base_expr out of bounds value %b != xx", outside_const);
      end
      $finish;
   end

endmodule // main
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
/*
 * partsel_outside_expr
 * Verify that base_expr ± bit_number in a part-select cannot wrap around
 * inside the internal 32 bit (signed) vector address space.
 * A wrap-around here could make a part-select with an unsigned integer
 * base expression read bits it should never reach, so the whole select
 * must evaluate to 'x instead.
 */

module main;

   // Two-bit vector with a known value; the unsigned runtime base
   // holds -'d1 reinterpreted as a huge unsigned address, so the
   // +: part-select must land entirely outside the vector.
   reg [1:0] vec = 1;
   integer unsigned base = -'d1;
   wire [1:0] outside_expr = vec[base +: 2];

   initial begin
      // An always-outside non-constant part-select must read as all-x.
      if (outside_expr === 'x) begin
	 $display("PASSED");
      end else begin
	 $display("FAILED -- non-const base_expr out of bounds value %b != xx", outside_expr);
      end
      $finish;
   end

endmodule // main
|
||||
|
|
@ -141,6 +141,8 @@ partsel_invalid_idx3 vvp_tests/partsel_invalid_idx3.json
|
|||
partsel_invalid_idx4 vvp_tests/partsel_invalid_idx4.json
|
||||
partsel_invalid_idx5 vvp_tests/partsel_invalid_idx5.json
|
||||
partsel_invalid_idx6 vvp_tests/partsel_invalid_idx6.json
|
||||
partsel_outside_const vvp_tests/partsel_outside_const.json
|
||||
partsel_outside_expr vvp_tests/partsel_outside_expr.json
|
||||
partsel_reversed_idx1 vvp_tests/partsel_reversed_idx1.json
|
||||
partsel_reversed_idx2 vvp_tests/partsel_reversed_idx2.json
|
||||
partsel_reversed_idx3 vvp_tests/partsel_reversed_idx3.json
|
||||
|
|
|
|||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "partsel_outside_const.v"
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "partsel_outside_expr.v"
|
||||
}
|
||||
106
netmisc.cc
106
netmisc.cc
|
|
@ -327,74 +327,52 @@ static unsigned num_bits(long arg)
|
|||
NetExpr *normalize_variable_base(NetExpr *base, long msb, long lsb,
|
||||
unsigned long wid, bool is_up, long soff)
|
||||
{
|
||||
long offset = lsb;
|
||||
bool msb_lo = msb < lsb;
|
||||
|
||||
if (msb < lsb) {
|
||||
/* Correct the offset if needed. */
|
||||
if (is_up) offset -= wid - 1;
|
||||
/* Calculate the space needed for the offset. */
|
||||
unsigned min_wid = num_bits(offset);
|
||||
if (num_bits(soff) > min_wid)
|
||||
min_wid = num_bits(soff);
|
||||
/* We need enough space for the larger of the offset or the
|
||||
* base expression. */
|
||||
if (min_wid < base->expr_width()) min_wid = base->expr_width();
|
||||
/* Now that we have the minimum needed width increase it by
|
||||
* one to make room for the normalization calculation. */
|
||||
min_wid += 2;
|
||||
/* Pad the base expression to the correct width. */
|
||||
base = pad_to_width(base, min_wid, *base);
|
||||
/* If the base expression is unsigned and either the lsb
|
||||
* is negative or it does not fill the width of the base
|
||||
* expression then we could generate negative normalized
|
||||
* values so cast the expression to signed to get the
|
||||
* math correct. */
|
||||
if ((lsb < 0 || num_bits(lsb+1) <= base->expr_width()) &&
|
||||
! base->has_sign()) {
|
||||
/* We need this extra select to hide the signed
|
||||
* property from the padding above. It will be
|
||||
* removed automatically during code generation. */
|
||||
NetESelect *tmp = new NetESelect(base, 0 , min_wid);
|
||||
tmp->set_line(*base);
|
||||
tmp->cast_signed(true);
|
||||
base = tmp;
|
||||
}
|
||||
/* Normalize the expression. */
|
||||
base = make_sub_expr(offset+soff, base);
|
||||
// Calculate the canonical offset.
|
||||
long offset = soff;
|
||||
if (msb_lo) {
|
||||
// E.g. logic [0:15] up_vect - prepare to calculate offset - base
|
||||
offset += lsb;
|
||||
if (is_up) // E.g. up_vect[msb_base_expr +: width_expr]
|
||||
offset -= wid - 1;
|
||||
} else {
|
||||
/* Correct the offset if needed. */
|
||||
if (!is_up) offset += wid - 1;
|
||||
/* If the offset is zero then just return the base (index)
|
||||
* expression. */
|
||||
if ((soff-offset) == 0) return base;
|
||||
/* Calculate the space needed for the offset. */
|
||||
unsigned min_wid = num_bits(-offset);
|
||||
if (num_bits(soff) > min_wid)
|
||||
min_wid = num_bits(soff);
|
||||
/* We need enough space for the larger of the offset or the
|
||||
* base expression. */
|
||||
if (min_wid < base->expr_width()) min_wid = base->expr_width();
|
||||
/* Now that we have the minimum needed width increase it by
|
||||
* one to make room for the normalization calculation. */
|
||||
min_wid += 2;
|
||||
/* Pad the base expression to the correct width. */
|
||||
base = pad_to_width(base, min_wid, *base);
|
||||
/* If the offset is greater than zero then we need to do
|
||||
* signed math to get the location value correct. */
|
||||
if (offset > 0 && ! base->has_sign()) {
|
||||
/* We need this extra select to hide the signed
|
||||
* property from the padding above. It will be
|
||||
* removed automatically during code generation. */
|
||||
NetESelect *tmp = new NetESelect(base, 0 , min_wid);
|
||||
tmp->set_line(*base);
|
||||
tmp->cast_signed(true);
|
||||
base = tmp;
|
||||
}
|
||||
/* Normalize the expression. */
|
||||
base = make_add_expr(base, soff-offset);
|
||||
// E.g. logic [15:0] down_vect - prepare to calculate offset + base
|
||||
offset -= lsb;
|
||||
if (!is_up) // E.g. down_vect[msb_base_expr -: width_expr]
|
||||
offset -= wid - 1;
|
||||
// There is no need to calculate 0 + base.
|
||||
if (offset == 0) return base;
|
||||
}
|
||||
|
||||
return base;
|
||||
// Calculate the space needed for the offset.
|
||||
unsigned off_wid = num_bits(offset);
|
||||
// Get the width of the base expression.
|
||||
unsigned base_wid = base->expr_width();
|
||||
|
||||
// If the result could be negative, then we need to do signed math
|
||||
// to get the location value correct.
|
||||
bool add_base_sign = !base->has_sign() && (offset < 0 || (msb_lo && off_wid <= base_wid));
|
||||
|
||||
// If base is signed, we must add a sign bit to offset as well.
|
||||
bool add_off_sign = offset >= 0 && (base->has_sign() || add_base_sign);
|
||||
|
||||
// We need enough space for the larger of the offset or the
|
||||
// base expression, plus an extra bit for arithmetic overflow.
|
||||
unsigned min_wid = 1 + max(off_wid + add_off_sign, base_wid + add_base_sign);
|
||||
base = pad_to_width(base, min_wid, *base);
|
||||
if (add_base_sign) {
|
||||
/* We need this extra select to hide the signed
|
||||
* property from the padding above. It will be
|
||||
* removed automatically during code generation. */
|
||||
NetESelect *tmp = new NetESelect(base, 0 , min_wid);
|
||||
tmp->set_line(*base);
|
||||
tmp->cast_signed(true);
|
||||
base = tmp;
|
||||
}
|
||||
|
||||
// Normalize the expression.
|
||||
return msb_lo ? make_sub_expr(offset, base) : make_add_expr(base, offset);
|
||||
}
|
||||
|
||||
NetExpr *normalize_variable_bit_base(const list<long>&indices, NetExpr*base,
|
||||
|
|
|
|||
|
|
@ -252,8 +252,10 @@ bool vvp_fun_part_var::recv_vec4_(vvp_net_ptr_t port, const vvp_vector4_t&bit,
|
|||
case 1:
|
||||
// INT_MIN is before the vector and is used to
|
||||
// represent a 'bx value on the select input.
|
||||
tmp = INT32_MIN;
|
||||
vector4_to_value(bit, tmp, is_signed_);
|
||||
// It is also used to guard against 32 bit
|
||||
// unsigned -> signed address overflow / wrap around.
|
||||
if (!vector4_to_value(bit, tmp, is_signed_) || (!is_signed_ && tmp < 0))
|
||||
tmp = INT32_MIN;
|
||||
if (static_cast<int>(tmp) == base) return false;
|
||||
base = tmp;
|
||||
break;
|
||||
|
|
|
|||
Loading…
Reference in New Issue