ivtests: Add support for vlog95 tests
This adds the "normal-vlog95" test type, because we need to run a different set of commands for this to work so a new test type makes sense. This also moves a few of the existing tests to the new format in order to test the new test rig.
commit 564304d20c
parent a4aade5d1a
@@ -54,6 +54,11 @@ This describes the kind of test to run. The valid values are:
   that succeeds execute it using the vvp command. If there is no gold file
   specified, then look for an output line with the "PASSED" string.
 
+* **normal-vlog95** - This is similar to the normal case, but uses
+  the -tvlog95 target in a first pass to generate simplified verilog, then a
+  regular iverilog command with the -tvvp target to generate the actual
+  executable. This tests the -tvlog95 target.
+
 * **NI** - Mark the test as not implemented. The test will be skipped without
   running or reporting an error.
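In terms of plain commands, the new test type boils down to a two-pass compile followed by a normal vvp run. A minimal sketch in Python, assuming iverilog and vvp are on the PATH and reusing the work/ file names the runner code further down uses (pr903.v is just an example source):

    import os, subprocess

    os.makedirs("work", exist_ok=True)
    # Pass 1: translate the source to simplified Verilog with the vlog95 target.
    subprocess.run(["iverilog", "-o", "work/a.out.v", "-tvlog95", "ivltests/pr903.v"], check=True)
    # Pass 2: compile the generated Verilog with the default vvp target.
    subprocess.run(["iverilog", "-o", "work/a.out", "work/a.out.v"], check=True)
    # Run the simulation and look for the usual "PASSED" line on stdout.
    subprocess.run(["vvp", "work/a.out"], check=True)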
@@ -0,0 +1 @@
+ivltests/br_gh13a.v:7: warning: Unsized expression (('sd1)<<(~(40'b0000000000000000000000000000000000000000))) expanded beyond and was clipped to 65568 bits. Try using sized operands.
@@ -0,0 +1 @@
+ivltests/br_gh13a.v:7: warning: Unsized expression (('sd1)<<(~(40'b0000000000000000000000000000000000000000))) expanded beyond and was clipped to 65568 bits. Try using sized operands.
@@ -0,0 +1,2 @@
+0
+PASSED
@@ -0,0 +1,2 @@
+0
+PASSED
@@ -1,3 +0,0 @@
-./ivltests/br_gh13a.v:7: warning: Unsized expression (('sd1)<<(~(40'b0000000000000000000000000000000000000000))) expanded beyond and was clipped to 65568 bits. Try using sized operands.
-0
-PASSED
@@ -1,4 +1,4 @@
 1001
-001
+1001
 0101
-01
+101
@@ -68,19 +68,6 @@
 # gold or diff commands.
 #
 
-#------------------------------------------------------------------------------
-# Differences when iverilog is run without -gstrict-expr-width
-#------------------------------------------------------------------------------
-
-# The standard requires oversized unsized constant numbers to be truncated.
-# These tests are specifically testing that such numbers aren't truncated.
-pr903 normal ivltests
-pr1388974 normal ivltests
-
-# The standard doesn't support lossless expressions.
-br_gh13a normal ivltests gold=br_gh13a.gold
-param-width normal ivltests gold=param-width-ivl.gold
-
 #------------------------------------------------------------------------------
 # Differences when vvp is run without -compatible
 #------------------------------------------------------------------------------
@@ -726,7 +726,6 @@ onehot normal contrib # one hot design
 p_monta normal ivltests
 par_mismatch CE,-gspecify ivltests
 param-extend normal ivltests
-param-width normal ivltests gold=param-width.gold
 param_add normal ivltests # Addition in param declar
 param_and normal ivltests # bitwise &
 param_and2 normal ivltests # logical && in param declar
@@ -3,6 +3,8 @@
 # describes the test.
 
 array_packed_write_read vvp_tests/array_packed_write_read.json
+br_gh13a vvp_tests/br_gh13a.json
+br_gh13a-vlog95 vvp_tests/br_gh13a-vlog95.json
 case1 vvp_tests/case1.json
 case2 vvp_tests/case2.json
 case2-S vvp_tests/case2-S.json
@@ -23,5 +25,11 @@ dffsynth11 vvp_tests/dffsynth11.json
 dumpfile vvp_tests/dumpfile.json
 macro_str_esc vvp_tests/macro_str_esc.json
 memsynth1 vvp_tests/memsynth1.json
+param-width vvp_tests/param-width.json
+param-width-vlog95 vvp_tests/param-width-vlog95.json
+pr1388974 vvp_tests/pr1388974.json
+pr1388974-vlog95 vvp_tests/pr1388974-vlog95.json
+pr903 vvp_tests/pr903.json
+pr903-vlog95 vvp_tests/pr903-vlog95.json
 struct_packed_write_read vvp_tests/struct_packed_write_read.json
 struct_packed_write_read2 vvp_tests/struct_packed_write_read2.json
@@ -8,8 +8,8 @@ import os
 import sys
 import re
 
-def assemble_iverilog_cmd(source: str, it_dir: str, args: list) -> list:
-    res = ["iverilog", "-o", os.path.join("work", "a.out")]
+def assemble_iverilog_cmd(source: str, it_dir: str, args: list, outfile = "a.out") -> list:
+    res = ["iverilog", "-o", os.path.join("work", outfile)]
     res += ["-D__ICARUS_UNSIZED__"]
     res += args
     src = os.path.join(it_dir, source)
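The new outfile parameter defaults to "a.out", so existing callers are unchanged, while the vlog95 flow can name the intermediate output separately. For illustration, these are the same calls the new do_run_normal_vlog95() below makes, with a concrete source file substituted:

    # First pass: emit the translated Verilog into work/a.out.v.
    ivl1_cmd = assemble_iverilog_cmd("br_gh13a.v", "ivltests", ["-tvlog95"], "a.out.v")
    # Second pass: compile that generated file (found in work/) into work/a.out.
    ivl2_cmd = assemble_iverilog_cmd("a.out.v", "work", [], "a.out")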
@@ -44,7 +44,9 @@ def build_runtime(it_key: str) -> None:
 
     This is called in front of tests to make sure that the directory
     structure is correct, and common temp files that might linger from
-    a previous run are removed.'''
+    a previous run are removed. We need to make sure that the directories
+    "work" and "log" are present, and the log files related to this key
+    are removed.'''
 
     try:
         os.mkdir("log")
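The expanded docstring spells out the contract. A hypothetical sketch of that behaviour (the function body is only partially visible in this hunk, so this is an illustration rather than the actual implementation):

    import glob, os

    def build_runtime_sketch(it_key: str) -> None:
        # Make sure the "work" and "log" directories exist.
        for d in ("work", "log"):
            try:
                os.mkdir(d)
            except FileExistsError:
                pass
        # Remove log files left over from a previous run of this test key.
        for old_log in glob.glob(os.path.join("log", f"{it_key}-*.log")):
            os.remove(old_log)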
@@ -122,71 +124,28 @@ def run_CE(options : dict) -> list:
     else:
         return [0, "Passed - CE"]
 
-def do_run_normal(options : dict, expected_fail : bool) -> list:
-    '''Run the iverilog and vvp commands.
-
-    In this case, run the compiler to generate a vvp output file, and
-    run the vvp command to actually execute the simulation. Collect
-    the results and look for a "PASSED" string.'''
+def check_run_outputs(options : dict, expected_fail : bool, it_stdout : str, log_list : list) -> list:
+    '''Check the output files, and return success or failure.
+
+    This function takes an options dictionary that describes the settings, and
+    the output from the final command. It also takes a list of log files to check
+    when there are gold files present.'''
 
+    # Get the options this step needs...
     it_key = options['key']
-    it_dir = options['directory']
-    it_iverilog_args = options['iverilog_args']
-    it_vvp_args = options['vvp_args']
-    it_vvp_args_extended = options['vvp_args_extended']
     it_gold = options['gold']
     it_diff = options['diff']
 
-    build_runtime(it_key)
-
-    # Run the iverilog command
-    ivl_cmd = assemble_iverilog_cmd(options['source'], it_dir, it_iverilog_args)
-    ivl_res = subprocess.run(ivl_cmd, capture_output=True)
-
-    log_results(it_key, "iverilog", ivl_res)
-    if ivl_res.returncode != 0:
-        return [1, "Failed - Compile failed"]
-
-    # run the vvp command
-    vvp_cmd = assemble_vvp_cmd(it_vvp_args, it_vvp_args_extended)
-    vvp_res = subprocess.run(vvp_cmd, capture_output=True)
-    log_results(it_key, "vvp", vvp_res);
-
-    if vvp_res.returncode != 0:
-        return [1, "Failed - Vvp execution failed"]
-
-    it_stdout = vvp_res.stdout.decode('ascii')
-
-    # If there is a gold file configured, the test result depends on
-    # the outputs matching the gold file.
     if it_gold is not None:
 
         compared = True
-        log_path = os.path.join("log", f"{it_key}-iverilog-stdout.log")
-        gold_path = os.path.join("gold", f"{it_gold}-iverilog-stdout.gold")
-        compared_ivl_stdout = compare_files(log_path, gold_path)
-        compared = compared and compared_ivl_stdout
-
-        log_path = os.path.join("log", f"{it_key}-iverilog-stderr.log")
-        gold_path = os.path.join("gold", f"{it_gold}-iverilog-stderr.gold")
-        compared_ivl_stderr = compare_files(log_path, gold_path)
-        compared = compared and compared_ivl_stderr
-
-        log_path = os.path.join("log", f"{it_key}-vvp-stdout.log")
-        gold_path = os.path.join("gold", f"{it_gold}-vvp-stdout.gold")
-        compared_vvp_stdout = compare_files(log_path, gold_path)
-        compared = compared and compared_vvp_stdout
-
-        log_path = os.path.join("log", f"{it_key}-vvp-stderr.log")
-        gold_path = os.path.join("gold", f"{it_gold}-vvp-stderr.gold")
-        compared_vvp_stderr = compare_files(log_path, gold_path)
-        compared = compared and compared_vvp_stderr
+        for log_name in log_list:
+            log_path = os.path.join("log", f"{it_key}-{log_name}.log")
+            gold_path = os.path.join("gold", f"{it_gold}-{log_name}.gold")
+            compared = compared and compare_files(log_path, gold_path)
 
         if expected_fail:
             if compared:
                 return [1, "Failed - Passed, but expected failure"]
             else:
                 return [0, "Passed - Expected fail"]
         else:
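To make the naming scheme concrete: for the br_gh13a test added below, whose key and gold name are both "br_gh13a", the loop above pairs files like this (a worked example derived from the code, not part of the commit):

    import os

    log_path = os.path.join("log", "br_gh13a-iverilog-stderr.log")
    gold_path = os.path.join("gold", "br_gh13a-iverilog-stderr.gold")
    # compare_files(log_path, gold_path) then feeds the running 'compared' flag.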
@@ -195,7 +154,6 @@ def do_run_normal(options : dict, expected_fail : bool) -> list:
             else:
                 return [1, "Failed - Gold output doesn't match actual output."]
 
-
     # If there is a diff description, then compare named files instead of
     # the log and a gold file.
     if it_diff is not None:
@@ -224,6 +182,7 @@ def do_run_normal(options : dict, expected_fail : bool) -> list:
         else:
             return [1, f"Failed - Files {diff_name1} and {diff_name2} differ."]
 
+
     # Otherwise, look for the PASSED output string in stdout.
     for line in it_stdout.splitlines():
         if line == "PASSED":
@@ -240,8 +199,99 @@ def do_run_normal(options : dict, expected_fail : bool) -> list:
     return [1, "Failed - No PASSED output, and no gold file"]
 
 
+def do_run_normal_vlog95(options : dict, expected_fail : bool) -> list:
+    '''Run the iverilog and vvp commands.
+
+    In this case, run the compiler with the -tvlog95 flag to generate
+    an intermediate verilog file, then run the compiler again to generate
+    a vvp output. Run that vvp output to test the simulation results. Collect
+    the results and look for a "PASSED" string.'''
+
+    it_key = options['key']
+    it_dir = options['directory']
+    it_iverilog_args = ["-tvlog95"] + options['iverilog_args']
+    it_vvp_args = options['vvp_args']
+    it_vvp_args_extended = options['vvp_args_extended']
+
+    build_runtime(it_key)
+
+    # Run the first iverilog command, to generate the intermediate verilog
+    ivl1_cmd = assemble_iverilog_cmd(options['source'], it_dir, it_iverilog_args, "a.out.v")
+    ivl1_res = subprocess.run(ivl1_cmd, capture_output=True)
+
+    log_results(it_key, "iverilog", ivl1_res)
+    if ivl1_res.returncode != 0:
+        return [1, "Failed - Compile failed"]
+
+    # Run another iverilog command to compile the code generated from the first step.
+    ivl2_cmd = assemble_iverilog_cmd("a.out.v", "work", [ ], "a.out")
+    ivl2_res = subprocess.run(ivl2_cmd, capture_output=True)
+
+    log_results(it_key, "iverilog-vlog95", ivl2_res)
+    if ivl2_res.returncode != 0:
+        return [1, "Failed - Compile of generated code failed"]
+
+    # Run the vvp command
+    vvp_cmd = assemble_vvp_cmd(it_vvp_args, it_vvp_args_extended)
+    vvp_res = subprocess.run(vvp_cmd, capture_output=True)
+    log_results(it_key, "vvp", vvp_res)
+
+    if vvp_res.returncode != 0:
+        return [1, "Failed - Vvp execution failed"]
+
+    it_stdout = vvp_res.stdout.decode('ascii')
+    log_list = ["iverilog-stdout", "iverilog-stderr",
+                "iverilog-vlog95-stdout", "iverilog-vlog95-stderr",
+                "vvp-stdout", "vvp-stderr"]
+
+    return check_run_outputs(options, expected_fail, it_stdout, log_list)
+
+
+def do_run_normal(options : dict, expected_fail : bool) -> list:
+    '''Run the iverilog and vvp commands.
+
+    In this case, run the compiler to generate a vvp output file, and
+    run the vvp command to actually execute the simulation. Collect
+    the results and look for a "PASSED" string.'''
+
+    it_key = options['key']
+    it_dir = options['directory']
+    it_iverilog_args = options['iverilog_args']
+    it_vvp_args = options['vvp_args']
+    it_vvp_args_extended = options['vvp_args_extended']
+
+    build_runtime(it_key)
+
+    # Run the iverilog command
+    ivl_cmd = assemble_iverilog_cmd(options['source'], it_dir, it_iverilog_args)
+    ivl_res = subprocess.run(ivl_cmd, capture_output=True)
+
+    log_results(it_key, "iverilog", ivl_res)
+    if ivl_res.returncode != 0:
+        return [1, "Failed - Compile failed"]
+
+    # run the vvp command
+    vvp_cmd = assemble_vvp_cmd(it_vvp_args, it_vvp_args_extended)
+    vvp_res = subprocess.run(vvp_cmd, capture_output=True)
+    log_results(it_key, "vvp", vvp_res)
+
+    if vvp_res.returncode != 0:
+        return [1, "Failed - Vvp execution failed"]
+
+    it_stdout = vvp_res.stdout.decode('ascii')
+    log_list = ["iverilog-stdout", "iverilog-stderr",
+                "vvp-stdout", "vvp-stderr"]
+
+    return check_run_outputs(options, expected_fail, it_stdout, log_list)
+
+
 def run_normal(options : dict) -> list:
     return do_run_normal(options, False)
 
 def run_EF(options : dict) -> list:
     return do_run_normal(options, True)
+
+def run_normal_vlog95(options : dict) -> list:
+    return do_run_normal_vlog95(options, False)
+
+def run_EF_vlog95(options : dict) -> list:
+    return do_run_normal_vlog95(options, True)
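Because the vlog95 flow logs the second compile pass separately ("iverilog-vlog95"), a gold-checked normal-vlog95 test compares six log/gold pairs instead of four. A small illustration of what check_run_outputs() will look for, with file names derived from the code above rather than listed in the commit:

    key = gold = "br_gh13a-vlog95"
    log_list = ["iverilog-stdout", "iverilog-stderr",
                "iverilog-vlog95-stdout", "iverilog-vlog95-stderr",
                "vvp-stdout", "vvp-stderr"]
    for log_name in log_list:
        print(f"log/{key}-{log_name}.log  <->  gold/{gold}-{log_name}.gold")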
@@ -56,12 +56,18 @@ def process_test(item: list) -> str:
     elif it_type == "normal":
         res = run_ivl.run_normal(it_options)
 
+    elif it_type == "normal-vlog95":
+        res = run_ivl.run_normal_vlog95(it_options)
+
     elif it_type == "CE":
         res = run_ivl.run_CE(it_options)
 
     elif it_type == "EF":
         res = run_ivl.run_EF(it_options)
 
+    elif it_type == "EF-vlog95":
+        res = run_ivl.run_EF_vlog95(it_options)
+
     else:
         res = f"{it_key}: I don't understand the test type ({it_type})."
         raise Exception(res)
@@ -0,0 +1,5 @@
+{
+    "type" : "normal-vlog95",
+    "source" : "br_gh13a.v",
+    "gold" : "br_gh13a-vlog95"
+}
@@ -0,0 +1,5 @@
+{
+    "type" : "normal",
+    "source" : "br_gh13a.v",
+    "gold" : "br_gh13a"
+}
@@ -0,0 +1,5 @@
+{
+    "type" : "normal-vlog95",
+    "source" : "param-width.v",
+    "gold" : "param-width-vlog95"
+}
@@ -0,0 +1,5 @@
+{
+    "type" : "normal",
+    "source" : "param-width.v",
+    "gold" : "param-width"
+}
@@ -0,0 +1,4 @@
+{
+    "type" : "normal-vlog95",
+    "source" : "pr1388974.v"
+}
@@ -0,0 +1,4 @@
+{
+    "type" : "normal",
+    "source" : "pr1388974.v"
+}
@@ -0,0 +1,4 @@
+{
+    "type" : "normal-vlog95",
+    "source" : "pr903.v"
+}
@@ -0,0 +1,4 @@
+{
+    "type" : "normal",
+    "source" : "pr903.v"
+}