Merge pull request #847 from steveicarus/steveicarus/ivtest-pythonify
ivtest: Rewrite the ivtest suite tests in python
This commit is contained in:
commit
378c812fe2
|
|
@ -11,4 +11,6 @@ perl vvp_reg.pl || status=1
|
|||
|
||||
perl vpi_reg.pl || status=1
|
||||
|
||||
python3 vvp_reg.py || status=1
|
||||
|
||||
exit $status
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ jobs:
|
|||
- name: Install dependencies
|
||||
run: |
|
||||
brew install bison
|
||||
pip3 install docopt
|
||||
|
||||
- name: Build, check and install
|
||||
run: |
|
||||
|
|
@ -52,7 +53,7 @@ jobs:
|
|||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt update -qq
|
||||
sudo apt install -y make g++ git bison flex gperf libreadline-dev autoconf python3-sphinx
|
||||
sudo apt install -y make g++ git bison flex gperf libreadline-dev autoconf python3-sphinx python3-docopt
|
||||
|
||||
- name: Build, check and install
|
||||
run: |
|
||||
|
|
@ -99,8 +100,11 @@ jobs:
|
|||
git
|
||||
base-devel
|
||||
gperf
|
||||
python-pip
|
||||
mingw-w64-${{ matrix.arch }}-toolchain
|
||||
|
||||
- uses: actions/setup-python@v4
|
||||
|
||||
- name: Build and check
|
||||
run: |
|
||||
cd msys2
|
||||
|
|
@ -110,7 +114,8 @@ jobs:
|
|||
run: pacman -U --noconfirm msys2/*.zst
|
||||
|
||||
- name: Test
|
||||
run: ./.github/test.sh
|
||||
run: |
|
||||
./.github/test.sh
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
|
|
|
|||
|
|
@ -9,5 +9,6 @@ Icarus Verilog.
|
|||
:maxdepth: 1
|
||||
|
||||
getting_started
|
||||
regression_tests
|
||||
version_stamps
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,79 @@
|
|||
|
||||
The Regression Test Suite
|
||||
=========================
|
||||
|
||||
Icarus Verilog development includes a regression test suite that is included
|
||||
along with the source. The "ivtest" directory contains the regression test
|
||||
suite, and this suite is used by the github actions as continuous integration
|
||||
to make sure the code is always going forward.
|
||||
|
||||
NOTE: There are scripts written in perl to run the regression tests, but they
|
||||
are being gradually replaced with a newer set of scripts. It is the newer
|
||||
method that is described here.
|
||||
|
||||
Test Descriptions
|
||||
-----------------
|
||||
|
||||
Regression tests are listed in the regress-vvp.list file. Each line lists the
|
||||
name of the test and the path to the test description. The list file is
|
||||
therefore pretty simple, and all the description of the test is in the
|
||||
description file:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
macro_str_esc vvp_tests/macro_str_esc.json
|
||||
|
||||
The "name" is a simple name, and the test-description-file is the path (relative
|
||||
the ivtest directory) to the description file. A simple test description file
|
||||
is a JSON file, like this:
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
{
|
||||
"type" : "normal",
|
||||
"source" : "macro_str_esc.v",
|
||||
"gold" : "macro_str_esc"
|
||||
}
|
||||
|
||||
This description file contains all the information that the vvp_reg.py script
|
||||
needs to run the regression test. The sections below describe the keys and
|
||||
values in the description file dictionary.
|
||||
|
||||
source (required)
|
||||
^^^^^^^^^^^^^^^^^
|
||||
This specifies the name of the source file. The file is actually to be found
|
||||
in the ivltests/ directory.
|
||||
|
||||
|
||||
type (required)
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
This describes the kind of test to run. The valid values are:
|
||||
|
||||
* **normal** - Compile the source using the iverilog compiler vvp target, and if
|
||||
that succeeds execute it using the vvp command. If there is no gold file
|
||||
specified, then look for an output line with the "PASSED" string.
|
||||
|
||||
* **NI** - Mark the test as not implemented. The test will be skipped without
|
||||
running or reporting an error.
|
||||
|
||||
* **CE** - Compile, but expect the compiler to fail
|
||||
|
||||
* **EF** - Compile and run, but expect the run time to fail.
|
||||
|
||||
gold (optional)
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
If this is specified, it replaces the "Passed" condition with a comparison of
|
||||
the output with a gold file. The argument is the name of the gold file set,
|
||||
which will be found in the "gold/" directory. The name here is actually the
|
||||
basename of the gold files, with separate actual gold files for the iverilog
|
||||
and vvp stderr and stdout. For example, if a "normal" test includes a gold
|
||||
file, then the program is compiled and run, and the outputs are compared with
|
||||
the gold file to make sure it ran properly.
|
||||
|
||||
iverilog-args (optional)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If this is specified, it is a list of strings that are passed as arguments to
|
||||
the iverilog command line.
|
||||
|
|
@ -10,6 +10,7 @@ log/
|
|||
work/
|
||||
vpi_log/
|
||||
vhdl/
|
||||
__pycache__/
|
||||
|
||||
# The normal regression output files.
|
||||
|
||||
|
|
|
|||
|
|
@ -62,20 +62,20 @@ module test;
|
|||
if (word_sw3 !== 16'b01111111_11111111) begin $display("FAILED -- word_sw3 = 'b%b", word_sw3 ); err=1; end
|
||||
// access to parts of structure elements
|
||||
if (word_ep0 !== 16'bxxxxxxxx_xxxxxxxx) begin $display("FAILED -- word_ep0 = 'b%b", word_ep0 ); err=1; end
|
||||
if (word_ep1 !== 16'bxxxx1111_xxxx0000) begin $display("FAILED -- word_ep1 = 'b%b", word_ep1 ); err=1; end
|
||||
if (word_ep1.high !== 8'bxxxx1111 ) begin $display("FAILED -- word_ep1.high = 'b%b", word_ep1.high); err=1; end
|
||||
if (word_ep1.low !== 8'bxxxx0000 ) begin $display("FAILED -- word_ep1.low = 'b%b", word_ep1.low ); err=1; end
|
||||
if (word_ep2 !== 16'bxxxx1111_xxxx0000) begin $display("FAILED -- word_ep2 = 'b%b", word_ep2 ); err=1; end
|
||||
if (word_ep2.high !== 8'bxxxx1111 ) begin $display("FAILED -- word_ep2.high = 'b%b", word_ep2.high); err=1; end
|
||||
if (word_ep2.low !== 8'bxxxx0000 ) begin $display("FAILED -- word_ep2.low = 'b%b", word_ep2.low ); err=1; end
|
||||
if (word_ep3 !== 16'bxxxx0111_xxxx0000) begin $display("FAILED -- word_ep3 = 'b%b", word_ep3 ); err=1; end
|
||||
if (word_ep3.high !== 8'bxxxx0111 ) begin $display("FAILED -- word_ep3.high = 'b%b", word_ep3.high); err=1; end
|
||||
if (word_ep3.low !== 8'bxxxx0000 ) begin $display("FAILED -- word_ep3.low = 'b%b", word_ep3.low ); err=1; end
|
||||
if (word_ep1 !== 16'bzzzz1111_zzzz0000) begin $display("FAILED -- word_ep1 = 'b%b", word_ep1 ); err=1; end
|
||||
if (word_ep1.high !== 8'bzzzz1111 ) begin $display("FAILED -- word_ep1.high = 'b%b", word_ep1.high); err=1; end
|
||||
if (word_ep1.low !== 8'bzzzz0000 ) begin $display("FAILED -- word_ep1.low = 'b%b", word_ep1.low ); err=1; end
|
||||
if (word_ep2 !== 16'bzzzz1111_zzzz0000) begin $display("FAILED -- word_ep2 = 'b%b", word_ep2 ); err=1; end
|
||||
if (word_ep2.high !== 8'bzzzz1111 ) begin $display("FAILED -- word_ep2.high = 'b%b", word_ep2.high); err=1; end
|
||||
if (word_ep2.low !== 8'bzzzz0000 ) begin $display("FAILED -- word_ep2.low = 'b%b", word_ep2.low ); err=1; end
|
||||
if (word_ep3 !== 16'bzzzz0111_zzzz0000) begin $display("FAILED -- word_ep3 = 'b%b", word_ep3 ); err=1; end
|
||||
if (word_ep3.high !== 8'bzzzz0111 ) begin $display("FAILED -- word_ep3.high = 'b%b", word_ep3.high); err=1; end
|
||||
if (word_ep3.low !== 8'bzzzz0000 ) begin $display("FAILED -- word_ep3.low = 'b%b", word_ep3.low ); err=1; end
|
||||
// access to parts of the whole structure
|
||||
if (word_sp0 !== 16'bxxxxxxxx_xxxxxxxx) begin $display("FAILED -- word_sp0 = 'b%b", word_sp0 ); err=1; end
|
||||
if (word_sp1 !== 16'bxxxx1111_1111xxxx) begin $display("FAILED -- word_sp1 = 'b%b", word_sp1 ); err=1; end
|
||||
if (word_sp2 !== 16'bxxxx1111_1111xxxx) begin $display("FAILED -- word_sp2 = 'b%b", word_sp2 ); err=1; end
|
||||
if (word_sp3 !== 16'bxxxx0111_1111xxxx) begin $display("FAILED -- word_sp3 = 'b%b", word_sp3 ); err=1; end
|
||||
if (word_sp1 !== 16'bzzzz1111_1111zzzz) begin $display("FAILED -- word_sp1 = 'b%b", word_sp1 ); err=1; end
|
||||
if (word_sp2 !== 16'bzzzz1111_1111zzzz) begin $display("FAILED -- word_sp2 = 'b%b", word_sp2 ); err=1; end
|
||||
if (word_sp3 !== 16'bzzzz0111_1111zzzz) begin $display("FAILED -- word_sp3 = 'b%b", word_sp3 ); err=1; end
|
||||
|
||||
if (!err) $display("PASSED");
|
||||
end
|
||||
|
|
|
|||
|
|
@ -99,9 +99,6 @@ br_gh306b normal ivltests
|
|||
case5-syn-fail normal ivltests
|
||||
casesynth7 normal ivltests
|
||||
casesynth8 normal ivltests
|
||||
dffsynth normal ivltests
|
||||
dffsynth8 normal ivltests
|
||||
memsynth1 normal ivltests
|
||||
memsynth2 normal ivltests
|
||||
memsynth3 normal ivltests
|
||||
memsynth5 normal ivltests
|
||||
|
|
|
|||
|
|
@ -75,7 +75,6 @@
|
|||
#------------------------------------------------------------------------------
|
||||
|
||||
# Escaped defines ``
|
||||
macro_str_esc normal ivltests gold=macro_str_esc.gold
|
||||
macro_with_args normal ivltests gold=macro_with_args.gold
|
||||
mcl1 normal ivltests gold=mcl1.gold
|
||||
pr622 normal ivltests gold=pr622.gold
|
||||
|
|
|
|||
|
|
@ -516,7 +516,6 @@ struct_packed_queue_fail CE,-g2009 ivltests
|
|||
struct_packed_sysfunct normal,-g2009 ivltests
|
||||
struct_packed_sysfunct2 normal,-g2009 ivltests
|
||||
struct_packed_uarray_fail CE,-g2009 ivltests
|
||||
struct_packed_write_read2 normal,-g2009 ivltests
|
||||
struct_invalid_member CE,-g2009 ivltests gold=struct_invalid_member.gold
|
||||
struct_signed normal,-g2009 ivltests
|
||||
sv-constants normal,-g2005-sv ivltests
|
||||
|
|
|
|||
|
|
@ -94,9 +94,6 @@ br_gh99x normal ivltests
|
|||
br_gh115 normal ivltests
|
||||
br_gh306a CE ivltests
|
||||
br_gh306b CE ivltests
|
||||
case1 normal ivltests
|
||||
case2 normal ivltests
|
||||
case3 normal ivltests
|
||||
case4 normal ivltests
|
||||
case5 normal ivltests
|
||||
case5-syn-fail CE ivltests
|
||||
|
|
@ -112,22 +109,10 @@ casesynth6 normal ivltests
|
|||
casesynth7 normal ivltests gold=casesynth7.gold
|
||||
casesynth8 CE ivltests
|
||||
casesynth9 normal ivltests
|
||||
casex_synth normal ivltests
|
||||
condit1 normal ivltests
|
||||
conditsynth1 normal ivltests
|
||||
conditsynth2 normal ivltests
|
||||
conditsynth3 normal ivltests
|
||||
dffsynth normal ivltests
|
||||
dffsynth2 normal ivltests
|
||||
dffsynth3 normal ivltests
|
||||
dffsynth4 normal ivltests
|
||||
dffsynth5 normal ivltests
|
||||
dffsynth6 normal ivltests
|
||||
dffsynth7 normal ivltests
|
||||
dffsynth8 CE ivltests
|
||||
dffsynth9 normal ivltests
|
||||
dffsynth10 normal ivltests
|
||||
dffsynth11 normal ivltests
|
||||
ff_dual_enable normal ivltests
|
||||
for_loop_synth normal ivltests
|
||||
for_loop_synth2 normal ivltests
|
||||
|
|
@ -138,7 +123,6 @@ inside_synth2 normal ivltests
|
|||
inside_synth3 normal ivltests
|
||||
land5 normal ivltests
|
||||
lcatsynth normal ivltests
|
||||
memsynth1 normal ivltests
|
||||
memsynth2 normal ivltests
|
||||
memsynth3 normal ivltests
|
||||
memsynth4 normal ivltests
|
||||
|
|
|
|||
|
|
@ -289,8 +289,6 @@ br_gh115 CE,-S ivltests
|
|||
basiclatch normal ivltests
|
||||
blocksynth2 normal ivltests
|
||||
blocksynth3 normal ivltests
|
||||
case1 normal ivltests
|
||||
case2 normal ivltests
|
||||
case4 normal ivltests
|
||||
case5 normal ivltests
|
||||
case5-syn-fail normal ivltests
|
||||
|
|
@ -299,20 +297,13 @@ casesynth1 normal ivltests
|
|||
casesynth2 normal ivltests
|
||||
casesynth3 normal ivltests
|
||||
casesynth7 NI
|
||||
casex_synth normal ivltests
|
||||
condit1 normal ivltests
|
||||
conditsynth1 normal ivltests
|
||||
conditsynth2 normal ivltests
|
||||
conditsynth3 normal ivltests
|
||||
dffsynth normal ivltests
|
||||
dffsynth3 normal ivltests
|
||||
dffsynth4 normal ivltests
|
||||
dffsynth9 normal ivltests
|
||||
dffsynth10 normal ivltests
|
||||
dffsynth11 normal ivltests
|
||||
|
||||
inside_synth normal ivltests
|
||||
inside_synth3 normal ivltests
|
||||
memsynth1 normal ivltests
|
||||
memsynth2 normal ivltests
|
||||
memsynth3 normal ivltests
|
||||
memsynth5 normal ivltests
|
||||
|
|
|
|||
|
|
@ -87,8 +87,6 @@ sv_default_port_value3 CE,-g2009 ivltests
|
|||
br_gh440 CE,-g2009 ivltests gold=br_gh440-v11.gold
|
||||
|
||||
# v11 has incomplete synthesis support
|
||||
dffsynth CE,-S ivltests
|
||||
memsynth1 CE,-S ivltests
|
||||
memsynth2 CE,-S ivltests
|
||||
memsynth3 CE,-S ivltests
|
||||
memsynth5 CE,-S ivltests
|
||||
|
|
@ -97,11 +95,6 @@ memsynth7 CE,-S ivltests
|
|||
memsynth9 CE,-S ivltests
|
||||
mix_reset CE,-S ivltests
|
||||
|
||||
# These tests pass, but synthesis is creating unnecessary latches.
|
||||
case1 normal ivltests
|
||||
case2 normal ivltests
|
||||
casex_synth normal ivltests
|
||||
|
||||
# For V11 vvp does not fail for these tests
|
||||
automatic_error11 normal ivltests gold=automatic_error11.gold
|
||||
automatic_error12 normal ivltests gold=automatic_error12.gold
|
||||
|
|
|
|||
|
|
@ -68,8 +68,6 @@
|
|||
#
|
||||
|
||||
# v11 has incomplete synthesis support
|
||||
dffsynth CE,-S ivltests
|
||||
memsynth1 CE,-S ivltests
|
||||
memsynth2 CE,-S ivltests
|
||||
memsynth3 CE,-S ivltests
|
||||
memsynth5 CE,-S ivltests
|
||||
|
|
@ -77,8 +75,3 @@ memsynth6 CE,-S ivltests
|
|||
memsynth7 CE,-S ivltests
|
||||
memsynth9 CE,-S ivltests
|
||||
mix_reset CE,-S ivltests
|
||||
|
||||
# These tests pass, but synthesis is creating unnecessary latches.
|
||||
case1 normal ivltests
|
||||
case2 normal ivltests
|
||||
casex_synth normal ivltests
|
||||
|
|
|
|||
|
|
@ -68,8 +68,6 @@
|
|||
#
|
||||
|
||||
# v13 has incomplete synthesis support
|
||||
dffsynth CE,-S ivltests
|
||||
memsynth1 CE,-S ivltests
|
||||
memsynth2 CE,-S ivltests
|
||||
memsynth3 CE,-S ivltests
|
||||
memsynth5 CE,-S ivltests
|
||||
|
|
@ -77,8 +75,3 @@ memsynth6 CE,-S ivltests
|
|||
memsynth7 CE,-S ivltests
|
||||
memsynth9 CE,-S ivltests
|
||||
mix_reset CE,-S ivltests
|
||||
|
||||
# These tests pass, but synthesis is creating unnecessary latches.
|
||||
case1 normal ivltests
|
||||
case2 normal ivltests
|
||||
casex_synth normal ivltests
|
||||
|
|
|
|||
|
|
@ -0,0 +1,26 @@
|
|||
|
||||
# Test list files are a list of test names and the json that
|
||||
# describes the test.
|
||||
|
||||
array_packed_write_read vvp_tests/array_packed_write_read.json
|
||||
case1 vvp_tests/case1.json
|
||||
case2 vvp_tests/case2.json
|
||||
case2-S vvp_tests/case2-S.json
|
||||
case3 vvp_tests/case3.json
|
||||
casex_synth vvp_tests/casex_synth.json
|
||||
dffsynth vvp_tests/dffsynth.json
|
||||
dffsynth-S vvp_tests/dffsynth-S.json
|
||||
dffsynth2 vvp_tests/dffsynth2.json
|
||||
dffsynth3 vvp_tests/dffsynth3.json
|
||||
dffsynth4 vvp_tests/dffsynth4.json
|
||||
dffsynth5 vvp_tests/dffsynth5.json
|
||||
dffsynth6 vvp_tests/dffsynth6.json
|
||||
dffsynth7 vvp_tests/dffsynth7.json
|
||||
dffsynth8 vvp_tests/dffsynth8.json
|
||||
dffsynth9 vvp_tests/dffsynth9.json
|
||||
dffsynth10 vvp_tests/dffsynth10.json
|
||||
dffsynth11 vvp_tests/dffsynth11.json
|
||||
macro_str_esc vvp_tests/macro_str_esc.json
|
||||
memsynth1 vvp_tests/memsynth1.json
|
||||
struct_packed_write_read vvp_tests/struct_packed_write_read.json
|
||||
struct_packed_write_read2 vvp_tests/struct_packed_write_read2.json
|
||||
|
|
@ -0,0 +1,242 @@
|
|||
'''Functions for running Icarus Verilog
|
||||
|
||||
'''
|
||||
|
||||
import subprocess
|
||||
import difflib
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
|
||||
def assemble_iverilog_cmd(source: str, it_dir: str, args: list) -> list:
    '''Build the iverilog command line for one test.

    The compiled image always goes to work/a.out, and the
    __ICARUS_UNSIZED__ macro is always predefined. Caller-supplied
    args are inserted before the source file path, which is taken
    relative to it_dir.'''

    return (["iverilog", "-o", os.path.join("work", "a.out"),
             "-D__ICARUS_UNSIZED__"]
            + args
            + [os.path.join(it_dir, source)])
|
||||
|
||||
|
||||
def assemble_vvp_cmd(args: list = None, plusargs: list = None) -> list:
    '''Build the vvp command line for one test.

    The original implementation accepted args and plusargs but silently
    dropped them. Fix: vvp options are placed before the image path and
    plusargs after it, which is the order vvp expects. The mutable
    default arguments are also replaced with None sentinels to avoid
    the shared-default-list pitfall. Calling with no arguments yields
    exactly the original command.

    :param args: extra options for the vvp command itself (optional).
    :param plusargs: runtime plusargs appended after the image path.
    :return: the argv list for subprocess.'''

    if args is None:
        args = []
    if plusargs is None:
        plusargs = []
    return ["vvp"] + args + [os.path.join("work", "a.out")] + plusargs
|
||||
|
||||
|
||||
def get_ivl_version() -> list:
    '''Figure out the version of the installed iverilog compiler.

    Runs "iverilog -V" and scans its output for the version banner.
    The return value is a list of 2 numbers, the major and minor version
    numbers, or None if the version string couldn't be found.'''

    # Get the output from the "iverilog -V" command for the version string.
    # Use a raw bytes pattern so that "\." is a proper regex escape rather
    # than an invalid string escape (a DeprecationWarning today, a
    # SyntaxError in future Python versions).
    text = subprocess.check_output(["iverilog", "-V"])
    match = re.search(rb'Icarus Verilog version ([0-9]+)\.([0-9]+)', text)
    if not match:
        return None

    return [int(match[1]), int(match[2])]
|
||||
|
||||
def build_runtime(it_key: str) -> None:
    '''Check and prepare the runtime environment for a test.

    This is called in front of tests to make sure that the directory
    structure is correct, and common temp files that might linger from
    a previous run are removed.

    :param it_key: the test name, used to locate its stale log file.'''

    # Make sure the log/ and work/ directories exist. makedirs with
    # exist_ok replaces the original mkdir/except-FileExistsError dance.
    os.makedirs("log", exist_ok=True)
    os.makedirs("work", exist_ok=True)

    # Remove stale outputs from a previous run, if any.
    for stale in (os.path.join("log", it_key + ".log"),
                  os.path.join("work", "a.out")):
        try:
            os.remove(stale)
        except FileNotFoundError:
            pass
|
||||
|
||||
def log_results(key, title, res) -> None:
    '''Write results into log files.

    Generate log files named after the key and title, with the stdout
    and stderr of the completed process stored in separate files under
    the log/ directory.'''

    for stream_name, data in (("stdout", res.stdout), ("stderr", res.stderr)):
        log_path = os.path.join("log", f"{key}-{title}-{stream_name}.log")
        with open(log_path, 'wb') as fd:
            fd.write(data)
|
||||
|
||||
|
||||
def compare_files(log_path, gold_path):
    '''Compare the log file and the gold file.

    Both files are read in as lines of text and compared. If they
    differ, a unified diff is written to stdout. In any case, return
    True or False to indicate the result of the comparison.'''

    def read_lines(path):
        with open(path, 'rt') as fd:
            return fd.readlines()

    log_lines = read_lines(log_path)
    gold_lines = read_lines(gold_path)

    if log_lines == gold_lines:
        return True

    print(f"{log_path} and {gold_path} differ:")
    sys.stdout.writelines(
        difflib.unified_diff(log_lines, gold_lines, log_path, gold_path))
    return False
|
||||
|
||||
|
||||
def run_CE(options: dict) -> list:
    '''Run the compiler, and expect an error.

    In this case, assert that the iverilog command fails to run and
    reports an error. This checks that invalid input actually
    generates diagnostics.'''

    key = options['key']

    # Make sure the work/log areas are in a known-clean state.
    build_runtime(key)

    # Compile the source and record the outputs.
    cmd = assemble_iverilog_cmd(options['source'],
                                options['directory'],
                                options['iverilog_args'])
    res = subprocess.run(cmd, capture_output=True)
    log_results(key, "iverilog", res)

    # A zero status means the compiler wrongly accepted the input, and
    # a status >= 256 means the compiler itself crashed rather than
    # reporting an ordinary error.
    if res.returncode == 0:
        return [1, "Failed - CE (no error reported)"]
    if res.returncode >= 256:
        return [1, "Failed - CE (execution error)"]
    return [0, "Passed - CE"]
|
||||
|
||||
|
||||
def do_run_normal(options: dict, expected_fail: bool) -> list:
    '''Run the iverilog and vvp commands.

    Compile the test source to a vvp image, execute it with the vvp
    command, and decide pass/fail from (in priority order) a gold file
    comparison, a diff-file comparison, or a "PASSED" line in stdout.
    When expected_fail is set, the pass/fail sense is inverted.'''

    key = options['key']
    gold = options['gold']
    diff = options['diff']

    build_runtime(key)

    # Compile the source with the iverilog command.
    compile_cmd = assemble_iverilog_cmd(options['source'],
                                        options['directory'],
                                        options['iverilog_args'])
    compile_res = subprocess.run(compile_cmd, capture_output=True)

    log_results(key, "iverilog", compile_res)
    if compile_res.returncode != 0:
        return [1, "Failed - Compile failed"]

    # Execute the compiled image with the vvp command.
    sim_res = subprocess.run(assemble_vvp_cmd(), capture_output=True)
    log_results(key, "vvp", sim_res)

    if sim_res.returncode != 0:
        return [1, "Failed - Vvp execution failed"]

    sim_stdout = sim_res.stdout.decode('ascii')

    # If there is a gold file configured, the test result depends on
    # all four captured streams matching the gold files. Every stream
    # is compared (no short-circuit) so all diffs get printed.
    if gold is not None:
        matched = True
        for tool in ("iverilog", "vvp"):
            for stream in ("stdout", "stderr"):
                log_path = os.path.join("log", f"{key}-{tool}-{stream}.log")
                gold_path = os.path.join("gold", f"{gold}-{tool}-{stream}.gold")
                matched = compare_files(log_path, gold_path) and matched

        if expected_fail:
            if matched:
                return [1, "Failed - Passed, but expected failure"]
            return [0, "Passed - Expected fail"]
        if matched:
            return [0, "Passed"]
        return [1, "Failed - Gold output doesn't match actual output."]

    # If there is a diff description, then compare named files instead
    # of the log and a gold file.
    if diff is not None:
        path_a = diff[0]
        path_b = diff[1]
        skip_count = int(diff[2])

        def tail_of(path):
            # Skip the first skip_count lines, return the rest verbatim.
            with open(path) as fd:
                for _ in range(skip_count):
                    fd.readline()
                return fd.read()

        files_match = tail_of(path_a) == tail_of(path_b)

        if expected_fail:
            if files_match:
                return [1, "Failed - Passed, but expected failure"]
            return [0, "Passed"]
        if files_match:
            return [0, "Passed"]
        return [1, f"Failed - Files {path_a} and {path_b} differ."]

    # Otherwise, look for the PASSED output string in stdout.
    if any(line == "PASSED" for line in sim_stdout.splitlines()):
        if expected_fail:
            return [1, "Failed - Passed, but expected failure"]
        return [0, "Passed"]

    # If there is no PASSED output, and nothing else to check, then
    # assume a failure (which is a pass when a failure was expected).
    if expected_fail:
        return [0, "Passed"]
    return [1, "Failed - No PASSED output, and no gold file"]
|
||||
|
||||
|
||||
def run_normal(options: dict) -> list:
    '''Run a "normal" test: success is the expected outcome.'''
    return do_run_normal(options, False)


def run_EF(options: dict) -> list:
    '''Run an "EF" test: the run time is expected to fail.'''
    return do_run_normal(options, True)
|
||||
|
|
@ -59,10 +59,8 @@
|
|||
# gold or diff commands.
|
||||
#
|
||||
|
||||
struct_packed_write_read normal,-g2009 ivltests
|
||||
struct_packed_value_list normal,-g2009 ivltests
|
||||
|
||||
array_packed_write_read normal,-g2009 ivltests
|
||||
array_packed_value_list normal,-g2009 ivltests
|
||||
array_packed_sysfunct normal,-g2009 ivltests
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,76 @@
|
|||
|
||||
'''Functions for processing test list files
|
||||
|
||||
The read_lists() function is the main export of this package. This function
|
||||
takes a list of file names, reads all the test items in the files, and
|
||||
puts the result into a sorted list.
|
||||
|
||||
The tests list file is formatted like so:
|
||||
|
||||
<key> <type> <directory> <other>
|
||||
|
||||
The <key> is the name of the test. This is used to generate the source file
|
||||
name for the test program.
|
||||
|
||||
The <directory> is the name of a subdirectory where we search for the test.
|
||||
So for example, if <key>==foo and <directory>==bar, then the Verilog source
|
||||
file will be inferred to be bar/foo.v.
|
||||
|
||||
The <type> is the test type.
|
||||
|
||||
The <other> field sets up how the tests will be checked. Things like gold
|
||||
files and working directories are given here.
|
||||
'''
|
||||
|
||||
def read_list(fd) -> list:
    '''Return a list of test items (each in list form) from the file.

    The input fd is the file opened in text mode. Each line is stripped
    of comments ("#" to end of line) and surrounding white space, then
    split into tokens. Blank lines are dropped, and the surviving lines
    are returned as a list of token lists. Used by read_lists().'''

    items = []
    for raw_line in fd:
        # Strip comments and leading/trailing white space.
        comment_at = raw_line.find("#")
        if comment_at < 0:
            comment_at = len(raw_line)
        text = raw_line[:comment_at].strip()

        # Split into tokens, skipping lines with no content left.
        tokens = text.split()
        if tokens:
            items.append(tokens)

    return items
|
||||
|
||||
|
||||
def read_lists(paths: list) -> list:
    '''Read the paths in the list, and return the list of tests.

    The input is a list of list file names, and the result is a list
    of all the tests, sorted, and with duplicates resolved. The order
    of the test file lists is important, as is the order of tests
    within each list file.'''

    # Gather every test item, in file order.
    all_items = []
    for list_path in paths:
        with open(list_path, "r") as fd:
            all_items += read_list(fd)

    # Keep only the last occurrence of each test name: later list files
    # (and later lines) override tests that were already declared.
    items_by_name = {item[0]: item for item in all_items}

    # Return the surviving items as a sorted list.
    return sorted(items_by_name.values())
|
||||
|
|
@ -0,0 +1,110 @@
|
|||
#! python3
|
||||
'''
|
||||
Usage:
|
||||
vvp_reg
|
||||
vvp_reg <list-paths>...
|
||||
|
||||
<list-paths> is a list of files in the current working directory that
|
||||
each contain a list of tests. By convention, the file has the
|
||||
suffix ".list". The files will be processed in order, so tests
|
||||
can be overridden if listed twice. If no files are given, a
|
||||
default list is used.
|
||||
'''
|
||||
|
||||
import sys
|
||||
# It appears that docopt doesn't work on msys2 installations, so
|
||||
# skip it completely on win32 platforms.
|
||||
if sys.platform != 'win32':
|
||||
from docopt import docopt
|
||||
import test_lists
|
||||
import json
|
||||
import run_ivl
|
||||
|
||||
|
||||
def process_test(item: list) -> str:
    '''Process a single test.

    This takes in the list of tokens from the tests list file, converts
    (interprets) them to a collection of values, then dispatches to the
    runner for the requested test type.'''

    # The test name, and the path of its JSON description file.
    test_key = item[0]
    json_path = item[1]
    with open(json_path, 'rt') as fd:
        config = json.load(fd)

    # Get the test type from the json configuration.
    test_type = config['type']

    # Wrap all of this into an options dictionary for ease of handling.
    options = {
        'key': test_key,
        'type': test_type,
        'iverilog_args': config.get('iverilog-args', []),
        'directory': "ivltests",
        'source': config['source'],
        'modulename': None,
        'gold': config.get('gold', None),
        'diff': None
    }

    # Dispatch on the test type.
    if test_type == "NI":
        return [0, "Not Implemented."]
    if test_type == "normal":
        return run_ivl.run_normal(options)
    if test_type == "CE":
        return run_ivl.run_CE(options)
    if test_type == "EF":
        return run_ivl.run_EF(options)

    message = f"{test_key}: I don't understand the test type ({test_type})."
    raise Exception(message)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    print(f"Running tests on platform: {sys.platform}")
    if sys.platform == 'win32':
        # docopt doesn't work on msys2 installations, so fake an
        # empty argument parse on win32 platforms.
        args = { "<list-paths>" : [] }
    else:
        args = docopt(__doc__)

    # This returns [13, 0] or similar, or None when the iverilog
    # banner can't be parsed. The original code indexed the result
    # unconditionally and crashed with a TypeError in the None case;
    # report a clear error instead.
    ivl_version = run_ivl.get_ivl_version()
    if ivl_version is None:
        print("Unable to determine the Icarus Verilog version.")
        exit(1)
    ivl_version_major = ivl_version[0]
    print(f"Icarus Verilog version: {ivl_version_major}")

    # Select the lists to use. If any list paths are given on the command
    # line, then use only those. Otherwise, use a default list.
    list_paths = args["<list-paths>"]
    if len(list_paths) == 0:
        list_paths = ["regress-vvp.list"]

    print(f"Use lists: {list_paths}")

    # Read the list files, to get the tests.
    tests_list = test_lists.read_lists(list_paths)

    # We need the width of the widest key so that we can figure out
    # how to align the key:result columns.
    width = max((len(cur[0]) for cur in tests_list), default=0)

    # Run every test, counting failures and reporting each result in
    # an aligned key:result column layout.
    error_count = 0
    for cur in tests_list:
        result = process_test(cur)
        error_count += result[0]
        print(f"{cur[0]:>{width}}: {result[1]}")

    print("===================================================")
    print(f"Test results: Ran {len(tests_list)}, Failed {error_count}.")
    exit(error_count)
|
||||
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
|
||||
This directory contains configurations for the tests that test the iverilog
|
||||
compiler with the vvp simulation engine. Each test file is actually a JSON
|
||||
file that calls out the test type, names the source file, the gold file, any
|
||||
command argument flags.
|
||||
|
||||
{
|
||||
"type" : "normal",
|
||||
"source" : "macro_str_esc.v",
|
||||
"gold" : "macro_str_esc"
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "array_packed_write_read.v",
|
||||
"iverilog-args" : [ "-g2009" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "case1.v"
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "case2.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "case2.v"
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "case3.v"
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "casex_synth.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "NI",
|
||||
"source" : "dffsynth.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth.v"
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth10.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth11.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth2.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth3.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth4.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth5.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth6.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth7.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "NI",
|
||||
"source" : "dffsynth8.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "dffsynth9.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "macro_str_esc.v",
|
||||
"gold" : "macro_str_esc"
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "NI",
|
||||
"source" : "memsynth1.v",
|
||||
"iverilog-args" : [ "-S" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "struct_packed_write_read.v",
|
||||
"iverilog-args" : [ "-g2009" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"type" : "normal",
|
||||
"source" : "struct_packed_write_read2.v",
|
||||
"iverilog-args" : [ "-g2009" ]
|
||||
}
|
||||
Loading…
Reference in New Issue