Tests: Remove old benchmarksim, should use rtlmeter instead
parent 5115be6e6b
commit 7e5c5d677f
@@ -2630,11 +2630,6 @@ verilator_flags / verilator_flags2
    Verilator. If a flag is a standard flag, ``+incdir`` for example, pass
    it with ``v_flags2`` instead.
 
-benchmarksim
-   Output the number of model evaluations and execution time of a test to
-   ``<test_output_dir>/<test_name>_benchmarksim.csv``. Multiple invocations
-   of the same test file will append to the same .csv file.
-
 xsim_flags / xsim_flags2 / xsim_run_flags
    The equivalent of ``v_flags``, ``v_flags2`` and ``all_run_flags``, but
    only for use with the Xilinx XSim simulator.
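For context on what is being removed: judging from the init_benchmarksim() header and the generated writer deleted further down in this diff, the appended .csv looked roughly like the sample below. The numeric rows are illustrative values only, one row appended per invocation; <test_name> stands in for the actual test name.

    # Verilator simulation benchmark data
    # Test name: <test_name>
    # Top file: t/t_gen_alw.v
    evals, time[s]
    100000,0.012345678
    100000,0.010987654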
@@ -702,7 +702,6 @@ class VlTest:
         self.name = match.group(2)  # Name of this test
 
         self.benchmark = Args.benchmark
-        self.benchmarksim = False
         self.clean_command = None
         self.context_threads = 0  # Number of threads to allocate in the context
         self.errors = None
@@ -891,16 +890,6 @@ class VlTest:
     def _define_opt_calc(self) -> str:
         return "--define " if self.xsim else "+define+"
 
-    def init_benchmarksim(self) -> None:
-        # Simulations with benchmarksim enabled append to the same file between runs.
-        # Test files must ensure a clean benchmark data file before executing tests.
-        filename = self.benchmarksim_filename
-        with open(filename, 'w', encoding="utf8") as fh:
-            fh.write("# Verilator simulation benchmark data\n")
-            fh.write("# Test name: " + self.name + "\n")
-            fh.write("# Top file: " + self.top_filename + "\n")
-            fh.write("evals, time[s]\n")
-
     def soprint(self, message: str) -> str:
         message = message.rstrip() + "\n"
         message = self.scenario + "/" + self.name + ": " + message
@@ -1138,9 +1127,6 @@ class VlTest:
         else:
             self.trace_format = 'vcd-c'  # pylint: disable=attribute-defined-outside-init
 
-        if param.get('benchmarksim', None):
-            self.benchmarksim = True  # pylint: disable=attribute-defined-outside-init
-
         verilator_flags = [*param.get('verilator_flags', "")]
         if Args.gdb:
             verilator_flags += ["--gdb"]
@@ -1687,10 +1673,6 @@ class VlTest:
             VlTest._cached_aslr_off = ""
         return VlTest._cached_aslr_off
 
-    @property
-    def benchmarksim_filename(self) -> str:
-        return self.obj_dir + "/" + self.name + "_benchmarksim.csv"
-
     @property
     def driver_verilator_flags(self) -> list:
         return Args.passdown_verilator_flags
@@ -2038,10 +2020,6 @@ class VlTest:
                  str(int(round(self.main_time_multiplier, 0))) + "\n")
 
         fh.write("#include <memory>\n")
-        if self.benchmarksim:
-            fh.write("#include <fstream>\n")
-            fh.write("#include <chrono>\n")
-            fh.write("#include <iomanip>\n")
 
         fh.write("// OS header\n")
         fh.write('#include "verilatedos.h"' + "\n")
@@ -2139,11 +2117,6 @@ class VlTest:
             fh.write("        topp->eval();\n")
             setp = "topp->"
 
-        if self.benchmarksim:
-            fh.write("    std::chrono::time_point<std::chrono::steady_clock> starttime;\n")
-            fh.write("    bool warm = false;\n")
-            fh.write("    uint64_t n_evals = 0;\n")
-
         if self.trace:
             fh.write("\n")
             fh.write("#if VM_TRACE\n")
@@ -2249,27 +2222,9 @@ class VlTest:
         fh.write("            return 0;\n")
         fh.write("        }\n")
         self._print_advance_time(fh, self.sc_time_resolution_multiplier, action)
-        if self.benchmarksim:
-            fh.write("        if (VL_UNLIKELY(!warm)) {\n")
-            fh.write("            starttime = std::chrono::steady_clock::now();\n")
-            fh.write("            warm = true;\n")
-            fh.write("        } else {\n")
-            fh.write("            ++n_evals;\n")
-            fh.write("        }\n")
-
         fh.write("    }\n")
 
-        if self.benchmarksim:
-            fh.write("    {\n")
-            fh.write("        const std::chrono::duration<double> exec_s"
-                     " = std::chrono::steady_clock::now() - starttime;\n")
-            fh.write("        std::ofstream benchfile(\"" + self.benchmarksim_filename +
-                     "\", std::ofstream::out | std::ofstream::app);\n")
-            fh.write("        benchfile << std::fixed << std::setprecision(9)"
-                     " << n_evals << \",\" << exec_s.count() << std::endl;\n")
-            fh.write("        benchfile.close();\n")
-            fh.write("    }\n")
 
         fh.write("    if (!contextp->gotFinish()) {\n")
         fh.write('        vl_fatal(__FILE__, __LINE__, "main",' +
                  ' "%Error: Timeout; never got a $finish");' + "\n")
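Pieced together from the deleted fh.write() calls in the three hunks above (hence the removed <fstream>/<chrono>/<iomanip> includes), the instrumentation this commit strips from the generated C++ harness looked roughly like the sketch below. Indentation is approximate, and "<obj_dir>/<test_name>_benchmarksim.csv" stands in for the interpolated value of benchmarksim_filename:

    // Declared in the generated main(), before the evaluation loop
    std::chrono::time_point<std::chrono::steady_clock> starttime;
    bool warm = false;
    uint64_t n_evals = 0;

    // Inside the loop: the first eval starts the clock (warm-up); later evals are counted
    if (VL_UNLIKELY(!warm)) {
        starttime = std::chrono::steady_clock::now();
        warm = true;
    } else {
        ++n_evals;
    }

    // After the loop: append one "evals,seconds" row to the benchmark .csv
    {
        const std::chrono::duration<double> exec_s = std::chrono::steady_clock::now() - starttime;
        std::ofstream benchfile("<obj_dir>/<test_name>_benchmarksim.csv",
                                std::ofstream::out | std::ofstream::app);
        benchfile << std::fixed << std::setprecision(9) << n_evals << "," << exec_s.count() << std::endl;
        benchfile.close();
    }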
@@ -12,43 +12,12 @@ import vltest_bootstrap
 test.scenarios('vlt')
 test.top_filename = "t/t_gen_alw.v"  # Use any top file
 
-test.init_benchmarksim()
-
 # As an example, compile and simulate the top file with varying optimization level
 l_opts = ['-O0', '-O1', '-O2', '-O3']
 
 for l_opt in l_opts:
-    test.compile(benchmarksim=1, v_flags2=[l_opt])
+    test.compile(v_flags2=[l_opt])
 
     test.execute()
-
-filename = test.benchmarksim_filename
-gotn = 0
-with open(filename, 'r', encoding="utf8") as fh:
-    lineno = 0
-    headered = False
-    for line in fh:
-        lineno += 1
-        if re.match(r'^#', line):
-            continue
-        if not headered:
-            headered = True
-            if not re.search(r'evals, ', line):
-                test.error(filename + ":" + str(lineno) + ": Expected header but found: " + line)
-        else:
-            m = re.search(r'(\d+\.?\d*),(\d+\.?\d*)', line)
-            if not m:
-                test.error(filename + ":" + str(lineno) + ": Expected 2 tokens on line: " + line)
-                continue
-            cycles = float(m.group(1))
-            time = float(m.group(2))
-            if cycles <= 0.0 or time <= 0.0:
-                test.error(filename + ":" + str(lineno) + ": Invalid data on line: " + line)
-                continue
-            gotn += 1
-
-n_lines_expected = len(l_opts)
-if gotn != int(n_lines_expected):
-    test.error("Expected " + str(n_lines_expected) + " lines but found " + str(gotn))
 
 test.passes()
@@ -12,7 +12,6 @@ import vltest_bootstrap
 test.priority(30)
 test.scenarios('vlt_all')
 
-test.init_benchmarksim()
 test.cycles = (int(test.benchmark) if test.benchmark else 100000)
 test.sim_time = test.cycles * 10 + 1000
 
@@ -23,7 +22,6 @@ HIER_THREADS = 4
 config_file = test.t_dir + "/" + test.name + ".vlt"
 
 test.compile(
-    benchmarksim=1,
     v_flags2=[
         config_file, "+define+SIM_CYCLES=" + str(test.cycles), "--hierarchical", "--stats",
         (f"-DWORKERS={HIER_BLOCK_THREADS}" if test.vltmt and HIER_BLOCK_THREADS > 1 else ""),
@@ -12,7 +12,6 @@ import vltest_bootstrap
 test.priority(30)
 test.scenarios('vlt_all')
 
-test.init_benchmarksim()
 test.cycles = (int(test.benchmark) if test.benchmark else 100000)
 test.sim_time = test.cycles * 10 + 1000
 
@@ -23,7 +22,6 @@ HIER_THREADS = 4
 config_file = test.t_dir + "/" + test.name + ".vlt"
 
 test.compile(
-    benchmarksim=1,
     v_flags2=[
         config_file, "+define+SIM_CYCLES=" + str(test.cycles), "--prof-exec", "--hierarchical",
         "--stats", "-Wno-UNOPTFLAT",
@@ -18,7 +18,7 @@ threads = 2
 config_file = test.t_dir + "/" + test.name + ".vlt"
 flags = [config_file, "--hierarchical", "-Wno-UNOPTFLAT", "-DSIM_CYCLES=" + str(cycles)]
 
-test.compile(benchmarksim=1, v_flags2=["--prof-pgo"] + flags, threads=threads)
+test.compile(v_flags2=["--prof-pgo"] + flags, threads=threads)
 
 test.execute(all_run_flags=[
     "+verilator+prof+exec+start+0",
@@ -33,10 +33,9 @@ test.file_grep(test.obj_dir + "/profile.vlt", r'profile_data -model "V' + test.n
 # Check for cost rollovers
 test.file_grep_not(test.obj_dir + "/profile.vlt", r'.*cost 64\'d\d{18}.*')
 
-# Differentiate benchmarksim results
+# Differentiate results
 test.name = test.name + "_optimized"
 test.compile(
-    benchmarksim=1,
     # Intentionally no --prof-pgo here to make sure profile data can be read in
     # without it (that is: --prof-pgo has no effect on profile_data hash names)
     v_flags2=[test.obj_dir + "/profile.vlt"] + flags,