# OpenRAM/compiler/tests/testutils.py
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
import sys, os, glob
sys.path.append(os.getenv("OPENRAM_HOME"))
from globals import OPTS
import debug
import pdb
import traceback

class openram_test(unittest.TestCase):
    """ Base unit test with shared helpers for OpenRAM unit tests. """

    def local_drc_check(self, w):
        """ Write the GDS of module w to the temp dir and fail the test if DRC fails. """
        self.reset()
        tempgds = "{}.gds".format(w.name)
        w.gds_write("{0}{1}".format(OPTS.openram_temp, tempgds))
        import verify
        result = verify.run_drc(w.name, tempgds, None)
        if result != 0:
            self.fail("DRC failed: {}".format(w.name))

        if not OPTS.keep_temp:
            self.cleanup()

    def local_check(self, a, final_verification=False):
        """
        Write the netlist (and GDS unless netlist_only) of module a to the
        temp dir, then run DRC and LVS and fail the test on any error.
        """
        self.reset()
        tempspice = "{}.sp".format(a.name)
        tempgds = "{}.gds".format(a.name)
        a.lvs_write("{0}{1}".format(OPTS.openram_temp, tempspice))
        # cannot write gds in netlist_only mode
        if not OPTS.netlist_only:
            a.gds_write("{0}{1}".format(OPTS.openram_temp, tempgds))
        import verify

        # Run both DRC and LVS even if DRC might fail.
        # Magic can still extract despite DRC failing, so it might be ok in some techs
        # if we ignore things like minimum metal area of pins.
        drc_result = verify.run_drc(a.name, tempgds, tempspice, extract=True, final_verification=final_verification)

        # We can still run LVS even if DRC fails in Magic OR Calibre
        lvs_result = verify.run_lvs(a.name, tempgds, tempspice, final_verification=final_verification)

        # Only allow DRC to fail and LVS to pass if we are using magic
        if lvs_result == 0 and drc_result != 0:
            self.fail("DRC failed but LVS passed: {}".format(a.name))
        elif drc_result != 0:
            self.fail("DRC failed: {}".format(a.name))

        if lvs_result != 0:
            self.fail("LVS mismatch: {}".format(a.name))

        if not OPTS.keep_temp:
            self.cleanup()

    def run_pex(self, a, output=None):
        """
        Write the GDS of module a and run parasitic extraction (PEX).
        Fails the test on a PEX error; returns the output argument unchanged.
        """
        tempspice = "{}.sp".format(a.name)
        tempgds = "{}.gds".format(a.name)

        a.gds_write("{0}{1}".format(OPTS.openram_temp, tempgds))
        import verify
        result = verify.run_pex(a.name, tempgds, tempspice, final_verification=False)
        if result != 0:
            self.fail("PEX ERROR: {}".format(a.name))
        return output

    def find_feasible_test_period(self, delay_obj, sram, load, slew):
        """Creates a delay simulation to determine a feasible period for the functional tests to run.
        Only determines the feasible period for a single port and assumes that for all ports for performance.
        """
        debug.info(1, "Finding feasible period for current test.")
        delay_obj.set_load_slew(load, slew)

        # Only test one port, assumes other ports have similar period.
        test_port = delay_obj.read_ports[0]
        delay_obj.analysis_init(probe_address="1" * sram.addr_size, probe_data=sram.word_size - 1)
        delay_obj.find_feasible_period_one_port(test_port)
        return delay_obj.period

    def cleanup(self):
        """ Reset the duplicate checker and cleanup files. """
        files = glob.glob(OPTS.openram_temp + '*')
        for f in files:
            # Only remove the files (leave subdirectories alone)
            if os.path.isfile(f):
                os.remove(f)

    def reset(self):
        """
        Reset everything after each test.
        """
        # Reset the static duplicate name checker for unit tests.
        import hierarchy_design
        hierarchy_design.hierarchy_design.name_map = []

    def check_golden_data(self, data, golden_data, error_tolerance=1e-2):
        """
        This function goes through two dictionaries, key by key and compares
        each item. It uses relative comparisons for the items and returns false
        if there is a mismatch.
        """
        # Check each result (list values are compared element by element)
        data_matches = True
        for k in data.keys():
            if type(data[k]) == list:
                for i in range(len(data[k])):
                    if not self.isclose(k, data[k][i], golden_data[k][i], error_tolerance):
                        data_matches = False
            else:
                if not self.isclose(k, data[k], golden_data[k], error_tolerance):
                    data_matches = False
        if not data_matches:
            import pprint
            data_string = pprint.pformat(data)
            debug.error("Results exceeded {:.1f}% tolerance compared to golden results:\n".format(error_tolerance * 100) + data_string)
        return data_matches

    def isclose(self, key, value, actual_value, error_tolerance=1e-2):
        """ This is used to compare relative values. Logs the comparison result. """
        relative_diff = self.relative_diff(value, actual_value)
        if relative_diff <= error_tolerance:
            debug.info(2, "CLOSE\t{0: <10}\t{1:.3f}\t{2:.3f}\tdiff={3:.1f}%".format(key, value, actual_value, relative_diff * 100))
            return True
        else:
            debug.error("NOT CLOSE\t{0: <10}\t{1:.3f}\t{2:.3f}\tdiff={3:.1f}%".format(key, value, actual_value, relative_diff * 100))
            return False

    def relative_diff(self, value1, value2):
        """ Compute the relative difference of two values and normalize to the largest.
        If largest value is 0, just return the difference."""
        # Edge case to avoid divide by zero
        if value1 == 0 and value2 == 0:
            return 0.0
        # Don't need relative, exact compare
        if value1 == value2:
            return 0.0
        # Get normalization value
        norm_value = abs(max(value1, value2))
        return abs(value1 - value2) / norm_value

    def relative_compare(self, value, actual_value, error_tolerance):
        """ This is used to compare relative values. """
        if (value == actual_value):  # if we don't need a relative comparison!
            return True
        return (abs(value - actual_value) / max(value, actual_value) <= error_tolerance)

    def isapproxdiff(self, filename1, filename2, error_tolerance=0.001):
        """Compare two files, allowing numbers to differ by a relative tolerance.
        Arguments:
        filename1 -- First file name
        filename2 -- Second file name
        Return value:
        True if the files are the same, False otherwise.
        """
        import re

        numeric_const_pattern = r"""
        [-+]? # optional sign
        (?:
        (?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
        |
        (?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc
        )
        # followed by optional exponent part if desired
        (?: [Ee] [+-]? \d+ ) ?
        """
        rx = re.compile(numeric_const_pattern, re.VERBOSE)

        mismatches = 0
        line_num = 0
        # Use context managers so the files are closed even on an exception.
        with open(filename1, 'rb') as fp1, open(filename2, 'rb') as fp2:
            while True:
                line_num += 1
                line1 = fp1.readline().decode('utf-8')
                line2 = fp2.readline().decode('utf-8')

                # 1. Find all of the floats using a regex
                line1_floats = rx.findall(line1)
                line2_floats = rx.findall(line2)
                debug.info(3, "line1_floats: " + str(line1_floats))
                debug.info(3, "line2_floats: " + str(line2_floats))

                # 2. Remove the floats from the strings so the text can be compared exactly
                for f in line1_floats:
                    line1 = line1.replace(f, "", 1)
                for f in line2_floats:
                    line2 = line2.replace(f, "", 1)

                # 3. Convert to floats rather than strings
                line1_floats = [float(x) for x in line1_floats]
                # BUG FIX: this previously converted line1_floats again, so the
                # numeric comparisons below always compared line1 to itself and
                # could never detect a float (or float-count) mismatch.
                line2_floats = [float(x) for x in line2_floats]

                # 4. Check if remaining string matches
                if line1 != line2:
                    if mismatches == 0:
                        debug.error("Mismatching files:\nfile1={0}\nfile2={1}".format(filename1, filename2))
                    mismatches += 1
                    debug.error("MISMATCH Line ({0}):\n{1}\n!=\n{2}".format(line_num, line1.rstrip('\n'), line2.rstrip('\n')))
                # 5. Now compare that the floats match
                elif len(line1_floats) != len(line2_floats):
                    if mismatches == 0:
                        debug.error("Mismatching files:\nfile1={0}\nfile2={1}".format(filename1, filename2))
                    mismatches += 1
                    debug.error("MISMATCH Line ({0}) Length {1} != {2}".format(line_num, len(line1_floats), len(line2_floats)))
                else:
                    for (float1, float2) in zip(line1_floats, line2_floats):
                        relative_diff = self.relative_diff(float1, float2)
                        if relative_diff > error_tolerance:
                            if mismatches == 0:
                                debug.error("Mismatching files:\nfile1={0}\nfile2={1}".format(filename1, filename2))
                            mismatches += 1
                            debug.error("MISMATCH Line ({0}) Float {1} != {2} diff: {3:.1f}%".format(line_num, float1, float2, relative_diff * 100))

                # Stop at EOF on both files; only show the first 10 mismatch lines
                if (not line1 and not line2) or mismatches > 10:
                    return mismatches == 0

    def isdiff(self, filename1, filename2):
        """ This is used to compare two files and display the diff if they are different. """
        import filecmp
        import difflib

        if filecmp.cmp(filename1, filename2):
            debug.info(2, "MATCH {0} {1}".format(filename1, filename2))
            return True

        debug.error("MISMATCH file1={0} file2={1}".format(filename1, filename2))
        # Context managers guarantee the files are closed.
        with open(filename1, mode="r", encoding='utf-8') as f1:
            s1 = f1.readlines()
        with open(filename2, mode="r", encoding='utf-8') as f2:
            s2 = f2.readlines()

        mismatches = 0
        for line in difflib.unified_diff(s1, s2):
            # BUG FIX: the header was previously printed only when
            # mismatches==0 *after* incrementing, so it was unreachable.
            if mismatches == 0:
                print("DIFF LINES:")
            mismatches += 1
            # Only show the first 10 diff lines
            if mismatches < 11:
                print(line.rstrip('\n'))
            else:
                return False
        return False
def dbg():
    """ Drop into the pdb interactive debugger at the call site. """
    import pdb
    pdb.set_trace()
def header(filename, technology):
    """ Print a banner identifying the test file, technology, and temp directory. """
    # Skip the header for gitlab regression
    import getpass
    if getpass.getuser() == "gitlab-runner":
        return

    top_border = " ______________________________________________________________________________ "
    bar = "|==============================================================================|"
    banner_lines = ["Running Test for:", technology, filename]

    print("\n")
    print(top_border)
    print(bar)
    for text in banner_lines:
        print("|=========" + text.center(60) + "=========|")

    from globals import OPTS
    # openram_temp may be unset (e.g. before globals.init_openram)
    if OPTS.openram_temp:
        print("|=========" + OPTS.openram_temp.center(60) + "=========|")
    print(bar)
def debugTestRunner(post_mortem=None):
    """unittest runner doing post mortem debugging on failing tests"""
    # Default to pdb's post-mortem only when debugging is enabled globally.
    if post_mortem is None and OPTS.debug:
        post_mortem = pdb.post_mortem

    class DebugTestResult(unittest.TextTestResult):
        """ TextTestResult that prints the traceback and optionally enters the debugger. """

        def _report(self, err):
            # Show the failure immediately, then hand control to the debugger if enabled.
            traceback.print_exception(*err)
            if post_mortem:
                post_mortem(err[2])

        def addError(self, test, err):
            # called before tearDown()
            self._report(err)
            super(DebugTestResult, self).addError(test, err)

        def addFailure(self, test, err):
            self._report(err)
            super(DebugTestResult, self).addFailure(test, err)

    return unittest.TextTestRunner(resultclass=DebugTestResult)