PEP8 format testutils.

This commit is contained in:
mrg 2020-06-05 09:44:30 -07:00
parent e14deff3d1
commit fb3acae908
1 changed file with 68 additions and 68 deletions

View File

@ -5,23 +5,21 @@
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest,warnings
import pdb,traceback
import sys,os,glob,copy
import shutil
import unittest
import sys, os, glob
sys.path.append(os.getenv("OPENRAM_HOME"))
from globals import OPTS
import debug
class openram_test(unittest.TestCase):
""" Base unit test that we have some shared classes in. """
def local_drc_check(self, w):
self.reset()
tempgds = "{0}{1}.gds".format(OPTS.openram_temp,w.name)
tempgds = "{0}{1}.gds".format(OPTS.openram_temp, w.name)
w.gds_write(tempgds)
import verify
@ -36,8 +34,8 @@ class openram_test(unittest.TestCase):
self.reset()
tempspice = "{0}{1}.sp".format(OPTS.openram_temp,a.name)
tempgds = "{0}{1}.gds".format(OPTS.openram_temp,a.name)
tempspice = "{0}{1}.sp".format(OPTS.openram_temp, a.name)
tempgds = "{0}{1}.gds".format(OPTS.openram_temp, a.name)
a.lvs_write(tempspice)
# cannot write gds in netlist_only mode
@ -56,34 +54,36 @@ class openram_test(unittest.TestCase):
# Only allow DRC to fail and LVS to pass if we are using magic
if "magic" in OPTS.drc_exe and lvs_result == 0 and drc_result != 0:
#zip_file = "/tmp/{0}_{1}".format(a.name,os.getpid())
#debug.info(0,"Archiving failed files to {}.zip".format(zip_file))
#shutil.make_archive(zip_file, 'zip', OPTS.openram_temp)
# import shutil
# zip_file = "/tmp/{0}_{1}".format(a.name, os.getpid())
# debug.info(0, "Archiving failed files to {}.zip".format(zip_file))
# shutil.make_archive(zip_file, 'zip', OPTS.openram_temp)
debug.warning("DRC failed but LVS passed: {}".format(a.name))
#self.fail("DRC failed but LVS passed: {}".format(a.name))
# self.fail("DRC failed but LVS passed: {}".format(a.name))
elif drc_result != 0:
#zip_file = "/tmp/{0}_{1}".format(a.name,os.getpid())
#debug.info(0,"Archiving failed files to {}.zip".format(zip_file))
#shutil.make_archive(zip_file, 'zip', OPTS.openram_temp)
# import shutil
# zip_file = "/tmp/{0}_{1}".format(a.name, os.getpid())
# debug.info(0,"Archiving failed files to {}.zip".format(zip_file))
# shutil.make_archive(zip_file, 'zip', OPTS.openram_temp)
self.fail("DRC failed: {}".format(a.name))
if lvs_result != 0:
#zip_file = "/tmp/{0}_{1}".format(a.name,os.getpid())
#debug.info(0,"Archiving failed files to {}.zip".format(zip_file))
#shutil.make_archive(zip_file, 'zip', OPTS.openram_temp)
# import shutil
# zip_file = "/tmp/{0}_{1}".format(a.name, os.getpid())
# debug.info(0,"Archiving failed files to {}.zip".format(zip_file))
# shutil.make_archive(zip_file, 'zip', OPTS.openram_temp)
self.fail("LVS mismatch: {}".format(a.name))
# For debug...
#import pdb; pdb.set_trace()
# import pdb; pdb.set_trace()
if OPTS.purge_temp:
self.cleanup()
def run_pex(self, a, output=None):
if output == None:
output = OPTS.openram_temp + a.name + ".pex.netlist"
tempspice = "{0}{1}.sp".format(OPTS.openram_temp,a.name)
tempgds = "{0}{1}.gds".format(OPTS.openram_temp,a.name)
tempspice = "{0}{1}.sp".format(OPTS.openram_temp, a.name)
tempgds = "{0}{1}.gds".format(OPTS.openram_temp, a.name)
import verify
result=verify.run_pex(a.name, tempgds, tempspice, output=output, final_verification=False)
@ -97,8 +97,8 @@ class openram_test(unittest.TestCase):
"""
debug.info(1, "Finding feasible period for current test.")
delay_obj.set_load_slew(load, slew)
test_port = delay_obj.read_ports[0] #Only test one port, assumes other ports have similar period.
delay_obj.analysis_init(probe_address="1"*sram.addr_size, probe_data=(sram.word_size-1))
test_port = delay_obj.read_ports[0] # Only test one port, assumes other ports have similar period.
delay_obj.analysis_init(probe_address="1" * sram.addr_size, probe_data=sram.word_size - 1)
delay_obj.find_feasible_period_one_port(test_port)
return delay_obj.period
@ -130,29 +130,27 @@ class openram_test(unittest.TestCase):
for k in data.keys():
if type(data[k])==list:
for i in range(len(data[k])):
if not self.isclose(k,data[k][i],golden_data[k][i],error_tolerance):
if not self.isclose(k, data[k][i], golden_data[k][i], error_tolerance):
data_matches = False
else:
if not self.isclose(k,data[k],golden_data[k],error_tolerance):
if not self.isclose(k, data[k], golden_data[k], error_tolerance):
data_matches = False
if not data_matches:
import pprint
data_string=pprint.pformat(data)
debug.error("Results exceeded {:.1f}% tolerance compared to golden results:\n".format(error_tolerance*100)+data_string)
debug.error("Results exceeded {:.1f}% tolerance compared to golden results:\n".format(error_tolerance * 100) + data_string)
return data_matches
def isclose(self,key,value,actual_value,error_tolerance=1e-2):
def isclose(self, key, value, actual_value, error_tolerance=1e-2):
""" This is used to compare relative values. """
import debug
relative_diff = self.relative_diff(value,actual_value)
relative_diff = self.relative_diff(value, actual_value)
check = relative_diff <= error_tolerance
if check:
debug.info(2,"CLOSE\t{0: <10}\t{1:.3f}\t{2:.3f}\tdiff={3:.1f}%".format(key,value,actual_value,relative_diff*100))
debug.info(2, "CLOSE\t{0: <10}\t{1:.3f}\t{2:.3f}\tdiff={3:.1f}%".format(key, value, actual_value, relative_diff * 100))
return True
else:
debug.error("NOT CLOSE\t{0: <10}\t{1:.3f}\t{2:.3f}\tdiff={3:.1f}%".format(key,value,actual_value,relative_diff*100))
debug.error("NOT CLOSE\t{0: <10}\t{1:.3f}\t{2:.3f}\tdiff={3:.1f}%".format(key, value, actual_value, relative_diff * 100))
return False
def relative_diff(self, value1, value2):
@ -169,18 +167,14 @@ class openram_test(unittest.TestCase):
# Get normalization value
norm_value = abs(max(value1, value2))
# Edge case where greater is a zero
if norm_value == 0:
min_value = abs(min(value1, value2))
return abs(value1 - value2) / norm_value
def relative_compare(self, value,actual_value,error_tolerance):
def relative_compare(self, value, actual_value, error_tolerance):
""" This is used to compare relative values. """
if (value==actual_value): # if we don't need a relative comparison!
return True
return (abs(value - actual_value) / max(value,actual_value) <= error_tolerance)
return (abs(value - actual_value) / max(value, actual_value) <= error_tolerance)
def isapproxdiff(self, filename1, filename2, error_tolerance=0.001):
"""Compare two files.
@ -218,23 +212,22 @@ class openram_test(unittest.TestCase):
line_num+=1
line1 = fp1.readline().decode('utf-8')
line2 = fp2.readline().decode('utf-8')
#print("line1:",line1)
#print("line2:",line2)
# print("line1:", line1)
# print("line2:", line2)
# 1. Find all of the floats using a regex
line1_floats=rx.findall(line1)
line2_floats=rx.findall(line2)
debug.info(3,"line1_floats: "+str(line1_floats))
debug.info(3,"line2_floats: "+str(line2_floats))
debug.info(3, "line1_floats: " + str(line1_floats))
debug.info(3, "line2_floats: " + str(line2_floats))
# 2. Remove the floats from the string
for f in line1_floats:
line1=line1.replace(f,"",1)
line1=line1.replace(f, "", 1)
for f in line2_floats:
line2=line2.replace(f,"",1)
#print("line1:",line1)
#print("line2:",line2)
line2=line2.replace(f, "", 1)
# print("line1:", line1)
# print("line2:", line2)
# 3. Convert to floats rather than strings
line1_floats = [float(x) for x in line1_floats]
@ -242,29 +235,29 @@ class openram_test(unittest.TestCase):
# 4. Check if remaining string matches
if line1 != line2:
#Uncomment if you want to see all the individual chars of the two lines
#print(str([i for i in line1]))
#print(str([i for i in line2]))
# Uncomment if you want to see all the individual chars of the two lines
# print(str([i for i in line1]))
# print(str([i for i in line2]))
if mismatches==0:
debug.error("Mismatching files:\nfile1={0}\nfile2={1}".format(filename1,filename2))
debug.error("Mismatching files:\nfile1={0}\nfile2={1}".format(filename1, filename2))
mismatches += 1
debug.error("MISMATCH Line ({0}):\n{1}\n!=\n{2}".format(line_num,line1.rstrip('\n'),line2.rstrip('\n')))
debug.error("MISMATCH Line ({0}):\n{1}\n!=\n{2}".format(line_num, line1.rstrip('\n'), line2.rstrip('\n')))
# 5. Now compare that the floats match
elif len(line1_floats)!=len(line2_floats):
if mismatches==0:
debug.error("Mismatching files:\nfile1={0}\nfile2={1}".format(filename1,filename2))
debug.error("Mismatching files:\nfile1={0}\nfile2={1}".format(filename1, filename2))
mismatches += 1
debug.error("MISMATCH Line ({0}) Length {1} != {2}".format(line_num,len(line1_floats),len(line2_floats)))
debug.error("MISMATCH Line ({0}) Length {1} != {2}".format(line_num, len(line1_floats), len(line2_floats)))
else:
for (float1,float2) in zip(line1_floats,line2_floats):
relative_diff = self.relative_diff(float1,float2)
for (float1, float2) in zip(line1_floats, line2_floats):
relative_diff = self.relative_diff(float1, float2)
check = relative_diff <= error_tolerance
if not check:
if mismatches==0:
debug.error("Mismatching files:\nfile1={0}\nfile2={1}".format(filename1,filename2))
debug.error("Mismatching files:\nfile1={0}\nfile2={1}".format(filename1, filename2))
mismatches += 1
debug.error("MISMATCH Line ({0}) Float {1} != {2} diff: {3:.1f}%".format(line_num,float1,float2,relative_diff*100))
debug.error("MISMATCH Line ({0}) Float {1} != {2} diff: {3:.1f}%".format(line_num, float1, float2, relative_diff * 100))
# Only show the first 10 mismatch lines
if not line1 and not line2 or mismatches>10:
@ -275,19 +268,18 @@ class openram_test(unittest.TestCase):
# Never reached
return False
def isdiff(self,filename1,filename2):
def isdiff(self, filename1, filename2):
""" This is used to compare two files and display the diff if they are different.. """
import debug
import filecmp
import difflib
check = filecmp.cmp(filename1,filename2)
check = filecmp.cmp(filename1, filename2)
if not check:
debug.error("MISMATCH file1={0} file2={1}".format(filename1,filename2))
f1 = open(filename1,mode="r",encoding='utf-8')
debug.error("MISMATCH file1={0} file2={1}".format(filename1, filename2))
f1 = open(filename1, mode="r", encoding='utf-8')
s1 = f1.readlines()
f1.close()
f2 = open(filename2,mode="r",encoding='utf-8')
f2 = open(filename2, mode="r", encoding='utf-8')
s2 = f2.readlines()
f2.close()
mismatches=0
@ -302,10 +294,13 @@ class openram_test(unittest.TestCase):
return False
return False
else:
debug.info(2,"MATCH {0} {1}".format(filename1,filename2))
debug.info(2, "MATCH {0} {1}".format(filename1, filename2))
return True
def dbg():
    """Convenience hook: break into the pdb interactive debugger here."""
    import pdb
    pdb.set_trace()
def header(filename, technology):
# Skip the header for gitlab regression
import getpass
@ -319,14 +314,18 @@ def header(filename, technology):
print("|=========" + tst.center(60) + "=========|")
print("|=========" + technology.center(60) + "=========|")
print("|=========" + filename.center(60) + "=========|")
from globals import OPTS
from globals import OPTS
print("|=========" + OPTS.openram_temp.center(60) + "=========|")
print("|==============================================================================|")
def debugTestRunner(post_mortem=None):
"""unittest runner doing post mortem debugging on failing tests"""
import pdb
import traceback
if post_mortem is None and not OPTS.purge_temp:
post_mortem = pdb.post_mortem
class DebugTestResult(unittest.TextTestResult):
def addError(self, test, err):
# called before tearDown()
@ -334,9 +333,10 @@ def debugTestRunner(post_mortem=None):
if post_mortem:
post_mortem(err[2])
super(DebugTestResult, self).addError(test, err)
def addFailure(self, test, err):
traceback.print_exception(*err)
if post_mortem:
if post_mortem:
post_mortem(err[2])
super(DebugTestResult, self).addFailure(test, err)
return unittest.TextTestRunner(resultclass=DebugTestResult)