From 5c122c2fe15a81edad5826e29d50e82cf24f078f Mon Sep 17 00:00:00 2001 From: John McMaster Date: Tue, 11 Sep 2018 18:18:40 -0700 Subject: [PATCH] timfuz: misc cleanup Signed-off-by: John McMaster --- fuzzers/007-timing/Makefile | 5 + fuzzers/007-timing/checksub.py | 7 +- fuzzers/007-timing/corner_csv.py | 2 +- fuzzers/007-timing/projects/Makefile | 4 + .../007-timing/projects/placelut/README.md | 2 + .../007-timing/projects/placelut_fb/Makefile | 2 + .../007-timing/projects/placelut_fb/README.md | 2 + .../{placelut_fb.py => generate.py} | 0 .../projects/placelut_fb/generate.sh | 9 + .../projects/placelut_fb/generate.tcl | 47 +++++ .../projects/placelut_ff_fb/Makefile | 2 + .../projects/placelut_ff_fb/README.md | 3 + .../{placelut_ff_fb.py => generate.py} | 0 .../projects/placelut_ff_fb/generate.sh | 9 + .../projects/placelut_ff_fb/generate.tcl | 47 +++++ fuzzers/007-timing/rref.py | 2 +- fuzzers/007-timing/solve_linprog.py | 5 - fuzzers/007-timing/timfuz.py | 165 ++---------------- fuzzers/007-timing/timfuz_massage.py | 18 +- fuzzers/007-timing/timfuz_solve.py | 4 +- 20 files changed, 156 insertions(+), 179 deletions(-) create mode 100644 fuzzers/007-timing/Makefile create mode 100644 fuzzers/007-timing/projects/Makefile create mode 100644 fuzzers/007-timing/projects/placelut/README.md create mode 100644 fuzzers/007-timing/projects/placelut_fb/Makefile create mode 100644 fuzzers/007-timing/projects/placelut_fb/README.md rename fuzzers/007-timing/projects/placelut_fb/{placelut_fb.py => generate.py} (100%) create mode 100755 fuzzers/007-timing/projects/placelut_fb/generate.sh create mode 100644 fuzzers/007-timing/projects/placelut_fb/generate.tcl create mode 100644 fuzzers/007-timing/projects/placelut_ff_fb/Makefile create mode 100644 fuzzers/007-timing/projects/placelut_ff_fb/README.md rename fuzzers/007-timing/projects/placelut_ff_fb/{placelut_ff_fb.py => generate.py} (100%) create mode 100755 fuzzers/007-timing/projects/placelut_ff_fb/generate.sh create mode 100644 fuzzers/007-timing/projects/placelut_ff_fb/generate.tcl diff --git a/fuzzers/007-timing/Makefile b/fuzzers/007-timing/Makefile new file mode 100644 index 00000000..1f90909f --- /dev/null +++ b/fuzzers/007-timing/Makefile @@ -0,0 +1,5 @@ +all: + cd speed && make + cd timgrid && make + cd projects && make + diff --git a/fuzzers/007-timing/checksub.py b/fuzzers/007-timing/checksub.py index 9cdb259d..00559735 100644 --- a/fuzzers/007-timing/checksub.py +++ b/fuzzers/007-timing/checksub.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -from timfuz import Benchmark, Ar_di2np, Ar_ds2t, A_di2ds, A_ds2di, simplify_rows, loadc_Ads_b, index_names, A_ds2np, load_sub, run_sub_json +from timfuz import Benchmark, Ar_di2np, Ar_ds2t, A_di2ds, A_ds2di, loadc_Ads_b, index_names, A_ds2np, load_sub, run_sub_json import numpy as np import glob import json @@ -58,11 +58,6 @@ def run(fns_in, sub_json=None, verbose=False): Ads, b = loadc_Ads_b(fns_in, corner, ico=True) - # Remove duplicate rows - # is this necessary? 
- # maybe better to just add them into the matrix directly - #Ads, b = simplify_rows(Ads, b) - if sub_json: print('Subbing JSON %u rows' % len(Ads)) #pds(Ads, 'Orig') diff --git a/fuzzers/007-timing/corner_csv.py b/fuzzers/007-timing/corner_csv.py index 1df10bd0..d64c64ab 100644 --- a/fuzzers/007-timing/corner_csv.py +++ b/fuzzers/007-timing/corner_csv.py @@ -5,7 +5,7 @@ import glob def run(fout, fns_in, corner, verbose=0): Ads, b = loadc_Ads_b(fns_in, corner, ico=True) - Ads, b = simplify_rows(Ads, b) + Ads, b = simplify_rows(Ads, b, corner=corner) fout.write('ico,fast_max fast_min slow_max slow_min,rows...\n') for row_b, row_ds in zip(b, Ads): diff --git a/fuzzers/007-timing/projects/Makefile b/fuzzers/007-timing/projects/Makefile new file mode 100644 index 00000000..eb1f30a9 --- /dev/null +++ b/fuzzers/007-timing/projects/Makefile @@ -0,0 +1,4 @@ +all: + echo "FIXME: tie projects together" + false + diff --git a/fuzzers/007-timing/projects/placelut/README.md b/fuzzers/007-timing/projects/placelut/README.md new file mode 100644 index 00000000..0df7e3f8 --- /dev/null +++ b/fuzzers/007-timing/projects/placelut/README.md @@ -0,0 +1,2 @@ +LUTs are physically laid out in an array and directly connected to a test pattern generator + diff --git a/fuzzers/007-timing/projects/placelut_fb/Makefile b/fuzzers/007-timing/projects/placelut_fb/Makefile new file mode 100644 index 00000000..9c8b1947 --- /dev/null +++ b/fuzzers/007-timing/projects/placelut_fb/Makefile @@ -0,0 +1,2 @@ +include ../project.mk + diff --git a/fuzzers/007-timing/projects/placelut_fb/README.md b/fuzzers/007-timing/projects/placelut_fb/README.md new file mode 100644 index 00000000..1e597818 --- /dev/null +++ b/fuzzers/007-timing/projects/placelut_fb/README.md @@ -0,0 +1,2 @@ +LUTs are physically laid out in an array and connected to test pattern generator and fed back to other LUTs + diff --git a/fuzzers/007-timing/projects/placelut_fb/placelut_fb.py b/fuzzers/007-timing/projects/placelut_fb/generate.py similarity index 100% rename from fuzzers/007-timing/projects/placelut_fb/placelut_fb.py rename to fuzzers/007-timing/projects/placelut_fb/generate.py diff --git a/fuzzers/007-timing/projects/placelut_fb/generate.sh b/fuzzers/007-timing/projects/placelut_fb/generate.sh new file mode 100755 index 00000000..579f2021 --- /dev/null +++ b/fuzzers/007-timing/projects/placelut_fb/generate.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -ex +source ../generate.sh + +python ../generate.py --sdx 4 --sdy 4 >top.v +vivado -mode batch -source ../generate.tcl +timing_txt2csv + diff --git a/fuzzers/007-timing/projects/placelut_fb/generate.tcl b/fuzzers/007-timing/projects/placelut_fb/generate.tcl new file mode 100644 index 00000000..0b536169 --- /dev/null +++ b/fuzzers/007-timing/projects/placelut_fb/generate.tcl @@ -0,0 +1,47 @@ +source ../../../../../utils/utils.tcl +source ../../project.tcl + +proc build_design {} { + create_project -force -part $::env(XRAY_PART) design design + read_verilog top.v + synth_design -top top + + puts "Locking pins" + set_property LOCK_PINS {I0:A1 I1:A2 I2:A3 I3:A4 I4:A5 I5:A6} \ + [get_cells -quiet -filter {REF_NAME == LUT6} -hierarchical] + + puts "Package stuff" + set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_00) IOSTANDARD LVCMOS33" [get_ports clk] + set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_01) IOSTANDARD LVCMOS33" [get_ports stb] + set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_02) IOSTANDARD LVCMOS33" [get_ports di] + set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_03) IOSTANDARD LVCMOS33" [get_ports do] + 
+    puts "pblocking"
+    create_pblock roi
+    set roipb [get_pblocks roi]
+    set_property EXCLUDE_PLACEMENT 1 $roipb
+    add_cells_to_pblock $roipb [get_cells roi]
+    resize_pblock $roipb -add "$::env(XRAY_ROI)"
+
+    puts "randplace"
+    randplace_pblock 50 roi
+
+    set_property CFGBVS VCCO [current_design]
+    set_property CONFIG_VOLTAGE 3.3 [current_design]
+    set_property BITSTREAM.GENERAL.PERFRAMECRC YES [current_design]
+
+    puts "dedicated route"
+    set_property CLOCK_DEDICATED_ROUTE FALSE [get_nets clk_IBUF]
+
+    place_design
+    route_design
+
+    write_checkpoint -force design.dcp
+    # disable combinatorial loop
+    # set_property IS_ENABLED 0 [get_drc_checks {LUTLP-1}]
+    #write_bitstream -force design.bit
+}
+
+build_design
+write_info3
+
diff --git a/fuzzers/007-timing/projects/placelut_ff_fb/Makefile b/fuzzers/007-timing/projects/placelut_ff_fb/Makefile
new file mode 100644
index 00000000..9c8b1947
--- /dev/null
+++ b/fuzzers/007-timing/projects/placelut_ff_fb/Makefile
@@ -0,0 +1,2 @@
+include ../project.mk
+
diff --git a/fuzzers/007-timing/projects/placelut_ff_fb/README.md b/fuzzers/007-timing/projects/placelut_ff_fb/README.md
new file mode 100644
index 00000000..983849b3
--- /dev/null
+++ b/fuzzers/007-timing/projects/placelut_ff_fb/README.md
@@ -0,0 +1,3 @@
+LUTs are physically laid out in an array, connected to a test pattern generator, and fed back to other LUTs.
+FFs are randomly inserted between connections.
+
diff --git a/fuzzers/007-timing/projects/placelut_ff_fb/placelut_ff_fb.py b/fuzzers/007-timing/projects/placelut_ff_fb/generate.py
similarity index 100%
rename from fuzzers/007-timing/projects/placelut_ff_fb/placelut_ff_fb.py
rename to fuzzers/007-timing/projects/placelut_ff_fb/generate.py
diff --git a/fuzzers/007-timing/projects/placelut_ff_fb/generate.sh b/fuzzers/007-timing/projects/placelut_ff_fb/generate.sh
new file mode 100755
index 00000000..579f2021
--- /dev/null
+++ b/fuzzers/007-timing/projects/placelut_ff_fb/generate.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -ex
+source ../generate.sh
+
+python ../generate.py --sdx 4 --sdy 4 >top.v
+vivado -mode batch -source ../generate.tcl
+timing_txt2csv
+
diff --git a/fuzzers/007-timing/projects/placelut_ff_fb/generate.tcl b/fuzzers/007-timing/projects/placelut_ff_fb/generate.tcl
new file mode 100644
index 00000000..0b536169
--- /dev/null
+++ b/fuzzers/007-timing/projects/placelut_ff_fb/generate.tcl
@@ -0,0 +1,47 @@
+source ../../../../../utils/utils.tcl
+source ../../project.tcl
+
+proc build_design {} {
+    create_project -force -part $::env(XRAY_PART) design design
+    read_verilog top.v
+    synth_design -top top
+
+    puts "Locking pins"
+    set_property LOCK_PINS {I0:A1 I1:A2 I2:A3 I3:A4 I4:A5 I5:A6} \
+        [get_cells -quiet -filter {REF_NAME == LUT6} -hierarchical]
+
+    puts "Package stuff"
+    set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_00) IOSTANDARD LVCMOS33" [get_ports clk]
+    set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_01) IOSTANDARD LVCMOS33" [get_ports stb]
+    set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_02) IOSTANDARD LVCMOS33" [get_ports di]
+    set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_03) IOSTANDARD LVCMOS33" [get_ports do]
+
+    puts "pblocking"
+    create_pblock roi
+    set roipb [get_pblocks roi]
+    set_property EXCLUDE_PLACEMENT 1 $roipb
+    add_cells_to_pblock $roipb [get_cells roi]
+    resize_pblock $roipb -add "$::env(XRAY_ROI)"
+
+    puts "randplace"
+    randplace_pblock 50 roi
+
+    set_property CFGBVS VCCO [current_design]
+    set_property CONFIG_VOLTAGE 3.3 [current_design]
+    set_property BITSTREAM.GENERAL.PERFRAMECRC YES [current_design]
+ + puts "dedicated route" + set_property CLOCK_DEDICATED_ROUTE FALSE [get_nets clk_IBUF] + + place_design + route_design + + write_checkpoint -force design.dcp + # disable combinitorial loop + # set_property IS_ENABLED 0 [get_drc_checks {LUTLP-1}] + #write_bitstream -force design.bit +} + +build_design +write_info3 + diff --git a/fuzzers/007-timing/rref.py b/fuzzers/007-timing/rref.py index 19dcc89c..4cf1b9e7 100644 --- a/fuzzers/007-timing/rref.py +++ b/fuzzers/007-timing/rref.py @@ -71,7 +71,7 @@ class State(object): Ads, b = loadc_Ads_b(fn_ins, corner=corner, ico=True) if simplify: print('Simplifying corner %s' % (corner,)) - Ads, b = simplify_rows(Ads, b, remove_zd=False) + Ads, b = simplify_rows(Ads, b, remove_zd=False, corner=corner) return State(Ads) def write_state(state, fout): diff --git a/fuzzers/007-timing/solve_linprog.py b/fuzzers/007-timing/solve_linprog.py index 201ba7ca..ec25474c 100644 --- a/fuzzers/007-timing/solve_linprog.py +++ b/fuzzers/007-timing/solve_linprog.py @@ -105,11 +105,6 @@ def run_corner(Anp, b, names, verbose=False, opts={}, meta={}, outfn=None): if nonzero and (verbose or ((nonzeros < 100 or nonzeros % 20 == 0) and nonzeros <= plim)): print(' % 4u % -80s % 10.1f' % (xi, name, x)) print('Delay on %d / %d' % (nonzeros, len(res.x))) - if not os.path.exists('res'): - os.mkdir('res') - fn_out = 'res/%s' % datetime.datetime.utcnow().isoformat().split('.')[0] - print('Writing %s' % fn_out) - np.save(fn_out, (3, c, A_ub, b_ub, bounds, names, res, meta)) if outfn: # ballpark minimum actual observed delay is around 7 (carry chain) diff --git a/fuzzers/007-timing/timfuz.py b/fuzzers/007-timing/timfuz.py index 868e8126..09b34da2 100644 --- a/fuzzers/007-timing/timfuz.py +++ b/fuzzers/007-timing/timfuz.py @@ -130,12 +130,20 @@ def Ab_ub_dt2d(eqns): return list(A_ubd), list(b_ub) # This significantly reduces runtime -def simplify_rows(Ads, b_ub, remove_zd=False): +def simplify_rows(Ads, b_ub, remove_zd=False, corner=None): '''Remove duplicate equations, taking highest delay''' # dict of constants to highest delay eqns = OrderedDict() assert len(Ads) == len(b_ub), (len(Ads), len(b_ub)) + assert corner is not None + minmax = { + 'fast_max': max, + 'fast_min': min, + 'slow_max': max, + 'slow_min': min, + }[corner] + sys.stdout.write('SimpR ') sys.stdout.flush() progress = int(max(1, len(b_ub) / 100)) @@ -161,7 +169,7 @@ def simplify_rows(Ads, b_ub, remove_zd=False): continue rowt = Ar_ds2t(rowd) - eqns[rowt] = max(eqns.get(rowt, 0), b) + eqns[rowt] = minmax(eqns.get(rowt, 0), b) print(' done') @@ -473,159 +481,6 @@ def filter_ncols(A_ubd, b_ub, cols_min=0, cols_max=0): assert len(b_ub_ret) return A_ubd_ret, b_ub_ret -def preprocess(A_ubd, b_ub, opts, names, verbose=0): - def debug(what): - if verbose: - print('') - print_eqns(A_ubd, b_ub, verbose=verbose, label=what, lim=20) - col_dist(A_ubd, what, names) - check_feasible_d(A_ubd, b_ub, names) - - col_dist(A_ubd, 'pre-filt', names, lim=12) - debug('pre-filt') - - need_simpc = 0 - - # Input set may have redundant constraints - A_ubd, b_ub = simplify_rows(A_ubd=A_ubd, b_ub=b_ub) - debug("simp_rows") - cols_min_pre = opts.get('cols_min_pre', None) - cols_max_pre = opts.get('cols_max_pre', None) - # Filter input based on number of columns - if cols_min_pre or cols_max_pre: - A_ubd, b_ub = filter_ncols(A_ubd=A_ubd, b_ub=b_ub, cols_min=cols_min_pre, cols_max=cols_max_pre) - debug("filt_ncols") - need_simpc = 1 - - # Limit input rows, mostly for quick full run checks - row_limit = opts.get('row_limit', None) - if row_limit: - 
before_rows = len(b_ub) - A_ubd = A_ubd[0:row_limit] - b_ub = b_ub[0:row_limit] - print('Row limit %d => %d rows' % (before_rows, len(b_ub))) - need_simpc = 1 - - if need_simpc: - names, A_ubd, b_ub = simplify_cols(names=names, A_ubd=A_ubd, b_ub=b_ub) - debug("simp_cols") - - return A_ubd, b_ub, names - -def massage_equations(A_ubd, b_ub, opts, names, verbose=0): - ''' - Equation pipeline - Some operations may generate new equations - Simplify after these to avoid unnecessary overhead on redundant constraints - Similarly some operations may eliminate equations, potentially eliminating a column (ie variable) - Remove these columns as necessary to speed up solving - ''' - - def debug(what): - if verbose: - print('') - print_eqns(A_ubd, b_ub, verbose=verbose, label=what, lim=20) - col_dist(A_ubd, what, names) - check_feasible_d(A_ubd, b_ub, names) - - A_ubd, b_ub, names = preprocess(A_ubd, b_ub, opts, names, verbose=verbose) - - # Try to (intelligently) subtract equations to generate additional constraints - # This helps avoid putting all delay in a single shared variable - derive_lim = opts.get('derive_lim', None) - if derive_lim: - dstart = len(b_ub) - - # Original simple - if 0: - for di in range(derive_lim): - print - assert len(A_ubd) == len(b_ub) - n_orig = len(b_ub) - - # Meat of the operation - # Focus on easy equations for first pass to get a lot of easy derrivations - col_lim = 12 if di == 0 else None - #col_lim = None - A_ubd, b_ub = derive_eq_by_row(A_ubd, b_ub, col_lim=col_lim) - debug("der_rows") - # Run another simplify pass since new equations may have overlap with original - A_ubd, b_ub = simplify_rows(A_ubd, b_ub) - print('Derive row %d / %d: %d => %d equations' % (di + 1, derive_lim, n_orig, len(b_ub))) - debug("der_rows simp") - - n_orig2 = len(b_ub) - # Meat of the operation - A_ubd, b_ub = derive_eq_by_col(A_ubd, b_ub) - debug("der_cols") - # Run another simplify pass since new equations may have overlap with original - A_ubd, b_ub = simplify_rows(A_ubd=A_ubd, b_ub=b_ub) - print('Derive col %d / %d: %d => %d equations' % (di + 1, derive_lim, n_orig2, len(b_ub))) - debug("der_cols simp") - - if n_orig == len(b_ub): - break - - if 1: - # Each iteration one more column is allowed until all columns are included - # (and the system is stable) - col_lim = 15 - di = 0 - while True: - print - n_orig = len(b_ub) - - print('Loop %d, lim %d' % (di + 1, col_lim)) - # Meat of the operation - A_ubd, b_ub = derive_eq_by_row(A_ubd, b_ub, col_lim=col_lim, tweak=True) - debug("der_rows") - # Run another simplify pass since new equations may have overlap with original - A_ubd, b_ub = simplify_rows(A_ubd, b_ub) - print('Derive row: %d => %d equations' % (n_orig, len(b_ub))) - debug("der_rows simp") - - n_orig2 = len(b_ub) - # Meat of the operation - A_ubd, b_ub = derive_eq_by_col(A_ubd, b_ub) - debug("der_cols") - # Run another simplify pass since new equations may have overlap with original - A_ubd, b_ub = simplify_rows(A_ubd=A_ubd, b_ub=b_ub) - print('Derive col %d: %d => %d equations' % (di + 1, n_orig2, len(b_ub))) - debug("der_cols simp") - - # Doesn't help computation, but helps debugging - names, A_ubd, b_ub = simplify_cols(names=names, A_ubd=A_ubd, b_ub=b_ub) - A_ubd, b_ub = sort_equations(A_ubd, b_ub) - debug("loop done") - col_dist(A_ubd, 'derive done iter %d, lim %d' % (di, col_lim), names, lim=12) - - rows = len(A_ubd) - if n_orig == len(b_ub) and col_lim >= rows: - break - col_lim += col_lim / 5 - di += 1 - - dend = len(b_ub) - print('') - print('Derive net: %d => %d' % 
(dstart, dend)) - print('') - # Was experimentting to see how much the higher order columns really help - - cols_min_post = opts.get('cols_min_post', None) - cols_max_post = opts.get('cols_max_post', None) - # Filter input based on number of columns - if cols_min_post or cols_max_post: - A_ubd, b_ub = filter_ncols(A_ubd=A_ubd, b_ub=b_ub, cols_min=cols_min_post, cols_max=cols_max_post) - debug("filter_ncals final") - - names, A_ubd, b_ub = simplify_cols(names=names, A_ubd=A_ubd, b_ub=b_ub) - debug("simp_cols final") - - # Helps debug readability - A_ubd, b_ub = sort_equations(A_ubd, b_ub) - debug("final (sorted)") - return names, A_ubd, b_ub - def Ar_di2ds(rowA, names): row = OrderedDict() for k, v in rowA.items(): diff --git a/fuzzers/007-timing/timfuz_massage.py b/fuzzers/007-timing/timfuz_massage.py index 046886ce..e35f4e28 100644 --- a/fuzzers/007-timing/timfuz_massage.py +++ b/fuzzers/007-timing/timfuz_massage.py @@ -323,7 +323,7 @@ def derive_eq_by_col(Ads, b_ub, verbose=0): return Ads_ret, b_ret # keep derriving until solution is (probably) stable -def massage_equations_old(Ads, b, verbose=False, derive_lim=3): +def massage_equations_old(Ads, b, verbose=False, derive_lim=3, corner=None): ''' Equation pipeline Some operations may generate new equations @@ -356,7 +356,7 @@ def massage_equations_old(Ads, b, verbose=False, derive_lim=3): Ads, b = derive_eq_by_row(Ads, b, col_lim=col_lim) debug("der_rows") # Run another simplify pass since new equations may have overlap with original - Ads, b = simplify_rows(Ads, b) + Ads, b = simplify_rows(Ads, b, corner=corner) print('Derive row %d / %d: %d => %d equations' % (di + 1, derive_lim, n_orig, len(b))) debug("der_rows simp") @@ -365,7 +365,7 @@ def massage_equations_old(Ads, b, verbose=False, derive_lim=3): Ads, b = derive_eq_by_col(Ads, b) debug("der_cols") # Run another simplify pass since new equations may have overlap with original - Ads, b = simplify_rows(Ads, b) + Ads, b = simplify_rows(Ads, b, corner=corner) print('Derive col %d / %d: %d => %d equations' % (di + 1, derive_lim, n_orig2, len(b))) debug("der_cols simp") @@ -394,7 +394,7 @@ def massage_equations_old(Ads, b, verbose=False, derive_lim=3): return Ads, b # iteratively increasing column limit until all columns are added -def massage_equations_inc_col_lim(Ads, b, verbose=False): +def massage_equations_inc_col_lim(Ads, b, verbose=False, corner=None): ''' Equation pipeline Some operations may generate new equations @@ -428,7 +428,7 @@ def massage_equations_inc_col_lim(Ads, b, verbose=False): Ads, b = derive_eq_by_row(Ads, b, col_lim=col_lim, tweak=True) debug("der_rows") # Run another simplify pass since new equations may have overlap with original - Ads, b = simplify_rows(Ads, b) + Ads, b = simplify_rows(Ads, b, corner=corner) print('Derive row: %d => %d equations' % (n_orig, len(b))) debug("der_rows simp") @@ -437,7 +437,7 @@ def massage_equations_inc_col_lim(Ads, b, verbose=False): Ads, b = derive_eq_by_col(Ads, b) debug("der_cols") # Run another simplify pass since new equations may have overlap with original - Ads, b = simplify_rows(Ads, b) + Ads, b = simplify_rows(Ads, b, corner=corner) print('Derive col %d: %d => %d equations' % (di + 1, n_orig2, len(b))) debug("der_cols simp") @@ -467,7 +467,7 @@ def massage_equations_inc_col_lim(Ads, b, verbose=False): # only derive based on nearby equations # theory is they will be the best to diff -def massage_equations_near(Ads, b, verbose=False): +def massage_equations_near(Ads, b, verbose=False, corner=None): ''' Equation pipeline 
Some operations may generate new equations @@ -497,7 +497,7 @@ def massage_equations_near(Ads, b, verbose=False): Ads, b = derive_eq_by_near_row(Ads, b, tweak=True) debug("der_rows") # Run another simplify pass since new equations may have overlap with original - Ads, b = simplify_rows(Ads, b) + Ads, b = simplify_rows(Ads, b, corner=corner) print('Derive row: %d => %d equations' % (n_orig, len(b))) debug("der_rows simp") @@ -506,7 +506,7 @@ def massage_equations_near(Ads, b, verbose=False): Ads, b = derive_eq_by_col(Ads, b) debug("der_cols") # Run another simplify pass since new equations may have overlap with original - Ads, b = simplify_rows(Ads, b) + Ads, b = simplify_rows(Ads, b, corner=corner) print('Derive col: %d => %d equations' % (n_orig2, len(b))) debug("der_cols simp") diff --git a/fuzzers/007-timing/timfuz_solve.py b/fuzzers/007-timing/timfuz_solve.py index c6343e70..0d5b36d9 100644 --- a/fuzzers/007-timing/timfuz_solve.py +++ b/fuzzers/007-timing/timfuz_solve.py @@ -91,7 +91,7 @@ def run(fns_in, corner, run_corner, sub_json=None, sub_csv=None, dedup=True, mas if dedup: oldn = len(Ads) iold = instances(Ads) - Ads, b = simplify_rows(Ads, b) + Ads, b = simplify_rows(Ads, b, corner=corner) print('Simplify %u => %u rows' % (oldn, len(Ads))) print('Simplify %u => %u instances' % (iold, instances(Ads))) @@ -142,7 +142,7 @@ def run(fns_in, corner, run_corner, sub_json=None, sub_csv=None, dedup=True, mas This creates derived constraints to provide more realistic results ''' if massage: - Ads, b = massage_equations(Ads, b) + Ads, b = massage_equations(Ads, b, corner=corner) print('Converting to numpy...') names, Anp = A_ds2np(Ads)
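
Note on the simplify_rows() change above: the function now requires the timing corner and collapses duplicate rows toward that corner's worst case, using max() for the fast_max/slow_max corners and min() for fast_min/slow_min, instead of always taking the maximum. Below is a minimal, self-contained Python sketch of that reduction, not the real implementation: the row encoding (dict of variable name to coefficient, parallel list b of delays) and the Ar_ds2t()-style key mirror timfuz.py, but dedupe_rows() itself, the made-up PIP names in the demo, and the choice of each row's own delay as the starting value are illustrative assumptions only, and the sketch omits the ico/zero-delay handling and progress output of the real function.

#!/usr/bin/env python3
# Sketch of corner-aware duplicate-row reduction (illustrative only).
from collections import OrderedDict


def dedupe_rows(Ads, b, corner):
    '''Collapse identical rows, keeping the worst-case delay for the corner.'''
    assert len(Ads) == len(b)
    # max corners keep the largest observed delay, min corners the smallest
    minmax = {
        'fast_max': max,
        'fast_min': min,
        'slow_max': max,
        'slow_min': min,
    }[corner]
    eqns = OrderedDict()
    for rowd, rowb in zip(Ads, b):
        # Hashable key built from the row, analogous to Ar_ds2t()
        rowt = tuple(sorted(rowd.items()))
        # Default to this row's own delay so min corners keep the smallest
        # observed value rather than collapsing toward zero
        eqns[rowt] = minmax(eqns.get(rowt, rowb), rowb)
    Ads_out = [OrderedDict(rowt) for rowt in eqns.keys()]
    b_out = list(eqns.values())
    return Ads_out, b_out


if __name__ == '__main__':
    # Hypothetical segment names, two identical rows with different delays
    Ads = [{'PIP_X': 1, 'PIP_Y': 2}, {'PIP_X': 1, 'PIP_Y': 2}, {'PIP_Z': 1}]
    b = [120.0, 135.0, 40.0]
    print(dedupe_rows(Ads, b, 'slow_max'))  # duplicate row keeps 135.0
    print(dedupe_rows(Ads, b, 'slow_min'))  # duplicate row keeps 120.0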