timfuz: misc cleanup

Signed-off-by: John McMaster <johndmcmaster@gmail.com>
This commit is contained in:
John McMaster 2018-09-11 18:18:40 -07:00
parent 6197a93536
commit 5c122c2fe1
20 changed files with 156 additions and 179 deletions

View File

@ -0,0 +1,5 @@
# Top-level build: descend into each subproject in order.
# (Note: recipe indentation was flattened by the diff rendering; in the real
# Makefile these lines are tab-indented.)
all:
cd speed && make
cd timgrid && make
cd projects && make

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python3
from timfuz import Benchmark, Ar_di2np, Ar_ds2t, A_di2ds, A_ds2di, simplify_rows, loadc_Ads_b, index_names, A_ds2np, load_sub, run_sub_json
from timfuz import Benchmark, Ar_di2np, Ar_ds2t, A_di2ds, A_ds2di, loadc_Ads_b, index_names, A_ds2np, load_sub, run_sub_json
import numpy as np
import glob
import json
@ -58,11 +58,6 @@ def run(fns_in, sub_json=None, verbose=False):
Ads, b = loadc_Ads_b(fns_in, corner, ico=True)
# Remove duplicate rows
# is this necessary?
# maybe better to just add them into the matrix directly
#Ads, b = simplify_rows(Ads, b)
if sub_json:
print('Subbing JSON %u rows' % len(Ads))
#pds(Ads, 'Orig')

View File

@ -5,7 +5,7 @@ import glob
def run(fout, fns_in, corner, verbose=0):
Ads, b = loadc_Ads_b(fns_in, corner, ico=True)
Ads, b = simplify_rows(Ads, b)
Ads, b = simplify_rows(Ads, b, corner=corner)
fout.write('ico,fast_max fast_min slow_max slow_min,rows...\n')
for row_b, row_ds in zip(b, Ads):

View File

@ -0,0 +1,4 @@
# Placeholder target: project integration is not implemented yet, so fail
# loudly ('false' exits non-zero) instead of silently succeeding.
all:
echo "FIXME: tie projects together"
false

View File

@ -0,0 +1,2 @@
LUTs are physically laid out in an array and directly connected to a test pattern generator

View File

@ -0,0 +1,2 @@
include ../project.mk

View File

@ -0,0 +1,2 @@
LUTs are physically laid out in an array and connected to a test pattern generator and fed back to other LUTs

View File

@ -0,0 +1,9 @@
#!/bin/bash
# Generate, build, and extract timing data for a 4x4 LUT array project.
set -ex
# Shared helpers — presumably defines timing_txt2csv and env setup; TODO confirm
source ../generate.sh
# Emit the Verilog top module for a 4x4 array
python ../generate.py --sdx 4 --sdy 4 >top.v
# Run the shared Vivado synthesis/place/route flow
vivado -mode batch -source ../generate.tcl
# Convert the Vivado timing report text into CSV
timing_txt2csv

View File

@ -0,0 +1,47 @@
# Vivado batch flow for this timing-fuzzer project: synthesize top.v,
# constrain it to the XRAY_ROI pblock with randomized placement, then route
# and write a checkpoint for timing extraction.
source ../../../../../utils/utils.tcl
source ../../project.tcl
# Synthesize, constrain, place, and route the generated design.
proc build_design {} {
create_project -force -part $::env(XRAY_PART) design design
read_verilog top.v
synth_design -top top
puts "Locking pins"
# Fix the logical-to-physical LUT input mapping so measured delays are comparable
set_property LOCK_PINS {I0:A1 I1:A2 I2:A3 I3:A4 I4:A5 I5:A6} \
[get_cells -quiet -filter {REF_NAME == LUT6} -hierarchical]
puts "Package stuff"
# Top-level I/O placement comes from the XRAY_PIN_* environment variables
set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_00) IOSTANDARD LVCMOS33" [get_ports clk]
set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_01) IOSTANDARD LVCMOS33" [get_ports stb]
set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_02) IOSTANDARD LVCMOS33" [get_ports di]
set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_03) IOSTANDARD LVCMOS33" [get_ports do]
puts "pblocking"
# Confine the roi cell to the region of interest and keep other logic out
create_pblock roi
set roipb [get_pblocks roi]
set_property EXCLUDE_PLACEMENT 1 $roipb
add_cells_to_pblock $roipb [get_cells roi]
resize_pblock $roipb -add "$::env(XRAY_ROI)"
puts "randplace"
# Randomize placement within the pblock (helper from utils.tcl; argument
# semantics — presumably an iteration/cell count — TODO confirm)
randplace_pblock 50 roi
set_property CFGBVS VCCO [current_design]
set_property CONFIG_VOLTAGE 3.3 [current_design]
set_property BITSTREAM.GENERAL.PERFRAMECRC YES [current_design]
puts "dedicated route"
# clk enters on a general-purpose pin, so waive the dedicated-clock-route check
set_property CLOCK_DEDICATED_ROUTE FALSE [get_nets clk_IBUF]
place_design
route_design
write_checkpoint -force design.dcp
# Disable combinational loop DRC (only needed if writing a bitstream)
# set_property IS_ENABLED 0 [get_drc_checks {LUTLP-1}]
#write_bitstream -force design.bit
}
build_design
write_info3

View File

@ -0,0 +1,2 @@
include ../project.mk

View File

@ -0,0 +1,3 @@
LUTs are physically laid out in an array and connected to a test pattern generator and fed back to other LUTs.
FFs are randomly inserted between connections.

View File

@ -0,0 +1,9 @@
#!/bin/bash
# Generate, build, and extract timing data for a 4x4 LUT array (FF-inserted variant).
set -ex
# Shared helpers — presumably defines timing_txt2csv and env setup; TODO confirm
source ../generate.sh
# Emit the Verilog top module for a 4x4 array
python ../generate.py --sdx 4 --sdy 4 >top.v
# Run the shared Vivado synthesis/place/route flow
vivado -mode batch -source ../generate.tcl
# Convert the Vivado timing report text into CSV
timing_txt2csv

View File

@ -0,0 +1,47 @@
# Vivado batch flow for this timing-fuzzer project (identical to the sibling
# project's generate.tcl): synthesize top.v, constrain it to the XRAY_ROI
# pblock with randomized placement, then route and write a checkpoint.
source ../../../../../utils/utils.tcl
source ../../project.tcl
# Synthesize, constrain, place, and route the generated design.
proc build_design {} {
create_project -force -part $::env(XRAY_PART) design design
read_verilog top.v
synth_design -top top
puts "Locking pins"
# Fix the logical-to-physical LUT input mapping so measured delays are comparable
set_property LOCK_PINS {I0:A1 I1:A2 I2:A3 I3:A4 I4:A5 I5:A6} \
[get_cells -quiet -filter {REF_NAME == LUT6} -hierarchical]
puts "Package stuff"
# Top-level I/O placement comes from the XRAY_PIN_* environment variables
set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_00) IOSTANDARD LVCMOS33" [get_ports clk]
set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_01) IOSTANDARD LVCMOS33" [get_ports stb]
set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_02) IOSTANDARD LVCMOS33" [get_ports di]
set_property -dict "PACKAGE_PIN $::env(XRAY_PIN_03) IOSTANDARD LVCMOS33" [get_ports do]
puts "pblocking"
# Confine the roi cell to the region of interest and keep other logic out
create_pblock roi
set roipb [get_pblocks roi]
set_property EXCLUDE_PLACEMENT 1 $roipb
add_cells_to_pblock $roipb [get_cells roi]
resize_pblock $roipb -add "$::env(XRAY_ROI)"
puts "randplace"
# Randomize placement within the pblock (helper from utils.tcl; argument
# semantics — presumably an iteration/cell count — TODO confirm)
randplace_pblock 50 roi
set_property CFGBVS VCCO [current_design]
set_property CONFIG_VOLTAGE 3.3 [current_design]
set_property BITSTREAM.GENERAL.PERFRAMECRC YES [current_design]
puts "dedicated route"
# clk enters on a general-purpose pin, so waive the dedicated-clock-route check
set_property CLOCK_DEDICATED_ROUTE FALSE [get_nets clk_IBUF]
place_design
route_design
write_checkpoint -force design.dcp
# Disable combinational loop DRC (only needed if writing a bitstream)
# set_property IS_ENABLED 0 [get_drc_checks {LUTLP-1}]
#write_bitstream -force design.bit
}
build_design
write_info3

View File

@ -71,7 +71,7 @@ class State(object):
Ads, b = loadc_Ads_b(fn_ins, corner=corner, ico=True)
if simplify:
print('Simplifying corner %s' % (corner,))
Ads, b = simplify_rows(Ads, b, remove_zd=False)
Ads, b = simplify_rows(Ads, b, remove_zd=False, corner=corner)
return State(Ads)
def write_state(state, fout):

View File

@ -105,11 +105,6 @@ def run_corner(Anp, b, names, verbose=False, opts={}, meta={}, outfn=None):
if nonzero and (verbose or ((nonzeros < 100 or nonzeros % 20 == 0) and nonzeros <= plim)):
print(' % 4u % -80s % 10.1f' % (xi, name, x))
print('Delay on %d / %d' % (nonzeros, len(res.x)))
if not os.path.exists('res'):
os.mkdir('res')
fn_out = 'res/%s' % datetime.datetime.utcnow().isoformat().split('.')[0]
print('Writing %s' % fn_out)
np.save(fn_out, (3, c, A_ub, b_ub, bounds, names, res, meta))
if outfn:
# ballpark minimum actual observed delay is around 7 (carry chain)

View File

@ -130,12 +130,20 @@ def Ab_ub_dt2d(eqns):
return list(A_ubd), list(b_ub)
# This significantly reduces runtime
def simplify_rows(Ads, b_ub, remove_zd=False):
def simplify_rows(Ads, b_ub, remove_zd=False, corner=None):
'''Remove duplicate equations, taking highest delay'''
# dict of constants to highest delay
eqns = OrderedDict()
assert len(Ads) == len(b_ub), (len(Ads), len(b_ub))
assert corner is not None
minmax = {
'fast_max': max,
'fast_min': min,
'slow_max': max,
'slow_min': min,
}[corner]
sys.stdout.write('SimpR ')
sys.stdout.flush()
progress = int(max(1, len(b_ub) / 100))
@ -161,7 +169,7 @@ def simplify_rows(Ads, b_ub, remove_zd=False):
continue
rowt = Ar_ds2t(rowd)
eqns[rowt] = max(eqns.get(rowt, 0), b)
eqns[rowt] = minmax(eqns.get(rowt, 0), b)
print(' done')
@ -473,159 +481,6 @@ def filter_ncols(A_ubd, b_ub, cols_min=0, cols_max=0):
assert len(b_ub_ret)
return A_ubd_ret, b_ub_ret
# NOTE(review): this listing is part of a diff (code removed by the commit);
# leading indentation was stripped by the rendering — code kept byte-identical.
# Prepare the equation system before solving: drop redundant rows, optionally
# filter by column count and cap row count, then prune now-unused columns.
# Returns the (A_ubd, b_ub, names) triple, possibly reduced.
def preprocess(A_ubd, b_ub, opts, names, verbose=0):
# Verbose-only diagnostic dump of the current system state
def debug(what):
if verbose:
print('')
print_eqns(A_ubd, b_ub, verbose=verbose, label=what, lim=20)
col_dist(A_ubd, what, names)
check_feasible_d(A_ubd, b_ub, names)
col_dist(A_ubd, 'pre-filt', names, lim=12)
debug('pre-filt')
# Set when a pass may have orphaned columns, requiring simplify_cols below
need_simpc = 0
# Input set may have redundant constraints
A_ubd, b_ub = simplify_rows(A_ubd=A_ubd, b_ub=b_ub)
debug("simp_rows")
cols_min_pre = opts.get('cols_min_pre', None)
cols_max_pre = opts.get('cols_max_pre', None)
# Filter input based on number of columns
if cols_min_pre or cols_max_pre:
A_ubd, b_ub = filter_ncols(A_ubd=A_ubd, b_ub=b_ub, cols_min=cols_min_pre, cols_max=cols_max_pre)
debug("filt_ncols")
need_simpc = 1
# Limit input rows, mostly for quick full run checks
row_limit = opts.get('row_limit', None)
if row_limit:
before_rows = len(b_ub)
A_ubd = A_ubd[0:row_limit]
b_ub = b_ub[0:row_limit]
print('Row limit %d => %d rows' % (before_rows, len(b_ub)))
need_simpc = 1
if need_simpc:
# Removing rows may have left columns with no references; drop them
names, A_ubd, b_ub = simplify_cols(names=names, A_ubd=A_ubd, b_ub=b_ub)
debug("simp_cols")
return A_ubd, b_ub, names
# NOTE(review): this listing is part of a diff (code removed by the commit);
# leading indentation was stripped by the rendering — code kept byte-identical.
def massage_equations(A_ubd, b_ub, opts, names, verbose=0):
'''
Equation pipeline
Some operations may generate new equations
Simplify after these to avoid unnecessary overhead on redundant constraints
Similarly some operations may eliminate equations, potentially eliminating a column (ie variable)
Remove these columns as necessary to speed up solving
'''
# Verbose-only diagnostic dump of the current system state
def debug(what):
if verbose:
print('')
print_eqns(A_ubd, b_ub, verbose=verbose, label=what, lim=20)
col_dist(A_ubd, what, names)
check_feasible_d(A_ubd, b_ub, names)
A_ubd, b_ub, names = preprocess(A_ubd, b_ub, opts, names, verbose=verbose)
# Try to (intelligently) subtract equations to generate additional constraints
# This helps avoid putting all delay in a single shared variable
derive_lim = opts.get('derive_lim', None)
if derive_lim:
dstart = len(b_ub)
# Original simple
# Dead branch kept for reference: fixed number of derive iterations
if 0:
for di in range(derive_lim):
# NOTE(review): bare 'print' is a Python 2 leftover; in py3 this is a
# no-op expression, probably meant print('')
print
assert len(A_ubd) == len(b_ub)
n_orig = len(b_ub)
# Meat of the operation
# Focus on easy equations for first pass to get a lot of easy derivations
col_lim = 12 if di == 0 else None
#col_lim = None
A_ubd, b_ub = derive_eq_by_row(A_ubd, b_ub, col_lim=col_lim)
debug("der_rows")
# Run another simplify pass since new equations may have overlap with original
A_ubd, b_ub = simplify_rows(A_ubd, b_ub)
print('Derive row %d / %d: %d => %d equations' % (di + 1, derive_lim, n_orig, len(b_ub)))
debug("der_rows simp")
n_orig2 = len(b_ub)
# Meat of the operation
A_ubd, b_ub = derive_eq_by_col(A_ubd, b_ub)
debug("der_cols")
# Run another simplify pass since new equations may have overlap with original
A_ubd, b_ub = simplify_rows(A_ubd=A_ubd, b_ub=b_ub)
print('Derive col %d / %d: %d => %d equations' % (di + 1, derive_lim, n_orig2, len(b_ub)))
debug("der_cols simp")
# Stop once a pass adds no new equations
if n_orig == len(b_ub):
break
# Active branch: grow the column limit until the system stabilizes
if 1:
# Each iteration one more column is allowed until all columns are included
# (and the system is stable)
col_lim = 15
di = 0
while True:
# NOTE(review): bare 'print' — same Python 2 leftover as above
print
n_orig = len(b_ub)
print('Loop %d, lim %d' % (di + 1, col_lim))
# Meat of the operation
A_ubd, b_ub = derive_eq_by_row(A_ubd, b_ub, col_lim=col_lim, tweak=True)
debug("der_rows")
# Run another simplify pass since new equations may have overlap with original
A_ubd, b_ub = simplify_rows(A_ubd, b_ub)
print('Derive row: %d => %d equations' % (n_orig, len(b_ub)))
debug("der_rows simp")
n_orig2 = len(b_ub)
# Meat of the operation
A_ubd, b_ub = derive_eq_by_col(A_ubd, b_ub)
debug("der_cols")
# Run another simplify pass since new equations may have overlap with original
A_ubd, b_ub = simplify_rows(A_ubd=A_ubd, b_ub=b_ub)
print('Derive col %d: %d => %d equations' % (di + 1, n_orig2, len(b_ub)))
debug("der_cols simp")
# Doesn't help computation, but helps debugging
names, A_ubd, b_ub = simplify_cols(names=names, A_ubd=A_ubd, b_ub=b_ub)
A_ubd, b_ub = sort_equations(A_ubd, b_ub)
debug("loop done")
col_dist(A_ubd, 'derive done iter %d, lim %d' % (di, col_lim), names, lim=12)
rows = len(A_ubd)
# Converged: no new equations and every column is within the limit
if n_orig == len(b_ub) and col_lim >= rows:
break
# NOTE(review): '/' is true division in Python 3, so col_lim drifts to a
# float here (py2 leftover?) — verify intent (// would keep it integral)
col_lim += col_lim / 5
di += 1
dend = len(b_ub)
print('')
print('Derive net: %d => %d' % (dstart, dend))
print('')
# Was experimenting to see how much the higher order columns really help
cols_min_post = opts.get('cols_min_post', None)
cols_max_post = opts.get('cols_max_post', None)
# Filter input based on number of columns
if cols_min_post or cols_max_post:
A_ubd, b_ub = filter_ncols(A_ubd=A_ubd, b_ub=b_ub, cols_min=cols_min_post, cols_max=cols_max_post)
debug("filter_ncals final")
names, A_ubd, b_ub = simplify_cols(names=names, A_ubd=A_ubd, b_ub=b_ub)
debug("simp_cols final")
# Helps debug readability
A_ubd, b_ub = sort_equations(A_ubd, b_ub)
debug("final (sorted)")
return names, A_ubd, b_ub
def Ar_di2ds(rowA, names):
row = OrderedDict()
for k, v in rowA.items():

View File

@ -323,7 +323,7 @@ def derive_eq_by_col(Ads, b_ub, verbose=0):
return Ads_ret, b_ret
# keep derriving until solution is (probably) stable
def massage_equations_old(Ads, b, verbose=False, derive_lim=3):
def massage_equations_old(Ads, b, verbose=False, derive_lim=3, corner=None):
'''
Equation pipeline
Some operations may generate new equations
@ -356,7 +356,7 @@ def massage_equations_old(Ads, b, verbose=False, derive_lim=3):
Ads, b = derive_eq_by_row(Ads, b, col_lim=col_lim)
debug("der_rows")
# Run another simplify pass since new equations may have overlap with original
Ads, b = simplify_rows(Ads, b)
Ads, b = simplify_rows(Ads, b, corner=corner)
print('Derive row %d / %d: %d => %d equations' % (di + 1, derive_lim, n_orig, len(b)))
debug("der_rows simp")
@ -365,7 +365,7 @@ def massage_equations_old(Ads, b, verbose=False, derive_lim=3):
Ads, b = derive_eq_by_col(Ads, b)
debug("der_cols")
# Run another simplify pass since new equations may have overlap with original
Ads, b = simplify_rows(Ads, b)
Ads, b = simplify_rows(Ads, b, corner=corner)
print('Derive col %d / %d: %d => %d equations' % (di + 1, derive_lim, n_orig2, len(b)))
debug("der_cols simp")
@ -394,7 +394,7 @@ def massage_equations_old(Ads, b, verbose=False, derive_lim=3):
return Ads, b
# iteratively increasing column limit until all columns are added
def massage_equations_inc_col_lim(Ads, b, verbose=False):
def massage_equations_inc_col_lim(Ads, b, verbose=False, corner=None):
'''
Equation pipeline
Some operations may generate new equations
@ -428,7 +428,7 @@ def massage_equations_inc_col_lim(Ads, b, verbose=False):
Ads, b = derive_eq_by_row(Ads, b, col_lim=col_lim, tweak=True)
debug("der_rows")
# Run another simplify pass since new equations may have overlap with original
Ads, b = simplify_rows(Ads, b)
Ads, b = simplify_rows(Ads, b, corner=corner)
print('Derive row: %d => %d equations' % (n_orig, len(b)))
debug("der_rows simp")
@ -437,7 +437,7 @@ def massage_equations_inc_col_lim(Ads, b, verbose=False):
Ads, b = derive_eq_by_col(Ads, b)
debug("der_cols")
# Run another simplify pass since new equations may have overlap with original
Ads, b = simplify_rows(Ads, b)
Ads, b = simplify_rows(Ads, b, corner=corner)
print('Derive col %d: %d => %d equations' % (di + 1, n_orig2, len(b)))
debug("der_cols simp")
@ -467,7 +467,7 @@ def massage_equations_inc_col_lim(Ads, b, verbose=False):
# only derive based on nearby equations
# theory is they will be the best to diff
def massage_equations_near(Ads, b, verbose=False):
def massage_equations_near(Ads, b, verbose=False, corner=None):
'''
Equation pipeline
Some operations may generate new equations
@ -497,7 +497,7 @@ def massage_equations_near(Ads, b, verbose=False):
Ads, b = derive_eq_by_near_row(Ads, b, tweak=True)
debug("der_rows")
# Run another simplify pass since new equations may have overlap with original
Ads, b = simplify_rows(Ads, b)
Ads, b = simplify_rows(Ads, b, corner=corner)
print('Derive row: %d => %d equations' % (n_orig, len(b)))
debug("der_rows simp")
@ -506,7 +506,7 @@ def massage_equations_near(Ads, b, verbose=False):
Ads, b = derive_eq_by_col(Ads, b)
debug("der_cols")
# Run another simplify pass since new equations may have overlap with original
Ads, b = simplify_rows(Ads, b)
Ads, b = simplify_rows(Ads, b, corner=corner)
print('Derive col: %d => %d equations' % (n_orig2, len(b)))
debug("der_cols simp")

View File

@ -91,7 +91,7 @@ def run(fns_in, corner, run_corner, sub_json=None, sub_csv=None, dedup=True, mas
if dedup:
oldn = len(Ads)
iold = instances(Ads)
Ads, b = simplify_rows(Ads, b)
Ads, b = simplify_rows(Ads, b, corner=corner)
print('Simplify %u => %u rows' % (oldn, len(Ads)))
print('Simplify %u => %u instances' % (iold, instances(Ads)))
@ -142,7 +142,7 @@ def run(fns_in, corner, run_corner, sub_json=None, sub_csv=None, dedup=True, mas
This creates derived constraints to provide more realistic results
'''
if massage:
Ads, b = massage_equations(Ads, b)
Ads, b = massage_equations(Ads, b, corner=corner)
print('Converting to numpy...')
names, Anp = A_ds2np(Ads)