timfuz: site alpha

Signed-off-by: John McMaster <johndmcmaster@gmail.com>
John McMaster 2018-09-24 19:44:04 -07:00
parent 2c6c3487a4
commit 649daab2a2
17 changed files with 507 additions and 153 deletions

View File

@@ -60,7 +60,7 @@ def pds(Ads, s):
def run(fns_in, sub_json=None, verbose=False):
assert len(fns_in) > 0
# arbitrary corner...data is thrown away
Ads, b = loadc_Ads_b(fns_in, "slow_max", ico=True)
Ads, b = loadc_Ads_b(fns_in, "slow_max")
if sub_json:
print('Subbing JSON %u rows' % len(Ads))

View File

@@ -5,7 +5,7 @@ from timfuz import Benchmark, loadc_Ads_bs, index_names, load_sub, run_sub_json,
def gen_group(fnin, sub_json, strict=False, verbose=False):
print('Loading data')
Ads, bs = loadc_Ads_bs([fnin], ico=True)
Ads, bs = loadc_Ads_bs([fnin])
print('Sub: %u rows' % len(Ads))
iold = instances(Ads)

View File

@@ -4,7 +4,7 @@ from timfuz import Benchmark, loadc_Ads_bs, load_sub, Ads2bounds, corners2csv, c
def gen_flat(fns_in, sub_json, corner=None):
Ads, bs = loadc_Ads_bs(fns_in, ico=True)
Ads, bs = loadc_Ads_bs(fns_in)
bounds = Ads2bounds(Ads, bs)
# Elements with zero delay assigned due to sub group
group_zeros = set()

View File

@@ -4,7 +4,8 @@ TIMFUZ_DIR=$(XRAY_DIR)/fuzzers/007-timing
CORNER=slow_max
ALLOW_ZERO_EQN?=N
BADPRJ_OK?=N
BUILD_DIR?=build
BUILD_DIR?=build/MUST_SET
CSV_BASENAME=timing4i.csv
all: $(BUILD_DIR)/$(CORNER)/timgrid-vc.json $(BUILD_DIR)/$(CORNER)/qor.txt
@@ -19,7 +20,7 @@ clean:
.PHONY: all run clean
$(BUILD_DIR)/$(CORNER):
mkdir $(BUILD_DIR)/$(CORNER)
mkdir -p $(BUILD_DIR)/$(CORNER)
# parent should have built this
$(BUILD_DIR)/checksub:
@@ -46,6 +47,11 @@ $(BUILD_DIR)/$(CORNER)/timgrid-vc.json: $(BUILD_DIR)/$(CORNER)/flat.csv
python3 $(TIMFUZ_DIR)/tile_annotate.py --timgrid-s $(TIMFUZ_DIR)/timgrid/build/timgrid-s.json --out $(BUILD_DIR)/$(CORNER)/timgrid-vc.json $(BUILD_DIR)/$(CORNER)/flat.csv
$(BUILD_DIR)/$(CORNER)/qor.txt: $(BUILD_DIR)/$(CORNER)/flat.csv
ifeq ($(SOLVING),i)
python3 $(TIMFUZ_DIR)/solve_qor.py --corner $(CORNER) --bounds-csv $(BUILD_DIR)/$(CORNER)/flat.csv specimen_*/timing4i.csv >$(BUILD_DIR)/$(CORNER)/qor.txt.tmp
mv $(BUILD_DIR)/$(CORNER)/qor.txt.tmp $(BUILD_DIR)/$(CORNER)/qor.txt
else
# FIXME
touch $(BUILD_DIR)/$(CORNER)/qor.txt
endif

View File

@@ -6,5 +6,8 @@ TIMFUZ_DIR=$XRAY_DIR/fuzzers/007-timing
timing_txt2csv () {
python3 $TIMFUZ_DIR/timing_txt2icsv.py --speed-json $TIMFUZ_DIR/speed/build/speed.json --out timing4i.csv.tmp timing4.txt
mv timing4i.csv.tmp timing4i.csv
python3 $TIMFUZ_DIR/timing_txt2scsv.py --speed-json $TIMFUZ_DIR/speed/build/speed.json --out timing4s.csv.tmp timing4.txt
mv timing4s.csv.tmp timing4s.csv
}

View File

@@ -0,0 +1,74 @@
# Interconnect and site (IS) high level aggregation
# Creates corner data and aggregates them together
TIMFUZ_DIR=$(XRAY_DIR)/fuzzers/007-timing
SOLVING=i
CSV_BASENAME=timing4$(SOLVING).csv
BUILD_DIR?=build/MUST_SET
SPECIMENS :=
CSVS := $(addsuffix /$(CSV_BASENAME),$(SPECIMENS))
RREF_CORNER=slow_max
# Set ZERO elements to zero delay (as they are expected to be)
RMZERO?=N
RREF_ARGS=
ifeq ($(RMZERO),Y)
RREF_ARGS+=--rm-zero
endif
# FIXME: clean this up by generating targets from CORNERS
# fast_max => build/i/fast_max/timgrid-vc.json
TIMGRID_VCS=$(BUILD_DIR)/fast_max/timgrid-vc.json $(BUILD_DIR)/fast_min/timgrid-vc.json $(BUILD_DIR)/slow_max/timgrid-vc.json $(BUILD_DIR)/slow_min/timgrid-vc.json
#TIMGRID_VCS=$(addsuffix /timgrid-vc.json,$(addprefix $(BUILD_DIR_I)/,$(CORNERS)))
# make $(BUILD_DIR)/checksub first
$(BUILD_DIR)/fast_max/timgrid-vc.json: $(BUILD_DIR)/checksub
$(MAKE) -f $(TIMFUZ_DIR)/projects/corner.mk CORNER=fast_max
$(BUILD_DIR)/fast_min/timgrid-vc.json: $(BUILD_DIR)/checksub
$(MAKE) -f $(TIMFUZ_DIR)/projects/corner.mk CORNER=fast_min
$(BUILD_DIR)/slow_max/timgrid-vc.json: $(BUILD_DIR)/checksub
$(MAKE) -f $(TIMFUZ_DIR)/projects/corner.mk CORNER=slow_max
$(BUILD_DIR)/slow_min/timgrid-vc.json: $(BUILD_DIR)/checksub
$(MAKE) -f $(TIMFUZ_DIR)/projects/corner.mk CORNER=slow_min
# Normally require all projects to complete
# If BADPRJ_OK is set, only take projects that built successfully
# FIXME: couldn't get call to work
exist_csvs = \
for f in $(CSVS); do \
if [ "$(BADPRJ_OK)" != 'Y' -o -f $$f ] ; then \
echo $$f; \
fi; \
done
all: $(BUILD_DIR)/timgrid-v.json
# rref should be the same regardless of corner
$(BUILD_DIR)/sub.json: $(SPECIMENS_OK)
mkdir -p $(BUILD_DIR)
# Discover which variables can be separated
# This is typically the longest running operation
\
csvs=$$(for f in $(CSVS); do if [ "$(BADPRJ_OK)" != 'Y' -o -f $$f ] ; then echo $$f; fi; done) ; \
echo $$csvs ; \
python3 $(TIMFUZ_DIR)/rref.py --corner $(RREF_CORNER) --simplify $(RREF_ARGS) --out $(BUILD_DIR)/sub.json.tmp $$csvs
mv $(BUILD_DIR)/sub.json.tmp $(BUILD_DIR)/sub.json
$(BUILD_DIR)/grouped.csv: $(SPECIMENS_OK) $(BUILD_DIR)/sub.json
# Separate variables
\
csvs=$$(for f in $(CSVS); do if [ "$(BADPRJ_OK)" != 'Y' -o -f $$f ] ; then echo $$f; fi; done) ; \
python3 $(TIMFUZ_DIR)/csv_flat2group.py --sub-json $(BUILD_DIR)/sub.json --strict --out $(BUILD_DIR)/grouped.csv.tmp $$csvs
mv $(BUILD_DIR)/grouped.csv.tmp $(BUILD_DIR)/grouped.csv
$(BUILD_DIR)/checksub: $(BUILD_DIR)/grouped.csv $(BUILD_DIR)/sub.json
# Verify sub.json yields a cleanly solvable system with no non-pivot leftovers
python3 $(TIMFUZ_DIR)/checksub.py --sub-json $(BUILD_DIR)/sub.json $(BUILD_DIR)/grouped.csv
touch $(BUILD_DIR)/checksub
$(BUILD_DIR)/timgrid-v.json: $(TIMGRID_VCS)
python3 $(TIMFUZ_DIR)/timgrid_vc2v.py --out $(BUILD_DIR)/timgrid-v.json $(TIMGRID_VCS)

View File

@@ -4,38 +4,26 @@
N := 1
SPECIMENS := $(addprefix specimen_,$(shell seq -f '%03.0f' $(N)))
SPECIMENS_OK := $(addsuffix /OK,$(SPECIMENS))
CSVS := $(addsuffix /timing4i.csv,$(SPECIMENS))
TIMFUZ_DIR=$(XRAY_DIR)/fuzzers/007-timing
RREF_CORNER=slow_max
# Allow an empty system of equations?
# for testing only on small projects
ALLOW_ZERO_EQN?=N
# Constrained projects may fail to build
# Set to Y to make a best effort to suck in the ones that did build
BADPRJ_OK?=N
# Set ZERO elements to zero delay (as they are expected to be)
RMZERO?=N
BUILD_DIR?=build
# interconnect
BUILD_DIR_I?=$(BUILD_DIR)/i
# site
BUILD_DIR_S?=$(BUILD_DIR)/s
RREF_ARGS=
ifeq ($(RMZERO),Y)
RREF_ARGS+=--rm-zero
endif
CORNERS=fast_max fast_min slow_max slow_min
TIMGRID_VCS=$(BUILD_DIR)/fast_max/timgrid-vc.json $(BUILD_DIR)/fast_min/timgrid-vc.json $(BUILD_DIR)/slow_max/timgrid-vc.json $(BUILD_DIR)/slow_min/timgrid-vc.json
TIMGRID_V_I=$(BUILD_DIR_I)/timgrid-v.json
TIMGRID_V_S=$(BUILD_DIR_S)/timgrid-v.json
all: $(BUILD_DIR)/timgrid-v.json
# make $(BUILD_DIR)/checksub first
$(BUILD_DIR)/fast_max/timgrid-vc.json: $(BUILD_DIR)/checksub
$(MAKE) -f $(TIMFUZ_DIR)/projects/corner.mk CORNER=fast_max
$(BUILD_DIR)/fast_min/timgrid-vc.json: $(BUILD_DIR)/checksub
$(MAKE) -f $(TIMFUZ_DIR)/projects/corner.mk CORNER=fast_min
$(BUILD_DIR)/slow_max/timgrid-vc.json: $(BUILD_DIR)/checksub
$(MAKE) -f $(TIMFUZ_DIR)/projects/corner.mk CORNER=slow_max
$(BUILD_DIR)/slow_min/timgrid-vc.json: $(BUILD_DIR)/checksub
$(MAKE) -f $(TIMFUZ_DIR)/projects/corner.mk CORNER=slow_min
$(SPECIMENS_OK):
bash generate.sh $(subst /OK,,$@) || (if [ "$(BADPRJ_OK)" != 'Y' ] ; then exit 1; fi; exit 0)
touch $@
@@ -52,38 +40,16 @@ clean:
.PHONY: all run clean
# Normally require all projects to complete
# If BADPRJ_OK is set, only take projects that built successfully
# FIXME: couldn't get call to work
exist_csvs = \
for f in $(CSVS); do \
if [ "$(BADPRJ_OK)" != 'Y' -o -f $$f ] ; then \
echo $$f; \
fi; \
done
$(TIMGRID_V_I): $(SPECIMENS_OK)
$(MAKE) -f $(TIMFUZ_DIR)/projects/is.mk BUILD_DIR=$(BUILD_DIR_I) SOLVING=i SPECIMENS=$(SPECIMENS) all
i: $(TIMGRID_V_I)
# rref should be the same regardless of corner
$(BUILD_DIR)/sub.json: $(SPECIMENS_OK)
mkdir -p $(BUILD_DIR)
# Discover which variables can be separated
# This is typically the longest running operation
\
csvs=$$(for f in $(CSVS); do if [ "$(BADPRJ_OK)" != 'Y' -o -f $$f ] ; then echo $$f; fi; done) ; \
python3 $(TIMFUZ_DIR)/rref.py --corner $(RREF_CORNER) --simplify $(RREF_ARGS) --out $(BUILD_DIR)/sub.json.tmp $$csvs
mv $(BUILD_DIR)/sub.json.tmp $(BUILD_DIR)/sub.json
$(TIMGRID_V_S): $(SPECIMENS_OK)
$(MAKE) -f $(TIMFUZ_DIR)/projects/is.mk BUILD_DIR=$(BUILD_DIR_S) SOLVING=s SPECIMENS=$(SPECIMENS) all
s: $(TIMGRID_V_S)
$(BUILD_DIR)/grouped.csv: $(SPECIMENS_OK) $(BUILD_DIR)/sub.json
# Separate variables
\
csvs=$$(for f in $(CSVS); do if [ "$(BADPRJ_OK)" != 'Y' -o -f $$f ] ; then echo $$f; fi; done) ; \
python3 $(TIMFUZ_DIR)/csv_flat2group.py --sub-json $(BUILD_DIR)/sub.json --strict --out $(BUILD_DIR)/grouped.csv.tmp $$csvs
mv $(BUILD_DIR)/grouped.csv.tmp $(BUILD_DIR)/grouped.csv
.PHONY: i s
$(BUILD_DIR)/checksub: $(BUILD_DIR)/grouped.csv $(BUILD_DIR)/sub.json
# Verify sub.json yields a cleanly solvable system with no non-pivot leftovers
python3 $(TIMFUZ_DIR)/checksub.py --sub-json $(BUILD_DIR)/sub.json $(BUILD_DIR)/grouped.csv
touch $(BUILD_DIR)/checksub
$(BUILD_DIR)/timgrid-v.json: $(TIMGRID_VCS)
python3 $(TIMFUZ_DIR)/timgrid_vc2v.py --out $(BUILD_DIR)/timgrid-v.json $(TIMGRID_VCS)
$(BUILD_DIR)/timgrid-v.json: $(TIMGRID_V_I) $(TIMGRID_V_S)
python3 $(TIMFUZ_DIR)/timgrid_vc2v.py --out $(BUILD_DIR)/timgrid-v.json $(TIMGRID_V_I) $(TIMGRID_V_S)

View File

@@ -17,7 +17,7 @@ proc write_info3 {} {
set outdir "."
set fp [open "$outdir/timing4.txt" w]
# bel as site/bel, so don't bother with site
puts $fp "linetype net src_site src_site_pin src_bel src_bel_pin dst_site dst_site_pin dst_bel dst_bel_pin ico fast_max fast_min slow_max slow_min pips inodes wires"
puts $fp "linetype net src_site src_site_type src_site_pin src_bel src_bel_pin dst_site dst_site_type dst_site_pin dst_bel dst_bel_pin ico fast_max fast_min slow_max slow_min pips inodes wires"
set TIME_start [clock clicks -milliseconds]
set verbose 0
@@ -63,6 +63,7 @@ proc write_info3 {} {
incr site_dst_nets
continue
}
set src_site_type [get_property SITE_TYPE $src_site]
foreach src_site_pin $src_site_pins {
if $verbose {
puts "Source: $src_pin at site $src_site:$src_bel, spin $src_site_pin"
@@ -93,9 +94,11 @@ proc write_info3 {} {
set dst_site_pins [get_site_pins -of_objects $dst_pin]
# Some nets are internal
# But should this happen on dest if we've already filtered source?
# FIXME: need these for inter site model
if {"$dst_site_pins" eq ""} {
continue
}
set dst_site_type [get_property SITE_TYPE $dst_site]
# Also apparently you can have multiple of these as well
foreach dst_site_pin $dst_site_pins {
set fast_max [get_property "FAST_MAX" $delay]
@@ -118,7 +121,7 @@
#set wires [get_wires -of_objects $net -from $src_site_pin -to $dst_site_pin]
set wires [get_wires -of_objects $nodes]
puts -nonewline $fp "NET $net $src_site $src_site_pin $src_bel $src_bel_pin $dst_site $dst_site_pin $dst_bel $dst_bel_pin $ico $fast_max $fast_min $slow_max $slow_min"
puts -nonewline $fp "NET $net $src_site $src_site_type $src_site_pin $src_bel $src_bel_pin $dst_site $dst_site_type $dst_site_pin $dst_bel $dst_bel_pin $ico $fast_max $fast_min $slow_max $slow_min"
# Write pips w/ speed index
puts -nonewline $fp " "

View File

@@ -73,7 +73,7 @@ class State(object):
def load(fn_ins, simplify=False, corner=None, rm_zero=False):
zero_names = OrderedSet()
Ads, b = loadc_Ads_b(fn_ins, corner=corner, ico=True)
Ads, b = loadc_Ads_b(fn_ins, corner=corner)
if rm_zero:
zero_names = rm_zero_cols(Ads)
if simplify:

View File

@@ -6,7 +6,7 @@ import numpy as np
def run(fns_in, corner, bounds_csv, verbose=False):
print('Loading data')
Ads, borig = loadc_Ads_b(fns_in, corner, ico=True)
Ads, borig = loadc_Ads_b(fns_in, corner)
bounds = load_bounds(bounds_csv, corner)
# verify is flattened

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env python3
import timfuz
from timfuz import loadc_Ads_bs, Ads2bounds
from timfuz import loadc_Ads_bs, Ads2bounds, PREFIX_W, PREFIX_P, PREFIX_SITEW, sitew_s2vals
import sys
import os
@@ -9,22 +9,90 @@ import time
import json
def add_pip_wire(tilej, bounds, verbose=False):
'''
We know all possible pips and wires from tilej
Iterate over them and see if a result was generated
'''
used_bounds = set()
for tile in tilej['tiles'].values():
def addk(pws, prefix, k, v):
variable = prefix + ':' + v
val = bounds.get(variable, None)
# print(variable, val)
if val:
used_bounds.add(variable)
else:
val = [None, None, None, None]
pws[k] = val
pips = tile['pips']
for k, v in pips.items():
#pips[k] = bounds.get('PIP_' + v, [None, None, None, None])
addk(pips, PREFIX_P, k, v)
wires = tile['wires']
for k, v in wires.items():
#wires[k] = bounds.get('WIRE_' + v, [None, None, None, None])
addk(wires, PREFIX_W, k, v)
# verify all the variables that should be used were applied
# ...except tilecon may be an ROI and we solved everything
print(
"Interconnect: %u / %u variables used" %
(len(used_bounds), len(bounds)))
if verbose:
print('Remainder: %s' % (set(bounds.keys()) - used_bounds))
def add_sites(tilej, bounds):
# XXX: no source of truth currently
# is there a way we could get this?
sitej = tilej.setdefault('sites', {})
for variable, bound in bounds.items():
# group delays by site
site_type, site_pin, bel_type, bel_pin = sitew_s2vals(variable)
asitej = sitej.setdefault(site_type, {})
# group together?
# wish there was a way to do tuple keys
k = ('%s:%s:%s' % (site_pin, bel_type, bel_pin))
#print(site_type, k)
asitej[k] = bound
#nsites = sum([len(v) for v in sitej.values()])
print('Sites: added %u sites, %u site wires' % (len(sitej), len(bounds)))
def sep_bounds(bounds):
pw = {}
sites = {}
for k, v in bounds.items():
prefix = k.split(':')[0]
if prefix == PREFIX_W:
pw[k] = v
elif prefix == PREFIX_P:
pw[k] = v
elif prefix == PREFIX_SITEW:
sites[k] = v
else:
assert 0, 'Unknown delay: %s %s' % (k, prefix)
return pw, sites
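A minimal sketch of the split performed above, using the timfuz prefixes with made-up variable names and corner delays:
bounds = {
    'WIRE:CLBLM_L_A1': [100, 90, 140, 120],       # hypothetical wire delay
    'PIP:CLBLM_L.A1.A': [50, 40, 70, 60],         # hypothetical pip delay
    'SITEW:SLICEL:A:A6LUT:A1': [30, 25, 45, 40],  # hypothetical site wire delay
}
pw, sites = sep_bounds(bounds)
assert sorted(pw) == ['PIP:CLBLM_L.A1.A', 'WIRE:CLBLM_L_A1']
assert sorted(sites) == ['SITEW:SLICEL:A:A6LUT:A1']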
def run(fns_in, fnout, tile_json_fn, verbose=False):
# modified in place
tilej = json.load(open(tile_json_fn, 'r'))
for fnin in fns_in:
Ads, bs = loadc_Ads_bs([fnin], ico=True)
Ads, bs = loadc_Ads_bs([fnin])
bounds = Ads2bounds(Ads, bs)
bounds_pw, bounds_sites = sep_bounds(bounds)
print(len(bounds), len(bounds_pw), len(bounds_sites))
for tile in tilej['tiles'].values():
pips = tile['pips']
for k, v in pips.items():
pips[k] = bounds.get('PIP_' + v, [None, None, None, None])
wires = tile['wires']
for k, v in wires.items():
wires[k] = bounds.get('WIRE_' + v, [None, None, None, None])
add_pip_wire(tilej, bounds_pw)
add_sites(tilej, bounds_sites)
timfuz.tilej_stats(tilej)

View File

@@ -17,6 +17,25 @@ import collections
from benchmark import Benchmark
# prefix to make easier to track
# models do not overlap between PIPs and WIREs
PREFIX_W = 'WIRE'
PREFIX_P = 'PIP'
# site wire (a to b)
PREFIX_SITEW = 'SITEW'
def sitew_vals2s(site_type, site_pin, bel_type, bel_pin):
'''Pack site wire components into a variable string'''
return '%s:%s:%s:%s:%s' % (
PREFIX_SITEW, site_type, site_pin, bel_type, bel_pin)
def sitew_s2vals(s):
prefix, site_type, site_pin, bel_type, bel_pin = s.split(':')
assert prefix == 'SITEW'
return site_type, site_pin, bel_type, bel_pin
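A quick round trip of the SITEW packing, with made-up site/bel names (the components themselves must not contain ':'):
s = sitew_vals2s('SLICEL', 'A', 'A6LUT', 'A1')  # hypothetical names
assert s == 'SITEW:SLICEL:A:A6LUT:A1'
assert sitew_s2vals(s) == ('SLICEL', 'A', 'A6LUT', 'A1')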
# Equations are filtered out until nothing is left
class SimplifiedToZero(Exception):
@@ -524,11 +543,14 @@ def loadc_Ads_mkb(fns, mkb, filt):
def loadc_Ads_b(fns, corner, ico=None):
corner = corner or "slow_max"
corneri = corner_s2i[corner]
'''
if ico is not None:
filt = lambda ico_, corners, vars: ico_ == ico
else:
filt = lambda ico_, corners, vars: True
'''
assert ico is None, 'ICO filtering moved to higher levels'
filt = lambda ico_, corners, vars: True
def mkb(val):
return val[corneri]
@@ -537,10 +559,14 @@ def loadc_Ads_b(fns, corner, ico=None):
def loadc_Ads_bs(fns, ico=None):
'''
if ico is not None:
filt = lambda ico_, corners, vars: ico_ == ico
else:
filt = lambda ico_, corners, vars: True
'''
assert ico is None, 'ICO filtering moved to higher levels'
filt = lambda ico_, corners, vars: True
def mkb(val):
return val
@@ -730,35 +756,50 @@ def corners2csv(bs):
def tilej_stats(tilej):
stats = {}
for etype in ('pips', 'wires'):
tm = stats.setdefault(etype, {})
tm['net'] = 0
tm['solved'] = [0, 0, 0, 0]
tm['covered'] = [0, 0, 0, 0]
for tile in tilej['tiles'].values():
def tile_stats():
stats = {}
for etype in ('pips', 'wires'):
pips = tile[etype]
for k, v in pips.items():
stats[etype]['net'] += 1
for i in range(4):
if pips[k][i]:
stats[etype]['solved'][i] += 1
if pips[k][i] is not None:
stats[etype]['covered'][i] += 1
tm = stats.setdefault(etype, {})
tm['net'] = 0
tm['solved'] = [0, 0, 0, 0]
tm['covered'] = [0, 0, 0, 0]
for tile in tilej['tiles'].values():
for etype in ('pips', 'wires'):
pips = tile[etype]
for k, v in pips.items():
stats[etype]['net'] += 1
for i in range(4):
if pips[k][i]:
stats[etype]['solved'][i] += 1
if pips[k][i] is not None:
stats[etype]['covered'][i] += 1
return stats
def site_stats():
sitej = tilej['sites']
return {
'wires': sum([len(v) for v in sitej.values()]),
'sites': len(sitej)
}
tstats = tile_stats()
sstats = site_stats()
for corner, corneri in corner_s2i.items():
print('Corner %s' % corner)
for etype in ('pips', 'wires'):
net = stats[etype]['net']
solved = stats[etype]['solved'][corneri]
covered = stats[etype]['covered'][corneri]
net = tstats[etype]['net']
solved = tstats[etype]['solved'][corneri]
covered = tstats[etype]['covered'][corneri]
print(
' %s: %u / %u solved, %u / %u covered' %
(etype, solved, net, covered, net))
print(
' sites: %u sites, %u site wires' %
(sstats['sites'], sstats['wires']))
def load_bounds(bounds_csv, corner, ico=True):
Ads, b = loadc_Ads_b([bounds_csv], corner, ico=ico)
def load_bounds(bounds_csv, corner):
Ads, b = loadc_Ads_b([bounds_csv], corner)
return Ads2bounds(Ads, b)

View File

@@ -165,7 +165,7 @@ def run(
verbose=False,
**kwargs):
print('Loading data')
Ads, b = loadc_Ads_b(fns_in, corner, ico=True)
Ads, b = loadc_Ads_b(fns_in, corner)
# Remove duplicate rows
# is this necessary?
@@ -193,7 +193,7 @@ def run(
Used primarily for multiple optimization passes, such as different algorithms or additional constraints
'''
if bounds_csv:
Ads2, b2 = loadc_Ads_b([bounds_csv], corner, ico=True)
Ads2, b2 = loadc_Ads_b([bounds_csv], corner)
bounds = Ads2bounds(Ads2, b2)
assert len(bounds), 'Failed to load bounds'
rows_old = len(Ads)

View File

@@ -23,7 +23,31 @@ corner2minmax = {
}
def build_tilejo(fnins):
def merge_bdict(vi, vo):
'''
vi: input dictionary
vo: output dictionary
values are corner delay 4 tuples
'''
for name, bis in vi.items():
bos = vo.get(name, [None, None, None, None])
for cornerk, corneri in corner_s2i.items():
bo = bos[corneri]
bi = bis[corneri]
# no new data
if bi is None:
pass
# no previous data
elif bo is None:
bos[corneri] = bi
# combine
else:
minmax = corner2minmax[cornerk]
bos[corneri] = minmax(bi, bo)
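A worked sketch of the merge, assuming corner indices in (fast_max, fast_min, slow_max, slow_min) order and corner2minmax mapping the *_max corners to max and the *_min corners to min:
vi = {'PIP:X': [10, None, 30, 5]}  # hypothetical new bounds
vo = {'PIP:X': [8, 2, None, 4]}    # hypothetical accumulated bounds
merge_bdict(vi, vo)
# fast_max: max(10, 8) -> 10; fast_min: no new data -> 2
# slow_max: no previous data -> 30; slow_min: min(5, 4) -> 4
assert vo['PIP:X'] == [10, 2, 30, 4]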
def merge_tiles(tileji, tilejo):
'''
{
"tiles": {
@@ -38,36 +62,37 @@
],
'''
tilejo = {"tiles": {}}
for tilek, tilevi in tileji.items():
# No previous data? Copy
tilevo = tilejo.get(tilek, None)
if tilevo is None:
tilejo[tilek] = tilevi
# Otherwise combine
else:
merge_bdict(tilevi['pips'], tilevo['pips'])
merge_bdict(tilevi['wires'], tilevo['wires'])
def merge_sites(siteji, sitejo):
for k, vi in siteji.items():
vo = sitejo.get(k, None)
# No previous data? Copy
if vo is None:
sitejo[k] = vi
# Otherwise combine
else:
merge_bdict(vi, vo)
def build_tilejo(fnins):
# merge all inputs into blank output
tilejo = {"tiles": {}, "sites": {}}
for fnin in fnins:
tileji = json.load(open(fnin, 'r'))
for tilek, tilevi in tileji['tiles'].items():
# No previous data? Copy
tilevo = tilejo['tiles'].get(tilek, None)
if tilevo is None:
tilejo['tiles'][tilek] = tilevi
# Otherwise combine
else:
def process_type(etype):
for pipk, pipvi in tilevi[etype].items():
pipvo = tilevo[etype][pipk]
for cornerk, corneri in corner_s2i.items():
cornervo = pipvo[corneri]
cornervi = pipvi[corneri]
# no new data
if cornervi is None:
pass
# no previous data
elif cornervo is None:
pipvo[corneri] = cornervi
# combine
else:
minmax = corner2minmax[cornerk]
pipvo[corneri] = minmax(cornervi, cornervo)
merge_tiles(tileji['tiles'], tilejo['tiles'])
merge_sites(tileji['sites'], tilejo['sites'])
process_type('pips')
process_type('wires')
return tilejo
@@ -110,15 +135,11 @@ def check_corner_minmax(tilej, verbose=False):
timfuz.tilej_stats(tilej)
def check_corners_minmax(tilej, verbose=False):
# TODO: check fast vs slow
pass
def run(fnins, fnout, verbose=False):
tilejo = build_tilejo(fnins)
check_corner_minmax(tilejo)
check_corners_minmax(tilejo)
# XXX: check fast vs slow?
# check_corners_minmax(tilejo)
json.dump(
tilejo,
open(fnout, 'w'),

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python3
from timfuz import Benchmark, A_di2ds
from timfuz import Benchmark, A_di2ds, PREFIX_W, PREFIX_P
from timing_txt2json import gen_timing4n, load_speed_json
import glob
@@ -9,11 +9,6 @@ import json
import sys
from collections import OrderedDict
# prefix to make easier to track
# models do not overlap between PIPs and WIREs
PREFIX_W = 'WIRE_'
PREFIX_P = 'PIP_'
# Verify the nodes and wires really do line up
def vals2Adi_check(vals, names):
@@ -32,11 +27,11 @@ def json2Ads(vals, verbose=False):
def pip2speed(pip):
_site, _name, pip_name = pip
return PREFIX_P + pip_name
return PREFIX_P + ':' + pip_name
def wire2speed(wire):
_site, _name, wire_name = wire
return PREFIX_W + wire_name
return PREFIX_W + ':' + wire_name
print('Making equations')
@@ -96,7 +91,7 @@ def main():
parser = argparse.ArgumentParser(
description=
'Convert obscure timing4.txt into more readable but roughly equivalent timing4i.csv (interconnect)'
'Convert obscure timing4.txt into timing4i.csv (interconnect delay variable occurrences)'
)
parser.add_argument('--verbose', type=int, help='')

View File

@@ -37,7 +37,7 @@ def parse_wire(s, speed_i2s):
def gen_timing4(fn, speed_i2s):
f = open(fn, 'r')
header_want = 'linetype net src_site src_site_pin src_bel src_bel_pin dst_site dst_site_pin dst_bel dst_bel_pin ico fast_max fast_min slow_max slow_min pips inodes wires'
header_want = 'linetype net src_site src_site_type src_site_pin src_bel src_bel_pin dst_site dst_site_type dst_site_pin dst_bel dst_bel_pin ico fast_max fast_min slow_max slow_min pips inodes wires'
ncols = len(header_want.split())
# src_bel dst_bel ico fast_max fast_min slow_max slow_min pips
@@ -50,6 +50,7 @@ def gen_timing4(fn, speed_i2s):
bads = 0
net_lines = 0
for l in f:
def group_line():
ncols = len('lintype ico delays'.split())
assert len(parts) == ncols
@@ -58,23 +59,25 @@ def gen_timing4(fn, speed_i2s):
def net_line():
assert len(parts) == ncols
_lintype, net, src_site, src_site_pin, src_bel, src_bel_pin, dst_site, dst_site_pin, dst_bel, dst_bel_pin, ico, fast_max, fast_min, slow_max, slow_min, pips, nodes, wires = parts
_lintype, net, src_site, src_site_type, src_site_pin, src_bel, src_bel_pin, dst_site, dst_site_type, dst_site_pin, dst_bel, dst_bel_pin, ico, fast_max, fast_min, slow_max, slow_min, pips, nodes, wires = parts
pips = pips.split('|')
nodes = nodes.split('|')
wires = wires.split('|')
return {
'net': net,
'src': {
'site': src_site,
'site': src_site,
'site_type': src_site_type,
'site_pin': src_site_pin,
'bel': src_bel,
'bel_pin': src_bel_pin,
'bel': src_bel,
'bel_pin': src_bel_pin,
},
'dst': {
'site': dst_site,
'site': dst_site,
'site_type': dst_site_type,
'site_pin': dst_site_pin,
'bel': dst_bel,
'bel_pin': dst_bel_pin,
'bel': dst_bel,
'bel_pin': dst_bel_pin,
},
't': {
# ps
@@ -99,13 +102,11 @@ def gen_timing4(fn, speed_i2s):
val = {
'NET': net_line,
'GROUP': group_line,
}[lintype]()
}[lintype]()
yield lintype, val
rets += 1
print(
' load %s: %d bad, %d good, %u net lines' %
(fn, bads, rets, net_lines))
print(' load %s: %d bad, %d good lines' % (fn, bads, rets))
def gen_timing4n(fn, speed_i2s):
@@ -115,7 +116,7 @@ def gen_timing4n(fn, speed_i2s):
yield val
def gen_timing4i(fn, speed_i2s):
def gen_timing4a(fn, speed_i2s):
'''
Like above, but aggregate ico + non-ico into single entries
Key these based on uniqueness of (src_bel, dst_bel)
@@ -126,24 +127,26 @@ def gen_timing4i(fn, speed_i2s):
'''
entries = {}
timgen = gen_timing4(fn, speed_i2s)
rets = 0
while True:
def get_ico(exp_ico):
ret = []
try:
lintype, val = gen.next()
lintype, val = next(timgen)
except StopIteration:
return None
assert lintype == 'GROUP'
ico, delays = val
assert ico == exp_ico
for _ in range(delays):
lintype, val = gen.next()
lintype, val = next(timgen)
assert lintype == 'NET'
ret.append(val)
return ret
ico0s = get_ico(0)
if ico0 is None:
if ico0s is None:
break
ico1s = get_ico(1)
# TODO: verify this is actually true
@@ -151,7 +154,8 @@ def gen_timing4i(fn, speed_i2s):
def same_path(l, r):
# if source and dest are the same, should be the same thing
return l['src']['bel_pin'] == r['src']['bel_pin'] and l['dst']['bel_pin'] == r['dst']['bel_pin']
return l['src']['bel_pin'] == r['src']['bel_pin'] and l['dst'][
'bel_pin'] == r['dst']['bel_pin']
for ico0, ico1 in zip(ico0s, ico1s):
# TODO: verify this is actually true
@@ -161,8 +165,11 @@ def gen_timing4i(fn, speed_i2s):
ico0['t'] = (
ico0['t'],
ico1['t'],
)
)
yield ico0
rets += 1
print(' load %s: %u aggregated lines' % (fn, rets))
def load_speed_json(f):
j = json.load(f)
@@ -175,6 +182,7 @@ def load_speed_json(f):
return j, speed_i2s
'''
def run(speed_json_f, fout, fns_in, verbose=0, corner=None):
print('Loading data')
_speedj, speed_i2s = load_speed_json(speed_json_f)
@@ -235,3 +243,4 @@ def main():
if __name__ == '__main__':
main()
'''

View File

@@ -0,0 +1,168 @@
#!/usr/bin/env python3
from timfuz import Benchmark, A_di2ds, PREFIX_SITEW, sitew_vals2s
from timing_txt2json import gen_timing4a, load_speed_json
import glob
import math
import json
import sys
from collections import OrderedDict
def gen_diffs(speed_json_f, fns_in):
print('Loading data')
_speedj, speed_i2s = load_speed_json(speed_json_f)
for fn_in in fns_in:
for val in gen_timing4a(fn_in, speed_i2s):
# diff to get site only delay
tsites = {}
for k in val['t'][0].keys():
v = val['t'][0][k] - val['t'][1][k]
assert v >= 0
tsites[k] = v
yield val, tsites
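For intuition: val['t'] is the (ico=0, ico=1) pair packed by gen_timing4a, so subtracting the second from the first leaves the site-only delay. A sketch with made-up picosecond values:
t0 = {'fast_max': 350, 'fast_min': 300, 'slow_max': 500, 'slow_min': 450}  # ico=0
t1 = {'fast_max': 250, 'fast_min': 220, 'slow_max': 360, 'slow_min': 330}  # ico=1
tsites = {k: t0[k] - t1[k] for k in t0}
assert tsites == {'fast_max': 100, 'fast_min': 80, 'slow_max': 140, 'slow_min': 120}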
'''
def run(speed_json_f, fout, fns_in, verbose=0, corner=None):
fout.write('fast_max fast_min slow_max slow_min,src_site_type,src_site,src_bel,src_bel_pin,dst_site_type,dst_site,dst_bel,dst_bel_pin\n')
for val, tsites in gen_diffs(speed_json_f, fns_in):
def mkb(t):
return (t['fast_max'], t['fast_min'], t['slow_max'], t['slow_min'])
bstr = ' '.join([str(x) for x in mkb(tsites)])
def srcdst(which):
sd = val[which]
# IOB_X0Y106 IOB_X0Y106/INBUF_EN IOB_X0Y106/INBUF_EN/OUT
# print(sd['site'], sd['bel'], sd['bel_pin'])
site, bel, bel_pin = sd['bel_pin'].split('/')
assert sd['site'] == site
assert sd['bel'] == site + '/' + bel
return sd['site_type'], site, bel, bel_pin
items = [bstr]
items.extend(srcdst('src'))
items.extend(srcdst('dst'))
fout.write(','.join(items) + '\n')
print('done')
'''
# XXX: move to json converter?
def sd_parts(sd):
'''Return site_type, site_pin, bel_type, bel_pin as non-prefixed strings'''
# IOB_X0Y106 IOB_X0Y106/INBUF_EN IOB_X0Y106/INBUF_EN/OUT
# print(sd['site'], sd['bel'], sd['bel_pin'])
site_type = sd['site_type']
site, bel_type, bel_pin = sd['bel_pin'].split('/')
assert sd['site'] == site
assert sd['bel'] == site + '/' + bel_type
site, site_pin = sd['site_pin'].split('/')
assert sd['site_pin'] == sd['site'] + '/' + site_pin
return site_type, site_pin, bel_type, bel_pin
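A sketch of sd_parts on a record shaped like the comment above; the site type and site pin values are made up:
sd = {
    'site': 'IOB_X0Y106',
    'site_type': 'IOB33',        # hypothetical
    'site_pin': 'IOB_X0Y106/I',  # hypothetical
    'bel': 'IOB_X0Y106/INBUF_EN',
    'bel_pin': 'IOB_X0Y106/INBUF_EN/OUT',
}
assert sd_parts(sd) == ('IOB33', 'I', 'INBUF_EN', 'OUT')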
def run(speed_json_f, fout, fns_in, verbose=0, corner=None):
'''
Instead of writing to a simplified CSV, let's go directly to a delay format identical to what fabric uses
Path types:
-inter site: think these are removed for now?
1 model
NOTE: be careful of a net that goes external and comes back in, which isn't inter site
definition is that it doesn't have any site pins
-intra site
2 models
'''
fout.write(
'ico,fast_max fast_min slow_max slow_min,src_site_type,src_site,src_bel,src_bel_pin,dst_site_type,dst_site,dst_bel,dst_bel_pin\n'
)
for val, tsites in gen_diffs(speed_json_f, fns_in):
def mkb(t):
return (t['fast_max'], t['fast_min'], t['slow_max'], t['slow_min'])
bstr = ' '.join([str(x) for x in mkb(tsites)])
# Identify inter site transaction (SITEI)
if val['src']['site_pin'] is None and val['dst']['site_pin'] is None:
# add one delay model for the path
assert 0, 'FIXME: inter site transaction'
row_ds = {'SITEI_BLAH': None}
else:
# if it exits a site it should enter another (possibly the same site)
# site in (SITEI) or site out (SITEO)?
# nah, keep things simple and just call them SITEW
assert val['src']['site_pin'] and val['dst']['site_pin']
row_ds = {}
def add_delay(sd):
site_type, site_pin, bel_type, bel_pin = sd_parts(sd)
# there are _ in some of the names
# use some other chars
k = sitew_vals2s(site_type, site_pin, bel_type, bel_pin)
# even if it's the same site src and dst, input and output should be different types
assert k not in row_ds
row_ds[k] = 1
add_delay(val['src'])
add_delay(val['dst'])
row_ico = 0
items = [str(row_ico), bstr]
for k, v in sorted(row_ds.items()):
items.append('%u %s' % (v, k))
fout.write(','.join(items) + '\n')
print('done')
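A data row produced by run() would look roughly like this (hypothetical delays and names):
0,100 90 140 120,1 SITEW:IOB33:I:INBUF_EN:OUT,1 SITEW:SLICEL:A:A6LUT:A1
That is: the ico flag, the four corner delays in ps, then one '1 <variable>' term per site wire, sorted by variable name.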
def main():
import argparse
parser = argparse.ArgumentParser(
description=
'Convert obscure timing4.txt into timing4s.csv (site delay variable occurrences)'
)
parser.add_argument('--verbose', type=int, help='')
# made a bulk conversion easier...keep?
parser.add_argument(
'--auto-name', action='store_true', help='timing4.txt => timing4s.csv')
parser.add_argument(
'--speed-json',
default='build_speed/speed.json',
help='Provides speed index to name translation')
parser.add_argument('--out', default=None, help='Output timing4s.csv file')
parser.add_argument('fns_in', nargs='+', help='Input timing4.txt files')
args = parser.parse_args()
bench = Benchmark()
fnout = args.out
if fnout is None:
if args.auto_name:
assert len(args.fns_in) == 1
fnin = args.fns_in[0]
fnout = fnin.replace('.txt', 's.csv')
assert fnout != fnin, 'Expect .txt in'
else:
# practically there are too many stray prints to make this work as expected
assert 0, 'File name required'
fnout = '/dev/stdout'
print("Writing to %s" % fnout)
fout = open(fnout, 'w')
fns_in = args.fns_in
if not fns_in:
fns_in = glob.glob('specimen_*/timing4.txt')
run(
speed_json_f=open(args.speed_json, 'r'),
fout=fout,
fns_in=fns_in,
verbose=args.verbose)
if __name__ == '__main__':
main()