timfuz: fix zero delay element inconsistency

Signed-off-by: John McMaster <johndmcmaster@gmail.com>
John McMaster 2018-09-11 16:43:37 -07:00
parent 1fc02389ed
commit fa524df657
6 changed files with 44 additions and 82 deletions

View File

@@ -89,7 +89,8 @@ def run(fns_in, sub_json=None, verbose=False):
'''
print
# https://docs.scipy.org/doc/numpy-dev/reference/generated/numpy.linalg.matrix_rank.html
print('rank: %s / %d col' % (np.linalg.matrix_rank(Amat), len(names)))
rank = np.linalg.matrix_rank(Amat)
print('rank: %s / %d col' % (rank, len(names)))
# doesn't work on non-square matrices
if 0:
# https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.slogdet.html
@@ -99,6 +100,8 @@ def run(fns_in, sub_json=None, verbose=False):
print('slogdet :( : 0')
else:
print('slogdet :) : %s, %s' % (sign, logdet))
if rank != len(names):
raise Exception("Matrix not fully ranked w/ %u / %u" % (rank, len(names)))
def main():
import argparse
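
The check added above turns a previously informational rank printout into a hard failure. A minimal sketch of the idea, separate from the diff and using a small made-up matrix: when np.linalg.matrix_rank reports fewer independent rows than there are columns (delay variables), the system is under-determined and cannot be solved uniquely.

    import numpy as np

    # Two paths covering the same elements in the same proportion:
    # rank 1 < 2 columns, so the two delays cannot be separated.
    A = np.array([[1.0, 1.0],
                  [2.0, 2.0]])
    rank = np.linalg.matrix_rank(A)
    if rank != A.shape[1]:
        raise Exception("Matrix not fully ranked w/ %u / %u" % (rank, A.shape[1]))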

View File

@@ -1,19 +1,6 @@
#!/usr/bin/env python3
# https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.optimize.linprog.html
from scipy.optimize import linprog
from timfuz import Benchmark, Ar_di2np, Ar_ds2t, A_di2ds, A_ds2di, simplify_rows, loadc_Ads_b, index_names, A_ds2np, load_sub, run_sub_json, A_ub_np2d, print_eqns, print_eqns_np, Ads2bounds, loadc_Ads_raw, instances
from timfuz_massage import massage_equations
import numpy as np
import glob
import json
import math
from collections import OrderedDict
from fractions import Fraction
import sys
import datetime
import os
import time
from timfuz import Benchmark, loadc_Ads_b, index_names, load_sub, run_sub_json, instances
def gen_group(fnin, sub_json, strict=False, verbose=False):
print('Loading data')
@@ -31,15 +18,16 @@ def gen_group(fnin, sub_json, strict=False, verbose=False):
for row_ds, row_b in zip(Ads, b):
yield row_ds, [row_b for _ in range(4)]
def run(fnin, fnout, sub_json, corner=None, strict=False, verbose=False):
def run(fns_in, fnout, sub_json, corner=None, strict=False, verbose=False):
with open(fnout, 'w') as fout:
fout.write('ico,fast_max fast_min slow_max slow_min,rows...\n')
for row_ds, row_bs in gen_group(fnin, sub_json, strict=strict):
row_ico = 1
items = [str(row_ico), ' '.join([str(x) for x in row_bs])]
for k, v in sorted(row_ds.items()):
items.append('%u %s' % (v, k))
fout.write(','.join(items) + '\n')
for fn_in in fns_in:
for row_ds, row_bs in gen_group(fn_in, sub_json, strict=strict):
row_ico = 1
items = [str(row_ico), ' '.join([str(x) for x in row_bs])]
for k, v in sorted(row_ds.items()):
items.append('%u %s' % (v, k))
fout.write(','.join(items) + '\n')
def main():
import argparse
@@ -54,8 +42,11 @@ def main():
parser.add_argument('--strict', action='store_true', help='')
parser.add_argument('--sub-csv', help='')
parser.add_argument('--sub-json', required=True, help='Group substitutions to make fully ranked')
parser.add_argument('fnin', default=None, help='input timing delay .csv')
parser.add_argument('fnout', default=None, help='output timing delay .csv')
parser.add_argument('--out', help='Output sub.json substitution result')
parser.add_argument(
'fns_in',
nargs='*',
help='timing3.txt input files')
args = parser.parse_args()
# Store options in dict to ease passing through functions
bench = Benchmark()
@@ -63,7 +54,7 @@ def main():
sub_json = load_sub(args.sub_json)
try:
run(args.fnin, args.fnout, sub_json=sub_json, strict=args.strict, verbose=args.verbose)
run(args.fns_in, args.out, sub_json=sub_json, strict=args.strict, verbose=args.verbose)
finally:
print('Exiting after %s' % bench)
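
For illustration only (the values and names below are made up), one row emitted by run() combines the ico flag, the four corner delays, and the per-group coefficients:

    row_ds = {'GROUP_A': 2, 'WIRE_B': 1}   # hypothetical group coefficients
    row_bs = [430, 210, 980, 560]          # hypothetical fast_max fast_min slow_max slow_min
    items = ['1', ' '.join(str(x) for x in row_bs)]
    for k, v in sorted(row_ds.items()):
        items.append('%u %s' % (v, k))
    print(','.join(items))
    # -> 1,430 210 980 560,2 GROUP_A,1 WIRE_B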

View File

@@ -1,37 +0,0 @@
module roi (
input wire clk,
output wire out);
reg [23:0] counter;
assign out = counter[23] ^ counter[22] ^ counter[2] && counter[1] || counter[0];
always @(posedge clk) begin
counter <= counter + 1;
end
endmodule
module top(input wire clk, input wire stb, input wire di, output wire do);
localparam integer DIN_N = 0;
localparam integer DOUT_N = 1;
reg [DIN_N-1:0] din;
wire [DOUT_N-1:0] dout;
reg [DIN_N-1:0] din_shr;
reg [DOUT_N-1:0] dout_shr;
always @(posedge clk) begin
din_shr <= {din_shr, di};
dout_shr <= {dout_shr, din_shr[DIN_N-1]};
if (stb) begin
din <= din_shr;
dout_shr <= dout;
end
end
assign do = dout_shr[DOUT_N-1];
roi roi(
.clk(clk),
.out(dout[0])
);
endmodule

View File

@@ -26,25 +26,33 @@ build/sub.json: $(SPECIMENS_OK)
mkdir -p build
# Discover which variables can be separated
# This is typically the longest running operation
python3 $(TIMFUZ_DIR)/rref.py --simplify --out build/sub.json $(CSVS)
python3 $(TIMFUZ_DIR)/rref.py --simplify --out build/sub.json.tmp $(CSVS)
mv build/sub.json.tmp build/sub.json
build/grouped.csv: $(SPECIMENS_OK) build/sub.json
# Separate variables
python3 $(TIMFUZ_DIR)/csv_flat2group.py --sub-json build/sub.json --strict $(CSVS) build/grouped.csv
# Verify sub.json makes a solvable solution
# python3 $(TIMFUZ_DIR)/checksub.py --sub-json build/sub.json grouped.csv
python3 $(TIMFUZ_DIR)/csv_flat2group.py --sub-json build/sub.json --strict --out build/grouped.csv.tmp $(CSVS)
mv build/grouped.csv.tmp build/grouped.csv
build/leastsq.csv: build/sub.json build/grouped.csv
build/checksub: build/grouped.csv build/sub.json
# Verify sub.json makes a cleanly solvable solution with no non-pivot leftover
python3 $(TIMFUZ_DIR)/checksub.py --sub-json build/sub.json build/grouped.csv
touch build/checksub
build/leastsq.csv: build/sub.json build/grouped.csv build/checksub
# Create a rough timing model that approximately fits the given paths
python3 $(TIMFUZ_DIR)/solve_leastsq.py --sub-json build/sub.json build/grouped.csv --out build/leastsq.csv
python3 $(TIMFUZ_DIR)/solve_leastsq.py --sub-json build/sub.json build/grouped.csv --out build/leastsq.csv.tmp
mv build/leastsq.csv.tmp build/leastsq.csv
build/linprog.csv: build/leastsq.csv build/grouped.csv
# Tweak rough timing model, making sure all constraints are satisfied
python3 $(TIMFUZ_DIR)/solve_linprog.py --sub-json build/sub.json --sub-csv build/leastsq.csv --massage build/grouped.csv --out build/linprog.csv
python3 $(TIMFUZ_DIR)/solve_linprog.py --sub-json build/sub.json --sub-csv build/leastsq.csv --massage build/grouped.csv --out build/linprog.csv.tmp
mv build/linprog.csv.tmp build/linprog.csv
build/flat.csv: build/linprog.csv
# Take separated variables and back-annotate them to the original timing variables
python3 $(TIMFUZ_DIR)/csv_group2flat.py --sub-json build/sub.json --sort build/linprog.csv build/flat.csv
python3 $(TIMFUZ_DIR)/csv_group2flat.py --sub-json build/sub.json --sort build/linprog.csv build/flat.csv.tmp
mv build/flat.csv.tmp build/flat.csv
build/tilea.json: build/flat.csv
# Final processing
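
The new .tmp-then-mv steps above exist so that an interrupted or failing tool run never leaves a partially written output that a later make invocation would mistake for an up-to-date target. A rough Python sketch of the same publish-by-rename idea (file name hypothetical):

    import os

    def write_atomic(path, text):
        # Write to a sibling temp file, then rename into place; the rename is
        # atomic on POSIX, so readers see either the old file or the complete new one.
        tmp = path + '.tmp'
        with open(tmp, 'w') as f:
            f.write(text)
        os.replace(tmp, path)

    write_atomic('sub.json', '{}')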

View File

@@ -71,7 +71,7 @@ class State(object):
Ads, b = loadc_Ads_b(fn_ins, corner=corner, ico=True)
if simplify:
print('Simplifying corner %s' % (corner,))
Ads, b = simplify_rows(Ads, b)
Ads, b = simplify_rows(Ads, b, remove_zd=False)
return State(Ads)
def write_state(state, fout):

View File

@@ -130,7 +130,7 @@ def Ab_ub_dt2d(eqns):
return list(A_ubd), list(b_ub)
# This significantly reduces runtime
def simplify_rows(Ads, b_ub):
def simplify_rows(Ads, b_ub, remove_zd=False):
'''Remove duplicate equations, taking highest delay'''
# dict of constants to highest delay
eqns = OrderedDict()
@@ -149,7 +149,7 @@ def simplify_rows(Ads, b_ub):
# TODO: elements have zero delay (ex: COUT)
# Remove these for now since they make me nervous
# Although they should just solve to 0
if not b:
if remove_zd and not b:
zero_ds += 1
continue
@@ -168,7 +168,7 @@ def simplify_rows(Ads, b_ub):
#A_ub_ret = eqns.keys()
A_ubd_ret, b_ub_ret = Ab_ub_dt2d(eqns)
print('Simplify rows: %d => %d w/ zd %d, ze %d' % (len(b_ub), len(b_ub_ret), zero_ds, zero_es))
print('Simplify rows: %d => %d rows w/ zd %d, ze %d' % (len(b_ub), len(b_ub_ret), zero_ds, zero_es))
#return A_ub_ret, b_ub_ret
#return A_ub_np2d(A_ub_ret), b_ub_ret
return A_ubd_ret, b_ub_ret
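
A condensed sketch of what simplify_rows now does with the new remove_zd flag (toy rows, not the real data structures): duplicate equations keep the highest delay, and zero-delay rows are dropped only when explicitly requested.

    from collections import OrderedDict

    def simplify_rows_sketch(rows, remove_zd=False):
        # rows: list of (variables dict, delay); key each equation by its terms
        eqns = OrderedDict()
        for ds, b in rows:
            if remove_zd and not b:
                continue                          # optionally drop zero-delay rows
            key = tuple(sorted(ds.items()))
            eqns[key] = max(eqns.get(key, 0), b)  # duplicates keep the highest delay
        return [(dict(k), b) for k, b in eqns.items()]

    rows = [({'A': 1}, 5), ({'A': 1}, 7), ({'COUT': 1}, 0)]
    print(simplify_rows_sketch(rows))                  # keeps A -> 7 and the zero-delay row
    print(simplify_rows_sketch(rows, remove_zd=True))  # drops the zero-delay row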
@@ -693,9 +693,9 @@ def loadc_Ads_b(fns, corner, ico=None):
corneri = corner_s2i[corner]
if ico is not None:
filt = lambda ico, corners, vars: ico == ico
filt = lambda ico_, corners, vars: ico_ == ico
else:
filt = lambda ico, corners, vars: True
filt = lambda ico_, corners, vars: True
def mkb(val):
return val[corneri]
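
The filt change above fixes a name-shadowing bug: in the old lambda the parameter named ico hid the outer ico, so ico == ico was always True and no rows were filtered. A minimal illustration of the pitfall:

    ico = 1
    broken = lambda ico, corners, vars: ico == ico    # parameter shadows outer ico: always True
    fixed = lambda ico_, corners, vars: ico_ == ico   # compares against the outer value
    print(broken(0, None, None))  # True  (row wrongly kept)
    print(fixed(0, None, None))   # False (row filtered as intended)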
@@ -725,9 +725,6 @@ def load_sub(fn):
return j
def row_sub_syms(row, sub_json, strict=False, verbose=False):
zero = Fraction(0)
zero = 0
if 0 and verbose:
print("")
print(row.items())
@@ -761,9 +758,9 @@ def row_sub_syms(row, sub_json, strict=False, verbose=False):
if verbose:
print('pivot %i %s' % (n, pivot))
for subk, subv in sorted(sub_json['subs'][group].items()):
oldn = row.get(subk, zero)
oldn = row.get(subk, type(subv)(0))
rown = -n * subv
rown += type(rown)(oldn)
rown += oldn
if verbose:
print(" %s: %d => %d" % (subk, oldn, rown))
if rown == 0:
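
The oldn default change above makes the fallback zero take the type of the substitution value, so a row stays in one numeric type whether the sub coefficients are plain ints or Fractions. A small sketch with made-up numbers:

    from fractions import Fraction

    n = 2
    subv = Fraction(1, 3)                        # hypothetical substitution coefficient
    oldn = {}.get('SOME_ELEM', type(subv)(0))    # default zero of the same type as subv
    rown = -n * subv + oldn
    print(rown)                                  # -2/3, stays an exact Fraction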
@@ -783,7 +780,7 @@ def row_sub_syms(row, sub_json, strict=False, verbose=False):
# verify no subs are left
for subs in sub_json['subs'].values():
for sub in subs:
assert sub not in row, 'Unexpected element %s' % sub
assert sub not in row, 'non-pivot element after group sub %s' % sub
# Verify all constants are positive
for k, v in sorted(row.items()):