diff --git a/.github/kokoro/db-full.sh b/.github/kokoro/db-full.sh
index 7abd0974..6d6c63f1 100755
--- a/.github/kokoro/db-full.sh
+++ b/.github/kokoro/db-full.sh
@@ -94,7 +94,6 @@ EXTRAS_HARNESS_RET=$?
 set -e
 
 # Generate extra parts file (tilegrid, tileconn, part yaml, part json and package_pin)
-# TODO: Disabled for now as for big parts it takes a huge amount of time
 set +e
 make db-extras-${XRAY_SETTINGS}-parts -j $CORES
 EXTRAS_PARTS_RET=$?
diff --git a/Makefile b/Makefile
index aaa05a0c..0bc81806 100644
--- a/Makefile
+++ b/Makefile
@@ -160,8 +160,6 @@ endef
 $(foreach PART,$(XRAY_PARTS),$(eval $(call multiple-parts,$(PART))))
 
 db-extras-artix7-parts: $(addprefix db-part-only-,$(ARTIX_PARTS))
-	+XRAY_PIN_00=J13 XRAY_PIN_01=J14 XRAY_PIN_02=K15 XRAY_PIN_03=K16 \
-	XRAY_PART=xc7a35tftg256-1 XRAY_EQUIV_PART=xc7a50tfgg484-1 $(MAKE) -C fuzzers roi_only
 
 db-extras-artix7-harness:
 	+source minitests/roi_harness/basys3-swbut.sh && $(MAKE) -C fuzzers roi_only
@@ -182,6 +180,8 @@ db-extras-artix7-harness:
 	+source minitests/roi_harness/arty-swbut.sh && \
 		$(MAKE) -C minitests/roi_harness \
 		HARNESS_DIR=$(XRAY_DATABASE_DIR)/artix7/harness/arty-a7/swbut run copy
+	+XRAY_PIN_00=J13 XRAY_PIN_01=J14 XRAY_PIN_02=K15 XRAY_PIN_03=K16 \
+	XRAY_PART=xc7a35tftg256-1 XRAY_EQUIV_PART=xc7a50tfgg484-1 $(MAKE) -C fuzzers roi_only
 
 db-extras-kintex7-parts:
 	@true
diff --git a/fuzzers/074-dump_all/generate_grid.py b/fuzzers/074-dump_all/generate_grid.py
index 32f78ec6..4743d262 100644
--- a/fuzzers/074-dump_all/generate_grid.py
+++ b/fuzzers/074-dump_all/generate_grid.py
@@ -12,6 +12,7 @@ import pickle
 import sys
 
 from prjxray import util, lib
+from prjxray.xjson import extract_numbers
 
 
 def get_tile_grid_info(fname):
@@ -605,6 +606,16 @@ def main():
     tileconn, raw_node_data = generate_tileconn(
         pool, node_tree, nodes, wire_map, grid)
 
+    for data in tileconn:
+        data['wire_pairs'] = tuple(
+            sorted(
+                data['wire_pairs'],
+                key=lambda x: tuple(extract_numbers(s) for s in x)))
+
+    tileconn = tuple(
+        sorted(
+            tileconn, key=lambda x: (x['tile_types'], x['grid_deltas'])))
+
     print('{} Writing tileconn'.format(datetime.datetime.now()))
     with open(tileconn_file, 'w') as f:
         json.dump(tileconn, f, indent=2, sort_keys=True)
diff --git a/fuzzers/074-dump_all/jobtiles.tcl b/fuzzers/074-dump_all/jobtiles.tcl
index 37b37e58..d4468a2e 100644
--- a/fuzzers/074-dump_all/jobtiles.tcl
+++ b/fuzzers/074-dump_all/jobtiles.tcl
@@ -48,6 +48,24 @@ proc lookup_speed_model_name {name} {
     return [dict get $speed_model_name_map $name]
 }
 
+# For BSW_INT_LONG_MUX, use the model from BSW_INT_HLONG_MUX.
+# This isn't exactly correct, but it is a better model to use.
+# BSW_INT_LONG_MUX is a tl_buffer (which we don't really understand), and
+# BSW_INT_HLONG_MUX is not. This substitution appears good enough for now.
+set int_hlong_mux [lookup_speed_model_name BSW_INT_HLONG_MUX]
+set int_long_mux [lookup_speed_model_name BSW_INT_LONG_MUX]
+
+set long_forward [get_property FORWARD $int_long_mux]
+set hlong_forward [get_property FORWARD $int_hlong_mux]
+dict set speed_model_name_map $long_forward [lookup_speed_model_name $hlong_forward]
+
+set long_reverse [get_property REVERSE $int_long_mux]
+set hlong_reverse [get_property REVERSE $int_hlong_mux]
+dict set speed_model_name_map $long_reverse [lookup_speed_model_name $hlong_reverse]
+
+# Same substitution for the _BSW_LONG_TLREVERSE model.
+dict set speed_model_name_map _BSW_LONG_TLREVERSE [lookup_speed_model_name _BSW_LONG_NONTLFORWARD]
+
 for {set j $start } { $j < $stop } { incr j } {
     set tile [lindex $tiles $j]
 
diff --git a/fuzzers/074-dump_all/reduce_tile_types.py b/fuzzers/074-dump_all/reduce_tile_types.py
index f99b16bc..f8aaec4d 100644
--- a/fuzzers/074-dump_all/reduce_tile_types.py
+++ b/fuzzers/074-dump_all/reduce_tile_types.py
@@ -18,6 +18,7 @@ import multiprocessing
 import os
 import functools
 import json
+from prjxray.xjson import extract_numbers
 
 
 def check_and_strip_prefix(name, prefix):
@@ -365,6 +366,11 @@ def main():
                     tile_type, site_types[site_type]['type'])), 'w') as f:
             json.dump(site_types[site_type], f, indent=2, sort_keys=True)
 
+    reduced_tile['sites'] = sorted(
+        reduced_tile['sites'],
+        key=lambda site: extract_numbers(
+            '{}_{}'.format(site['name'], site['prefix'])))
+
     with open(tile_type_file, 'w') as f:
         json.dump(reduced_tile, f, indent=2, sort_keys=True)
 
diff --git a/minitests/roi_harness/create_design_json.py b/minitests/roi_harness/create_design_json.py
index ac336fa5..1c30fbdf 100644
--- a/minitests/roi_harness/create_design_json.py
+++ b/minitests/roi_harness/create_design_json.py
@@ -6,12 +6,14 @@ import fasm
 from prjxray.db import Database
 from prjxray.roi import Roi
 from prjxray.util import get_db_root, get_part
+from prjxray.xjson import extract_numbers
 
 
 def set_port_wires(ports, name, pin, wires_outside_roi):
     for port in ports:
         if name == port['name']:
-            port['wires_outside_roi'] = wires_outside_roi
+            port['wires_outside_roi'] = sorted(
+                wires_outside_roi, key=extract_numbers)
 
             assert port['pin'] == pin
             return
@@ -104,8 +106,12 @@ def main():
         if not_in_roi:
             required_features.append(fasm_line)
 
-    design_json['required_features'] = fasm.fasm_tuple_to_string(
-        required_features, canonical=True).split('\n')
+    design_json['required_features'] = sorted(
+        fasm.fasm_tuple_to_string(required_features,
+                                  canonical=True).split('\n'),
+        key=extract_numbers)
+
+    design_json['ports'].sort(key=lambda x: extract_numbers(x['name']))
 
     xjson.pprint(sys.stdout, design_json)
 
diff --git a/prjxray/xjson.py b/prjxray/xjson.py
new file mode 100644
index 00000000..da888e0a
--- /dev/null
+++ b/prjxray/xjson.py
@@ -0,0 +1,98 @@
+import io
+import json
+import re
+
+from collections import OrderedDict
+
+
+def extract_numbers(s):
+    """
+    >>> extract_numbers("CLK_HROW_WR10END2_3")
+    ('CLK_HROW_WR', 10, 'END', 2, '_', 3)
+    >>> extract_numbers("VBRK_WR1END2")
+    ('VBRK_WR', 1, 'END', 2)
+    """
+    bits = []
+    for m in re.finditer("([^0-9]*)([0-9]*)", s):
+        if m.group(1):
+            bits.append(m.group(1))
+        if m.group(2):
+            bits.append(int(m.group(2)))
+    return tuple(bits)
+
+
+def sort(data):
+    """Sort data types via "natural" numbers.
+
+    Supports all the basic Python data types.
+    >>> o = sort({
+    ...     't1': {'c','b'},           # Set
+    ...     't2': ('a2', 'a10', 'e'),  # Tuple
+    ...     't3': [5, 3, 2],           # List
+    ...     't4': {                    # Dictionary
+    ...         'a4': ('b2', 'b3'),
+    ...         'a2': ['c1', 'c2', 'c0', 'c10'],
+    ...     },
+    ...     't5': ['a1b5', 'a2b1', 'a1b1'],
+    ... })
+    >>> for t in o:
+    ...     print(t+':', o[t])
+    t1: ('b', 'c')
+    t2: ('a2', 'a10', 'e')
+    t3: (5, 3, 2)
+    t4: OrderedDict([('a2', ('c1', 'c2', 'c0', 'c10')), ('a4', ('b2', 'b3'))])
+    t5: ('a1b5', 'a2b1', 'a1b1')
+
+    Don't mangle "pairs"
+    >>> sort([('b', 'c'), ('2', '1')])
+    (('b', 'c'), ('2', '1'))
+    """
+
+    def key(o):
+        if o is None:
+            return None
+        elif isinstance(o, str):
+            return extract_numbers(o)
+        elif isinstance(o, int):
+            return o
+        elif isinstance(o, (list, tuple)):
+            return tuple(key(i) for i in o)
+        elif isinstance(o, dict):
+            return tuple((key(k), key(v)) for k, v in o.items())
+        elif isinstance(o, set):
+            return tuple(key(k) for k in o)
+        raise ValueError(repr(o))
+
+    def rsorter(o):
+        if isinstance(o, dict):
+            nitems = []
+            for k, v in o.items():
+                nitems.append((key(k), k, rsorter(v)))
+            nitems.sort(key=lambda n: n[0])
+
+            new_dict = OrderedDict()
+            for _, k, v in nitems:
+                new_dict[k] = v
+            return new_dict
+
+        elif isinstance(o, set):
+            return tuple(sorted((rsorter(v) for v in o), key=key))
+        elif isinstance(o, (tuple, list)):
+            return tuple(rsorter(v) for v in o)
+        else:
+            return o
+
+    return rsorter(data)
+
+
+def pprint(f, data):
+    detach = False
+    if not isinstance(f, io.TextIOBase):
+        detach = True
+        f = io.TextIOWrapper(f)
+    data = sort(data)
+    json.dump(data, f, indent=4)
+    f.write('\n')
+    f.flush()
+    if detach:
+        f.detach()
diff --git a/utils/create_timing_worksheet_db.py b/utils/create_timing_worksheet_db.py
index ab269b36..845b7af8 100644
--- a/utils/create_timing_worksheet_db.py
+++ b/utils/create_timing_worksheet_db.py
@@ -125,7 +125,13 @@ class Net(object):
     def extend_rc_tree(self, ws, current_rc_root, timing_lookup, node):
         rc_elements = []
 
-        for wire in node['wires']:
+
+        # LV nodes need special handling because of a workaround in the
+        # pip timing data.
+        is_lv_node = any(
+            wire['name'].split('/')[1].startswith('LV')
+            for wire in node['wires'])
+        for idx, wire in enumerate(node['wires']):
             wire_timing = timing_lookup.find_wire(wire['name'])
             ws['A{}'.format(self.row)] = wire['name']
             ws['B{}'.format(self.row)] = 'Part of wire'
@@ -134,8 +140,20 @@ class Net(object):
             cells = {}
             cells['R'] = 'C{}'.format(self.row)
             cells['C'] = 'D{}'.format(self.row)
-            ws[cells['R']] = wire_timing.resistance
-            ws[cells['C']] = wire_timing.capacitance
+            if not is_lv_node:
+                ws[cells['R']] = wire_timing.resistance
+                ws[cells['C']] = wire_timing.capacitance
+            else:
+                # Only use the first 2 wire RCs and ignore the rest. It
+                # appears that some of the RC constants were lumped into the
+                # switch timing, so don't double count.
+                if idx < 2:
+                    ws[cells['R']] = wire_timing.resistance
+                    ws[cells['C']] = wire_timing.capacitance
+                else:
+                    ws[cells['R']] = 0
+                    ws[cells['C']] = 0
+
             rc_elements.append(
                 RcElement(
                     resistance=cells['R'],
diff --git a/utils/xjson.py b/utils/xjson.py
index 4e670be7..7affd767 100755
--- a/utils/xjson.py
+++ b/utils/xjson.py
@@ -1,134 +1,7 @@
 #!/usr/bin/env python3
-import io
-import json
-import re
 import sys
-
-from collections import OrderedDict
-from ordered_set import OrderedSet
-
-
-def extract_numbers(s):
-    """
-    >>> extract_numbers("CLK_HROW_WR10END2_3")
-    ('CLK_HROW_WR', 10, 'END', 2, '_', 3)
-    >>> extract_numbers("VBRK_WR1END2")
-    ('VBRK_WR', 1, 'END', 2)
-    """
-    bits = []
-    for m in re.finditer("([^0-9]*)([0-9]*)", s):
-        if m.group(1):
-            bits.append(m.group(1))
-        if m.group(2):
-            bits.append(int(m.group(2)))
-    return tuple(bits)
-
-
-def sort(data):
-    """Sort data types via "natural" numbers.
-
-    Supports all the basic Python data types.
-    >>> o = sort({
-    ...     't1': {'c','b'},           # Set
-    ...     't2': ('a2', 'a10', 'e'),  # Tuple
-    ...     't3': [5, 3, 2],           # List
-    ...     't4': {                    # Dictionary
-    ...         'a4': ('b2', 'b3'),
-    ...         'a2': ['c1', 'c2', 'c0', 'c10'],
-    ...     },
-    ...     't5': ['a1b5', 'a2b1', 'a1b1'],
-    ... })
-    >>> for t in o:
-    ...     print(t+':', o[t])
-    t1: OrderedSet(['b', 'c'])
-    t2: ('a2', 'a10', 'e')
-    t3: (2, 3, 5)
-    t4: OrderedDict([('a2', ('c0', 'c1', 'c2', 'c10')), ('a4', ('b2', 'b3'))])
-    t5: ('a1b1', 'a1b5', 'a2b1')
-
-    Don't mangle "pairs"
-    >>> sort([('b', 'c'), ('2', '1')])
-    [('b', 'c'), ('2', '1')]
-    """
-    # FIXME: We assume that a list is a tileconn.json format...
-    if isinstance(data, list) and len(data) > 0 and 'wire_pairs' in data[0]:
-        for o in data:
-            o['wire_pairs'].sort(
-                key=lambda o: (extract_numbers(o[0]), extract_numbers(o[1])))
-
-        data.sort(key=lambda o: (o['tile_types'], o['grid_deltas']))
-        return data
-    else:
-
-        def key(o):
-            if o is None:
-                return None
-            elif isinstance(o, str):
-                return extract_numbers(o)
-            elif isinstance(o, int):
-                return o
-            elif isinstance(o, (list, tuple)):
-                return tuple(key(i) for i in o)
-            elif isinstance(o, dict):
-                return tuple((key(k), key(v)) for k, v in o.items())
-            elif isinstance(o, set):
-                return tuple(key(k) for k in o)
-            raise ValueError(repr(o))
-
-        def rsorter(o):
-            if isinstance(o, dict):
-                nitems = []
-                for k, v in o.items():
-                    nitems.append((key(k), k, rsorter(v)))
-                nitems.sort(key=lambda n: n[0])
-
-                new_dict = OrderedDict()
-                for _, k, v in nitems:
-                    new_dict[k] = v
-                return new_dict
-
-            elif isinstance(o, set):
-                nitems = []
-                for k in o:
-                    nitems.append((key(k), k))
-                nitems.sort(key=lambda n: n[0])
-
-                new_set = OrderedSet()
-                for _, k in nitems:
-                    new_set.add(k)
-                return new_set
-
-            elif isinstance(o, (tuple, list)):
-                if len(o) == 2:
-                    return o
-
-                nlist = []
-                for i in o:
-                    nlist.append((key(i), rsorter(i)))
-                nlist.sort(key=lambda n: n[0])
-
-                new_list = []
-                for _, i in nlist:
-                    new_list.append(i)
-                return tuple(new_list)
-            else:
-                return o
-
-        return rsorter(data)
-
-
-def pprint(f, data):
-    detach = False
-    if not isinstance(f, io.TextIOBase):
-        detach = True
-        f = io.TextIOWrapper(f)
-    data = sort(data)
-    json.dump(data, f, indent=4)
-    f.write('\n')
-    f.flush()
-    if detach:
-        f.detach()
-
+import json
+from prjxray.xjson import pprint
 
 if __name__ == "__main__":
     if len(sys.argv) == 1:
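
A minimal usage sketch of the relocated prjxray.xjson helpers follows (module
path and function names are taken from the new file above; the sample strings
are invented for illustration):

    import sys

    from prjxray.xjson import extract_numbers, sort, pprint

    # extract_numbers() turns digit runs into ints, giving a "natural" sort
    # key, so "INT_R2" orders before "INT_R10".
    assert extract_numbers("VBRK_WR1END2") == ('VBRK_WR', 1, 'END', 2)
    assert sorted(["INT_R10", "INT_R2"], key=extract_numbers) == \
        ["INT_R2", "INT_R10"]

    # sort() recursively orders dict keys and set members with that key
    # (lists and tuples keep their element order), and pprint() dumps the
    # result as indented JSON, so repeated runs give stable, diff-friendly
    # output.
    pprint(sys.stdout, {"wires10": {"b", "a"}, "wires2": ["E2", "E10"]})

pprint() also accepts a binary stream: it wraps it in an io.TextIOWrapper for
the dump and detaches the wrapper again before returning.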