Merge pull request #1230 from litghost/sorting-fix

Copy of #1226, remove use of xjson from `074`

>  - Rework how the `json` files are sorted (numbers are treated as numerics).
>  - Sort `csv` and `txt` files.
>  - Sort `segbits.*origin_info.db` files.
>  - Sort the grid file.
> 
> How this changes the output can be seen in https://github.com/SymbiFlow/prjxray-db/pull/11/files
Commit db14b30fdb by Tim Ansell, 2020-02-12 19:26:08 -08:00, committed by GitHub.
9 changed files with 226 additions and 68 deletions
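
For context, "numbers are treated as numerics" means a natural sort: runs of digits are compared as integers rather than character by character, so a coordinate like `X10Y2` lands after `X9Y2`. A minimal sketch of the idea (the `natural_key` helper below is illustrative, not the exact `extract_numbers`/`sortable_tag` code touched in this diff):

```python
import re

def natural_key(s):
    """Split a string into text and integer chunks, e.g. 'X10Y2' -> ('X', 10, 'Y', 2)."""
    return tuple(int(p) if p.isdigit() else p for p in re.split(r'(\d+)', s) if p)

names = ['SLICE_X9Y10', 'SLICE_X10Y2', 'SLICE_X2Y2']
print(sorted(names))                   # plain sort:   ['SLICE_X10Y2', 'SLICE_X2Y2', 'SLICE_X9Y10']
print(sorted(names, key=natural_key))  # natural sort: ['SLICE_X2Y2', 'SLICE_X9Y10', 'SLICE_X10Y2']
```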


@ -1,5 +1,5 @@
SHELL = bash
ALL_EXCLUDE = third_party .git env build
ALL_EXCLUDE = third_party .git env build docs/env
# Check if root
ifeq ($(shell id -u),0)


@ -15,8 +15,6 @@ import re
import sys
import copy
from utils import xjson
# All site names appear to follow the pattern <type>_X<abs coord>Y<abs coord>.
# Generally speaking, only the tile relative coordinates are required to
# assemble arch defs, so we re-origin the coordinates to be relative to the tile
@ -115,7 +113,7 @@ def main():
site_pin['name'] = site_pin['name'][len(orig_site_name) + 1:]
xjson.pprint(sys.stdout, output_site_pins)
json.dumps(output_site_pins, indent=2, sort_keys=True)
if __name__ == "__main__":


@ -5,8 +5,7 @@ import os.path
import prjxray.lib
import pickle
import collections
from utils import xjson
import json
def build_node_index(fname):
@ -273,7 +272,7 @@ def main():
print('{} Writing node tree'.format(datetime.datetime.now()))
with open(os.path.join(args.output_dir, 'node_tree.json'), 'w') as f:
xjson.pprint(f, nodes)
json.dump(nodes, f, indent=2, sort_keys=True)
if __name__ == '__main__':


@ -11,7 +11,6 @@ import datetime
import pickle
import sys
from utils import xjson
from prjxray import util, lib
@ -608,7 +607,7 @@ def main():
print('{} Writing tileconn'.format(datetime.datetime.now()))
with open(tileconn_file, 'w') as f:
xjson.pprint(f, tileconn)
json.dump(tileconn, f, indent=2, sort_keys=True)
else:
with open(wire_map_file, 'rb') as f:
wire_map = pickle.load(f)
@ -653,7 +652,7 @@ def main():
if len(error_nodes) > 0:
error_nodes_file = os.path.join(args.output_dir, 'error_nodes.json')
with open(error_nodes_file, 'w') as f:
xjson.pprint(f, error_nodes)
json.dump(error_nodes, f, indent=2, sort_keys=True)
ignored_wires = []
ignored_wires_file = args.ignored_wires


@ -13,8 +13,6 @@ import os.path
import re
import json
from utils import xjson
def main():
parser = argparse.ArgumentParser(
@ -57,7 +55,7 @@ def main():
with open(os.path.join(args.output_dir,
'site_type_{}.json'.format(site_type)),
'w') as f:
xjson.pprint(f, proto_site_type)
json.dump(proto_site_type, f)
if __name__ == '__main__':


@ -17,8 +17,7 @@ import progressbar
import multiprocessing
import os
import functools
from utils import xjson
import json
def check_and_strip_prefix(name, prefix):
@ -364,10 +363,10 @@ def main():
with open(os.path.join(
args.output_dir, 'tile_type_{}_site_type_{}.json'.format(
tile_type, site_types[site_type]['type'])), 'w') as f:
xjson.pprint(f, site_types[site_type])
json.dump(site_types[site_type], f, indent=2, sort_keys=True)
with open(tile_type_file, 'w') as f:
xjson.pprint(f, reduced_tile)
json.dump(reduced_tile, f, indent=2, sort_keys=True)
if __name__ == '__main__':
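
The generator scripts above all make the same swap: `xjson.pprint(f, data)` becomes `json.dump(data, f, indent=2, sort_keys=True)`. Note that `sort_keys=True` only orders dictionary keys; unlike `xjson.pprint`, it leaves list order untouched, so deterministic list ordering now has to come from the data itself or from the sorting utility reworked later in this diff. A small illustration with made-up data:

```python
import io
import json

# sort_keys orders the top-level keys, but the list stays exactly as given.
data = {'wires': ['W10', 'W2', 'W1'], 'b': 1, 'a': 2}

buf = io.StringIO()
json.dump(data, buf, indent=2, sort_keys=True)
print(buf.getvalue())
# Keys come out as "a", "b", "wires"; the list remains ["W10", "W2", "W1"].
```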


@ -2,6 +2,7 @@
intervaltree
junit-xml
numpy
ordered-set
parse
progressbar2
pyjson5
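
`ordered-set` is a new dependency of the reworked `xjson` module later in this diff, which returns `OrderedSet` instances so that sorted sets keep a deterministic iteration order. A quick illustration (assuming the PyPI `ordered-set` distribution):

```python
from ordered_set import OrderedSet  # pip package: ordered-set

s = OrderedSet(['c', 'b'])
s.add('a')
# Unlike a builtin set, iteration follows insertion order.
print(list(s))  # ['c', 'b', 'a']
print(s)        # OrderedSet(['c', 'b', 'a'])
```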


@ -47,6 +47,7 @@ sort sets (lists where the order doesn't matter).
"""
import csv
import os
import random
import re
@ -284,16 +285,35 @@ def sortable_line_from_segbits(l):
return (tag, tuple(bits)), l
def sort_db(filename):
def sortable_line_from_origin_segbits(l):
tag, origin, sbit = l.split(' ', 2)
tag = sortable_tag(tag)
bits = bit.parseline(sbit)
return (tag, tuple(bits)), l
def sort_db(pathname):
"""Sort a XXX.db file."""
filename = os.path.split(pathname)[-1]
if filename.startswith('segbits_'):
sortable_line_from_dbfile = sortable_line_from_segbits
if 'origin_info' in filename:
sortable_line_from_dbfile = sortable_line_from_origin_segbits
else:
sortable_line_from_dbfile = sortable_line_from_segbits
elif 'origin_info' in filename:
return False
elif filename.startswith('ppips_'):
sortable_line_from_dbfile = sortable_line_from_ppips
elif filename.startswith('grid-'):
sortable_line_from_dbfile = sortable_line_from_ppips
elif filename.startswith('mask_'):
sortable_line_from_dbfile = sortable_line_from_mask
else:
return False
lines = open(filename).readlines()
lines = open(pathname).readlines()
tosort = []
for l in lines:
@ -305,16 +325,16 @@ def sort_db(filename):
tosort.sort(key=cmp.cmp_key)
# Make sure the sort is stable
for i in range(0, 4):
copy = tosort.copy()
random.shuffle(copy)
copy.sort(key=cmp.cmp_key)
assert len(copy) == len(tosort)
for i in range(0, len(copy)):
assert copy[i] == tosort[i], "\n%r\n != \n%r\n" % (
copy[i], tosort[i])
#for i in range(0, 4):
# copy = tosort.copy()
# random.shuffle(copy)
# copy.sort(key=cmp.cmp_key)
# assert len(copy) == len(tosort)
# for i in range(0, len(copy)):
# assert copy[i] == tosort[i], "\n%r\n != \n%r\n" % (
# copy[i], tosort[i])
with open(filename, 'w') as f:
with open(pathname, 'w') as f:
for _, l in tosort:
f.write(l)
f.write('\n')
@ -322,11 +342,43 @@ def sort_db(filename):
return True
def sort_csv(pathname):
rows = []
fields = []
delimiter = None
with open(pathname, newline='') as f:
if pathname.endswith('.csv'):
delimiter = ','
elif pathname.endswith('.txt'):
delimiter = ' '
reader = csv.DictReader(f, delimiter=delimiter)
fields.extend(reader.fieldnames)
rows.extend(reader)
del reader
def sort_key(r):
v = []
for field in fields:
v.append(sortable_tag(r[field]))
return tuple(v)
rows.sort(key=sort_key)
with open(pathname, 'w', newline='') as f:
writer = csv.DictWriter(
f, fields, delimiter=delimiter, lineterminator='\n')
writer.writeheader()
writer.writerows(rows)
return True
def sort_json(filename):
"""Sort a XXX.json file."""
try:
d = json.load(open(filename))
except json.JSONDecodeError:
except json.JSONDecodeError as e:
print(e)
return False
with open(filename, 'w') as f:
@ -335,30 +387,75 @@ def sort_json(filename):
return True
def sort_db_text(n):
rows = []
with open(n) as f:
for l in f:
rows.append(([extract_num(s) for s in l.split()], l))
rows.sort(key=lambda i: i[0])
with open(n, 'w') as f:
for l in rows:
f.write(l[-1])
return True
def sort_file(n):
assert os.path.exists(n)
base, ext = os.path.splitext(n)
dirname, base = os.path.split(base)
# Leave db files with fuzzer of origin untouched
if "origin_info" in n and not base.startswith('segbits'):
print("Ignoring file {:45s}".format(n), flush=True)
return
if ext == '.db':
print("Sorting DB file {:45s}".format(n), end=" ", flush=True)
x = sort_db(n)
elif ext == '.json':
print("Sorting JSON file {:45s}".format(n), end=" ", flush=True)
x = sort_json(n)
elif ext in ('.csv', '.txt'):
if n.endswith('-db.txt'):
print("Sorting txt file {:45s}".format(n), end=" ", flush=True)
x = sort_db_text(n)
else:
print("Sorting CSV file {:45s}".format(n), end=" ", flush=True)
x = sort_csv(n)
else:
print("Ignoring file {:45s}".format(n), end=" ", flush=True)
x = True
if x:
print(".. success.")
else:
print(".. failed.")
def sort_dir(dirname):
for n in sorted(os.listdir(dirname)):
n = os.path.join(dirname, n)
if os.path.isdir(n):
print("Entering dir {:45s}".format(n), flush=True)
sort_dir(n)
continue
elif not os.path.isfile(n):
print("Ignoring non-file {:45s}".format(n), flush=True)
continue
sort_file(n)
def main(argv):
for n in sorted(os.listdir()):
if not os.path.isfile(n):
continue
# Leave db files with fuzzer of origin untouched
if "origin_info" in n:
continue
base, ext = os.path.splitext(n)
if ext == '.db':
print("Sorting DB file {:40s}".format(n), end=" ", flush=True)
x = sort_db(n)
elif ext == '.json':
print("Sorting JSON file {:40s}".format(n), end=" ", flush=True)
x = sort_json(n)
else:
print("Ignoring file {:40s}".format(n), end=" ", flush=True)
x = True
if x:
print(".. success.")
else:
print(".. failed.")
if argv[1:]:
for n in argv[1:]:
sort_file(n)
else:
sort_dir('.')
return 0
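
The new `sort_csv` covers both comma-delimited `.csv` and space-delimited `.txt` files, sorting rows on every column with the same natural key used for tags. A self-contained sketch of that approach (the file contents and the `natural_key` helper are illustrative; the script itself relies on `sortable_tag`):

```python
import csv
import io
import re

def natural_key(s):
    # Same illustrative helper as in the earlier sketch.
    return tuple(int(p) if p.isdigit() else p for p in re.split(r'(\d+)', s) if p)

# A made-up space-delimited table in the style of the .txt files being sorted.
text = "tile wire node\nINT_X10Y1 W2 N1\nINT_X2Y1 W10 N2\n"
reader = csv.DictReader(io.StringIO(text), delimiter=' ')
fields = reader.fieldnames
rows = sorted(reader, key=lambda r: tuple(natural_key(r[f]) for f in fields))

out = io.StringIO()
writer = csv.DictWriter(out, fields, delimiter=' ', lineterminator='\n')
writer.writeheader()
writer.writerows(rows)
print(out.getvalue())
# INT_X2Y1 now comes before INT_X10Y1 because 2 < 10 numerically.
```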


@ -4,6 +4,9 @@ import json
import re
import sys
from collections import OrderedDict
from ordered_set import OrderedSet
def extract_numbers(s):
"""
@ -22,6 +25,31 @@ def extract_numbers(s):
def sort(data):
"""Sort data types via "natural" numbers.
Supports all the basic Python data types.
>>> o = sort({
... 't1': {'c','b'}, # Set
... 't2': ('a2', 'a10', 'e'), # Tuple
... 't3': [5, 3, 2], # List
... 't4': { # Dictionary
... 'a4': ('b2', 'b3'),
... 'a2': ['c1', 'c2', 'c0', 'c10'],
... },
... 't5': ['a1b5', 'a2b1', 'a1b1'],
... })
>>> for t in o:
... print(t+':', o[t])
t1: OrderedSet(['b', 'c'])
t2: ('a2', 'a10', 'e')
t3: (2, 3, 5)
t4: OrderedDict([('a2', ('c0', 'c1', 'c2', 'c10')), ('a4', ('b2', 'b3'))])
t5: ('a1b1', 'a1b5', 'a2b1')
Don't mangle "pairs"
>>> sort([('b', 'c'), ('2', '1')])
[('b', 'c'), ('2', '1')]
"""
# FIXME: We assume that a list is a tileconn.json format...
if isinstance(data, list) and len(data) > 0 and 'wire_pairs' in data[0]:
for o in data:
@ -29,25 +57,64 @@ def sort(data):
key=lambda o: (extract_numbers(o[0]), extract_numbers(o[1])))
data.sort(key=lambda o: (o['tile_types'], o['grid_deltas']))
return data
else:
def walker(o, f):
def key(o):
if o is None:
return None
elif isinstance(o, str):
return extract_numbers(o)
elif isinstance(o, int):
return o
elif isinstance(o, (list, tuple)):
return tuple(key(i) for i in o)
elif isinstance(o, dict):
return tuple((key(k), key(v)) for k, v in o.items())
elif isinstance(o, set):
return tuple(key(k) for k in o)
raise ValueError(repr(o))
def rsorter(o):
if isinstance(o, dict):
for i in o.values():
walker(i, f)
elif isinstance(o, list):
nitems = []
for k, v in o.items():
nitems.append((key(k), k, rsorter(v)))
nitems.sort(key=lambda n: n[0])
new_dict = OrderedDict()
for _, k, v in nitems:
new_dict[k] = v
return new_dict
elif isinstance(o, set):
nitems = []
for k in o:
nitems.append((key(k), k))
nitems.sort(key=lambda n: n[0])
new_set = OrderedSet()
for _, k in nitems:
new_set.add(k)
return new_set
elif isinstance(o, (tuple, list)):
if len(o) == 2:
return o
nlist = []
for i in o:
walker(i, f)
f(o)
nlist.append((key(i), rsorter(i)))
nlist.sort(key=lambda n: n[0])
def f(o):
if isinstance(o, list):
if len(o) > 2:
strings = all(isinstance(x, str) for x in o)
if strings:
o.sort()
new_list = []
for _, i in nlist:
new_list.append(i)
return tuple(new_list)
else:
return o
walker(data, f)
return rsorter(data)
def pprint(f, data):
@ -55,8 +122,8 @@ def pprint(f, data):
if not isinstance(f, io.TextIOBase):
detach = True
f = io.TextIOWrapper(f)
sort(data)
json.dump(data, f, sort_keys=True, indent=4)
data = sort(data)
json.dump(data, f, indent=4)
f.write('\n')
f.flush()
if detach:
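
One behavioural detail in the last hunk: the rewritten `sort()` no longer mutates its argument in place; instead it builds and returns new containers (tuples, `OrderedDict`, `OrderedSet`). That is why `pprint` now rebinds `data = sort(data)` and drops `sort_keys=True`, since plain lexicographic key sorting would undo the natural key order already baked into the returned `OrderedDict`s. A tiny illustration of the rebind pattern (not the repo's code):

```python
from collections import OrderedDict

def sort_copy(d):
    # Return a new, key-ordered mapping; the input is left untouched.
    return OrderedDict(sorted(d.items()))

original = {'b': 1, 'a': 2}
result = sort_copy(original)
print(list(original))  # ['b', 'a']  (unchanged)
print(list(result))    # ['a', 'b']  (the value that must be written out)
```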