From 5ad97aa6366e2653ed2fcf69e27cecee76fb9437 Mon Sep 17 00:00:00 2001 From: mrg Date: Wed, 20 Jul 2022 10:27:10 -0700 Subject: [PATCH 1/7] Update README and setpaths with new PYTHONPATH --- README.md | 7 ++----- setpaths.sh | 6 ------ 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 438c33db..e2187f23 100644 --- a/README.md +++ b/README.md @@ -53,19 +53,16 @@ You must set two environment variables: + OPENRAM\_HOME should point to the compiler source directory. + OPENRAM\_TECH should point to one or more root technology directories (colon separated). +You should also add OPENRAM\_HOME to your PYTHONPATH. For example, add this to your .bashrc: ``` export OPENRAM_HOME="$HOME/openram/compiler" export OPENRAM_TECH="$HOME/openram/technology" -``` - -You should also add OPENRAM\_HOME to your PYTHONPATH: - -``` export PYTHONPATH=$OPENRAM_HOME ``` + Note that if you want symbols to resolve in your editor, you may also want to add the specific technology directory that you use and any custom technology modules as well. For example: ``` diff --git a/setpaths.sh b/setpaths.sh index 280594f5..efceed78 100755 --- a/setpaths.sh +++ b/setpaths.sh @@ -7,9 +7,3 @@ export OPENRAM_HOME="`pwd`/compiler" export OPENRAM_TECH="`pwd`/technology" export PYTHONPATH=$OPENRAM_HOME -for dir in `pwd`/compiler/* -do - if [ -d $dir ]; then - export PYTHONPATH=$PYTHONPATH:$dir - fi; -done From 6707a93c3c71bbd4ac5a7dc33dc972f633017a52 Mon Sep 17 00:00:00 2001 From: mrg Date: Wed, 20 Jul 2022 10:27:30 -0700 Subject: [PATCH 2/7] Add fudge factor for bitcell array side rail spacings to fix DRC in freepdk45. --- compiler/modules/replica_bitcell_array.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/compiler/modules/replica_bitcell_array.py b/compiler/modules/replica_bitcell_array.py index 609aa566..f3725489 100644 --- a/compiler/modules/replica_bitcell_array.py +++ b/compiler/modules/replica_bitcell_array.py @@ -309,8 +309,8 @@ class replica_bitcell_array(bitcell_base_array): # This creates space for the unused wordline connections as well as the # row-based or column based power and ground lines. 
- self.vertical_pitch = getattr(self, "{}_pitch".format(self.supply_stack[0])) - self.horizontal_pitch = getattr(self, "{}_pitch".format(self.supply_stack[2])) + self.vertical_pitch = 1.1 * getattr(self, "{}_pitch".format(self.supply_stack[0])) + self.horizontal_pitch = 1.1 * getattr(self, "{}_pitch".format(self.supply_stack[2])) self.unused_offset = vector(0.25, 0.25) # This is a bitcell x bitcell offset to scale @@ -495,16 +495,7 @@ class replica_bitcell_array(bitcell_base_array): else: bitcell = getattr(props, "bitcell_{}port".format(OPTS.num_ports)) - wl_layer = bitcell.wl_layer - wl_dir = bitcell.wl_dir - - bl_layer = bitcell.bl_layer - bl_dir = bitcell.bl_dir - - vdd_layer = bitcell.vdd_layer vdd_dir = bitcell.vdd_dir - - gnd_layer = bitcell.gnd_layer gnd_dir = bitcell.gnd_dir # vdd/gnd are only connected in the perimeter cells @@ -514,8 +505,6 @@ class replica_bitcell_array(bitcell_base_array): top_bot_mult = 1 left_right_mult = 1 - vdd_locs = [] - gnd_locs = [] # There are always vertical pins for the WLs on the left/right if we have unused wordlines self.left_gnd_locs = self.route_side_pin("gnd", "left", left_right_mult) self.right_gnd_locs = self.route_side_pin("gnd","right", left_right_mult) From 449c68ccae12a4e2b5e0cb5e23863b8d6b4e4129 Mon Sep 17 00:00:00 2001 From: Eren Dogan Date: Fri, 22 Jul 2022 18:11:14 +0300 Subject: [PATCH 3/7] Fix file setup in code format test --- compiler/tests/00_code_format_check_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compiler/tests/00_code_format_check_test.py b/compiler/tests/00_code_format_check_test.py index 7fb5862d..24e203ab 100755 --- a/compiler/tests/00_code_format_check_test.py +++ b/compiler/tests/00_code_format_check_test.py @@ -51,7 +51,7 @@ def setup_files(path): files = [] for (dir, _, current_files) in os.walk(path): for f in current_files: - files.append(os.getenv("OPENRAM_HOME")) + files.append(os.path.join(dir, f)) nametest = re.compile("\.py$", re.IGNORECASE) select_files = list(filter(nametest.search, files)) return select_files From 64c72ee19dfe87a1eb945121cd04b0698b5602e5 Mon Sep 17 00:00:00 2001 From: Eren Dogan Date: Fri, 22 Jul 2022 18:15:27 +0300 Subject: [PATCH 4/7] Fix typo --- compiler/tests/00_code_format_check_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compiler/tests/00_code_format_check_test.py b/compiler/tests/00_code_format_check_test.py index 24e203ab..7249ba7a 100755 --- a/compiler/tests/00_code_format_check_test.py +++ b/compiler/tests/00_code_format_check_test.py @@ -92,7 +92,7 @@ def check_file_format_carriage(file_name): if len(key_positions)>10: line_numbers = key_positions[:10] + [" ..."] else: - line_numbers = key_positoins + line_numbers = key_positions debug.info(0, '\nFound ' + str(len(key_positions)) + ' carriage returns in ' + str(file_name) + ' (lines ' + ",".join(str(x) for x in line_numbers) + ')') f.close() From 2a778dca828f3a28952556cdd30af23bbd3b0ed0 Mon Sep 17 00:00:00 2001 From: Eren Dogan Date: Fri, 22 Jul 2022 18:22:40 +0300 Subject: [PATCH 5/7] Add whitespace check to code format test --- compiler/tests/00_code_format_check_test.py | 23 +++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/compiler/tests/00_code_format_check_test.py b/compiler/tests/00_code_format_check_test.py index 7249ba7a..97ffda09 100755 --- a/compiler/tests/00_code_format_check_test.py +++ b/compiler/tests/00_code_format_check_test.py @@ -28,6 +28,7 @@ class code_format_test(openram_test): continue errors += check_file_format_tab(code) 
errors += check_file_format_carriage(code) + errors += check_file_format_whitespace(code) for code in source_codes: if re.search("gdsMill", code): @@ -99,6 +100,28 @@ def check_file_format_carriage(file_name): return len(key_positions) +def check_file_format_whitespace(file_name): + """ + Check if file contains a line with whitespace at the end + and return the number of these lines. + """ + + f = open(file_name, "r") + key_positions = [] + for num, line in enumerate(f.readlines()): + if re.match(r".*[ \t]$", line): + key_positions.append(num) + if len(key_positions) > 0: + if len(key_positions) > 10: + line_numbers = key_positions[:10] + [" ..."] + else: + line_numbers = key_positions + debug.info(0, "\nFound " + str(len(key_positions)) + " ending whitespace in " + + str(file_name) + " (lines " + ",".join(str(x) for x in line_numbers) + ")") + f.close() + return len(key_positions) + + def check_print_output(file_name): """Check if any files (except debug.py) call the _print_ function. We should use the debug output with verbosity instead!""" From e3fe8c3229beff0eab94af8ab784501a9383c66c Mon Sep 17 00:00:00 2001 From: Eren Dogan Date: Fri, 22 Jul 2022 19:52:38 +0300 Subject: [PATCH 6/7] Remove line ending whitespace --- compiler/base/geometry.py | 2 +- compiler/base/hierarchy_layout.py | 2 +- compiler/base/lef.py | 5 +- compiler/base/pin_layout.py | 4 +- compiler/base/timing_graph.py | 11 +- compiler/base/utils.py | 2 +- compiler/base/wire.py | 2 +- compiler/characterizer/analytical_util.py | 96 ++++++------ compiler/characterizer/cacti.py | 20 ++- compiler/characterizer/charutils.py | 2 +- compiler/characterizer/delay.py | 48 +++--- compiler/characterizer/elmore.py | 14 +- compiler/characterizer/linear_regression.py | 9 +- compiler/characterizer/measurements.py | 3 +- compiler/characterizer/model_check.py | 5 +- compiler/characterizer/neural_network.py | 11 +- compiler/characterizer/regression_model.py | 65 ++++----- compiler/characterizer/setup_hold.py | 8 +- compiler/characterizer/trim_spice.py | 6 +- compiler/datasheet/datasheet.py | 2 +- compiler/debug.py | 9 +- compiler/drc/custom_cell_properties.py | 51 ++++--- compiler/globals.py | 16 +- compiler/model_data_util.py | 138 ++++++++---------- compiler/modules/bitcell_1port.py | 2 +- compiler/modules/bitcell_2port.py | 2 +- compiler/modules/bitcell_base.py | 54 +++---- compiler/modules/bitcell_base_array.py | 4 +- compiler/modules/col_cap_bitcell_1port.py | 2 +- compiler/modules/col_cap_bitcell_2port.py | 2 +- compiler/modules/column_mux_array.py | 2 +- compiler/modules/control_logic.py | 2 +- compiler/modules/hierarchical_decoder.py | 2 +- compiler/modules/local_bitcell_array.py | 2 +- compiler/modules/nand2_dec.py | 14 +- compiler/modules/nand3_dec.py | 14 +- compiler/modules/nand4_dec.py | 14 +- compiler/modules/pgate.py | 2 +- compiler/modules/pnand4.py | 14 +- compiler/modules/port_address.py | 2 +- compiler/modules/replica_bitcell_1port.py | 2 +- compiler/modules/replica_bitcell_2port.py | 2 +- compiler/modules/row_cap_bitcell_1port.py | 2 +- compiler/modules/row_cap_bitcell_2port.py | 2 +- compiler/modules/sense_amp.py | 32 ++-- compiler/modules/sram.py | 2 +- compiler/modules/sram_1bank.py | 2 +- compiler/modules/wordline_driver_array.py | 2 +- compiler/printGDS.py | 2 +- compiler/processGDS.py | 2 +- compiler/router/grid.py | 19 +-- compiler/router/grid_cell.py | 3 +- compiler/router/pin_group.py | 6 +- compiler/router/router.py | 52 +++---- compiler/router/signal_escape_router.py | 20 ++- compiler/router/supply_grid.py | 6 +- 
compiler/router/supply_grid_router.py | 9 +- compiler/tests/01_library_test.py | 2 +- .../tests/14_replica_column_1rw_1r_test.py | 4 +- .../tests/18_port_data_spare_cols_test.py | 2 +- compiler/tests/19_psingle_bank_test.py | 2 +- .../tests/20_psram_1bank_2mux_1rw_1w_test.py | 2 +- .../20_psram_1bank_2mux_1rw_1w_wmask_test.py | 2 +- .../tests/20_sram_1bank_4mux_1rw_1r_test.py | 2 +- compiler/tests/21_ngspice_delay_test.py | 2 +- compiler/tests/21_regression_delay_test.py | 2 +- .../tests/22_psram_1bank_2mux_func_test.py | 2 +- .../tests/22_psram_1bank_4mux_func_test.py | 2 +- .../tests/22_psram_1bank_8mux_func_test.py | 2 +- .../tests/22_psram_1bank_nomux_func_test.py | 2 +- .../tests/22_sram_1bank_2mux_func_test.py | 2 +- .../22_sram_1bank_2mux_global_func_test.py | 2 +- .../tests/22_sram_1bank_4mux_func_test.py | 2 +- .../tests/22_sram_1bank_8mux_func_test.py | 2 +- compiler/tests/26_ngspice_pex_pinv_test.py | 4 +- compiler/tests/30_openram_back_end_test.py | 2 +- compiler/tests/regress.py | 8 +- compiler/verify/assura.py | 16 +- compiler/verify/calibre.py | 12 +- compiler/verify/magic.py | 2 +- 80 files changed, 431 insertions(+), 479 deletions(-) diff --git a/compiler/base/geometry.py b/compiler/base/geometry.py index 3ca87e2e..7451d465 100644 --- a/compiler/base/geometry.py +++ b/compiler/base/geometry.py @@ -359,7 +359,7 @@ class instance(geometry): for offset in range(len(normalized_br_offsets)): for port in range(len(br_names)): cell_br_meta.append([br_names[offset], row, col, port]) - + if normalized_storage_nets == []: debug.error("normalized storage nets should not be empty! Check if the GDS labels Q and Q_bar are correctly set on M1 of the cell",1) Q_x = normalized_storage_nets[0][0] diff --git a/compiler/base/hierarchy_layout.py b/compiler/base/hierarchy_layout.py index fe108c01..fc22f3c0 100644 --- a/compiler/base/hierarchy_layout.py +++ b/compiler/base/hierarchy_layout.py @@ -1317,7 +1317,7 @@ class layout(): return None intermediate_layers = self.get_metal_layers(from_layer, to_layer) - + via = None cur_layer = from_layer while cur_layer != to_layer: diff --git a/compiler/base/lef.py b/compiler/base/lef.py index 799890e0..9ebd823d 100644 --- a/compiler/base/lef.py +++ b/compiler/base/lef.py @@ -75,7 +75,7 @@ class lef: # return # To maintain the indent level easily - self.indent = "" + self.indent = "" if OPTS.detailed_lef: debug.info(3, "Writing detailed LEF to {0}".format(lef_name)) @@ -88,7 +88,7 @@ class lef: for pin_name in self.pins: self.lef_write_pin(pin_name) - + self.lef_write_obstructions(OPTS.detailed_lef) self.lef_write_footer() self.lef.close() @@ -220,4 +220,3 @@ class lef: round(item[1], self.round_grid))) self.lef.write(" ;\n") - diff --git a/compiler/base/pin_layout.py b/compiler/base/pin_layout.py index f9b66612..4021a8b5 100644 --- a/compiler/base/pin_layout.py +++ b/compiler/base/pin_layout.py @@ -45,7 +45,7 @@ class pin_layout: if self.same_lpp(layer_name_pp, lpp): self._layer = layer_name break - + else: try: from tech import layer_override @@ -57,7 +57,7 @@ class pin_layout: return except: debug.error("Layer {} is not a valid routing layer in the tech file.".format(layer_name_pp), -1) - + self.lpp = layer[self.layer] self._recompute_hash() diff --git a/compiler/base/timing_graph.py b/compiler/base/timing_graph.py index edbd7128..46d7b518 100644 --- a/compiler/base/timing_graph.py +++ b/compiler/base/timing_graph.py @@ -119,7 +119,7 @@ class timing_graph(): # If at the last output, include the final output load if i == len(path) - 2: cout += load - + 
if params["model_name"] == "cacti": delays.append(path_edge_mod.cacti_delay(corner, cur_slew, cout, params)) cur_slew = delays[-1].slew @@ -130,14 +130,14 @@ class timing_graph(): return_value=1) return delays - + def get_edge_mods(self, path): """Return all edge mods associated with path""" - + if len(path) == 0: return [] - - return [self.edge_mods[(path[i], path[i+1])] for i in range(len(path)-1)] + + return [self.edge_mods[(path[i], path[i+1])] for i in range(len(path)-1)] def __str__(self): """ override print function output """ @@ -153,4 +153,3 @@ class timing_graph(): """ override print function output """ return str(self) - diff --git a/compiler/base/utils.py b/compiler/base/utils.py index f80b23e3..082caf21 100644 --- a/compiler/base/utils.py +++ b/compiler/base/utils.py @@ -163,7 +163,7 @@ def get_gds_pins(pin_names, name, gds_filename, units): if layer_override[pin_name]: lpp = layer_override[pin_name.textString] except: - pass + pass lpp = (lpp[0], None) cell[str(pin_name)].append(pin_layout(pin_name, rect, lpp)) diff --git a/compiler/base/wire.py b/compiler/base/wire.py index 7114687c..a276c035 100644 --- a/compiler/base/wire.py +++ b/compiler/base/wire.py @@ -68,7 +68,7 @@ class wire(wire_path): This is contact direction independent pitch, i.e. we take the maximum contact dimension """ - + # This is here for the unit tests which may not have # initialized the static parts of the layout class yet. from base import layout diff --git a/compiler/characterizer/analytical_util.py b/compiler/characterizer/analytical_util.py index 41120982..df37123b 100644 --- a/compiler/characterizer/analytical_util.py +++ b/compiler/characterizer/analytical_util.py @@ -18,7 +18,7 @@ def get_data_names(file_name, exclude_area=True): """ Returns just the data names in the first row of the CSV """ - + with open(file_name, newline='') as csvfile: csv_reader = csv.reader(csvfile, delimiter=' ', quotechar='|') row_iter = 0 @@ -32,16 +32,16 @@ def get_data_names(file_name, exclude_area=True): area_ind = names.index('area') except ValueError: area_ind = -1 - - if area_ind != -1: + + if area_ind != -1: names = names[:area_ind] + names[area_ind+1:] - return names - + return names + def get_data(file_name): """ Returns data in CSV as lists of features """ - + with open(file_name, newline='') as csvfile: csv_reader = csv.reader(csvfile, delimiter=' ', quotechar='|') row_iter = 0 @@ -56,14 +56,14 @@ def get_data(file_name): area_ind = feature_names.index('area') except ValueError: area_ind = -1 - + try: process_ind = feature_names.index('process') except: debug.error('Process not included as a feature.') continue - - + + data = [] split_str = row[0].split(',') @@ -79,24 +79,24 @@ def get_data(file_name): for i in range(len(data)): input_list[i].append(data[i]) - + return input_list - -def apply_samples_to_data(all_data, algo_samples): + +def apply_samples_to_data(all_data, algo_samples): # Take samples from algorithm and match them to samples in data data_samples, unused_data = [], [] sample_positions = set() for sample in algo_samples: sample_positions.add(find_sample_position_with_min_error(all_data, sample)) - + for i in range(len(all_data)): if i in sample_positions: data_samples.append(all_data[i]) else: unused_data.append(all_data[i]) - + return data_samples, unused_data - + def find_sample_position_with_min_error(data, sampled_vals): min_error = 0 sample_pos = 0 @@ -108,19 +108,19 @@ def find_sample_position_with_min_error(data, sampled_vals): sample_pos = count count += 1 return sample_pos - + def 
squared_error(list_a, list_b): error_sum = 0; for a,b in zip(list_a, list_b): error_sum+=(a-b)**2 - return error_sum - + return error_sum + def get_max_min_from_datasets(dir): if not os.path.isdir(dir): debug.warning("Input Directory not found:{}".format(dir)) return [], [], [] - + # Assuming all files are CSV data_files = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))] maxs,mins,sums,total_count = [],[],[],0 @@ -133,7 +133,7 @@ def get_max_min_from_datasets(dir): data_min.append(min(feature_list)) data_sum.append(sum(feature_list)) count = len(feature_list) - + # Aggregate the data if not maxs or not mins or not sums: maxs,mins,sums,total_count = data_max,data_min,data_sum,count @@ -143,15 +143,15 @@ def get_max_min_from_datasets(dir): mins[i] = min(data_min[i], mins[i]) sums[i] = data_sum[i]+sums[i] total_count+=count - + avgs = [s/total_count for s in sums] return maxs,mins,avgs - + def get_max_min_from_file(path): if not os.path.isfile(path): debug.warning("Input file not found: {}".format(path)) return [], [], [] - + data = get_data(path) # Get max, min, sum, and count from every file @@ -163,23 +163,23 @@ def get_max_min_from_file(path): count = len(feature_list) avgs = [s/count for s in data_sum] - return data_max, data_min, avgs - + return data_max, data_min, avgs + def get_data_and_scale(file_name, sample_dir): maxs,mins,avgs = get_max_min_from_datasets(sample_dir) - + # Get data all_data = get_data(file_name) - + # Scale data from file self_scaled_data = [[] for _ in range(len(all_data[0]))] self_maxs,self_mins = [],[] for feature_list, cur_max, cur_min in zip(all_data,maxs, mins): for i in range(len(feature_list)): self_scaled_data[i].append((feature_list[i]-cur_min)/(cur_max-cur_min)) - + return np.asarray(self_scaled_data) - + def rescale_data(data, old_maxs, old_mins, new_maxs, new_mins): # unscale from old values, rescale by new values data_new_scaling = [] @@ -188,24 +188,24 @@ def rescale_data(data, old_maxs, old_mins, new_maxs, new_mins): for val, old_max,old_min, cur_max, cur_min in zip(data_row, old_maxs,old_mins, new_maxs, new_mins): unscaled_data = val*(old_max-old_min) + old_min scaled_row.append((unscaled_data-cur_min)/(cur_max-cur_min)) - + data_new_scaling.append(scaled_row) - return data_new_scaling - + return data_new_scaling + def sample_from_file(num_samples, file_name, sample_dir=None): """ Get a portion of the data from CSV file and scale it based on max/min of dataset. Duplicate samples are trimmed. 
""" - + if sample_dir: maxs,mins,avgs = get_max_min_from_datasets(sample_dir) else: maxs,mins,avgs = [], [], [] - + # Get data - all_data = get_data(file_name) + all_data = get_data(file_name) # Get algorithms sample points, assuming hypercube for now num_labels = 1 @@ -230,7 +230,7 @@ def sample_from_file(num_samples, file_name, sample_dir=None): if len(maxs)!=0 and len(mins)!=0: sampled_data = rescale_data(sampled_data, self_maxs,self_mins, maxs, mins) unused_new_scaling = rescale_data(unused_data, self_maxs,self_mins, maxs, mins) - + return np.asarray(sampled_data), np.asarray(unused_new_scaling) def get_scaled_data(file_name): @@ -240,10 +240,10 @@ def get_scaled_data(file_name): maxs,mins,avgs = get_max_min_from_file(file_name) else: maxs,mins,avgs = [], [], [] - + # Get data - all_data = get_data(file_name) - + all_data = get_data(file_name) + # Data is scaled by max/min and data format is changed to points vs feature lists self_scaled_data = scale_data_and_transform(all_data) data_np = np.asarray(self_scaled_data) @@ -253,7 +253,7 @@ def scale_data_and_transform(data): """ Assume data is a list of features, change to a list of points and max/min scale """ - + scaled_data = [[] for _ in range(len(data[0]))] for feature_list in data: max_val = max(feature_list) @@ -265,8 +265,8 @@ def scale_data_and_transform(data): else: scaled_data[i].append((feature_list[i]-min_val)/(max_val-min_val)) return scaled_data - -def scale_input_datapoint(point, file_path): + +def scale_input_datapoint(point, file_path): """ Input data has no output and needs to be scaled like the model inputs during training. @@ -290,8 +290,8 @@ def unscale_data(data, file_path, pos=None): else: debug.error("Must provide reference data to unscale") return None - - # Hard coded to only convert the last max/min (i.e. the label of the data) + + # Hard coded to only convert the last max/min (i.e. 
the label of the data) if pos == None: maxs,mins,avgs = maxs[-1],mins[-1],avgs[-1] else: @@ -299,10 +299,10 @@ def unscale_data(data, file_path, pos=None): maxs,mins,avgs = maxs[pos],mins[pos],avgs[pos] unscaled_data = [] for data_row in data: unscaled_val = data_row*(maxs-mins) + mins - unscaled_data.append(unscaled_val) + unscaled_data.append(unscaled_val) return unscaled_data - + def abs_error(labels, preds): total_error = 0 for label_i, pred_i in zip(labels, preds): @@ -316,10 +316,10 @@ def max_error(labels, preds): cur_error = abs(label_i[0]-pred_i[0])/label_i[0] mx_error = max(cur_error, mx_error) return mx_error - + def min_error(labels, preds): mn_error = 1 for label_i, pred_i in zip(labels, preds): cur_error = abs(label_i[0]-pred_i[0])/label_i[0] mn_error = min(cur_error, mn_error) - return mn_error + return mn_error diff --git a/compiler/characterizer/cacti.py b/compiler/characterizer/cacti.py index df9f540b..5dab9618 100644 --- a/compiler/characterizer/cacti.py +++ b/compiler/characterizer/cacti.py @@ -9,15 +9,15 @@ from .simulation import simulation from globals import OPTS import debug -import tech +import tech import math -class cacti(simulation): +class cacti(simulation): """ Delay model for the SRAM which uses CACTI equations """ - + def __init__(self, sram, spfile, corner): super().__init__(sram, spfile, corner) @@ -33,8 +33,8 @@ class cacti(simulation): self.create_signal_names() self.add_graph_exclusions() self.set_params() - - def set_params(self): + + def set_params(self): """Set parameters specific to the corner being simulated""" self.params = {} # Set the specific functions to use for timing defined in the SRAM module # Only parameter right now is r_on which is dependent on Vdd self.params["r_nch_on"] = self.vdd_voltage / tech.spice["i_on_n"] self.params["r_pch_on"] = self.vdd_voltage / tech.spice["i_on_p"] - + def get_lib_values(self, load_slews): """ Return the analytical model results for the SRAM. """ if OPTS.num_rw_ports > 1 or OPTS.num_w_ports > 0 and OPTS.num_r_ports > 0: debug.warning("In analytical mode, all ports have the timing of the first read port.") - + # Probe set to 0th bit, does not matter for analytical delay. self.set_probe('0' * self.addr_size, 0) self.create_graph() @@ -77,7 +77,7 @@ class cacti(simulation): slew = 0 path_delays = self.graph.get_timing(bl_path, self.corner, slew, load_farad, self.params) total_delay = self.sum_delays(path_delays) - + delay_ns = total_delay.delay/1e-9 slew_ns = total_delay.slew/1e-9 max_delay = max(max_delay, total_delay.delay) @@ -95,7 +95,7 @@ class cacti(simulation): elif "slew" in mname and port in self.read_ports: port_data[port][mname].append(total_delay.slew / 1e-9) - # Margin for error in period. Calculated by averaging required margin for a small and large + # Margin for error in period. Calculated by averaging required margin for a small and large # memory. FIXME: margin is quite large, should be looked into. 
period_margin = 1.85 sram_data = {"min_period": (max_delay / 1e-9) * 2 * period_margin, @@ -118,5 +118,3 @@ class cacti(simulation): debug.info(1, "Dynamic Power: {0} mW".format(power.dynamic)) debug.info(1, "Leakage Power: {0} mW".format(power.leakage)) return power - - \ No newline at end of file diff --git a/compiler/characterizer/charutils.py b/compiler/characterizer/charutils.py index 59ef3177..70f80774 100644 --- a/compiler/characterizer/charutils.py +++ b/compiler/characterizer/charutils.py @@ -37,7 +37,7 @@ def parse_spice_list(filename, key): except IOError: debug.error("Unable to open spice output file: {0}".format(full_filename),1) debug.archive() - + contents = f.read().lower() f.close() # val = re.search(r"{0}\s*=\s*(-?\d+.?\d*\S*)\s+.*".format(key), contents) diff --git a/compiler/characterizer/delay.py b/compiler/characterizer/delay.py index 27d72195..40a623c6 100644 --- a/compiler/characterizer/delay.py +++ b/compiler/characterizer/delay.py @@ -235,10 +235,10 @@ class delay(simulation): qbar_meas = voltage_at_measure("v_qbar_{0}".format(meas_tag), qbar_name) return {bit_polarity.NONINVERTING: q_meas, bit_polarity.INVERTING: qbar_meas} - + def create_sen_and_bitline_path_measures(self): """Create measurements for the s_en and bitline paths for individual delays per stage.""" - + # FIXME: There should be a default_read_port variable in this case, pathing is done with this # but is never mentioned otherwise port = self.read_ports[0] @@ -253,37 +253,37 @@ class delay(simulation): debug.check(len(bl_paths)==1, 'Found {0} paths which contain the bitline net.'.format(len(bl_paths))) sen_path = sen_paths[0] bitline_path = bl_paths[0] - + # Get the measures self.sen_path_meas = self.create_delay_path_measures(sen_path) self.bl_path_meas = self.create_delay_path_measures(bitline_path) all_meas = self.sen_path_meas + self.bl_path_meas - + # Paths could have duplicate measurements, remove them before they go to the stim file all_meas = self.remove_duplicate_meas_names(all_meas) # FIXME: duplicate measurements still exist in the member variables, since they have the same # name it will still work, but this could cause an issue in the future. - - return all_meas + + return all_meas def remove_duplicate_meas_names(self, measures): """Returns new list of measurements without duplicate names""" - + name_set = set() unique_measures = [] for meas in measures: if meas.name not in name_set: name_set.add(meas.name) unique_measures.append(meas) - + return unique_measures - + def create_delay_path_measures(self, path): """Creates measurements for each net along given path.""" # Determine the directions (RISE/FALL) of signals path_dirs = self.get_meas_directions(path) - + # Create the measurements path_meas = [] for i in range(len(path) - 1): @@ -297,26 +297,26 @@ class delay(simulation): # Some bitcell logic is hardcoded for only read zeroes, force that here as well. path_meas[-1].meta_str = sram_op.READ_ZERO path_meas[-1].meta_add_delay = True - + return path_meas - + def get_meas_directions(self, path): """Returns SPICE measurements directions based on path.""" - + # Get the edges modules which define the path edge_mods = self.graph.get_edge_mods(path) - + # Convert to booleans based on function of modules (inverting/non-inverting) mod_type_bools = [mod.is_non_inverting() for mod in edge_mods] - + # FIXME: obtuse hack to differentiate s_en input from bitline in sense amps if self.sen_name in path: - # Force the sense amp to be inverting for s_en->DOUT. 
+ # Force the sense amp to be inverting for s_en->DOUT. # bitline->DOUT is non-inverting, but the module cannot differentiate inputs. s_en_index = path.index(self.sen_name) mod_type_bools[s_en_index] = False debug.info(2, 'Forcing sen->dout to be inverting.') - + # Use these to determine direction list assuming delay start on neg. edge of clock (FALL) # Also, use shorthand that 'FALL' == False, 'RISE' == True to simplify logic bool_dirs = [False] @@ -324,9 +324,9 @@ class delay(simulation): for mod_bool in mod_type_bools: cur_dir = (cur_dir == mod_bool) bool_dirs.append(cur_dir) - + # Convert from boolean to string - return ['RISE' if dbool else 'FALL' for dbool in bool_dirs] + return ['RISE' if dbool else 'FALL' for dbool in bool_dirs] def set_load_slew(self, load, slew): """ Set the load and slew """ @@ -827,7 +827,7 @@ class delay(simulation): debug.error("Failed to Measure Read Port Values:\n\t\t{0}".format(read_port_dict), 1) result[port].update(read_port_dict) - + self.path_delays = self.check_path_measures() return (True, result) @@ -932,7 +932,7 @@ class delay(simulation): def check_path_measures(self): """Get and check all the delays along the sen and bitline paths""" - + # Get and set measurement, no error checking done other than prints. debug.info(2, "Checking measures in Delay Path") value_dict = {} @@ -1179,7 +1179,7 @@ class delay(simulation): #char_sram_data["sen_path_names"] = sen_names # FIXME: low-to-high delays are altered to be independent of the period. This makes the lib results less accurate. self.alter_lh_char_data(char_port_data) - + return (char_sram_data, char_port_data) def alter_lh_char_data(self, char_port_data): @@ -1222,14 +1222,14 @@ class delay(simulation): for meas in self.sen_path_meas: sen_name_list.append(meas.name) sen_delay_list.append(value_dict[meas.name]) - + bl_name_list = [] bl_delay_list = [] for meas in self.bl_path_meas: bl_name_list.append(meas.name) bl_delay_list.append(value_dict[meas.name]) - return sen_name_list, sen_delay_list, bl_name_list, bl_delay_list + return sen_name_list, sen_delay_list, bl_name_list, bl_delay_list def calculate_inverse_address(self): """Determine dummy test address based on probe address and column mux size.""" diff --git a/compiler/characterizer/elmore.py b/compiler/characterizer/elmore.py index c4ca44a2..5fee4223 100644 --- a/compiler/characterizer/elmore.py +++ b/compiler/characterizer/elmore.py @@ -10,11 +10,11 @@ from .simulation import simulation from globals import OPTS import debug -class elmore(simulation): +class elmore(simulation): """ Delay model for the SRAM which calculates Elmore delays along the SRAM critical path. """ - + def __init__(self, sram, spfile, corner): super().__init__(sram, spfile, corner) @@ -30,13 +30,13 @@ class elmore(simulation): self.set_corner(corner) self.create_signal_names() self.add_graph_exclusions() - - def set_params(self): + + def set_params(self): """Set parameters specific to the corner being simulated""" self.params = {} # Set the specific functions to use for timing defined in the SRAM module self.params["model_name"] = OPTS.model_name - + def get_lib_values(self, load_slews): """ Return the analytical model results for the SRAM. 
@@ -66,7 +66,7 @@ class elmore(simulation): for load,slew in load_slews: # Calculate delay based on slew and load path_delays = self.graph.get_timing(bl_path, self.corner, slew, load, self.params) - + total_delay = self.sum_delays(path_delays) max_delay = max(max_delay, total_delay.delay) debug.info(1, @@ -84,7 +84,7 @@ class elmore(simulation): elif "slew" in mname and port in self.read_ports: port_data[port][mname].append(total_delay.slew / 1e3) - # Margin for error in period. Calculated by averaging required margin for a small and large + # Margin for error in period. Calculated by averaging required margin for a small and large # memory. FIXME: margin is quite large, should be looked into. period_margin = 1.85 sram_data = {"min_period": (max_delay / 1e3) * 2 * period_margin, diff --git a/compiler/characterizer/linear_regression.py b/compiler/characterizer/linear_regression.py index f12e607a..68921e2c 100644 --- a/compiler/characterizer/linear_regression.py +++ b/compiler/characterizer/linear_regression.py @@ -26,18 +26,17 @@ class linear_regression(regression_model): """ Supervised training of model. """ - + #model = LinearRegression() model = self.get_model() model.fit(features, labels) return model - - def model_prediction(self, model, features): + + def model_prediction(self, model, features): """ Have the model perform a prediction and unscale the prediction as the model is trained with scaled values. """ - + pred = model.predict(features) return pred - \ No newline at end of file diff --git a/compiler/characterizer/measurements.py b/compiler/characterizer/measurements.py index 448dee36..fcbb562f 100644 --- a/compiler/characterizer/measurements.py +++ b/compiler/characterizer/measurements.py @@ -184,7 +184,7 @@ class voltage_when_measure(spice_measurement): trig_voltage = self.trig_val_of_vdd * vdd_voltage return (meas_name, trig_name, targ_name, trig_voltage, self.trig_dir_str, trig_td) - + class voltage_at_measure(spice_measurement): """Generates a spice measurement to measure the voltage at a specific time. The time is considered variant with different periods.""" @@ -211,4 +211,3 @@ class voltage_at_measure(spice_measurement): meas_name = self.name targ_name = self.targ_name_no_port return (meas_name, targ_name, time_at) - diff --git a/compiler/characterizer/model_check.py b/compiler/characterizer/model_check.py index fcbc51c2..9ef48b1c 100644 --- a/compiler/characterizer/model_check.py +++ b/compiler/characterizer/model_check.py @@ -82,7 +82,7 @@ class model_check(delay): replicated here. """ delay.create_signal_names(self) - + # Signal names are all hardcoded, need to update to make it work for probe address and different configurations. 
wl_en_driver_signals = ["Xsram{1}Xcontrol{{}}.Xbuf_wl_en.Zb{0}_int".format(stage, OPTS.hier_seperator) for stage in range(1, self.get_num_wl_en_driver_stages())] wl_driver_signals = ["Xsram{2}Xbank0{2}Xwordline_driver{{}}{2}Xwl_driver_inv{0}{2}Zb{1}_int".format(self.wordline_row, stage, OPTS.hier_seperator) for stage in range(1, self.get_num_wl_driver_stages())] @@ -448,6 +448,3 @@ class model_check(delay): name_dict[self.sae_model_name] = name_dict["sae_measures"] return name_dict - - - diff --git a/compiler/characterizer/neural_network.py b/compiler/characterizer/neural_network.py index 6f1aa7c1..ae65b26d 100644 --- a/compiler/characterizer/neural_network.py +++ b/compiler/characterizer/neural_network.py @@ -25,20 +25,19 @@ class neural_network(regression_model): """ Training multilayer model """ - + flat_labels = np.ravel(labels) model = self.get_model() model.fit(features, flat_labels) - + return model - - def model_prediction(self, model, features): + + def model_prediction(self, model, features): """ Have the model perform a prediction and unscale the prediction as the model is trained with scaled values. """ - + pred = model.predict(features) reshape_pred = np.reshape(pred, (len(pred),1)) return reshape_pred - \ No newline at end of file diff --git a/compiler/characterizer/regression_model.py b/compiler/characterizer/regression_model.py index 69f00485..e9b4ec4d 100644 --- a/compiler/characterizer/regression_model.py +++ b/compiler/characterizer/regression_model.py @@ -25,7 +25,7 @@ data_fnames = ["rise_delay.csv", "read0_power.csv", "leakage_data.csv", "sim_time.csv"] -# Positions must correspond to data_fname list +# Positions must correspond to data_fname list lib_dnames = ["delay_lh", "delay_hl", "slew_lh", @@ -35,13 +35,13 @@ lib_dnames = ["delay_lh", "read1_power", "read0_power", "leakage_power", - "sim_time"] + "sim_time"] # Check if another data dir was specified -if OPTS.sim_data_path == None: +if OPTS.sim_data_path == None: data_dir = OPTS.openram_tech+relative_data_path else: - data_dir = OPTS.sim_data_path - + data_dir = OPTS.sim_data_path + data_path = data_dir + '/' + data_file class regression_model(simulation): @@ -52,23 +52,23 @@ class regression_model(simulation): def get_lib_values(self, load_slews): """ - A model and prediction is created for each output needed for the LIB + A model and prediction is created for each output needed for the LIB """ - + debug.info(1, "Characterizing SRAM using regression models.") log_num_words = math.log(OPTS.num_words, 2) - model_inputs = [log_num_words, - OPTS.word_size, + model_inputs = [log_num_words, + OPTS.word_size, OPTS.words_per_row, OPTS.local_array_size, - process_transform[self.process], - self.vdd_voltage, - self.temperature] + process_transform[self.process], + self.vdd_voltage, + self.temperature] # Area removed for now # self.sram.width * self.sram.height, # Include above inputs, plus load and slew which are added below self.num_inputs = len(model_inputs)+2 - + self.create_measurement_names() models = self.train_models() @@ -85,22 +85,22 @@ class regression_model(simulation): port_data[port]['delay_hl'].append(sram_vals['fall_delay']) port_data[port]['slew_lh'].append(sram_vals['rise_slew']) port_data[port]['slew_hl'].append(sram_vals['fall_slew']) - + port_data[port]['write1_power'].append(sram_vals['write1_power']) port_data[port]['write0_power'].append(sram_vals['write0_power']) port_data[port]['read1_power'].append(sram_vals['read1_power']) port_data[port]['read0_power'].append(sram_vals['read0_power']) - + # 
Disabled power not modeled. Copied from other power predictions port_data[port]['disabled_write1_power'].append(sram_vals['write1_power']) port_data[port]['disabled_write0_power'].append(sram_vals['write0_power']) port_data[port]['disabled_read1_power'].append(sram_vals['read1_power']) port_data[port]['disabled_read0_power'].append(sram_vals['read0_power']) - - debug.info(1, '{}, {}, {}, {}, {}'.format(slew, - load, - port, - sram_vals['rise_delay'], + + debug.info(1, '{}, {}, {}, {}, {}'.format(slew, + load, + port, + sram_vals['rise_delay'], sram_vals['rise_slew'])) # Estimate the period as double the delay with margin period_margin = 0.1 @@ -112,19 +112,19 @@ class regression_model(simulation): return (sram_data, port_data) - def get_predictions(self, model_inputs, models): + def get_predictions(self, model_inputs, models): """ Generate a model and prediction for LIB output """ - - #Scaled the inputs using first data file as a reference + + #Scaled the inputs using first data file as a reference scaled_inputs = np.asarray([scale_input_datapoint(model_inputs, data_path)]) predictions = {} out_pos = 0 for dname in self.output_names: m = models[dname] - + scaled_pred = self.model_prediction(m, scaled_inputs) pred = unscale_data(scaled_pred.tolist(), data_path, pos=self.num_inputs+out_pos) debug.info(2,"Unscaled Prediction = {}".format(pred)) @@ -149,7 +149,7 @@ class regression_model(simulation): output_num+=1 return models - + def score_model(self): num_inputs = 9 #FIXME - should be defined somewhere else self.output_names = get_data_names(data_path)[num_inputs:] @@ -165,15 +165,15 @@ class regression_model(simulation): scr = model.score(features, output_label) debug.info(1, "{}, {}".format(o_name, scr)) output_num+=1 - - + + def cross_validation(self, test_only=None): """Wrapper for sklean cross validation function for OpenRAM regression models. Returns the mean accuracy for each model/output.""" - + from sklearn.model_selection import cross_val_score untrained_model = self.get_model() - + num_inputs = 9 #FIXME - should be defined somewhere else self.output_names = get_data_names(data_path)[num_inputs:] data = get_scaled_data(data_path) @@ -193,9 +193,9 @@ class regression_model(simulation): debug.info(1, "{}, {}, {}".format(o_name, scores.mean(), scores.std())) model_scores[o_name] = scores.mean() output_num+=1 - - return model_scores - + + return model_scores + # Fixme - only will work for sklearn regression models def save_model(self, model_name, model): try: @@ -205,4 +205,3 @@ class regression_model(simulation): OPTS.model_dict[model_name+"_coef"] = list(model.coef_[0]) debug.info(1,"Coefs of {}:{}".format(model_name,OPTS.model_dict[model_name+"_coef"])) OPTS.model_dict[model_name+"_intercept"] = float(model.intercept_) - \ No newline at end of file diff --git a/compiler/characterizer/setup_hold.py b/compiler/characterizer/setup_hold.py index 72e973d5..ec1a9e5e 100644 --- a/compiler/characterizer/setup_hold.py +++ b/compiler/characterizer/setup_hold.py @@ -22,7 +22,7 @@ class setup_hold(): def __init__(self, corner): # This must match the spice model order self.dff = factory.create(module_type=OPTS.dff) - + self.period = tech.spice["feasible_period"] debug.info(2, "Feasible period from technology file: {0} ".format(self.period)) @@ -106,8 +106,8 @@ class setup_hold(): setup=0) def write_clock(self): - """ - Create the clock signal for setup/hold analysis. + """ + Create the clock signal for setup/hold analysis. 
First period initializes the FF while the second is used for characterization. """ @@ -206,7 +206,7 @@ class setup_hold(): self.stim.run_sim(self.stim_sp) clk_to_q = convert_to_float(parse_spice_list("timing", "clk2q_delay")) - # We use a 1/2 speed clock for some reason... + # We use a 1/2 speed clock for some reason... setuphold_time = (target_time - 2 * self.period) if mode == "SETUP": # SETUP is clk-din, not din-clk passing_setuphold_time = -1 * setuphold_time diff --git a/compiler/characterizer/trim_spice.py b/compiler/characterizer/trim_spice.py index e8499d5c..affba296 100644 --- a/compiler/characterizer/trim_spice.py +++ b/compiler/characterizer/trim_spice.py @@ -46,9 +46,9 @@ class trim_spice(): self.col_addr_size = int(log(self.words_per_row, 2)) self.bank_addr_size = self.col_addr_size + self.row_addr_size self.addr_size = self.bank_addr_size + int(log(self.num_banks, 2)) - + def trim(self, address, data_bit): - """ + """ Reduce the spice netlist but KEEP the given bits at the address (and things that will add capacitive load!) """ @@ -62,7 +62,7 @@ class trim_spice(): col_address = int(address[0:self.col_addr_size], 2) else: col_address = 0 - + # 1. Keep cells in the bitcell array based on WL and BL wl_name = "wl_{}".format(wl_address) bl_name = "bl_{}".format(int(self.words_per_row*data_bit + col_address)) diff --git a/compiler/datasheet/datasheet.py b/compiler/datasheet/datasheet.py index d1fb3731..e7551cd3 100644 --- a/compiler/datasheet/datasheet.py +++ b/compiler/datasheet/datasheet.py @@ -31,7 +31,7 @@ class datasheet(): if OPTS.output_datasheet_info: datasheet_path = OPTS.output_path else: - datasheet_path = OPTS.openram_temp + datasheet_path = OPTS.openram_temp with open(datasheet_path + "/datasheet.info") as info: self.html += '