diff --git a/README.md b/README.md
index b1fcc41a..e77ad7c6 100644
--- a/README.md
+++ b/README.md
@@ -53,6 +53,7 @@ You must set two environment variables:
 + OPENRAM\_HOME should point to the compiler source directory.
 + OPENERAM\_TECH should point to one or more root technology directories (colon separated).
+You should also add OPENRAM\_HOME to your PYTHONPATH.
 
 For example add this to your .bashrc:
 
@@ -62,10 +63,10 @@ For example add this to your .bashrc:
 ```
 
 You should also add OPENRAM\_HOME to your PYTHONPATH:
-
 ```
 export PYTHONPATH=$OPENRAM_HOME
 ```
+
 Note that if you want symbols to resolve in your editor, you may also want to add the specific technology directory that you use and any custom technology modules as well. For example:
 
 ```
diff --git a/compiler/base/geometry.py b/compiler/base/geometry.py
index 3ca87e2e..7451d465 100644
--- a/compiler/base/geometry.py
+++ b/compiler/base/geometry.py
@@ -359,7 +359,7 @@ class instance(geometry):
         for offset in range(len(normalized_br_offsets)):
             for port in range(len(br_names)):
                 cell_br_meta.append([br_names[offset], row, col, port])
-
+
         if normalized_storage_nets == []:
             debug.error("normalized storage nets should not be empty! Check if the GDS labels Q and Q_bar are correctly set on M1 of the cell",1)
         Q_x = normalized_storage_nets[0][0]
diff --git a/compiler/base/hierarchy_layout.py b/compiler/base/hierarchy_layout.py
index fe108c01..fc22f3c0 100644
--- a/compiler/base/hierarchy_layout.py
+++ b/compiler/base/hierarchy_layout.py
@@ -1317,7 +1317,7 @@ class layout():
             return None
 
         intermediate_layers = self.get_metal_layers(from_layer, to_layer)
-
+
         via = None
         cur_layer = from_layer
         while cur_layer != to_layer:
diff --git a/compiler/base/lef.py b/compiler/base/lef.py
index 799890e0..9ebd823d 100644
--- a/compiler/base/lef.py
+++ b/compiler/base/lef.py
@@ -75,7 +75,7 @@ class lef:
         #     return
 
         # To maintain the indent level easily
-        self.indent = ""
+        self.indent = ""
 
         if OPTS.detailed_lef:
             debug.info(3, "Writing detailed LEF to {0}".format(lef_name))
@@ -88,7 +88,7 @@ class lef:
 
         for pin_name in self.pins:
             self.lef_write_pin(pin_name)
-
+
         self.lef_write_obstructions(OPTS.detailed_lef)
         self.lef_write_footer()
         self.lef.close()
@@ -220,4 +220,3 @@ class lef:
                                                 round(item[1], self.round_grid)))
 
         self.lef.write(" ;\n")
-
diff --git a/compiler/base/pin_layout.py b/compiler/base/pin_layout.py
index f9b66612..4021a8b5 100644
--- a/compiler/base/pin_layout.py
+++ b/compiler/base/pin_layout.py
@@ -45,7 +45,7 @@ class pin_layout:
             if self.same_lpp(layer_name_pp, lpp):
                 self._layer = layer_name
                 break
-
+
         else:
             try:
                 from tech import layer_override
@@ -57,7 +57,7 @@ class pin_layout:
                     return
             except:
                 debug.error("Layer {} is not a valid routing layer in the tech file.".format(layer_name_pp), -1)
-
+
         self.lpp = layer[self.layer]
         self._recompute_hash()
diff --git a/compiler/base/timing_graph.py b/compiler/base/timing_graph.py
index edbd7128..46d7b518 100644
--- a/compiler/base/timing_graph.py
+++ b/compiler/base/timing_graph.py
@@ -119,7 +119,7 @@ class timing_graph():
             # If at the last output, include the final output load
             if i == len(path) - 2:
                 cout += load
-
+
             if params["model_name"] == "cacti":
                 delays.append(path_edge_mod.cacti_delay(corner, cur_slew, cout, params))
                 cur_slew = delays[-1].slew
@@ -130,14 +130,14 @@ class timing_graph():
                             return_value=1)
 
         return delays
-
+
     def get_edge_mods(self, path):
         """Return all edge mods associated with path"""
-
+
         if len(path) == 0:
             return []
-
-        return [self.edge_mods[(path[i], path[i+1])] for i in range(len(path)-1)]
+
+        return
[self.edge_mods[(path[i], path[i+1])] for i in range(len(path)-1)] def __str__(self): """ override print function output """ @@ -153,4 +153,3 @@ class timing_graph(): """ override print function output """ return str(self) - diff --git a/compiler/base/utils.py b/compiler/base/utils.py index f80b23e3..082caf21 100644 --- a/compiler/base/utils.py +++ b/compiler/base/utils.py @@ -163,7 +163,7 @@ def get_gds_pins(pin_names, name, gds_filename, units): if layer_override[pin_name]: lpp = layer_override[pin_name.textString] except: - pass + pass lpp = (lpp[0], None) cell[str(pin_name)].append(pin_layout(pin_name, rect, lpp)) diff --git a/compiler/base/wire.py b/compiler/base/wire.py index 7114687c..a276c035 100644 --- a/compiler/base/wire.py +++ b/compiler/base/wire.py @@ -68,7 +68,7 @@ class wire(wire_path): This is contact direction independent pitch, i.e. we take the maximum contact dimension """ - + # This is here for the unit tests which may not have # initialized the static parts of the layout class yet. from base import layout diff --git a/compiler/characterizer/analytical_util.py b/compiler/characterizer/analytical_util.py index 41120982..11e58f73 100644 --- a/compiler/characterizer/analytical_util.py +++ b/compiler/characterizer/analytical_util.py @@ -1,325 +1,325 @@ -# -# Copyright (c) 2016-2019 Regents of the University of California and The Board -# of Regents for the Oklahoma Agricultural and Mechanical College -# (acting for and on behalf of Oklahoma State University) -# All rights reserved. -# - -import debug - -import csv -import math -import numpy as np -import os - -process_transform = {'SS':0.0, 'TT': 0.5, 'FF':1.0} - -def get_data_names(file_name, exclude_area=True): - """ - Returns just the data names in the first row of the CSV - """ - - with open(file_name, newline='') as csvfile: - csv_reader = csv.reader(csvfile, delimiter=' ', quotechar='|') - row_iter = 0 - # reader is iterable not a list, probably a better way to do this - for row in csv_reader: - # Return names from first row - names = row[0].split(',') - break - if exclude_area: - try: - area_ind = names.index('area') - except ValueError: - area_ind = -1 - - if area_ind != -1: - names = names[:area_ind] + names[area_ind+1:] - return names - -def get_data(file_name): - """ - Returns data in CSV as lists of features - """ - - with open(file_name, newline='') as csvfile: - csv_reader = csv.reader(csvfile, delimiter=' ', quotechar='|') - row_iter = 0 - removed_items = 1 - for row in csv_reader: - row_iter += 1 - if row_iter == 1: - feature_names = row[0].split(',') - input_list = [[] for _ in range(len(feature_names)-removed_items)] - try: - # Save to remove area - area_ind = feature_names.index('area') - except ValueError: - area_ind = -1 - - try: - process_ind = feature_names.index('process') - except: - debug.error('Process not included as a feature.') - continue - - - - data = [] - split_str = row[0].split(',') - for i in range(len(split_str)): - if i == process_ind: - data.append(process_transform[split_str[i]]) - elif i == area_ind: - continue - else: - data.append(float(split_str[i])) - - data[0] = math.log(data[0], 2) - - for i in range(len(data)): - input_list[i].append(data[i]) - - return input_list - -def apply_samples_to_data(all_data, algo_samples): - # Take samples from algorithm and match them to samples in data - data_samples, unused_data = [], [] - sample_positions = set() - for sample in algo_samples: - sample_positions.add(find_sample_position_with_min_error(all_data, sample)) - - for i in 
range(len(all_data)): - if i in sample_positions: - data_samples.append(all_data[i]) - else: - unused_data.append(all_data[i]) - - return data_samples, unused_data - -def find_sample_position_with_min_error(data, sampled_vals): - min_error = 0 - sample_pos = 0 - count = 0 - for data_slice in data: - error = squared_error(data_slice, sampled_vals) - if min_error == 0 or error < min_error: - min_error = error - sample_pos = count - count += 1 - return sample_pos - -def squared_error(list_a, list_b): - error_sum = 0; - for a,b in zip(list_a, list_b): - error_sum+=(a-b)**2 - return error_sum - - -def get_max_min_from_datasets(dir): - if not os.path.isdir(dir): - debug.warning("Input Directory not found:{}".format(dir)) - return [], [], [] - - # Assuming all files are CSV - data_files = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))] - maxs,mins,sums,total_count = [],[],[],0 - for file in data_files: - data = get_data(os.path.join(dir, file)) - # Get max, min, sum, and count from every file - data_max, data_min, data_sum, count = [],[],[], 0 - for feature_list in data: - data_max.append(max(feature_list)) - data_min.append(min(feature_list)) - data_sum.append(sum(feature_list)) - count = len(feature_list) - - # Aggregate the data - if not maxs or not mins or not sums: - maxs,mins,sums,total_count = data_max,data_min,data_sum,count - else: - for i in range(len(maxs)): - maxs[i] = max(data_max[i], maxs[i]) - mins[i] = min(data_min[i], mins[i]) - sums[i] = data_sum[i]+sums[i] - total_count+=count - - avgs = [s/total_count for s in sums] - return maxs,mins,avgs - -def get_max_min_from_file(path): - if not os.path.isfile(path): - debug.warning("Input file not found: {}".format(path)) - return [], [], [] - - - data = get_data(path) - # Get max, min, sum, and count from every file - data_max, data_min, data_sum, count = [],[],[], 0 - for feature_list in data: - data_max.append(max(feature_list)) - data_min.append(min(feature_list)) - data_sum.append(sum(feature_list)) - count = len(feature_list) - - avgs = [s/count for s in data_sum] - return data_max, data_min, avgs - -def get_data_and_scale(file_name, sample_dir): - maxs,mins,avgs = get_max_min_from_datasets(sample_dir) - - # Get data - all_data = get_data(file_name) - - # Scale data from file - self_scaled_data = [[] for _ in range(len(all_data[0]))] - self_maxs,self_mins = [],[] - for feature_list, cur_max, cur_min in zip(all_data,maxs, mins): - for i in range(len(feature_list)): - self_scaled_data[i].append((feature_list[i]-cur_min)/(cur_max-cur_min)) - - return np.asarray(self_scaled_data) - -def rescale_data(data, old_maxs, old_mins, new_maxs, new_mins): - # unscale from old values, rescale by new values - data_new_scaling = [] - for data_row in data: - scaled_row = [] - for val, old_max,old_min, cur_max, cur_min in zip(data_row, old_maxs,old_mins, new_maxs, new_mins): - unscaled_data = val*(old_max-old_min) + old_min - scaled_row.append((unscaled_data-cur_min)/(cur_max-cur_min)) - - data_new_scaling.append(scaled_row) - - return data_new_scaling - -def sample_from_file(num_samples, file_name, sample_dir=None): - """ - Get a portion of the data from CSV file and scale it based on max/min of dataset. - Duplicate samples are trimmed. 
- """ - - if sample_dir: - maxs,mins,avgs = get_max_min_from_datasets(sample_dir) - else: - maxs,mins,avgs = [], [], [] - - # Get data - all_data = get_data(file_name) - - # Get algorithms sample points, assuming hypercube for now - num_labels = 1 - inp_dims = len(all_data) - num_labels - samples = np.random.rand(num_samples, inp_dims) - - - # Scale data from file - self_scaled_data = [[] for _ in range(len(all_data[0]))] - self_maxs,self_mins = [],[] - for feature_list in all_data: - max_val = max(feature_list) - self_maxs.append(max_val) - min_val = min(feature_list) - self_mins.append(min_val) - for i in range(len(feature_list)): - self_scaled_data[i].append((feature_list[i]-min_val)/(max_val-min_val)) - # Apply algorithm sampling points to available data - sampled_data, unused_data = apply_samples_to_data(self_scaled_data,samples) - - #unscale values and rescale using all available data (both sampled and unused points rescaled) - if len(maxs)!=0 and len(mins)!=0: - sampled_data = rescale_data(sampled_data, self_maxs,self_mins, maxs, mins) - unused_new_scaling = rescale_data(unused_data, self_maxs,self_mins, maxs, mins) - - return np.asarray(sampled_data), np.asarray(unused_new_scaling) - -def get_scaled_data(file_name): - """Get data from CSV file and scale it based on max/min of dataset""" - - if file_name: - maxs,mins,avgs = get_max_min_from_file(file_name) - else: - maxs,mins,avgs = [], [], [] - - # Get data - all_data = get_data(file_name) - - # Data is scaled by max/min and data format is changed to points vs feature lists - self_scaled_data = scale_data_and_transform(all_data) - data_np = np.asarray(self_scaled_data) - return data_np - -def scale_data_and_transform(data): - """ - Assume data is a list of features, change to a list of points and max/min scale - """ - - scaled_data = [[] for _ in range(len(data[0]))] - for feature_list in data: - max_val = max(feature_list) - min_val = min(feature_list) - - for i in range(len(feature_list)): - if max_val == min_val: - scaled_data[i].append(0.0) - else: - scaled_data[i].append((feature_list[i]-min_val)/(max_val-min_val)) - return scaled_data - -def scale_input_datapoint(point, file_path): - """ - Input data has no output and needs to be scaled like the model inputs during - training. - """ - maxs, mins, avgs = get_max_min_from_file(file_path) - debug.info(3, "maxs={}".format(maxs)) - debug.info(3, "mins={}".format(mins)) - debug.info(3, "point={}".format(point)) - - scaled_point = [] - for feature, mx, mn in zip(point, maxs, mins): - if mx == mn: - scaled_point.append(0.0) - else: - scaled_point.append((feature-mn)/(mx-mn)) - return scaled_point - -def unscale_data(data, file_path, pos=None): - if file_path: - maxs,mins,avgs = get_max_min_from_file(file_path) - else: - debug.error("Must provide reference data to unscale") - return None - - # Hard coded to only convert the last max/min (i.e. 
the label of the data) - if pos == None: - maxs,mins,avgs = maxs[-1],mins[-1],avgs[-1] - else: - maxs,mins,avgs = maxs[pos],mins[pos],avgs[pos] - unscaled_data = [] - for data_row in data: - unscaled_val = data_row*(maxs-mins) + mins - unscaled_data.append(unscaled_val) - - return unscaled_data - -def abs_error(labels, preds): - total_error = 0 - for label_i, pred_i in zip(labels, preds): - cur_error = abs(label_i[0]-pred_i[0])/label_i[0] - total_error += cur_error - return total_error/len(labels) - -def max_error(labels, preds): - mx_error = 0 - for label_i, pred_i in zip(labels, preds): - cur_error = abs(label_i[0]-pred_i[0])/label_i[0] - mx_error = max(cur_error, mx_error) - return mx_error - -def min_error(labels, preds): - mn_error = 1 - for label_i, pred_i in zip(labels, preds): - cur_error = abs(label_i[0]-pred_i[0])/label_i[0] - mn_error = min(cur_error, mn_error) - return mn_error +# +# Copyright (c) 2016-2019 Regents of the University of California and The Board +# of Regents for the Oklahoma Agricultural and Mechanical College +# (acting for and on behalf of Oklahoma State University) +# All rights reserved. +# + +import debug + +import csv +import math +import numpy as np +import os + +process_transform = {'SS':0.0, 'TT': 0.5, 'FF':1.0} + +def get_data_names(file_name, exclude_area=True): + """ + Returns just the data names in the first row of the CSV + """ + + with open(file_name, newline='') as csvfile: + csv_reader = csv.reader(csvfile, delimiter=' ', quotechar='|') + row_iter = 0 + # reader is iterable not a list, probably a better way to do this + for row in csv_reader: + # Return names from first row + names = row[0].split(',') + break + if exclude_area: + try: + area_ind = names.index('area') + except ValueError: + area_ind = -1 + + if area_ind != -1: + names = names[:area_ind] + names[area_ind+1:] + return names + +def get_data(file_name): + """ + Returns data in CSV as lists of features + """ + + with open(file_name, newline='') as csvfile: + csv_reader = csv.reader(csvfile, delimiter=' ', quotechar='|') + row_iter = 0 + removed_items = 1 + for row in csv_reader: + row_iter += 1 + if row_iter == 1: + feature_names = row[0].split(',') + input_list = [[] for _ in range(len(feature_names)-removed_items)] + try: + # Save to remove area + area_ind = feature_names.index('area') + except ValueError: + area_ind = -1 + + try: + process_ind = feature_names.index('process') + except: + debug.error('Process not included as a feature.') + continue + + + + data = [] + split_str = row[0].split(',') + for i in range(len(split_str)): + if i == process_ind: + data.append(process_transform[split_str[i]]) + elif i == area_ind: + continue + else: + data.append(float(split_str[i])) + + data[0] = math.log(data[0], 2) + + for i in range(len(data)): + input_list[i].append(data[i]) + + return input_list + +def apply_samples_to_data(all_data, algo_samples): + # Take samples from algorithm and match them to samples in data + data_samples, unused_data = [], [] + sample_positions = set() + for sample in algo_samples: + sample_positions.add(find_sample_position_with_min_error(all_data, sample)) + + for i in range(len(all_data)): + if i in sample_positions: + data_samples.append(all_data[i]) + else: + unused_data.append(all_data[i]) + + return data_samples, unused_data + +def find_sample_position_with_min_error(data, sampled_vals): + min_error = 0 + sample_pos = 0 + count = 0 + for data_slice in data: + error = squared_error(data_slice, sampled_vals) + if min_error == 0 or error < min_error: + 
min_error = error + sample_pos = count + count += 1 + return sample_pos + +def squared_error(list_a, list_b): + error_sum = 0; + for a,b in zip(list_a, list_b): + error_sum+=(a-b)**2 + return error_sum + + +def get_max_min_from_datasets(dir): + if not os.path.isdir(dir): + debug.warning("Input Directory not found:{}".format(dir)) + return [], [], [] + + # Assuming all files are CSV + data_files = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))] + maxs,mins,sums,total_count = [],[],[],0 + for file in data_files: + data = get_data(os.path.join(dir, file)) + # Get max, min, sum, and count from every file + data_max, data_min, data_sum, count = [],[],[], 0 + for feature_list in data: + data_max.append(max(feature_list)) + data_min.append(min(feature_list)) + data_sum.append(sum(feature_list)) + count = len(feature_list) + + # Aggregate the data + if not maxs or not mins or not sums: + maxs,mins,sums,total_count = data_max,data_min,data_sum,count + else: + for i in range(len(maxs)): + maxs[i] = max(data_max[i], maxs[i]) + mins[i] = min(data_min[i], mins[i]) + sums[i] = data_sum[i]+sums[i] + total_count+=count + + avgs = [s/total_count for s in sums] + return maxs,mins,avgs + +def get_max_min_from_file(path): + if not os.path.isfile(path): + debug.warning("Input file not found: {}".format(path)) + return [], [], [] + + + data = get_data(path) + # Get max, min, sum, and count from every file + data_max, data_min, data_sum, count = [],[],[], 0 + for feature_list in data: + data_max.append(max(feature_list)) + data_min.append(min(feature_list)) + data_sum.append(sum(feature_list)) + count = len(feature_list) + + avgs = [s/count for s in data_sum] + return data_max, data_min, avgs + +def get_data_and_scale(file_name, sample_dir): + maxs,mins,avgs = get_max_min_from_datasets(sample_dir) + + # Get data + all_data = get_data(file_name) + + # Scale data from file + self_scaled_data = [[] for _ in range(len(all_data[0]))] + self_maxs,self_mins = [],[] + for feature_list, cur_max, cur_min in zip(all_data,maxs, mins): + for i in range(len(feature_list)): + self_scaled_data[i].append((feature_list[i]-cur_min)/(cur_max-cur_min)) + + return np.asarray(self_scaled_data) + +def rescale_data(data, old_maxs, old_mins, new_maxs, new_mins): + # unscale from old values, rescale by new values + data_new_scaling = [] + for data_row in data: + scaled_row = [] + for val, old_max,old_min, cur_max, cur_min in zip(data_row, old_maxs,old_mins, new_maxs, new_mins): + unscaled_data = val*(old_max-old_min) + old_min + scaled_row.append((unscaled_data-cur_min)/(cur_max-cur_min)) + + data_new_scaling.append(scaled_row) + + return data_new_scaling + +def sample_from_file(num_samples, file_name, sample_dir=None): + """ + Get a portion of the data from CSV file and scale it based on max/min of dataset. + Duplicate samples are trimmed. 
+ """ + + if sample_dir: + maxs,mins,avgs = get_max_min_from_datasets(sample_dir) + else: + maxs,mins,avgs = [], [], [] + + # Get data + all_data = get_data(file_name) + + # Get algorithms sample points, assuming hypercube for now + num_labels = 1 + inp_dims = len(all_data) - num_labels + samples = np.random.rand(num_samples, inp_dims) + + + # Scale data from file + self_scaled_data = [[] for _ in range(len(all_data[0]))] + self_maxs,self_mins = [],[] + for feature_list in all_data: + max_val = max(feature_list) + self_maxs.append(max_val) + min_val = min(feature_list) + self_mins.append(min_val) + for i in range(len(feature_list)): + self_scaled_data[i].append((feature_list[i]-min_val)/(max_val-min_val)) + # Apply algorithm sampling points to available data + sampled_data, unused_data = apply_samples_to_data(self_scaled_data,samples) + + #unscale values and rescale using all available data (both sampled and unused points rescaled) + if len(maxs)!=0 and len(mins)!=0: + sampled_data = rescale_data(sampled_data, self_maxs,self_mins, maxs, mins) + unused_new_scaling = rescale_data(unused_data, self_maxs,self_mins, maxs, mins) + + return np.asarray(sampled_data), np.asarray(unused_new_scaling) + +def get_scaled_data(file_name): + """Get data from CSV file and scale it based on max/min of dataset""" + + if file_name: + maxs,mins,avgs = get_max_min_from_file(file_name) + else: + maxs,mins,avgs = [], [], [] + + # Get data + all_data = get_data(file_name) + + # Data is scaled by max/min and data format is changed to points vs feature lists + self_scaled_data = scale_data_and_transform(all_data) + data_np = np.asarray(self_scaled_data) + return data_np + +def scale_data_and_transform(data): + """ + Assume data is a list of features, change to a list of points and max/min scale + """ + + scaled_data = [[] for _ in range(len(data[0]))] + for feature_list in data: + max_val = max(feature_list) + min_val = min(feature_list) + + for i in range(len(feature_list)): + if max_val == min_val: + scaled_data[i].append(0.0) + else: + scaled_data[i].append((feature_list[i]-min_val)/(max_val-min_val)) + return scaled_data + +def scale_input_datapoint(point, file_path): + """ + Input data has no output and needs to be scaled like the model inputs during + training. + """ + maxs, mins, avgs = get_max_min_from_file(file_path) + debug.info(3, "maxs={}".format(maxs)) + debug.info(3, "mins={}".format(mins)) + debug.info(3, "point={}".format(point)) + + scaled_point = [] + for feature, mx, mn in zip(point, maxs, mins): + if mx == mn: + scaled_point.append(0.0) + else: + scaled_point.append((feature-mn)/(mx-mn)) + return scaled_point + +def unscale_data(data, file_path, pos=None): + if file_path: + maxs,mins,avgs = get_max_min_from_file(file_path) + else: + debug.error("Must provide reference data to unscale") + return None + + # Hard coded to only convert the last max/min (i.e. 
the label of the data) + if pos == None: + maxs,mins,avgs = maxs[-1],mins[-1],avgs[-1] + else: + maxs,mins,avgs = maxs[pos],mins[pos],avgs[pos] + unscaled_data = [] + for data_row in data: + unscaled_val = data_row*(maxs-mins) + mins + unscaled_data.append(unscaled_val) + + return unscaled_data + +def abs_error(labels, preds): + total_error = 0 + for label_i, pred_i in zip(labels, preds): + cur_error = abs(label_i[0]-pred_i[0])/label_i[0] + total_error += cur_error + return total_error/len(labels) + +def max_error(labels, preds): + mx_error = 0 + for label_i, pred_i in zip(labels, preds): + cur_error = abs(label_i[0]-pred_i[0])/label_i[0] + mx_error = max(cur_error, mx_error) + return mx_error + +def min_error(labels, preds): + mn_error = 1 + for label_i, pred_i in zip(labels, preds): + cur_error = abs(label_i[0]-pred_i[0])/label_i[0] + mn_error = min(cur_error, mn_error) + return mn_error diff --git a/compiler/characterizer/cacti.py b/compiler/characterizer/cacti.py index df9f540b..5dab9618 100644 --- a/compiler/characterizer/cacti.py +++ b/compiler/characterizer/cacti.py @@ -9,15 +9,15 @@ from .simulation import simulation from globals import OPTS import debug -import tech +import tech import math -class cacti(simulation): +class cacti(simulation): """ Delay model for the SRAM which which """ - + def __init__(self, sram, spfile, corner): super().__init__(sram, spfile, corner) @@ -33,8 +33,8 @@ class cacti(simulation): self.create_signal_names() self.add_graph_exclusions() self.set_params() - - def set_params(self): + + def set_params(self): """Set parameters specific to the corner being simulated""" self.params = {} # Set the specific functions to use for timing defined in the SRAM module @@ -42,14 +42,14 @@ class cacti(simulation): # Only parameter right now is r_on which is dependent on Vdd self.params["r_nch_on"] = self.vdd_voltage / tech.spice["i_on_n"] self.params["r_pch_on"] = self.vdd_voltage / tech.spice["i_on_p"] - + def get_lib_values(self, load_slews): """ Return the analytical model results for the SRAM. """ if OPTS.num_rw_ports > 1 or OPTS.num_w_ports > 0 and OPTS.num_r_ports > 0: debug.warning("In analytical mode, all ports have the timing of the first read port.") - + # Probe set to 0th bit, does not matter for analytical delay. self.set_probe('0' * self.addr_size, 0) self.create_graph() @@ -77,7 +77,7 @@ class cacti(simulation): slew = 0 path_delays = self.graph.get_timing(bl_path, self.corner, slew, load_farad, self.params) total_delay = self.sum_delays(path_delays) - + delay_ns = total_delay.delay/1e-9 slew_ns = total_delay.slew/1e-9 max_delay = max(max_delay, total_delay.delay) @@ -95,7 +95,7 @@ class cacti(simulation): elif "slew" in mname and port in self.read_ports: port_data[port][mname].append(total_delay.slew / 1e-9) - # Margin for error in period. Calculated by averaging required margin for a small and large + # Margin for error in period. Calculated by averaging required margin for a small and large # memory. FIXME: margin is quite large, should be looked into. 
period_margin = 1.85 sram_data = {"min_period": (max_delay / 1e-9) * 2 * period_margin, @@ -118,5 +118,3 @@ class cacti(simulation): debug.info(1, "Dynamic Power: {0} mW".format(power.dynamic)) debug.info(1, "Leakage Power: {0} mW".format(power.leakage)) return power - - \ No newline at end of file diff --git a/compiler/characterizer/charutils.py b/compiler/characterizer/charutils.py index 59ef3177..70f80774 100644 --- a/compiler/characterizer/charutils.py +++ b/compiler/characterizer/charutils.py @@ -37,7 +37,7 @@ def parse_spice_list(filename, key): except IOError: debug.error("Unable to open spice output file: {0}".format(full_filename),1) debug.archive() - + contents = f.read().lower() f.close() # val = re.search(r"{0}\s*=\s*(-?\d+.?\d*\S*)\s+.*".format(key), contents) diff --git a/compiler/characterizer/delay.py b/compiler/characterizer/delay.py index 27d72195..40a623c6 100644 --- a/compiler/characterizer/delay.py +++ b/compiler/characterizer/delay.py @@ -235,10 +235,10 @@ class delay(simulation): qbar_meas = voltage_at_measure("v_qbar_{0}".format(meas_tag), qbar_name) return {bit_polarity.NONINVERTING: q_meas, bit_polarity.INVERTING: qbar_meas} - + def create_sen_and_bitline_path_measures(self): """Create measurements for the s_en and bitline paths for individual delays per stage.""" - + # FIXME: There should be a default_read_port variable in this case, pathing is done with this # but is never mentioned otherwise port = self.read_ports[0] @@ -253,37 +253,37 @@ class delay(simulation): debug.check(len(bl_paths)==1, 'Found {0} paths which contain the bitline net.'.format(len(bl_paths))) sen_path = sen_paths[0] bitline_path = bl_paths[0] - + # Get the measures self.sen_path_meas = self.create_delay_path_measures(sen_path) self.bl_path_meas = self.create_delay_path_measures(bitline_path) all_meas = self.sen_path_meas + self.bl_path_meas - + # Paths could have duplicate measurements, remove them before they go to the stim file all_meas = self.remove_duplicate_meas_names(all_meas) # FIXME: duplicate measurements still exist in the member variables, since they have the same # name it will still work, but this could cause an issue in the future. - - return all_meas + + return all_meas def remove_duplicate_meas_names(self, measures): """Returns new list of measurements without duplicate names""" - + name_set = set() unique_measures = [] for meas in measures: if meas.name not in name_set: name_set.add(meas.name) unique_measures.append(meas) - + return unique_measures - + def create_delay_path_measures(self, path): """Creates measurements for each net along given path.""" # Determine the directions (RISE/FALL) of signals path_dirs = self.get_meas_directions(path) - + # Create the measurements path_meas = [] for i in range(len(path) - 1): @@ -297,26 +297,26 @@ class delay(simulation): # Some bitcell logic is hardcoded for only read zeroes, force that here as well. path_meas[-1].meta_str = sram_op.READ_ZERO path_meas[-1].meta_add_delay = True - + return path_meas - + def get_meas_directions(self, path): """Returns SPICE measurements directions based on path.""" - + # Get the edges modules which define the path edge_mods = self.graph.get_edge_mods(path) - + # Convert to booleans based on function of modules (inverting/non-inverting) mod_type_bools = [mod.is_non_inverting() for mod in edge_mods] - + # FIXME: obtuse hack to differentiate s_en input from bitline in sense amps if self.sen_name in path: - # Force the sense amp to be inverting for s_en->DOUT. 
+ # Force the sense amp to be inverting for s_en->DOUT. # bitline->DOUT is non-inverting, but the module cannot differentiate inputs. s_en_index = path.index(self.sen_name) mod_type_bools[s_en_index] = False debug.info(2, 'Forcing sen->dout to be inverting.') - + # Use these to determine direction list assuming delay start on neg. edge of clock (FALL) # Also, use shorthand that 'FALL' == False, 'RISE' == True to simplify logic bool_dirs = [False] @@ -324,9 +324,9 @@ class delay(simulation): for mod_bool in mod_type_bools: cur_dir = (cur_dir == mod_bool) bool_dirs.append(cur_dir) - + # Convert from boolean to string - return ['RISE' if dbool else 'FALL' for dbool in bool_dirs] + return ['RISE' if dbool else 'FALL' for dbool in bool_dirs] def set_load_slew(self, load, slew): """ Set the load and slew """ @@ -827,7 +827,7 @@ class delay(simulation): debug.error("Failed to Measure Read Port Values:\n\t\t{0}".format(read_port_dict), 1) result[port].update(read_port_dict) - + self.path_delays = self.check_path_measures() return (True, result) @@ -932,7 +932,7 @@ class delay(simulation): def check_path_measures(self): """Get and check all the delays along the sen and bitline paths""" - + # Get and set measurement, no error checking done other than prints. debug.info(2, "Checking measures in Delay Path") value_dict = {} @@ -1179,7 +1179,7 @@ class delay(simulation): #char_sram_data["sen_path_names"] = sen_names # FIXME: low-to-high delays are altered to be independent of the period. This makes the lib results less accurate. self.alter_lh_char_data(char_port_data) - + return (char_sram_data, char_port_data) def alter_lh_char_data(self, char_port_data): @@ -1222,14 +1222,14 @@ class delay(simulation): for meas in self.sen_path_meas: sen_name_list.append(meas.name) sen_delay_list.append(value_dict[meas.name]) - + bl_name_list = [] bl_delay_list = [] for meas in self.bl_path_meas: bl_name_list.append(meas.name) bl_delay_list.append(value_dict[meas.name]) - return sen_name_list, sen_delay_list, bl_name_list, bl_delay_list + return sen_name_list, sen_delay_list, bl_name_list, bl_delay_list def calculate_inverse_address(self): """Determine dummy test address based on probe address and column mux size.""" diff --git a/compiler/characterizer/elmore.py b/compiler/characterizer/elmore.py index c4ca44a2..5fee4223 100644 --- a/compiler/characterizer/elmore.py +++ b/compiler/characterizer/elmore.py @@ -10,11 +10,11 @@ from .simulation import simulation from globals import OPTS import debug -class elmore(simulation): +class elmore(simulation): """ Delay model for the SRAM which calculates Elmore delays along the SRAM critical path. """ - + def __init__(self, sram, spfile, corner): super().__init__(sram, spfile, corner) @@ -30,13 +30,13 @@ class elmore(simulation): self.set_corner(corner) self.create_signal_names() self.add_graph_exclusions() - - def set_params(self): + + def set_params(self): """Set parameters specific to the corner being simulated""" self.params = {} # Set the specific functions to use for timing defined in the SRAM module self.params["model_name"] = OPTS.model_name - + def get_lib_values(self, load_slews): """ Return the analytical model results for the SRAM. 
@@ -66,7 +66,7 @@ class elmore(simulation): for load,slew in load_slews: # Calculate delay based on slew and load path_delays = self.graph.get_timing(bl_path, self.corner, slew, load, self.params) - + total_delay = self.sum_delays(path_delays) max_delay = max(max_delay, total_delay.delay) debug.info(1, @@ -84,7 +84,7 @@ class elmore(simulation): elif "slew" in mname and port in self.read_ports: port_data[port][mname].append(total_delay.slew / 1e3) - # Margin for error in period. Calculated by averaging required margin for a small and large + # Margin for error in period. Calculated by averaging required margin for a small and large # memory. FIXME: margin is quite large, should be looked into. period_margin = 1.85 sram_data = {"min_period": (max_delay / 1e3) * 2 * period_margin, diff --git a/compiler/characterizer/linear_regression.py b/compiler/characterizer/linear_regression.py index f12e607a..68921e2c 100644 --- a/compiler/characterizer/linear_regression.py +++ b/compiler/characterizer/linear_regression.py @@ -26,18 +26,17 @@ class linear_regression(regression_model): """ Supervised training of model. """ - + #model = LinearRegression() model = self.get_model() model.fit(features, labels) return model - - def model_prediction(self, model, features): + + def model_prediction(self, model, features): """ Have the model perform a prediction and unscale the prediction as the model is trained with scaled values. """ - + pred = model.predict(features) return pred - \ No newline at end of file diff --git a/compiler/characterizer/measurements.py b/compiler/characterizer/measurements.py index 448dee36..fcbb562f 100644 --- a/compiler/characterizer/measurements.py +++ b/compiler/characterizer/measurements.py @@ -184,7 +184,7 @@ class voltage_when_measure(spice_measurement): trig_voltage = self.trig_val_of_vdd * vdd_voltage return (meas_name, trig_name, targ_name, trig_voltage, self.trig_dir_str, trig_td) - + class voltage_at_measure(spice_measurement): """Generates a spice measurement to measure the voltage at a specific time. The time is considered variant with different periods.""" @@ -211,4 +211,3 @@ class voltage_at_measure(spice_measurement): meas_name = self.name targ_name = self.targ_name_no_port return (meas_name, targ_name, time_at) - diff --git a/compiler/characterizer/model_check.py b/compiler/characterizer/model_check.py index fcbc51c2..9ef48b1c 100644 --- a/compiler/characterizer/model_check.py +++ b/compiler/characterizer/model_check.py @@ -82,7 +82,7 @@ class model_check(delay): replicated here. """ delay.create_signal_names(self) - + # Signal names are all hardcoded, need to update to make it work for probe address and different configurations. 
wl_en_driver_signals = ["Xsram{1}Xcontrol{{}}.Xbuf_wl_en.Zb{0}_int".format(stage, OPTS.hier_seperator) for stage in range(1, self.get_num_wl_en_driver_stages())] wl_driver_signals = ["Xsram{2}Xbank0{2}Xwordline_driver{{}}{2}Xwl_driver_inv{0}{2}Zb{1}_int".format(self.wordline_row, stage, OPTS.hier_seperator) for stage in range(1, self.get_num_wl_driver_stages())] @@ -448,6 +448,3 @@ class model_check(delay): name_dict[self.sae_model_name] = name_dict["sae_measures"] return name_dict - - - diff --git a/compiler/characterizer/neural_network.py b/compiler/characterizer/neural_network.py index 6f1aa7c1..ae65b26d 100644 --- a/compiler/characterizer/neural_network.py +++ b/compiler/characterizer/neural_network.py @@ -25,20 +25,19 @@ class neural_network(regression_model): """ Training multilayer model """ - + flat_labels = np.ravel(labels) model = self.get_model() model.fit(features, flat_labels) - + return model - - def model_prediction(self, model, features): + + def model_prediction(self, model, features): """ Have the model perform a prediction and unscale the prediction as the model is trained with scaled values. """ - + pred = model.predict(features) reshape_pred = np.reshape(pred, (len(pred),1)) return reshape_pred - \ No newline at end of file diff --git a/compiler/characterizer/regression_model.py b/compiler/characterizer/regression_model.py index 69f00485..e9b4ec4d 100644 --- a/compiler/characterizer/regression_model.py +++ b/compiler/characterizer/regression_model.py @@ -25,7 +25,7 @@ data_fnames = ["rise_delay.csv", "read0_power.csv", "leakage_data.csv", "sim_time.csv"] -# Positions must correspond to data_fname list +# Positions must correspond to data_fname list lib_dnames = ["delay_lh", "delay_hl", "slew_lh", @@ -35,13 +35,13 @@ lib_dnames = ["delay_lh", "read1_power", "read0_power", "leakage_power", - "sim_time"] + "sim_time"] # Check if another data dir was specified -if OPTS.sim_data_path == None: +if OPTS.sim_data_path == None: data_dir = OPTS.openram_tech+relative_data_path else: - data_dir = OPTS.sim_data_path - + data_dir = OPTS.sim_data_path + data_path = data_dir + '/' + data_file class regression_model(simulation): @@ -52,23 +52,23 @@ class regression_model(simulation): def get_lib_values(self, load_slews): """ - A model and prediction is created for each output needed for the LIB + A model and prediction is created for each output needed for the LIB """ - + debug.info(1, "Characterizing SRAM using regression models.") log_num_words = math.log(OPTS.num_words, 2) - model_inputs = [log_num_words, - OPTS.word_size, + model_inputs = [log_num_words, + OPTS.word_size, OPTS.words_per_row, OPTS.local_array_size, - process_transform[self.process], - self.vdd_voltage, - self.temperature] + process_transform[self.process], + self.vdd_voltage, + self.temperature] # Area removed for now # self.sram.width * self.sram.height, # Include above inputs, plus load and slew which are added below self.num_inputs = len(model_inputs)+2 - + self.create_measurement_names() models = self.train_models() @@ -85,22 +85,22 @@ class regression_model(simulation): port_data[port]['delay_hl'].append(sram_vals['fall_delay']) port_data[port]['slew_lh'].append(sram_vals['rise_slew']) port_data[port]['slew_hl'].append(sram_vals['fall_slew']) - + port_data[port]['write1_power'].append(sram_vals['write1_power']) port_data[port]['write0_power'].append(sram_vals['write0_power']) port_data[port]['read1_power'].append(sram_vals['read1_power']) port_data[port]['read0_power'].append(sram_vals['read0_power']) - + # 
Disabled power not modeled. Copied from other power predictions port_data[port]['disabled_write1_power'].append(sram_vals['write1_power']) port_data[port]['disabled_write0_power'].append(sram_vals['write0_power']) port_data[port]['disabled_read1_power'].append(sram_vals['read1_power']) port_data[port]['disabled_read0_power'].append(sram_vals['read0_power']) - - debug.info(1, '{}, {}, {}, {}, {}'.format(slew, - load, - port, - sram_vals['rise_delay'], + + debug.info(1, '{}, {}, {}, {}, {}'.format(slew, + load, + port, + sram_vals['rise_delay'], sram_vals['rise_slew'])) # Estimate the period as double the delay with margin period_margin = 0.1 @@ -112,19 +112,19 @@ class regression_model(simulation): return (sram_data, port_data) - def get_predictions(self, model_inputs, models): + def get_predictions(self, model_inputs, models): """ Generate a model and prediction for LIB output """ - - #Scaled the inputs using first data file as a reference + + #Scaled the inputs using first data file as a reference scaled_inputs = np.asarray([scale_input_datapoint(model_inputs, data_path)]) predictions = {} out_pos = 0 for dname in self.output_names: m = models[dname] - + scaled_pred = self.model_prediction(m, scaled_inputs) pred = unscale_data(scaled_pred.tolist(), data_path, pos=self.num_inputs+out_pos) debug.info(2,"Unscaled Prediction = {}".format(pred)) @@ -149,7 +149,7 @@ class regression_model(simulation): output_num+=1 return models - + def score_model(self): num_inputs = 9 #FIXME - should be defined somewhere else self.output_names = get_data_names(data_path)[num_inputs:] @@ -165,15 +165,15 @@ class regression_model(simulation): scr = model.score(features, output_label) debug.info(1, "{}, {}".format(o_name, scr)) output_num+=1 - - + + def cross_validation(self, test_only=None): """Wrapper for sklean cross validation function for OpenRAM regression models. Returns the mean accuracy for each model/output.""" - + from sklearn.model_selection import cross_val_score untrained_model = self.get_model() - + num_inputs = 9 #FIXME - should be defined somewhere else self.output_names = get_data_names(data_path)[num_inputs:] data = get_scaled_data(data_path) @@ -193,9 +193,9 @@ class regression_model(simulation): debug.info(1, "{}, {}, {}".format(o_name, scores.mean(), scores.std())) model_scores[o_name] = scores.mean() output_num+=1 - - return model_scores - + + return model_scores + # Fixme - only will work for sklearn regression models def save_model(self, model_name, model): try: @@ -205,4 +205,3 @@ class regression_model(simulation): OPTS.model_dict[model_name+"_coef"] = list(model.coef_[0]) debug.info(1,"Coefs of {}:{}".format(model_name,OPTS.model_dict[model_name+"_coef"])) OPTS.model_dict[model_name+"_intercept"] = float(model.intercept_) - \ No newline at end of file diff --git a/compiler/characterizer/setup_hold.py b/compiler/characterizer/setup_hold.py index 72e973d5..ec1a9e5e 100644 --- a/compiler/characterizer/setup_hold.py +++ b/compiler/characterizer/setup_hold.py @@ -22,7 +22,7 @@ class setup_hold(): def __init__(self, corner): # This must match the spice model order self.dff = factory.create(module_type=OPTS.dff) - + self.period = tech.spice["feasible_period"] debug.info(2, "Feasible period from technology file: {0} ".format(self.period)) @@ -106,8 +106,8 @@ class setup_hold(): setup=0) def write_clock(self): - """ - Create the clock signal for setup/hold analysis. + """ + Create the clock signal for setup/hold analysis. 
 First period initializes the FF while the second is used for characterization.
         """
 
@@ -206,7 +206,7 @@ class setup_hold():
         self.stim.run_sim(self.stim_sp)
         clk_to_q = convert_to_float(parse_spice_list("timing", "clk2q_delay"))
 
-        # We use a 1/2 speed clock for some reason...
+        # We use a 1/2 speed clock for some reason...
         setuphold_time = (target_time - 2 * self.period)
         if mode == "SETUP":  # SETUP is clk-din, not din-clk
             passing_setuphold_time = -1 * setuphold_time
diff --git a/compiler/characterizer/trim_spice.py b/compiler/characterizer/trim_spice.py
index e8499d5c..affba296 100644
--- a/compiler/characterizer/trim_spice.py
+++ b/compiler/characterizer/trim_spice.py
@@ -46,9 +46,9 @@ class trim_spice():
         self.col_addr_size = int(log(self.words_per_row, 2))
         self.bank_addr_size = self.col_addr_size + self.row_addr_size
         self.addr_size = self.bank_addr_size + int(log(self.num_banks, 2))
-
+
     def trim(self, address, data_bit):
-        """
+        """
         Reduce the spice netlist but KEEP the given bits at the address (and things that will add capacitive load!)
         """
@@ -62,7 +62,7 @@ class trim_spice():
             col_address = int(address[0:self.col_addr_size], 2)
         else:
             col_address = 0
-
+
         # 1. Keep cells in the bitcell array based on WL and BL
         wl_name = "wl_{}".format(wl_address)
         bl_name = "bl_{}".format(int(self.words_per_row*data_bit + col_address))
diff --git a/compiler/datasheet/datasheet.py b/compiler/datasheet/datasheet.py
index d1fb3731..e7551cd3 100644
--- a/compiler/datasheet/datasheet.py
+++ b/compiler/datasheet/datasheet.py
@@ -31,7 +31,7 @@ class datasheet():
         if OPTS.output_datasheet_info:
             datasheet_path = OPTS.output_path
         else:
-            datasheet_path = OPTS.openram_temp
+            datasheet_path = OPTS.openram_temp
 
         with open(datasheet_path + "/datasheet.info") as info:
             self.html += '