Revert "Fix whitespace"

Matt Guthaus 2022-07-22 13:39:48 -07:00 committed by GitHub
parent f1e452c8e3
commit 5d6e56763f
81 changed files with 752 additions and 727 deletions


@ -359,7 +359,7 @@ class instance(geometry):
for offset in range(len(normalized_br_offsets)):
for port in range(len(br_names)):
cell_br_meta.append([br_names[offset], row, col, port])
if normalized_storage_nets == []:
debug.error("normalized storage nets should not be empty! Check if the GDS labels Q and Q_bar are correctly set on M1 of the cell",1)
Q_x = normalized_storage_nets[0][0]


@ -1317,7 +1317,7 @@ class layout():
return None
intermediate_layers = self.get_metal_layers(from_layer, to_layer)
via = None
cur_layer = from_layer
while cur_layer != to_layer:


@ -75,7 +75,7 @@ class lef:
# return
# To maintain the indent level easily
self.indent = ""
if OPTS.detailed_lef:
debug.info(3, "Writing detailed LEF to {0}".format(lef_name))
@ -88,7 +88,7 @@ class lef:
for pin_name in self.pins:
self.lef_write_pin(pin_name)
self.lef_write_obstructions(OPTS.detailed_lef)
self.lef_write_footer()
self.lef.close()
@ -220,3 +220,4 @@ class lef:
round(item[1],
self.round_grid)))
self.lef.write(" ;\n")


@ -45,7 +45,7 @@ class pin_layout:
if self.same_lpp(layer_name_pp, lpp):
self._layer = layer_name
break
else:
try:
from tech import layer_override
@ -57,7 +57,7 @@ class pin_layout:
return
except:
debug.error("Layer {} is not a valid routing layer in the tech file.".format(layer_name_pp), -1)
self.lpp = layer[self.layer]
self._recompute_hash()


@ -119,7 +119,7 @@ class timing_graph():
# If at the last output, include the final output load
if i == len(path) - 2:
cout += load
if params["model_name"] == "cacti":
delays.append(path_edge_mod.cacti_delay(corner, cur_slew, cout, params))
cur_slew = delays[-1].slew
@ -130,14 +130,14 @@ class timing_graph():
return_value=1)
return delays
def get_edge_mods(self, path):
"""Return all edge mods associated with path"""
if len(path) == 0:
return []
return [self.edge_mods[(path[i], path[i+1])] for i in range(len(path)-1)]
def __str__(self):
""" override print function output """
@ -153,3 +153,4 @@ class timing_graph():
""" override print function output """
return str(self)
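For reference, a minimal sketch of the (source, sink) lookup that get_edge_mods performs above; the node names and module placeholders are hypothetical stand-ins, not OpenRAM objects:

# edge_mods is keyed by (source, sink) node pairs, as in the code above
edge_mods = {("clk_buf", "wl_en"): "wl_en_buffer",
             ("wl_en", "wl_0"): "wl_driver"}
path = ["clk_buf", "wl_en", "wl_0"]
# Consecutive node pairs along a path select the connecting edge modules
mods = [edge_mods[(path[i], path[i + 1])] for i in range(len(path) - 1)]
assert mods == ["wl_en_buffer", "wl_driver"]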


@ -163,7 +163,7 @@ def get_gds_pins(pin_names, name, gds_filename, units):
if layer_override[pin_name]:
lpp = layer_override[pin_name.textString]
except:
pass
lpp = (lpp[0], None)
cell[str(pin_name)].append(pin_layout(pin_name, rect, lpp))


@ -68,7 +68,7 @@ class wire(wire_path):
This is contact direction independent pitch,
i.e. we take the maximum contact dimension
"""
# This is here for the unit tests which may not have
# initialized the static parts of the layout class yet.
from base import layout


@ -1,325 +1,325 @@
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
import csv
import math
import numpy as np
import os
process_transform = {'SS':0.0, 'TT': 0.5, 'FF':1.0}
def get_data_names(file_name, exclude_area=True):
"""
Returns just the data names in the first row of the CSV
"""
with open(file_name, newline='') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
row_iter = 0
# reader is iterable not a list, probably a better way to do this
for row in csv_reader:
# Return names from first row
names = row[0].split(',')
break
if exclude_area:
try:
area_ind = names.index('area')
except ValueError:
area_ind = -1
if area_ind != -1:
names = names[:area_ind] + names[area_ind+1:]
return names
def get_data(file_name):
"""
Returns data in CSV as lists of features
"""
with open(file_name, newline='') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
row_iter = 0
removed_items = 1
for row in csv_reader:
row_iter += 1
if row_iter == 1:
feature_names = row[0].split(',')
input_list = [[] for _ in range(len(feature_names)-removed_items)]
try:
# Save to remove area
area_ind = feature_names.index('area')
except ValueError:
area_ind = -1
try:
process_ind = feature_names.index('process')
except:
debug.error('Process not included as a feature.')
continue
data = []
split_str = row[0].split(',')
for i in range(len(split_str)):
if i == process_ind:
data.append(process_transform[split_str[i]])
elif i == area_ind:
continue
else:
data.append(float(split_str[i]))
data[0] = math.log(data[0], 2)
for i in range(len(data)):
input_list[i].append(data[i])
return input_list
def apply_samples_to_data(all_data, algo_samples):
# Take samples from algorithm and match them to samples in data
data_samples, unused_data = [], []
sample_positions = set()
for sample in algo_samples:
sample_positions.add(find_sample_position_with_min_error(all_data, sample))
for i in range(len(all_data)):
if i in sample_positions:
data_samples.append(all_data[i])
else:
unused_data.append(all_data[i])
return data_samples, unused_data
def find_sample_position_with_min_error(data, sampled_vals):
min_error = 0
sample_pos = 0
count = 0
for data_slice in data:
error = squared_error(data_slice, sampled_vals)
if min_error == 0 or error < min_error:
min_error = error
sample_pos = count
count += 1
return sample_pos
def squared_error(list_a, list_b):
error_sum = 0
for a, b in zip(list_a, list_b):
error_sum += (a - b)**2
return error_sum
def get_max_min_from_datasets(dir):
if not os.path.isdir(dir):
debug.warning("Input Directory not found:{}".format(dir))
return [], [], []
# Assuming all files are CSV
data_files = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
maxs,mins,sums,total_count = [],[],[],0
for file in data_files:
data = get_data(os.path.join(dir, file))
# Get max, min, sum, and count from every file
data_max, data_min, data_sum, count = [],[],[], 0
for feature_list in data:
data_max.append(max(feature_list))
data_min.append(min(feature_list))
data_sum.append(sum(feature_list))
count = len(feature_list)
# Aggregate the data
if not maxs or not mins or not sums:
maxs,mins,sums,total_count = data_max,data_min,data_sum,count
else:
for i in range(len(maxs)):
maxs[i] = max(data_max[i], maxs[i])
mins[i] = min(data_min[i], mins[i])
sums[i] = data_sum[i]+sums[i]
total_count+=count
avgs = [s/total_count for s in sums]
return maxs,mins,avgs
def get_max_min_from_file(path):
if not os.path.isfile(path):
debug.warning("Input file not found: {}".format(path))
return [], [], []
data = get_data(path)
# Get max, min, sum, and count from every file
data_max, data_min, data_sum, count = [],[],[], 0
for feature_list in data:
data_max.append(max(feature_list))
data_min.append(min(feature_list))
data_sum.append(sum(feature_list))
count = len(feature_list)
avgs = [s/count for s in data_sum]
return data_max, data_min, avgs
def get_data_and_scale(file_name, sample_dir):
maxs,mins,avgs = get_max_min_from_datasets(sample_dir)
# Get data
all_data = get_data(file_name)
# Scale data from file
self_scaled_data = [[] for _ in range(len(all_data[0]))]
self_maxs,self_mins = [],[]
for feature_list, cur_max, cur_min in zip(all_data,maxs, mins):
for i in range(len(feature_list)):
self_scaled_data[i].append((feature_list[i]-cur_min)/(cur_max-cur_min))
return np.asarray(self_scaled_data)
def rescale_data(data, old_maxs, old_mins, new_maxs, new_mins):
# unscale from old values, rescale by new values
data_new_scaling = []
for data_row in data:
scaled_row = []
for val, old_max,old_min, cur_max, cur_min in zip(data_row, old_maxs,old_mins, new_maxs, new_mins):
unscaled_data = val*(old_max-old_min) + old_min
scaled_row.append((unscaled_data-cur_min)/(cur_max-cur_min))
data_new_scaling.append(scaled_row)
return data_new_scaling
def sample_from_file(num_samples, file_name, sample_dir=None):
"""
Get a portion of the data from CSV file and scale it based on max/min of dataset.
Duplicate samples are trimmed.
"""
if sample_dir:
maxs,mins,avgs = get_max_min_from_datasets(sample_dir)
else:
maxs,mins,avgs = [], [], []
# Get data
all_data = get_data(file_name)
# Get algorithms sample points, assuming hypercube for now
num_labels = 1
inp_dims = len(all_data) - num_labels
samples = np.random.rand(num_samples, inp_dims)
# Scale data from file
self_scaled_data = [[] for _ in range(len(all_data[0]))]
self_maxs,self_mins = [],[]
for feature_list in all_data:
max_val = max(feature_list)
self_maxs.append(max_val)
min_val = min(feature_list)
self_mins.append(min_val)
for i in range(len(feature_list)):
self_scaled_data[i].append((feature_list[i]-min_val)/(max_val-min_val))
# Apply algorithm sampling points to available data
sampled_data, unused_data = apply_samples_to_data(self_scaled_data,samples)
#unscale values and rescale using all available data (both sampled and unused points rescaled)
if len(maxs)!=0 and len(mins)!=0:
sampled_data = rescale_data(sampled_data, self_maxs,self_mins, maxs, mins)
unused_new_scaling = rescale_data(unused_data, self_maxs,self_mins, maxs, mins)
return np.asarray(sampled_data), np.asarray(unused_new_scaling)
def get_scaled_data(file_name):
"""Get data from CSV file and scale it based on max/min of dataset"""
if file_name:
maxs,mins,avgs = get_max_min_from_file(file_name)
else:
maxs,mins,avgs = [], [], []
# Get data
all_data = get_data(file_name)
# Data is scaled by max/min and data format is changed to points vs feature lists
self_scaled_data = scale_data_and_transform(all_data)
data_np = np.asarray(self_scaled_data)
return data_np
def scale_data_and_transform(data):
"""
Assume data is a list of features, change to a list of points and max/min scale
"""
scaled_data = [[] for _ in range(len(data[0]))]
for feature_list in data:
max_val = max(feature_list)
min_val = min(feature_list)
for i in range(len(feature_list)):
if max_val == min_val:
scaled_data[i].append(0.0)
else:
scaled_data[i].append((feature_list[i]-min_val)/(max_val-min_val))
return scaled_data
def scale_input_datapoint(point, file_path):
"""
Input data has no output and needs to be scaled like the model inputs during
training.
"""
maxs, mins, avgs = get_max_min_from_file(file_path)
debug.info(3, "maxs={}".format(maxs))
debug.info(3, "mins={}".format(mins))
debug.info(3, "point={}".format(point))
scaled_point = []
for feature, mx, mn in zip(point, maxs, mins):
if mx == mn:
scaled_point.append(0.0)
else:
scaled_point.append((feature-mn)/(mx-mn))
return scaled_point
def unscale_data(data, file_path, pos=None):
if file_path:
maxs,mins,avgs = get_max_min_from_file(file_path)
else:
debug.error("Must provide reference data to unscale")
return None
# Hard coded to only convert the last max/min (i.e. the label of the data)
if pos == None:
maxs,mins,avgs = maxs[-1],mins[-1],avgs[-1]
else:
maxs,mins,avgs = maxs[pos],mins[pos],avgs[pos]
unscaled_data = []
for data_row in data:
unscaled_val = data_row*(maxs-mins) + mins
unscaled_data.append(unscaled_val)
return unscaled_data
def abs_error(labels, preds):
total_error = 0
for label_i, pred_i in zip(labels, preds):
cur_error = abs(label_i[0]-pred_i[0])/label_i[0]
total_error += cur_error
return total_error/len(labels)
def max_error(labels, preds):
mx_error = 0
for label_i, pred_i in zip(labels, preds):
cur_error = abs(label_i[0]-pred_i[0])/label_i[0]
mx_error = max(cur_error, mx_error)
return mx_error
def min_error(labels, preds):
mn_error = 1
for label_i, pred_i in zip(labels, preds):
cur_error = abs(label_i[0]-pred_i[0])/label_i[0]
mn_error = min(cur_error, mn_error)
return mn_error
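For reference, a small round-trip sketch of the min/max scaling these helpers implement, using made-up feature values (the transpose into per-point rows done by scale_data_and_transform is omitted here):

# Two hypothetical feature lists, three samples each
data = [[4.0, 8.0, 16.0], [0.5, 1.0, 1.5]]
scaled = []
for feature_list in data:
    mx, mn = max(feature_list), min(feature_list)
    scaled.append([(v - mn) / (mx - mn) if mx != mn else 0.0
                   for v in feature_list])
# scaled == [[0.0, 1/3, 1.0], [0.0, 0.5, 1.0]]
# unscale_data applies the inverse transform to recover raw units:
raw = scaled[0][1] * (16.0 - 4.0) + 4.0
assert abs(raw - 8.0) < 1e-9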


@ -9,15 +9,15 @@
from .simulation import simulation
from globals import OPTS
import debug
import tech
import math
class cacti(simulation):
"""
Delay model for the SRAM which uses the CACTI analytical delay equations.
"""
def __init__(self, sram, spfile, corner):
super().__init__(sram, spfile, corner)
@ -33,8 +33,8 @@ class cacti(simulation):
self.create_signal_names()
self.add_graph_exclusions()
self.set_params()
def set_params(self):
"""Set parameters specific to the corner being simulated"""
self.params = {}
# Set the specific functions to use for timing defined in the SRAM module
@ -42,14 +42,14 @@ class cacti(simulation):
# Only parameter right now is r_on which is dependent on Vdd
self.params["r_nch_on"] = self.vdd_voltage / tech.spice["i_on_n"]
self.params["r_pch_on"] = self.vdd_voltage / tech.spice["i_on_p"]
def get_lib_values(self, load_slews):
"""
Return the analytical model results for the SRAM.
"""
if OPTS.num_rw_ports > 1 or OPTS.num_w_ports > 0 and OPTS.num_r_ports > 0:
debug.warning("In analytical mode, all ports have the timing of the first read port.")
# Probe set to 0th bit, does not matter for analytical delay.
self.set_probe('0' * self.addr_size, 0)
self.create_graph()
@ -77,7 +77,7 @@ class cacti(simulation):
slew = 0
path_delays = self.graph.get_timing(bl_path, self.corner, slew, load_farad, self.params)
total_delay = self.sum_delays(path_delays)
delay_ns = total_delay.delay/1e-9
slew_ns = total_delay.slew/1e-9
max_delay = max(max_delay, total_delay.delay)
@ -95,7 +95,7 @@ class cacti(simulation):
elif "slew" in mname and port in self.read_ports:
port_data[port][mname].append(total_delay.slew / 1e-9)
# Margin for error in period. Calculated by averaging required margin for a small and large
# memory. FIXME: margin is quite large, should be looked into.
period_margin = 1.85
sram_data = {"min_period": (max_delay / 1e-9) * 2 * period_margin,
@ -118,3 +118,5 @@ class cacti(simulation):
debug.info(1, "Dynamic Power: {0} mW".format(power.dynamic))
debug.info(1, "Leakage Power: {0} mW".format(power.leakage))
return power
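As a worked check of the min_period arithmetic above, with an assumed worst-case path delay of 2 ns: (2e-9 / 1e-9) * 2 * 1.85 = 7.4, i.e. a reported minimum period of 7.4 ns once the 2x factor and the 1.85 period margin are applied.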


@ -37,7 +37,7 @@ def parse_spice_list(filename, key):
except IOError:
debug.error("Unable to open spice output file: {0}".format(full_filename),1)
debug.archive()
contents = f.read().lower()
f.close()
# val = re.search(r"{0}\s*=\s*(-?\d+.?\d*\S*)\s+.*".format(key), contents)


@ -235,10 +235,10 @@ class delay(simulation):
qbar_meas = voltage_at_measure("v_qbar_{0}".format(meas_tag), qbar_name)
return {bit_polarity.NONINVERTING: q_meas, bit_polarity.INVERTING: qbar_meas}
def create_sen_and_bitline_path_measures(self):
"""Create measurements for the s_en and bitline paths for individual delays per stage."""
# FIXME: There should be a default_read_port variable in this case, pathing is done with this
# but is never mentioned otherwise
port = self.read_ports[0]
@ -253,37 +253,37 @@ class delay(simulation):
debug.check(len(bl_paths)==1, 'Found {0} paths which contain the bitline net.'.format(len(bl_paths)))
sen_path = sen_paths[0]
bitline_path = bl_paths[0]
# Get the measures
self.sen_path_meas = self.create_delay_path_measures(sen_path)
self.bl_path_meas = self.create_delay_path_measures(bitline_path)
all_meas = self.sen_path_meas + self.bl_path_meas
# Paths could have duplicate measurements, remove them before they go to the stim file
all_meas = self.remove_duplicate_meas_names(all_meas)
# FIXME: duplicate measurements still exist in the member variables, since they have the same
# name it will still work, but this could cause an issue in the future.
return all_meas
def remove_duplicate_meas_names(self, measures):
"""Returns new list of measurements without duplicate names"""
name_set = set()
unique_measures = []
for meas in measures:
if meas.name not in name_set:
name_set.add(meas.name)
unique_measures.append(meas)
return unique_measures
def create_delay_path_measures(self, path):
"""Creates measurements for each net along given path."""
# Determine the directions (RISE/FALL) of signals
path_dirs = self.get_meas_directions(path)
# Create the measurements
path_meas = []
for i in range(len(path) - 1):
@ -297,26 +297,26 @@ class delay(simulation):
# Some bitcell logic is hardcoded for only read zeroes, force that here as well.
path_meas[-1].meta_str = sram_op.READ_ZERO
path_meas[-1].meta_add_delay = True
return path_meas
def get_meas_directions(self, path):
"""Returns SPICE measurements directions based on path."""
# Get the edges modules which define the path
edge_mods = self.graph.get_edge_mods(path)
# Convert to booleans based on function of modules (inverting/non-inverting)
mod_type_bools = [mod.is_non_inverting() for mod in edge_mods]
# FIXME: obtuse hack to differentiate s_en input from bitline in sense amps
if self.sen_name in path:
# Force the sense amp to be inverting for s_en->DOUT.
# bitline->DOUT is non-inverting, but the module cannot differentiate inputs.
s_en_index = path.index(self.sen_name)
mod_type_bools[s_en_index] = False
debug.info(2, 'Forcing sen->dout to be inverting.')
# Use these to determine direction list assuming delay start on neg. edge of clock (FALL)
# Also, use shorthand that 'FALL' == False, 'RISE' == True to simplify logic
bool_dirs = [False]
@ -324,9 +324,9 @@ class delay(simulation):
for mod_bool in mod_type_bools:
cur_dir = (cur_dir == mod_bool)
bool_dirs.append(cur_dir)
# Convert from boolean to string
return ['RISE' if dbool else 'FALL' for dbool in bool_dirs]
def set_load_slew(self, load, slew):
""" Set the load and slew """
@ -827,7 +827,7 @@ class delay(simulation):
debug.error("Failed to Measure Read Port Values:\n\t\t{0}".format(read_port_dict), 1)
result[port].update(read_port_dict)
self.path_delays = self.check_path_measures()
return (True, result)
@ -932,7 +932,7 @@ class delay(simulation):
def check_path_measures(self):
"""Get and check all the delays along the sen and bitline paths"""
# Get and set measurement, no error checking done other than prints.
debug.info(2, "Checking measures in Delay Path")
value_dict = {}
@ -1179,7 +1179,7 @@ class delay(simulation):
#char_sram_data["sen_path_names"] = sen_names
# FIXME: low-to-high delays are altered to be independent of the period. This makes the lib results less accurate.
self.alter_lh_char_data(char_port_data)
return (char_sram_data, char_port_data)
def alter_lh_char_data(self, char_port_data):
@ -1222,14 +1222,14 @@ class delay(simulation):
for meas in self.sen_path_meas:
sen_name_list.append(meas.name)
sen_delay_list.append(value_dict[meas.name])
bl_name_list = []
bl_delay_list = []
for meas in self.bl_path_meas:
bl_name_list.append(meas.name)
bl_delay_list.append(value_dict[meas.name])
return sen_name_list, sen_delay_list, bl_name_list, bl_delay_list
def calculate_inverse_address(self):
"""Determine dummy test address based on probe address and column mux size."""


@ -10,11 +10,11 @@ from .simulation import simulation
from globals import OPTS
import debug
class elmore(simulation):
"""
Delay model for the SRAM which calculates Elmore delays along the SRAM critical path.
"""
def __init__(self, sram, spfile, corner):
super().__init__(sram, spfile, corner)
@ -30,13 +30,13 @@ class elmore(simulation):
self.set_corner(corner)
self.create_signal_names()
self.add_graph_exclusions()
def set_params(self):
"""Set parameters specific to the corner being simulated"""
self.params = {}
# Set the specific functions to use for timing defined in the SRAM module
self.params["model_name"] = OPTS.model_name
def get_lib_values(self, load_slews):
"""
Return the analytical model results for the SRAM.
@ -66,7 +66,7 @@ class elmore(simulation):
for load,slew in load_slews:
# Calculate delay based on slew and load
path_delays = self.graph.get_timing(bl_path, self.corner, slew, load, self.params)
total_delay = self.sum_delays(path_delays)
max_delay = max(max_delay, total_delay.delay)
debug.info(1,
@ -84,7 +84,7 @@ class elmore(simulation):
elif "slew" in mname and port in self.read_ports:
port_data[port][mname].append(total_delay.slew / 1e3)
# Margin for error in period. Calculated by averaging required margin for a small and large
# memory. FIXME: margin is quite large, should be looked into.
period_margin = 1.85
sram_data = {"min_period": (max_delay / 1e3) * 2 * period_margin,


@ -26,17 +26,18 @@ class linear_regression(regression_model):
"""
Supervised training of model.
"""
#model = LinearRegression()
model = self.get_model()
model.fit(features, labels)
return model
def model_prediction(self, model, features):
"""
Have the model perform a prediction and unscale the prediction
as the model is trained with scaled values.
"""
pred = model.predict(features)
return pred
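For context, a standalone sketch of the sklearn fit/predict cycle that model_fit and model_prediction wrap; the toy arrays are invented, and LinearRegression is the model suggested by the commented-out line above:

import numpy as np
from sklearn.linear_model import LinearRegression

features = np.array([[0.0, 0.1], [0.5, 0.4], [1.0, 0.9]])  # toy scaled inputs
labels = np.array([0.0, 0.5, 1.0])                         # toy scaled labels
model = LinearRegression()
model.fit(features, labels)
pred = model.predict(np.array([[0.2, 0.2]]))               # still in scaled units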


@ -184,7 +184,7 @@ class voltage_when_measure(spice_measurement):
trig_voltage = self.trig_val_of_vdd * vdd_voltage
return (meas_name, trig_name, targ_name, trig_voltage, self.trig_dir_str, trig_td)
class voltage_at_measure(spice_measurement):
"""Generates a spice measurement to measure the voltage at a specific time.
The time is considered variant with different periods."""
@ -211,3 +211,4 @@ class voltage_at_measure(spice_measurement):
meas_name = self.name
targ_name = self.targ_name_no_port
return (meas_name, targ_name, time_at)


@ -82,7 +82,7 @@ class model_check(delay):
replicated here.
"""
delay.create_signal_names(self)
# Signal names are all hardcoded, need to update to make it work for probe address and different configurations.
wl_en_driver_signals = ["Xsram{1}Xcontrol{{}}.Xbuf_wl_en.Zb{0}_int".format(stage, OPTS.hier_seperator) for stage in range(1, self.get_num_wl_en_driver_stages())]
wl_driver_signals = ["Xsram{2}Xbank0{2}Xwordline_driver{{}}{2}Xwl_driver_inv{0}{2}Zb{1}_int".format(self.wordline_row, stage, OPTS.hier_seperator) for stage in range(1, self.get_num_wl_driver_stages())]
@ -448,3 +448,6 @@ class model_check(delay):
name_dict[self.sae_model_name] = name_dict["sae_measures"]
return name_dict


@ -25,19 +25,20 @@ class neural_network(regression_model):
"""
Training multilayer model
"""
flat_labels = np.ravel(labels)
model = self.get_model()
model.fit(features, flat_labels)
return model
def model_prediction(self, model, features):
"""
Have the model perform a prediction and unscale the prediction
as the model is trained with scaled values.
"""
pred = model.predict(features)
reshape_pred = np.reshape(pred, (len(pred),1))
return reshape_pred
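For context, a small sketch of the shape handling above: sklearn regressors expect a 1-D label vector for fit and return 1-D predictions, hence the ravel/reshape pair. MLPRegressor and the toy arrays here are assumptions for illustration:

import numpy as np
from sklearn.neural_network import MLPRegressor

labels = np.array([[0.1], [0.4], [0.9]])    # toy column of scaled labels
flat_labels = np.ravel(labels)              # 1-D, as fit() expects
features = np.array([[0.0], [0.5], [1.0]])  # toy scaled inputs
model = MLPRegressor(max_iter=2000).fit(features, flat_labels)
pred = model.predict(np.array([[0.25]]))    # comes back 1-D
col = np.reshape(pred, (len(pred), 1))      # back to a column vector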


@ -25,7 +25,7 @@ data_fnames = ["rise_delay.csv",
"read0_power.csv",
"leakage_data.csv",
"sim_time.csv"]
# Positions must correspond to data_fname list
lib_dnames = ["delay_lh",
"delay_hl",
"slew_lh",
@ -35,13 +35,13 @@ lib_dnames = ["delay_lh",
"read1_power",
"read0_power",
"leakage_power",
"sim_time"]
"sim_time"]
# Check if another data dir was specified
if OPTS.sim_data_path == None:
data_dir = OPTS.openram_tech+relative_data_path
else:
data_dir = OPTS.sim_data_path
data_path = data_dir + '/' + data_file
class regression_model(simulation):
@ -52,23 +52,23 @@ class regression_model(simulation):
def get_lib_values(self, load_slews):
"""
A model and prediction is created for each output needed for the LIB
"""
debug.info(1, "Characterizing SRAM using regression models.")
log_num_words = math.log(OPTS.num_words, 2)
model_inputs = [log_num_words,
OPTS.word_size,
OPTS.words_per_row,
OPTS.local_array_size,
process_transform[self.process],
self.vdd_voltage,
self.temperature]
# Area removed for now
# self.sram.width * self.sram.height,
# Include above inputs, plus load and slew which are added below
self.num_inputs = len(model_inputs)+2
self.create_measurement_names()
models = self.train_models()
@ -85,22 +85,22 @@ class regression_model(simulation):
port_data[port]['delay_hl'].append(sram_vals['fall_delay'])
port_data[port]['slew_lh'].append(sram_vals['rise_slew'])
port_data[port]['slew_hl'].append(sram_vals['fall_slew'])
port_data[port]['write1_power'].append(sram_vals['write1_power'])
port_data[port]['write0_power'].append(sram_vals['write0_power'])
port_data[port]['read1_power'].append(sram_vals['read1_power'])
port_data[port]['read0_power'].append(sram_vals['read0_power'])
# Disabled power not modeled. Copied from other power predictions
port_data[port]['disabled_write1_power'].append(sram_vals['write1_power'])
port_data[port]['disabled_write0_power'].append(sram_vals['write0_power'])
port_data[port]['disabled_read1_power'].append(sram_vals['read1_power'])
port_data[port]['disabled_read0_power'].append(sram_vals['read0_power'])
debug.info(1, '{}, {}, {}, {}, {}'.format(slew,
load,
port,
sram_vals['rise_delay'],
sram_vals['rise_slew']))
# Estimate the period as double the delay with margin
period_margin = 0.1
@ -112,19 +112,19 @@ class regression_model(simulation):
return (sram_data, port_data)
def get_predictions(self, model_inputs, models):
"""
Generate a model and prediction for LIB output
"""
#Scaled the inputs using first data file as a reference
scaled_inputs = np.asarray([scale_input_datapoint(model_inputs, data_path)])
predictions = {}
out_pos = 0
for dname in self.output_names:
m = models[dname]
scaled_pred = self.model_prediction(m, scaled_inputs)
pred = unscale_data(scaled_pred.tolist(), data_path, pos=self.num_inputs+out_pos)
debug.info(2,"Unscaled Prediction = {}".format(pred))
@ -149,7 +149,7 @@ class regression_model(simulation):
output_num+=1
return models
def score_model(self):
num_inputs = 9 #FIXME - should be defined somewhere else
self.output_names = get_data_names(data_path)[num_inputs:]
@ -165,15 +165,15 @@ class regression_model(simulation):
scr = model.score(features, output_label)
debug.info(1, "{}, {}".format(o_name, scr))
output_num+=1
def cross_validation(self, test_only=None):
"""Wrapper for sklean cross validation function for OpenRAM regression models.
Returns the mean accuracy for each model/output."""
from sklearn.model_selection import cross_val_score
untrained_model = self.get_model()
num_inputs = 9 #FIXME - should be defined somewhere else
self.output_names = get_data_names(data_path)[num_inputs:]
data = get_scaled_data(data_path)
@ -193,9 +193,9 @@ class regression_model(simulation):
debug.info(1, "{}, {}, {}".format(o_name, scores.mean(), scores.std()))
model_scores[o_name] = scores.mean()
output_num+=1
return model_scores
# Fixme - only will work for sklearn regression models
def save_model(self, model_name, model):
try:
@ -205,3 +205,4 @@ class regression_model(simulation):
OPTS.model_dict[model_name+"_coef"] = list(model.coef_[0])
debug.info(1,"Coefs of {}:{}".format(model_name,OPTS.model_dict[model_name+"_coef"]))
OPTS.model_dict[model_name+"_intercept"] = float(model.intercept_)
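As an illustration of how these stored values could be reused, a hedged sketch; predict_from_saved is a hypothetical helper, not OpenRAM API, and assumes the sklearn linear form y = X @ coef + intercept implied by save_model above:

import numpy as np

def predict_from_saved(model_dict, model_name, features):
    # Hypothetical helper: rebuild y = X @ coef + intercept from the
    # "<name>_coef" / "<name>_intercept" entries written by save_model
    coef = np.asarray(model_dict[model_name + "_coef"])
    intercept = model_dict[model_name + "_intercept"]
    return np.asarray(features) @ coef + intercept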


@ -22,7 +22,7 @@ class setup_hold():
def __init__(self, corner):
# This must match the spice model order
self.dff = factory.create(module_type=OPTS.dff)
self.period = tech.spice["feasible_period"]
debug.info(2, "Feasible period from technology file: {0} ".format(self.period))
@ -106,8 +106,8 @@ class setup_hold():
setup=0)
def write_clock(self):
"""
Create the clock signal for setup/hold analysis.
"""
Create the clock signal for setup/hold analysis.
First period initializes the FF
while the second is used for characterization.
"""
@ -206,7 +206,7 @@ class setup_hold():
self.stim.run_sim(self.stim_sp)
clk_to_q = convert_to_float(parse_spice_list("timing", "clk2q_delay"))
# We use a 1/2 speed clock for some reason...
setuphold_time = (target_time - 2 * self.period)
if mode == "SETUP": # SETUP is clk-din, not din-clk
passing_setuphold_time = -1 * setuphold_time


@ -46,9 +46,9 @@ class trim_spice():
self.col_addr_size = int(log(self.words_per_row, 2))
self.bank_addr_size = self.col_addr_size + self.row_addr_size
self.addr_size = self.bank_addr_size + int(log(self.num_banks, 2))
def trim(self, address, data_bit):
"""
"""
Reduce the spice netlist but KEEP the given bits at the
address (and things that will add capacitive load!)
"""
@ -62,7 +62,7 @@ class trim_spice():
col_address = int(address[0:self.col_addr_size], 2)
else:
col_address = 0
# 1. Keep cells in the bitcell array based on WL and BL
wl_name = "wl_{}".format(wl_address)
bl_name = "bl_{}".format(int(self.words_per_row*data_bit + col_address))


@ -31,7 +31,7 @@ class datasheet():
if OPTS.output_datasheet_info:
datasheet_path = OPTS.output_path
else:
datasheet_path = OPTS.openram_temp
with open(datasheet_path + "/datasheet.info") as info:
self.html += '<!--'
for row in info:


@ -29,7 +29,7 @@ def check(check, str):
if globals.OPTS.debug:
pdb.set_trace()
assert 0
@ -108,7 +108,7 @@ def info(lev, str):
print_raw("[{0}/{1}]: {2}".format(class_name,
frm[0].f_code.co_name, str))
def archive():
from globals import OPTS
try:
@ -121,7 +121,7 @@ def archive():
info(0, "Archiving failed files to {}.zip".format(zip_file))
shutil.make_archive(zip_file, 'zip', OPTS.openram_temp)
def bp():
"""
An empty function so you can set soft breakpoints in pdb.
@ -130,7 +130,8 @@ def bp():
2) Run "python3 -m pdb openram.py config.py" or "python3 -m pdb 05_bitcell_array.test" (for example)
3) When pdb starts, run "break debug.bp" to set a SOFT breakpoint. (Or you can add this to your ~/.pdbrc)
4) Then run "cont" to continue.
5) You can now set additional breakpoints or display commands
and whenever you encounter the debug.bp() they won't be "reset".
"""
pass


@ -11,14 +11,14 @@ class cell:
# Some cells may have body bias (well taps) exposed as ports
self._body_bias = body_bias
# Specifies if this is a hard (i.e. GDS) cell
self._hard_cell = hard_cell
self._boundary_layer = boundary_layer
# Specifies the port directions
self._port_types_map = {x: y for (x, y) in zip(port_order, port_types)}
# Specifies a map from OpenRAM names to cell names
# by default it is 1:1
if not port_map:
@ -31,13 +31,13 @@ class cell:
# Create an index array
self._port_indices = [self._port_order.index(x) for x in self._original_port_order]
# Update ordered name list
self._port_names = [self._port_map[x] for x in self._port_order]
# Update ordered type list
self._port_types = [self._port_types_map[x] for x in self._port_order]
@property
def hard_cell(self):
return self._hard_cell
@ -73,21 +73,21 @@ class cell:
@property
def port_indices(self):
return self._port_indices
@property
def port_map(self):
return self._port_map
@port_map.setter
def port_map(self, port_map):
self._port_map = port_map
# Update ordered name list to use the new names
self._port_names = [self._port_map[x] for x in self._port_order]
@property
def body_bias(self):
return self._body_bias
@body_bias.setter
def body_bias(self, body_bias):
# It is assumed it is [nwell, pwell]
@ -96,7 +96,7 @@ class cell:
self._port_types['vnb'] = "GROUND"
self._port_map['vpb'] = body_bias[1]
self._port_types['vpb'] = "POWER"
@property
def port_types(self):
return self._port_types
@ -108,7 +108,7 @@ class cell:
self._port_types_map = {x: y for (x, y) in zip(self._port_order, self._port_types)}
# Update ordered type list
self._port_types = [self._port_types_map[x] for x in self._port_order]
@property
def boundary_layer(self):
return self._boundary_layer
@ -116,8 +116,8 @@ class cell:
@boundary_layer.setter
def boundary_layer(self, x):
self._boundary_layer = x
class _pins:
def __init__(self, pin_dict):
# make the pins elements of the class to allow "." access.
@ -148,7 +148,7 @@ class bitcell(cell):
super().__init__(port_order, port_types, port_map)
self.end_caps = end_caps
if not mirror:
self.mirror = _mirror_axis(True, False)
else:
@ -166,7 +166,7 @@ class bitcell(cell):
self.gnd_layer = "m1"
self.gnd_dir = "H"
class cell_properties():
"""
This contains meta information about the custom designed cells. For
@ -194,16 +194,16 @@ class cell_properties():
self._inv_dec = cell(["A", "Z", "vdd", "gnd"],
["INPUT", "OUTPUT", "POWER", "GROUND"])
self._nand2_dec = cell(["A", "B", "Z", "vdd", "gnd"],
["INPUT", "INPUT", "OUTPUT", "POWER", "GROUND"])
self._nand3_dec = cell(["A", "B", "C", "Z", "vdd", "gnd"],
["INPUT", "INPUT", "INPUT", "OUTPUT", "POWER", "GROUND"])
self._nand4_dec = cell(["A", "B", "C", "D", "Z", "vdd", "gnd"],
["INPUT", "INPUT", "INPUT", "INPUT", "OUTPUT", "POWER", "GROUND"])
self._dff = cell(["D", "Q", "clk", "vdd", "gnd"],
["INPUT", "OUTPUT", "INPUT", "POWER", "GROUND"])
@ -230,7 +230,7 @@ class cell_properties():
self._row_cap_2port = bitcell(["wl0", "wl1", "gnd"],
["INPUT", "INPUT", "POWER", "GROUND"])
@property
def ptx(self):
return self._ptx
@ -246,15 +246,15 @@ class cell_properties():
@property
def nand2_dec(self):
return self._nand2_dec
@property
def nand3_dec(self):
return self._nand3_dec
@property
def nand4_dec(self):
return self._nand4_dec
@property
def dff(self):
return self._dff
@ -270,7 +270,7 @@ class cell_properties():
@property
def bitcell_1port(self):
return self._bitcell_1port
@property
def bitcell_2port(self):
return self._bitcell_2port
@ -282,7 +282,7 @@ class cell_properties():
@property
def row_cap_1port(self):
return self._row_cap_1port
@property
def col_cap_2port(self):
return self._col_cap_2port
@ -290,3 +290,4 @@ class cell_properties():
@property
def row_cap_2port(self):
return self._row_cap_2port


@ -180,7 +180,7 @@ def check_versions():
else:
OPTS.coverage_exe = ""
debug.warning("Failed to find coverage installation. This can be installed with pip3 install coverage")
try:
import coverage
OPTS.coverage = 1
@ -249,7 +249,7 @@ def setup_bitcell():
OPTS.bitcell = "bitcell_{}port".format(OPTS.num_ports)
OPTS.dummy_bitcell = "dummy_" + OPTS.bitcell
OPTS.replica_bitcell = "replica_" + OPTS.bitcell
# See if bitcell exists
try:
c = importlib.import_module("modules." + OPTS.bitcell)
@ -388,7 +388,7 @@ def end_openram():
verify.print_lvs_stats()
verify.print_pex_stats()
def purge_temp():
""" Remove the temp directory. """
debug.info(1,
@ -406,7 +406,7 @@ def purge_temp():
os.remove(i)
else:
shutil.rmtree(i)
def cleanup_paths():
"""
@ -420,7 +420,7 @@ def cleanup_paths():
elif os.path.exists(OPTS.openram_temp):
purge_temp()
def setup_paths():
""" Set up the non-tech related paths. """
debug.info(2, "Setting up paths...")
@ -447,12 +447,12 @@ def setup_paths():
# Only add the unique subdir one time
if tempdir not in OPTS.openram_temp:
OPTS.openram_temp += tempdir
if not OPTS.openram_temp.endswith('/'):
OPTS.openram_temp += "/"
debug.info(1, "Temporary files saved in " + OPTS.openram_temp)
def is_exe(fpath):
""" Return true if the given is an executable file that exists. """
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
@ -490,7 +490,7 @@ def init_paths():
#from pprint import pprint
#pprint(s)
#print("Test {0} in dir {1}".format(s[2].filename, OPTS.openram_temp))
# Don't delete the output dir, it may have other files!
# make the directory if it doesn't exist


@ -18,13 +18,13 @@ def gen_regex_float_group(num, separator):
for i in range(num-1):
full_regex+=separator+float_regex
return full_regex
def import_module(mod_name, mod_path):
spec = importlib.util.spec_from_file_location(mod_name, mod_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def get_config_mods(openram_dir):
# Get dataset name used by all the files e.g. sram_1b_16
files_names = [name for name in os.listdir(openram_dir) if os.path.isfile(openram_dir+'/'+name)]
@ -32,8 +32,8 @@ def get_config_mods(openram_dir):
dataset_name = log[:-4]
sys.path.append(openram_dir)
print("Extracting dataset:{}".format(dataset_name))
# Check that the config files exist (including special extended config)
dir_path = openram_dir+"/"
#sys.path.append(dir_path)
#imp_mod = None
@ -43,16 +43,16 @@ def get_config_mods(openram_dir):
# imp_mod = None
# else:
# imp_mod = import_module(dataset_name, openram_dir+"/"+dataset_name+".py")
if not os.path.exists(openram_dir+'/'+dataset_name+extended_name+".py"):
print("Extended Python module for {} not found.".format(dataset_name))
imp_mod_extended = None
else:
imp_mod_extended = import_module(dataset_name+extended_name, openram_dir+"/"+dataset_name+extended_name+".py")
datasheet_fname = openram_dir+"/"+dataset_name+data_file_ext
return dataset_name, imp_mod_extended, datasheet_fname
def get_corners(datafile_contents, dataset_name, tech):
"""Search through given datasheet to find all corners available"""
@ -60,18 +60,18 @@ def get_corners(datafile_contents, dataset_name, tech):
corner_regex = r"{}.*{},([-+]?[0-9]*\.?[0-9]*),([-+]?[0-9]*\.?[0-9]*),([tsfTSF][tsfTSF]),".format(dataset_name, tech)
corners = re.findall(corner_regex,datafile_contents)
return corners # List of corner tuples in order (T, V, P)
feature_names = ['num_words',
'word_size',
'words_per_row',
'local_array_size',
'area',
'process',
'voltage',
'temperature',
'slew',
'load']
output_names = ['rise_delay',
'fall_delay',
'rise_slew',
'fall_slew',
@ -79,20 +79,20 @@ output_names = ['rise_delay',
'write0_power',
'read1_power',
'read0_power',
'leakage_power']
multivalue_names = ['cell_rise_0',
'cell_fall_0',
'rise_transition_0',
'fall_transition_0']
singlevalue_names = ['write_rise_power_0',
'write_fall_power_0',
'read_rise_power_0',
'read_fall_power_0']
def write_to_csv(dataset_name, csv_file, datasheet_fname, imp_mod, mode):
writer = csv.writer(csv_file,lineterminator='\n')
# If the file was opened to write and not append then we write the header
if mode == 'w':
@ -102,7 +102,7 @@ def write_to_csv(dataset_name, csv_file, datasheet_fname, imp_mod, mode):
load_slews = imp_mod.use_specified_load_slew
except:
load_slews = None
if load_slews != None:
num_items = len(load_slews)
num_loads_or_slews = len(load_slews)
@ -110,7 +110,7 @@ def write_to_csv(dataset_name, csv_file, datasheet_fname, imp_mod, mode):
# These are the defaults for openram
num_items = 9
num_loads_or_slews = 3
try:
f = open(datasheet_fname, "r")
except IOError:
@ -118,13 +118,13 @@ def write_to_csv(dataset_name, csv_file, datasheet_fname, imp_mod, mode):
return None
print("Opened file",datasheet_fname)
contents = f.read()
f.close()
available_corners = get_corners(contents, dataset_name, imp_mod.tech_name)
# Loop through corners, adding data for each corner
for (temp, voltage, process) in available_corners:
# Create a regex to search the datasheet for specified outputs
voltage_str = "".join(['\\'+i if i=='.' else i for i in str(voltage)])
area_regex = r"Area \(&microm<sup>2<\/sup>\)<\/td><td>(\d+)"
@ -141,14 +141,14 @@ def write_to_csv(dataset_name, csv_file, datasheet_fname, imp_mod, mode):
voltage_str,
process,
float_regex)
loads_regex = r"{},{}.*{},{},{},.*loads,\[{}".format(
dataset_name,
imp_mod.num_words,
str(temp),
voltage_str,
process,
float_regex)
float_regex = gen_regex_float_group(num_items, ', ')
multivalue_regexs = []
@ -160,10 +160,10 @@ def write_to_csv(dataset_name, csv_file, datasheet_fname, imp_mod, mode):
voltage_str,
process,
value_identifier,
float_regex)
multivalue_regexs.append(regex_str)
singlevalue_regexs = []
for value_identifier in singlevalue_names:
regex_str = r"{},{}.*{},{},{},.*{},([-+]?[0-9]*\.?[0-9]*)".format(
dataset_name,
@ -172,15 +172,15 @@ def write_to_csv(dataset_name, csv_file, datasheet_fname, imp_mod, mode):
voltage_str,
process,
value_identifier,
float_regex)
singlevalue_regexs.append(regex_str)
area_vals = re.search(area_regex,contents)
leakage_vals = re.search(leakage_regex,contents)
if load_slews == None:
inp_slew_vals = re.search(inp_slews_regex,contents)
load_vals = re.search(loads_regex,contents)
datasheet_multivalues = [re.search(r,contents) for r in multivalue_regexs]
datasheet_singlevalues = [re.search(r,contents) for r in singlevalue_regexs]
for dval in datasheet_multivalues+datasheet_singlevalues:
@ -196,24 +196,24 @@ def write_to_csv(dataset_name, csv_file, datasheet_fname, imp_mod, mode):
las = imp_mod.local_array_size
except:
las = DEFAULT_LAS
# All the extracted values are delays but val[2] is the max delay
feature_vals = [imp_mod.num_words,
imp_mod.word_size,
imp_mod.words_per_row,
las,
area_vals[1],
process,
voltage,
temp]
if load_slews == None:
c = 1
for i in range(num_loads_or_slews):
for j in range(num_loads_or_slews):
multi_values = [val[i+j+c] for val in datasheet_multivalues]
single_values = [val[1] for val in datasheet_singlevalues]
writer.writerow(feature_vals+[inp_slew_vals[i+1], load_vals[j+1]]+multi_values+single_values+[leakage_vals[1]])
c+=2
else:
# if num loads and num slews are not equal then this might break because of how OpenRAM formats
@ -222,7 +222,7 @@ def write_to_csv(dataset_name, csv_file, datasheet_fname, imp_mod, mode):
for load,slew in load_slews:
multi_values = [val[c] for val in datasheet_multivalues]
single_values = [val[1] for val in datasheet_singlevalues]
writer.writerow(feature_vals+[slew, load]+multi_values+single_values+[leakage_vals[1]])
c+=1
@ -244,26 +244,50 @@ def extract_data(openram_dir, out_dir, is_first):
write_to_csv(dataset_name, data_file, datasheet_fname, inp_mod, mode)
return out_dir
def gen_model_csv(openram_dir_path, out_dir):
if not os.path.isdir(openram_dir_path):
print("Path does not exist: {}".format(openram_dir_path))
return
if not os.path.isdir(out_dir):
print("Path does not exist: {}".format(out_dir))
return
is_first = True
oram_dirs = [openram_dir_path+'/'+name for name in os.listdir(openram_dir_path) if os.path.isdir(openram_dir_path+'/'+name)]
for dir in oram_dirs:
extract_data(dir, out_dir, is_first)
is_first = False
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: python model_data_util.py path_to_openram_dirs out_dir_path")
else:
input_dir_path = sys.argv[1]
out_path = sys.argv[2]
gen_model_csv(input_dir_path, out_path)


@ -31,5 +31,5 @@ class bitcell_1port(bitcell_base):
def is_non_inverting(self):
"""Return input to output polarity for module"""
return False


@ -102,5 +102,5 @@ class bitcell_2port(bitcell_base):
def is_non_inverting(self):
"""Return input to output polarity for module"""
return False


@ -169,7 +169,7 @@ class bitcell_base(design):
"""
return
def get_all_wl_names(self):
""" Creates a list of all wordline pin names """
row_pins = ["wl"]
@ -207,39 +207,39 @@ class bitcell_base(design):
is_nchannel = True
stack = 2 # for access and inv tx
is_cell = False
return self.tr_r_on(drc["minwidth_tx"], is_nchannel, stack, is_cell)
def get_input_capacitance(self):
"""Input cap of input, passes width of gates to gate cap function"""
# Input cap of both access TX connected to the wordline
return self.gate_c(2*parameter["6T_access_size"])
def get_intrinsic_capacitance(self):
"""Get the drain capacitances of the TXs in the gate."""
stack = 1
mult = 1
# FIXME: Need to define TX sizes of bitcell storage node. Using
# min_width as a temp value
# Add the inverter drain Cap and the bitline TX drain Cap
nmos_drain_c = self.drain_c_(drc["minwidth_tx"]*mult,
stack,
mult)
pmos_drain_c = self.drain_c_(drc["minwidth_tx"]*mult,
stack,
mult)
bl_nmos_drain_c = self.drain_c_(parameter["6T_access_size"],
stack,
mult)
return nmos_drain_c + pmos_drain_c + bl_nmos_drain_c
def module_wire_c(self):
"""Capacitance of bitline"""
# FIXME: entire bitline cap is calculated here because of the current
# graph implementation so array dims are all re-calculated here. May
# be incorrect if dim calculations change
cells_in_col = OPTS.num_words/OPTS.words_per_row
return cells_in_col*self.height*spice["wire_c_per_um"]
@ -247,15 +247,15 @@ class bitcell_base(design):
"""Resistance of bitline"""
# FIXME: entire bitline r is calculated here because of the current
# graph implementation so array dims are all re-calculated. May
# be incorrect if dim calculations change
cells_in_col = OPTS.num_words/OPTS.words_per_row
return cells_in_col*self.height*spice["wire_r_per_um"]
def cacti_rc_delay(self, inputramptime, tf, vs1, vs2, rise, extra_param_dict):
""" Special RC delay function used by CACTI for bitline delay
"""
import math
vdd = extra_param_dict['vdd']
m = vdd / inputramptime #v_wl = vdd for OpenRAM
# vdd == V_b_pre in OpenRAM. Bitline swing is assumed 10% of vdd
tstep = tf * math.log(vdd/(vdd - 0.1*vdd))
@ -264,4 +264,4 @@ class bitcell_base(design):
else:
delay = math.sqrt(2*tstep*(vdd-spice["nom_threshold"])/m)
return delay
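A numeric sanity check of the ramp-input branch shown above (the else branch), with assumed values for vdd, tf, inputramptime, and the threshold; these numbers are illustrative, not from any tech file:

import math
vdd, tf, inputramptime, vth = 1.8, 50e-12, 100e-12, 0.4  # assumed values
m = vdd / inputramptime                          # input ramp slope, v_wl = vdd
tstep = tf * math.log(vdd / (vdd - 0.1 * vdd))   # time to 10% bitline swing
delay = math.sqrt(2 * tstep * (vdd - vth) / m)   # ramp-input branch
print("{:.3e} s".format(delay))                  # ~2.9e-11 s for these values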


@ -128,7 +128,7 @@ class bitcell_base_array(design):
if len(self.all_ports) > 1:
temp.extend(self.get_rbl_wordline_names(1))
return temp
def add_bitline_pins(self):
bitline_names = self.cell.get_all_bitline_names()
for col in range(self.column_size):
@ -165,7 +165,7 @@ class bitcell_base_array(design):
""" Add the layout pins """
self.add_bitline_pins()
self.add_wl_pins()
def _adjust_x_offset(self, xoffset, col, col_offset):
tempx = xoffset
dir_y = False


@ -12,7 +12,7 @@ from .bitcell_base import bitcell_base
class col_cap_bitcell_1port(bitcell_base):
"""
Column end cap cell.
"""
def __init__(self, name="col_cap_bitcell_1port"):


@ -12,7 +12,7 @@ from .bitcell_base import bitcell_base
class col_cap_bitcell_2port(bitcell_base):
"""
Column end cap cell.
"""
def __init__(self, name="col_cap_bitcell_2port"):


@ -175,7 +175,7 @@ class column_mux_array(design):
# Add the column x offset to find the right select bit
gate_offset = self.mux_inst[col].get_pin("sel").bc()
# use the y offset from the sel pin and the x offset from the gate
offset = vector(gate_offset.x,
self.get_pin("sel_{}".format(sel_index)).cy())


@ -757,7 +757,7 @@ class control_logic(design):
else:
via_height=None
via_width=0
min_y = min([x.y for x in vdd_pin_locs])
max_y = max([x.y for x in vdd_pin_locs])
bot_pos = vector(max_row_x_loc, min_y - 0.5 * via_height)


@ -597,7 +597,7 @@ class hierarchical_decoder(design):
for inst in all_insts:
self.copy_layout_pin(inst, "vdd")
self.copy_layout_pin(inst, "gnd")
self.route_vertical_pins("vdd", self.and_inst, xside="rx",)
self.route_vertical_pins("gnd", self.and_inst, xside="lx",)

@@ -172,7 +172,7 @@ class local_bitcell_array(bitcell_base_array):
if len(self.all_ports) > 1:
wl_offset = vector(self.bitcell_array_inst.rx() + self.wl_array.width + driver_to_array_spacing,
self.bitcell_array.get_replica_bottom() + self.wl_array.height + self.cell.height)
self.wl_insts[1].place(wl_offset,
mirror="XY")
self.height = self.bitcell_array.height

@@ -80,20 +80,20 @@ class nand2_dec(design):
is_nchannel = True
stack = 2
is_cell = False
return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)
def get_input_capacitance(self):
"""Input cap of input, passes width of gates to gate cap function"""
return self.gate_c(self.nmos_width+self.pmos_width)
def get_intrinsic_capacitance(self):
"""Get the drain capacitances of the TXs in the gate."""
nmos_stack = 2
mult = 1
nmos_drain_c = self.drain_c_(self.nmos_width*mult,
nmos_stack,
mult)
pmos_drain_c = self.drain_c_(self.pmos_width*mult,
1,
mult)
return nmos_drain_c + pmos_drain_c
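These three hooks (on-resistance, input capacitance, intrinsic drain capacitance) are exactly the ingredients of a first-order RC gate delay. A rough sketch of how a caller might combine them; the 0.69 constant and the numbers are illustrative, not the characterizer's actual formula:

    def rc_gate_delay(r_on, c_intrinsic, c_load):
        # 0.69 * R * C is the usual step-response constant for the
        # 50% crossing of an RC charge/discharge.
        return 0.69 * r_on * (c_intrinsic + c_load)

    # Made-up example values (ohms and farads):
    print(rc_gate_delay(r_on=5e3, c_intrinsic=2e-15, c_load=10e-15))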

@@ -80,20 +80,20 @@ class nand3_dec(design):
is_nchannel = True
stack = 3
is_cell = False
return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)
def get_input_capacitance(self):
"""Input cap of input, passes width of gates to gate cap function"""
return self.gate_c(self.nmos_width+self.pmos_width)
def get_intrinsic_capacitance(self):
"""Get the drain capacitances of the TXs in the gate."""
nmos_stack = 3
mult = 1
nmos_drain_c = self.drain_c_(self.nmos_width*mult,
nmos_stack,
mult)
pmos_drain_c = self.drain_c_(self.pmos_width*mult,
1,
mult)
return nmos_drain_c + pmos_drain_c

@@ -80,20 +80,20 @@ class nand4_dec(design):
is_nchannel = True
stack = 4
is_cell = False
return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)
def get_input_capacitance(self):
"""Input cap of input, passes width of gates to gate cap function"""
return self.gate_c(self.nmos_width+self.pmos_width)
def get_intrinsic_capacitance(self):
"""Get the drain capacitances of the TXs in the gate."""
nmos_stack = 4
mult = 1
nmos_drain_c = self.drain_c_(self.nmos_width*mult,
nmos_stack,
mult)
pmos_drain_c = self.drain_c_(self.pmos_width*mult,
1,
mult)
return nmos_drain_c + pmos_drain_c

@@ -317,7 +317,7 @@ class pgate(design):
contact_xoffset = nmos_pos.x + nmos.active_width \
+ self.active_space
# Allow an nimplant below it under the rail
contact_yoffset = max(0.5 * self.implant_width + self.implant_enclose_active,
self.get_tx_insts("nmos")[0].by())
contact_offset = vector(contact_xoffset, contact_yoffset)

@@ -368,19 +368,19 @@ class pnand4(pgate):
is_nchannel = True
stack = 4
is_cell = False
return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)
def get_input_capacitance(self):
"""Input cap of input, passes width of gates to gate cap function"""
return self.gate_c(self.nmos_width+self.pmos_width)
def get_intrinsic_capacitance(self):
"""Get the drain capacitances of the TXs in the gate."""
nmos_stack = 4
nmos_drain_c = self.drain_c_(self.nmos_width*self.tx_mults,
nmos_stack,
self.tx_mults)
pmos_drain_c = self.drain_c_(self.pmos_width*self.tx_mults,
1,
self.tx_mults)
return nmos_drain_c + pmos_drain_c

@@ -88,7 +88,7 @@ class port_address(design):
self.copy_layout_pin(self.row_decoder_inst, "vdd")
self.copy_layout_pin(self.row_decoder_inst, "gnd")
# Also connect the B input of the RBL and_dec to vdd
if OPTS.local_array_size == 0:
rbl_b_pin = self.rbl_driver_inst.get_pin("B")

@@ -51,5 +51,5 @@ class replica_bitcell_1port(bitcell_base):
def is_non_inverting(self):
"""Return input to output polarity for module"""
return False

@@ -52,5 +52,5 @@ class replica_bitcell_2port(bitcell_base):
def is_non_inverting(self):
"""Return input to output polarity for module"""
return False

@@ -12,7 +12,7 @@ from .bitcell_base import bitcell_base
class row_cap_bitcell_1port(bitcell_base):
"""
Row end cap cell.
"""
def __init__(self, name="row_cap_bitcell_1port"):

@@ -12,7 +12,7 @@ from .bitcell_base import bitcell_base
class row_cap_bitcell_2port(bitcell_base):
"""
Row end cap cell.
"""
def __init__(self, name="row_cap_bitcell_2port"):

@@ -73,48 +73,48 @@ class sense_amp(design):
def build_graph(self, graph, inst_name, port_nets):
"""Adds edges based on inputs/outputs. Overrides base class function."""
self.add_graph_edges(graph, port_nets)
def is_non_inverting(self):
"""Return input to output polarity for module"""
#FIXME: This only applied to bl/br -> dout and not s_en->dout
return True
def get_on_resistance(self):
"""On resistance of pinv, defined by single nmos"""
is_nchannel = True
stack = 1
is_cell = False
return self.tr_r_on(parameter["sa_inv_nmos_size"], is_nchannel, stack, is_cell)
def get_input_capacitance(self):
"""Input cap of input, passes width of gates to gate cap function"""
return self.gate_c(parameter["sa_inv_nmos_size"])
def get_intrinsic_capacitance(self):
"""Get the drain capacitances of the TXs in the gate."""
stack = 1
mult = 1
# Add the inverter drain Cap and the bitline TX drain Cap
nmos_drain_c = self.drain_c_(parameter["sa_inv_nmos_size"]*mult,
stack,
mult)
pmos_drain_c = self.drain_c_(parameter["sa_inv_pmos_size"]*mult,
stack,
mult)
bitline_pmos_size = 8
bl_pmos_drain_c = self.drain_c_(drc("minwidth_tx")*bitline_pmos_size,
stack,
mult)
return nmos_drain_c + pmos_drain_c + bl_pmos_drain_c
def cacti_rc_delay(self, inputramptime, tf, vs1, vs2, rise, extra_param_dict):
""" Special RC delay function used by CACTI for sense amp delay
"""
import math
c_senseamp = extra_param_dict['load']
vdd = extra_param_dict['vdd']
tau = c_senseamp/spice["sa_transconductance"]
return tau*math.log(vdd/(0.1*vdd))
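In other words, the sense amp is modeled as a single-pole integrator: tau is the load capacitance over the transconductance, and resolving a swing of 10% of vdd takes tau * ln(vdd / 0.1vdd) = tau * ln(10). A tiny standalone version with assumed numbers:

    import math

    c_senseamp = 5e-15          # load capacitance (assumed)
    transconductance = 1e-4     # stand-in for spice["sa_transconductance"]
    vdd = 1.8

    tau = c_senseamp / transconductance
    delay = tau * math.log(vdd / (0.1 * vdd))   # equals tau * math.log(10)
    print(delay)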

@@ -78,7 +78,7 @@ class sram():
def save(self):
""" Save all the output files while reporting time to do it as well. """
# Import this at the last minute so that the proper tech file
# is loaded and the right tools are selected
import verify

@@ -336,7 +336,7 @@ class sram_1bank(sram_base):
self.add_dnwell(inflate=2.5)
# Route the supplies together and/or to the ring/stripes.
# This is done with the original bbox since the escape routes need to
# be outside of the ring for OpenLane
rt = router_tech(self.supply_stack, 1)
init_bbox = self.get_bbox(side="ring",

@@ -124,7 +124,7 @@ class wordline_driver_array(design):
en_pin = self.add_layout_pin_segment_center(text="en",
layer="m2",
start=en_bottom_pos,
end=en_top_pos)
for row in range(self.rows):
and_inst = self.wld_inst[row]

@@ -7,7 +7,7 @@ if len(sys.argv) < 2:
print("Usage: {0} file.gds".format(sys.argv[0]))
sys.exit(1)
gds_file = sys.argv[1]
arrayCellLayout = gdsMill.VlsiLayout()
reader = gdsMill.Gds2reader(arrayCellLayout,debugToTerminal = 1)
reader.loadFromFile(gds_file)

@@ -19,7 +19,7 @@ struct = layout.structures[layout.rootStructureName]
for text in struct.texts:
print(text.textString)
text.magFactor=""
writer = gdsMill.Gds2writer(layout)
writer.writeToFile(out_gds_file)

@@ -109,7 +109,7 @@ class grid:
for k in self.map:
self.map[k].target=False
self.target = set()
def set_target(self, n):
if not isinstance(n, vector3d):
for item in n:
@@ -119,7 +119,7 @@ class grid:
self.map[n].target=True
self.map[n].blocked=False
self.target.add(n)
def add_source(self, track_list):
debug.info(3, "Adding source list={0}".format(str(track_list)))
for n in track_list:
@@ -158,7 +158,7 @@ class grid:
for y in range(self.ll.y - ring_offset - margin - ring_width + 1, self.ur.y + ring_offset + margin + ring_width, 1):
for layer in layers:
perimeter_list.append(vector3d(x, y, layer))
if side=="all" or "right" in side:
for x in range(self.ur.x + offset, self.ur.x + width + offset, 1):
for y in range(self.ll.y - ring_offset - margin - ring_width + 1, self.ur.y + ring_offset + margin + ring_width, 1):
@@ -181,14 +181,14 @@ class grid:
self.add_map(perimeter_list)
return perimeter_list
def add_perimeter_target(self, side="all", layers=[0, 1]):
debug.info(3, "Adding perimeter target")
perimeter_list = self.get_perimeter_list(side, layers)
self.set_target(perimeter_list)
def is_target(self, point):
"""
Point is in the target set, so we are done.
@@ -213,3 +213,8 @@ class grid:
"""
path.set_path(False)
path.set_blocked(True)

@@ -20,7 +20,7 @@ class grid_cell:
def reset(self):
"""
Reset the dynamic info about routing.
"""
self.min_cost=-1
self.min_path=None
@@ -49,3 +49,4 @@ class grid_cell:
type_string += "P"
return type_string

@@ -70,7 +70,7 @@ class pin_group:
def add_pin(self, pin):
self.pins.add(pin)
self.remove_redundant_pins()
def __repr__(self):
""" override repr function output """
return str(self)
@@ -641,13 +641,13 @@ class pin_group:
# way than blockages.
blockages = sufficient | insufficient | blockage_in_tracks
self.blockages.update(blockages)
# If we have a blockage, we must remove the grids
# Remember, this excludes the pin blockages already
blocked_grids = self.router.get_blocked_grids()
pin_set.difference_update(blocked_grids)
partial_set.difference_update(blocked_grids)
# At least one of the groups must have some valid tracks
if (len(pin_set) == 0 and len(partial_set) == 0):
# debug.warning("Pin is very close to metal blockage.\nAttempting to expand blocked pin {}".format(self.pins))

@@ -34,7 +34,7 @@ class router(router_tech):
route on top of this. The blockages from the gds/module will be
considered.
"""
router_tech.__init__(self, layers, route_track_width)
self.cell = design
@@ -91,7 +91,7 @@ class router(router_tech):
def get_bbox(self):
return self.bbox
def create_routing_grid(self, router_type=None):
"""
Create (or recreate) a sprase routing grid with A* expansion functions.
@@ -178,7 +178,7 @@ class router(router_tech):
self.reader.loadFromFile(self.gds_filename)
self.top_name = self.layout.rootStructureName
# print_time("GDS read",datetime.now(), start_time)
# This finds the pin shapes and sorts them into "groups" that
# are connected. This must come before the blockages, so we
# can not count the pins themselves
@@ -374,7 +374,7 @@ class router(router_tech):
def set_supply_rail_blocked(self, value):
# This is just a virtual function
pass
def prepare_blockages(self, src=None, dest=None):
"""
Reset and add all of the blockages in the design.
@@ -384,7 +384,7 @@ class router(router_tech):
# Start fresh. Not the best for run-time, but simpler.
self.clear_all_blockages()
# This adds the initial blockges of the design
# which includes all blockages due to non-pin shapes
# print("BLOCKING:", self.blocked_grids)
@@ -457,7 +457,7 @@ class router(router_tech):
"""
blockage_grids = {y for x in self.pin_groups[pin_name] for y in x.blockages}
self.set_blockages(blockage_grids, False)
def clear_all_blockages(self):
"""
Clear all blockages on the grid.
@@ -498,7 +498,7 @@ class router(router_tech):
self.blocked_grids.update(blockage_list)
def get_blocked_grids(self):
"""
"""
Return the blocked grids with their flag set
"""
#return set([x for x in self.blocked_grids if self.rg.is_blocked(x)])
@@ -518,7 +518,7 @@ class router(router_tech):
new_shape = pin_layout("blockage{}".format(len(self.blockages)),
rect,
lpp)
# If there is a rectangle that is the same in the pins,
# it isn't a blockage!
if new_shape not in self.all_pins and not self.pin_contains(new_shape):
@@ -529,7 +529,7 @@ class router(router_tech):
if pin.contains(shape):
return True
return False
def convert_point_to_units(self, p):
"""
Convert a path set of tracks to center line path.
@@ -543,7 +543,7 @@ class router(router_tech):
Convert a wave to a set of center points
"""
return [self.convert_point_to_units(i) for i in wave]
def convert_shape_to_tracks(self, shape):
"""
Convert a rectangular shape into track units.
@@ -767,7 +767,7 @@ class router(router_tech):
"""
for t in tracks:
debug.check(t[2] == tracks[0][2], "Different layers used.")
# For each shape, convert it to a pin
pins = [self.convert_track_to_pin(t) for t in tracks]
# Now find the bounding box
@@ -777,10 +777,10 @@ class router(router_tech):
maxy = max([p.uy() for p in pins])
ll = vector(minx, miny)
ur = vector(maxx, maxy)
p = pin_layout("", [ll, ur], self.get_layer(tracks[0][2]))
return p
def convert_track_to_shape_pin(self, track):
"""
Convert a grid point into a rectangle shape
@@ -977,7 +977,7 @@ class router(router_tech):
self.pin_groups[name].append(pg)
self.new_pins[name] = pg.pins
def add_ring_supply_pin(self, name, width=3, space=3):
"""
Adds a ring supply pin that goes outside the given bbox.
@@ -1011,7 +1011,7 @@ class router(router_tech):
layers=[0]))
horizontal_layer_grids = left_grids | right_grids
# Must move to the same layer to find layer 1 corner grids
vertical_layer_grids = set()
for x in top_grids | bottom_grids:
@@ -1027,7 +1027,7 @@ class router(router_tech):
pg.grids = (left_grids | right_grids | top_grids | bottom_grids)
pg.enclosures = pg.compute_enclosures()
pg.pins = set(pg.enclosures)
self.cell.pin_map[name].update(pg.pins)
self.pin_groups[name].append(pg)
self.new_pins[name] = pg.pins
@@ -1043,7 +1043,7 @@ class router(router_tech):
def get_new_pins(self, name):
return self.new_pins[name]
def add_perimeter_target(self, side="all"):
"""
This will mark all the cells on the perimeter of the original layout as a target.
@@ -1206,7 +1206,7 @@ class router(router_tech):
closest_track_pin, closest_part_pin = self.find_closest_pin(track_pins, offgrid_pin_parts)
debug.check(closest_track_pin and closest_part_pin, "Found no closest pins.")
# Find the bbox of the on-grid track and the off-grid pin part
closest_track_pin.bbox([closest_part_pin])
@@ -1313,10 +1313,10 @@ class router(router_tech):
self.paths.append(grid_utils.flatten_set(path))
self.add_route(path)
self.create_route_connector(path,
self.source_name,
self.source_components)
self.create_route_connector(path,
self.target_name,
self.target_components)
self.path_blockages.append(self.paths[-1])
@@ -1404,7 +1404,7 @@ class router(router_tech):
self.cell.add_label(text="{0},{1}".format(g[0], g[1]),
layer="text",
offset=shape[0])
def del_router_info(self):
"""
Erase all of the comments on the current level.
@@ -1489,7 +1489,7 @@ class router(router_tech):
# Else if we came from a different layer, we can only add
# a signle grid
return self.convert_track_to_pin(v)
return None
def get_ll_pin(self, pin_name):
@@ -1503,9 +1503,9 @@ class router(router_tech):
else:
if pin.lx() <= keep_pin.lx() and pin.by() <= keep_pin.by():
keep_pin = pin
return keep_pin
def check_all_routed(self, pin_name):
"""
Check that all pin groups are routed.
@@ -1513,8 +1513,8 @@ class router(router_tech):
for pg in self.pin_groups[pin_name]:
if not pg.is_routed():
return False
# FIXME: This should be replaced with vector.snap_to_grid at some point
def snap_to_grid(offset):
"""

@@ -37,7 +37,7 @@ class signal_escape_router(router):
y_dist = min(loc.y - self.ll.y, self.ur.y - loc.y)
return min(x_dist, y_dist)
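perimeter_dist measures how far a point sits from the nearest edge of the die bounding box; pins closest to the edge are routed first so they are not walled in by later routes. A standalone sketch of the same measure, with plain tuples standing in for the vector class:

    def perimeter_dist(loc, ll, ur):
        # Distance from a point to the nearest edge of the (ll, ur) box.
        x_dist = min(loc[0] - ll[0], ur[0] - loc[0])
        y_dist = min(loc[1] - ll[1], ur[1] - loc[1])
        return min(x_dist, y_dist)

    # Pins nearest the perimeter sort first:
    pins = [("a", (1, 5)), ("b", (4, 4)), ("c", (9, 2))]
    ordered = sorted(pins, key=lambda p: perimeter_dist(p[1], (0, 0), (10, 10)))
    print([name for name, _ in ordered])   # ['a', 'c', 'b']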
def escape_route(self, pin_names):
"""
Takes a list of tuples (name, side) and routes them. After routing,
@@ -52,7 +52,7 @@ class signal_escape_router(router):
# Order the routes by closest to the perimeter first
# This prevents some pins near the perimeter from being blocked by other pins
ordered_pin_names = sorted(pin_names, key=lambda x: self.perimeter_dist(x))
# Route the supply pins to the supply rails
# Route vdd first since we want it to be shorter
start_time = datetime.now()
@@ -60,18 +60,18 @@ class signal_escape_router(router):
self.route_signal(pin_name)
# if pin_name == "dout0[1]":
# self.write_debug_gds("postroute.gds", True)
print_time("Maze routing pins",datetime.now(), start_time, 3)
#self.write_debug_gds("final_escape_router.gds",False)
return True
def route_signal(self, pin_name, side="all"):
for detour_scale in [5 * pow(2, x) for x in range(5)]:
debug.info(1, "Escape routing {0} with scale {1}".format(pin_name, detour_scale))
# Clear everything in the routing grid.
self.rg.reinit()
@@ -86,11 +86,11 @@ class signal_escape_router(router):
# Marks the grid cells all along the perimeter as a target
self.add_perimeter_target(side)
# if pin_name == "dout0[3]":
# self.write_debug_gds("pre_route.gds", False)
# breakpoint()
# Actually run the A* router
if self.run_router(detour_scale=detour_scale):
new_pin = self.get_perimeter_pin()
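The enclosing loop retries the whole route with detour_scale values of 5, 10, 20, 40 and 80, so a congested pin is allowed to wander progressively further from the shortest path before the router gives up. A stripped-down sketch of that retry pattern, with a stand-in callable replacing the reinit/setup/A* sequence:

    def route_with_escalation(try_route, max_doublings=5):
        # try_route is any callable returning True on success; it stands
        # in for grid reinit, source/target setup, and the A* run.
        for detour_scale in [5 * pow(2, x) for x in range(max_doublings)]:
            if try_route(detour_scale):
                return detour_scale
        return None

    # Toy example: pretend the route only fits once the scale reaches 20.
    print(route_with_escalation(lambda scale: scale >= 20))   # 20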
@@ -100,5 +100,7 @@ class signal_escape_router(router):
# if pin_name == "dout0[3]":
# self.write_debug_gds("pre_route.gds", False)
# breakpoint()
self.write_debug_gds("debug_route.gds", True)

@@ -23,10 +23,10 @@ class supply_grid(signal_grid):
def reinit(self):
""" Reinitialize everything for a new route. """
self.source = set()
self.target = set()
# Reset all the cells in the map
for p in self.map.values():
p.reset()
@@ -77,3 +77,5 @@ class supply_grid(signal_grid):
wave = wave_path.neighbor(direct)
return wave_path

@@ -66,14 +66,14 @@ class supply_grid_router(router):
# Block everything
self.prepare_blockages()
self.clear_blockages(self.gnd_name)
# Determine the rail locations
self.route_supply_rails(self.gnd_name, 0)
# Block everything
self.prepare_blockages()
self.clear_blockages(self.vdd_name)
# Determine the rail locations
self.route_supply_rails(self.vdd_name, 1)
print_time("Routing supply rails", datetime.now(), start_time, 3)
@@ -359,7 +359,7 @@ class supply_grid_router(router):
# easier to debug.
self.prepare_blockages()
self.clear_blockages(self.vdd_name)
# Add the single component of the pin as the source
# which unmarks it as a blockage too
self.add_pin_component_source(pin_name, index)
@@ -392,3 +392,4 @@ class supply_grid_router(router):
debug.info(4, "Blocking supply rail")
for rail_name in self.supply_rail_tracks:
self.rg.set_blocked(self.supply_rail_tracks[rail_name])

@@ -28,7 +28,6 @@ class code_format_test(openram_test):
continue
errors += check_file_format_tab(code)
errors += check_file_format_carriage(code)
errors += check_file_format_whitespace(code)
for code in source_codes:
if re.search("gdsMill", code):
@@ -52,7 +51,7 @@ def setup_files(path):
files = []
for (dir, _, current_files) in os.walk(path):
for f in current_files:
files.append(os.path.join(dir, f))
files.append(os.getenv("OPENRAM_HOME"))
nametest = re.compile("\.py$", re.IGNORECASE)
select_files = list(filter(nametest.search, files))
return select_files
@@ -93,35 +92,13 @@ def check_file_format_carriage(file_name):
if len(key_positions)>10:
line_numbers = key_positions[:10] + [" ..."]
else:
line_numbers = key_positions
debug.info(0, '\nFound ' + str(len(key_positions)) + ' carriage returns in ' +
str(file_name) + ' (lines ' + ",".join(str(x) for x in line_numbers) + ')')
f.close()
return len(key_positions)
def check_file_format_whitespace(file_name):
"""
Check if file contains a line with whitespace at the end
and return the number of these lines.
"""
f = open(file_name, "r")
key_positions = []
for num, line in enumerate(f.readlines()):
if re.match(r".*[ \t]$", line):
key_positions.append(num)
if len(key_positions) > 0:
if len(key_positions) > 10:
line_numbers = key_positions[:10] + [" ..."]
else:
line_numbers = key_positions
debug.info(0, "\nFound " + str(len(key_positions)) + " ending whitespace in " +
str(file_name) + " (lines " + ",".join(str(x) for x in line_numbers) + ")")
f.close()
return len(key_positions)
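For reference, a quick way to run the same trailing-whitespace check over a source tree from a standalone script (a re-implementation for illustration, not part of the test suite):

    import os, re

    def trailing_whitespace_lines(path):
        # Line numbers whose text ends in a space or tab.
        with open(path, "r") as f:
            return [n for n, line in enumerate(f)
                    if re.search(r"[ \t]$", line.rstrip("\n"))]

    for root, _, files in os.walk("."):
        for name in files:
            if name.endswith(".py"):
                p = os.path.join(root, name)
                hits = trailing_whitespace_lines(p)
                if hits:
                    print(p, hits[:10])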
def check_print_output(file_name):
"""Check if any files (except debug.py) call the _print_ function. We should
use the debug output with verbosity instead!"""

@@ -43,7 +43,7 @@ class library_lvs_test(openram_test):
self.assertEqual(drc_errors + lvs_errors, 0)
globals.end_openram()
def setup_files():
gds_dir = OPTS.openram_tech + "/gds_lib"
sp_dir = OPTS.openram_tech + "/lvs_lib"

@@ -32,7 +32,7 @@ class replica_column_test(openram_test):
debug.info(2, "Testing one right replica column for dual port")
a = factory.create(module_type="replica_column", rows=4, rbl=[0, 1], replica_bit=5)
self.local_check(a)
debug.info(2, "Testing two (left, right) replica columns for dual port")
a = factory.create(module_type="replica_column", rows=4, rbl=[1, 1], replica_bit=1)
self.local_check(a)
@@ -40,7 +40,7 @@ class replica_column_test(openram_test):
debug.info(2, "Testing two (left, right) replica columns for dual port")
a = factory.create(module_type="replica_column", rows=4, rbl=[1, 1], replica_bit=6)
self.local_check(a)
globals.end_openram()
# run the test from the command line

@@ -63,7 +63,7 @@ class port_data_spare_cols_test(openram_test):
OPTS.num_r_ports = 1
OPTS.num_w_ports = 1
globals.setup_bitcell()
c.num_words=16
c.words_per_row=1
factory.reset()

@@ -31,7 +31,7 @@ class psingle_bank_test(openram_test):
OPTS.num_w_ports = 0
OPTS.num_r_ports = 0
globals.setup_bitcell()
c = sram_config(word_size=4,
num_words=16)

@@ -15,7 +15,7 @@ from globals import OPTS
from sram_factory import factory
import debug
class psram_1bank_2mux_1rw_1w_test(openram_test):
def runTest(self):

@@ -28,7 +28,7 @@ class psram_1bank_2mux_1rw_1w_wmask_test(openram_test):
OPTS.num_w_ports = 1
OPTS.num_r_ports = 0
globals.setup_bitcell()
c = sram_config(word_size=8,
write_size=4,
num_words=32,

@@ -22,7 +22,7 @@ class sram_1bank_4mux_1rw_1r_test(openram_test):
config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
globals.init_openram(config_file)
from modules import sram_config
OPTS.num_rw_ports = 1
OPTS.num_r_ports = 1
OPTS.num_w_ports = 0

@@ -96,7 +96,7 @@ class timing_sram_test(openram_test):
'slew_hl': [2.039655],
'slew_lh': [2.039655],
'write0_power': [19.31883],
'write1_power': [15.297369999999999]}
else:
self.assertTrue(False) # other techs fail

@@ -58,7 +58,7 @@ class regression_model_test(openram_test):
debug.info(1, "Probe address {0} probe data bit {1}".format(probe_address, probe_data))
corner = (OPTS.process_corners[0], OPTS.supply_voltages[0], OPTS.temperatures[0])
#m = linear_regression(s.s, tempspice, corner)
m = neural_network(s.s, tempspice, corner)
only_test = ['rise_delay']

@@ -24,7 +24,7 @@ class psram_1bank_2mux_func_test(openram_test):
OPTS.analytical_delay = False
OPTS.netlist_only = True
OPTS.trim_netlist = False
OPTS.bitcell = "pbitcell"
OPTS.replica_bitcell="replica_pbitcell"
OPTS.dummy_bitcell="dummy_pbitcell"

@@ -25,7 +25,7 @@ class psram_1bank_4mux_func_test(openram_test):
OPTS.analytical_delay = False
OPTS.netlist_only = True
OPTS.trim_netlist = False
OPTS.bitcell = "pbitcell"
OPTS.replica_bitcell="replica_pbitcell"
OPTS.dummy_bitcell="dummy_pbitcell"

@@ -25,7 +25,7 @@ class psram_1bank_8mux_func_test(openram_test):
OPTS.analytical_delay = False
OPTS.netlist_only = True
OPTS.trim_netlist = False
OPTS.bitcell = "pbitcell"
OPTS.replica_bitcell="replica_pbitcell"
OPTS.dummy_bitcell="dummy_pbitcell"

@@ -24,7 +24,7 @@ class psram_1bank_nomux_func_test(openram_test):
OPTS.analytical_delay = False
OPTS.netlist_only = True
OPTS.trim_netlist = False
OPTS.bitcell = "pbitcell"
OPTS.replica_bitcell="replica_pbitcell"
OPTS.dummy_bitcell="dummy_pbitcell"

@@ -25,7 +25,7 @@ class sram_1bank_2mux_func_test(openram_test):
OPTS.analytical_delay = False
OPTS.netlist_only = True
OPTS.trim_netlist = False
# This is a hack to reload the characterizer __init__ with the spice version
from importlib import reload
import characterizer

@@ -25,7 +25,7 @@ class sram_1bank_2mux_func_test(openram_test):
OPTS.analytical_delay = False
OPTS.netlist_only = True
OPTS.trim_netlist = False
# This is a hack to reload the characterizer __init__ with the spice version
from importlib import reload
import characterizer

@@ -25,7 +25,7 @@ class sram_1bank_4mux_func_test(openram_test):
OPTS.analytical_delay = False
OPTS.netlist_only = True
OPTS.trim_netlist = False
# This is a hack to reload the characterizer __init__ with the spice version
from importlib import reload
import characterizer

@@ -25,7 +25,7 @@ class sram_1bank_8mux_func_test(openram_test):
OPTS.analytical_delay = False
OPTS.netlist_only = True
OPTS.trim_netlist = False
# This is a hack to reload the characterizer __init__ with the spice version
from importlib import reload
import characterizer

@@ -81,8 +81,8 @@ class ngspice_pex_pinv_test(openram_test):
test_sim = self.write_simulation(sim_file, test_module, top_level_name)
test_sim.run_sim("stim.sp")
delay = parse_spice_list(log_file_name, "pinv_delay")
os.chdir(cwd)
return delay
def write_simulation(self, sim_file, cir_file, top_module_name):

@@ -78,7 +78,7 @@ class openram_back_end_test(openram_test):
filename = "{0}{1}".format(out_path, out_file)
debug.info(1, "Checking for file: " + filename)
self.assertEqual(os.path.exists(filename), True)
# Make sure there is any .lib file
import glob
files = glob.glob('{0}/*.lib'.format(out_path))

@@ -55,9 +55,9 @@ def fork_tests(num_threads):
results = []
test_partitions = partition_unit_tests(suite, num_threads)
suite._tests[:] = []
def do_fork(suite):
for test_partition in test_partitions:
test_suite = unittest.TestSuite(test_partition)
test_partition[:] = []
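partition_unit_tests is not shown in this hunk; the idea is simply to split the collected tests into num_threads buckets of similar size. One plausible round-robin sketch (an assumption, not the actual OpenRAM helper):

    def partition_round_robin(tests, num_threads):
        # Deal the tests out like cards so each forked runner
        # gets a similar share of the load.
        partitions = [[] for _ in range(num_threads)]
        for i, test in enumerate(tests):
            partitions[i % num_threads].append(test)
        return partitions

    print(partition_round_robin(list("abcdefg"), 3))
    # [['a', 'd', 'g'], ['b', 'e'], ['c', 'f']]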
@@ -103,9 +103,9 @@ if num_threads == 1:
final_suite = suite
else:
final_suite = ConcurrentTestSuite(suite, fork_tests(num_threads))
test_result = test_runner.run(final_suite)
# import verify
# verify.print_drc_stats()
# verify.print_lvs_stats()

@@ -73,8 +73,8 @@ def write_drc_script(cell_name, gds_name, extract, final_verification, output_pa
f.write("#!/bin/sh\n")
f.write("assura {0} 2> {1} 1> {2}\n".format(drc_runset, drc_log_file, drc_log_file))
f.close()
def run_drc(name, gds_name, final_verification=False):
"""Run DRC check on a given top-level name which is
implemented in gds_name."""
@@ -85,7 +85,7 @@ def run_drc(name, gds_name, final_verification=False):
write_drc_script(name, gds_name, True, final_verification, OPTS.openram_temp)
(outfile, errfile, resultsfile) = run_script(name, "drc")
# count and report errors
errors = 0
try:
@@ -168,7 +168,7 @@ def write_lvs_script(cell_name, gds_name, sp_name, final_verification, output_pa
f.write("assura {0} 2> {1} 1> {2}\n".format(lvs_runset, lvs_log_file, lvs_log_file))
f.close()
def run_lvs(name, gds_name, sp_name, final_verification=False):
"""Run LVS check on a given top-level name which is
implemented in gds_name and sp_name. """
@@ -179,7 +179,7 @@ def run_lvs(name, gds_name, sp_name, final_verification=False):
write_lvs_script(name, gds_name, sp_name, final_verification, OPTS.openram_temp)
(outfile, errfile, resultsfile) = run_script(name, "drc")
errors = 0
try:
f = open(OPTS.openram_temp + name + ".csm", "r")
@@ -205,14 +205,14 @@ def run_pex(name, gds_name, sp_name, output=None, final_verification=False):
global num_pex_runs
num_pex_runs += 1
def print_drc_stats():
debug.info(1, "DRC runs: {0}".format(num_drc_runs))
def print_lvs_stats():
debug.info(1, "LVS runs: {0}".format(num_lvs_runs))
def print_pex_stats():
debug.info(1, "PEX runs: {0}".format(num_pex_runs))

@@ -35,7 +35,7 @@ def write_drc_script(cell_name, gds_name, extract, final_verification=False, out
if not output_path:
output_path = OPTS.openram_temp
from tech import drc
drc_rules = drc["drc_rules"]
@@ -125,7 +125,7 @@ def write_lvs_script(cell_name, gds_name, sp_name, final_verification=False, out
f = open(run_file, "w")
f.write("#!/bin/sh\n")
cmd = "{0} -gui -lvs lvs_runset -batch".format(OPTS.lvs_exe[1])
f.write(cmd)
f.write("\n")
f.close()
@@ -139,7 +139,7 @@ def write_pex_script(cell_name, extract, output, final_verification=False, outpu
if not output_path:
output_path = OPTS.openram_temp
if not output:
output = cell_name + ".pex.sp"
@@ -443,14 +443,14 @@ def correct_port(name, output_file_name, ref_file_name):
output_file.write(part2)
output_file.close()
def print_drc_stats():
debug.info(1, "DRC runs: {0}".format(num_drc_runs))
def print_lvs_stats():
debug.info(1, "LVS runs: {0}".format(num_lvs_runs))
def print_pex_stats():
debug.info(1, "PEX runs: {0}".format(num_pex_runs))

@@ -267,7 +267,7 @@ def write_lvs_script(cell_name, gds_name, sp_name, final_verification=False, out
if os.path.exists(full_setup_file):
# Copy setup.tcl file into temp dir
shutil.copy(full_setup_file, output_path)
setup_file_object = open(output_path + "/setup.tcl", 'a')
setup_file_object.write("# Increase the column sizes for ease of reading long names\n")
setup_file_object.write("::netgen::format 120\n")