mirror of https://github.com/VLSIDA/OpenRAM.git
Merge branch 'dev' into laptop_checkpoint
This commit is contained in: commit e976c4043b

@@ -0,0 +1,57 @@
name: ci

on: [push]

jobs:
  scn4me_subm:
    runs-on: self-hosted
    steps:
      - name: Checkout code
        uses: actions/checkout@v1
      - name: SCMOS test
        run: |
          . /home/github-runner/setup-paths.sh
          export OPENRAM_HOME="`pwd`/compiler"
          export OPENRAM_TECH="`pwd`/technology:/software/PDKs/skywater-tech"
          export OPENRAM_TMP="${{ github.workspace }}/scn4me_subm_temp"
          #python3-coverage run -p $OPENRAM_HOME/tests/regress.py -j 12 -t scn4m_subm
          $OPENRAM_HOME/tests/regress.py -j 12 -t scn4m_subm
      - name: Archive
        if: ${{ failure() }}
        uses: actions/upload-artifact@v2
        with:
          name: scn4me_subm Archives
          path: ${{ github.workspace }}/scn4me_subm_temp/*/*

  freepdk45:
    runs-on: self-hosted
    steps:
      - name: Checkout code
        uses: actions/checkout@v1
      - name: FreePDK45 test
        run: |
          . /home/github-runner/setup-paths.sh
          export OPENRAM_HOME="`pwd`/compiler"
          export OPENRAM_TECH="`pwd`/technology:/software/PDKs/skywater-tech"
          export OPENRAM_TMP="${{ github.workspace }}/freepdk45_temp"
          #python3-coverage run -p $OPENRAM_HOME/tests/regress.py -j 12 -t freepdk45
          $OPENRAM_HOME/tests/regress.py -j 12 -t freepdk45
      - name: Archive
        if: ${{ failure() }}
        uses: actions/upload-artifact@v2
        with:
          name: FreePDK45 Archives
          path: ${{ github.workspace }}/freepdk45_temp/*/*

# coverage_stats:
#   if: ${{ always() }}
#   needs: [scn4me_subm, freepdk45]
#   runs-on: self-hosted
#   steps:
#     - name: Coverage stats
#       run: |
#         python3-coverage combine
#         python3-coverage report
#         python3-coverage html -d ${{ github.workspace }}/coverage_html
#     - name: Archive coverage
#       uses: actions/upload-artifact@v2
#       with:
#         name: code-coverage-report
#         path: ${{ github.workspace }}/coverage_html/
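Each job above amounts to exporting three OpenRAM environment variables and calling the regression driver. A minimal local sketch of the same step in Python (the repository and PDK paths are assumptions, not requirements of the workflow):

import os
import subprocess

# Assumed locations: the repository checkout and the Skywater PDK path used on the runner.
repo = os.path.abspath(".")
env = dict(os.environ)
env["OPENRAM_HOME"] = os.path.join(repo, "compiler")
env["OPENRAM_TECH"] = os.path.join(repo, "technology") + ":/software/PDKs/skywater-tech"
env["OPENRAM_TMP"] = os.path.join(repo, "scn4m_subm_temp")

# Same invocation as the "SCMOS test" step: run the scn4m_subm regression with 12 jobs.
subprocess.run(["python3", os.path.join(env["OPENRAM_HOME"], "tests", "regress.py"),
                "-j", "12", "-t", "scn4m_subm"],
               env=env, check=True)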

@@ -10,3 +10,4 @@
**/model_data
outputs
technology/freepdk45/ncsu_basekit
.idea
@@ -1,48 +0,0 @@
before_script:
  - . /home/gitlab-runner/setup-paths.sh
  - export OPENRAM_HOME="`pwd`/compiler"
  - export OPENRAM_TECH="`pwd`/technology:/home/PDKs/skywater-tech"

stages:
  - test
  - coverage

freepdk45:
  stage: test
  script:
    - coverage run -p $OPENRAM_HOME/tests/regress.py -j 6 -t freepdk45
  artifacts:
    paths:
      - .coverage.*
    expire_in: 1 week

scn4m_subm:
  stage: test
  script:
    - coverage run -p $OPENRAM_HOME/tests/regress.py -j 6 -t scn4m_subm
  artifacts:
    paths:
      - .coverage.*
    expire_in: 1 week

# s8:
#   stage: test
#   script:
#     - coverage run -p $OPENRAM_HOME/tests/regress.py -t s8
#   artifacts:
#     paths:
#       - .coverage.*
#     expire_in: 1 week

coverage:
  stage: coverage
  script:
    - coverage combine
    - coverage report
    - coverage html -d coverage_html
  artifacts:
    paths:
      - coverage_html
    expire_in: 1 week
  coverage: '/TOTAL.+ ([0-9]{1,3}%)/'
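The coverage: key of the removed GitLab job scrapes the overall percentage from the `coverage report` output with the regex shown above. A small illustration of what that pattern captures (the report line is a made-up example):

import re

# Hypothetical final line of a `coverage report` run.
report_line = "TOTAL                          12345   2345    81%"

# Same pattern as the GitLab `coverage:` setting, minus the surrounding slashes.
match = re.search(r"TOTAL.+ ([0-9]{1,3}%)", report_line)
if match:
    print(match.group(1))  # prints "81%"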

@@ -4,13 +4,9 @@
[](./LICENSE)

Master:
[](https://github.com/VLSIDA/OpenRAM/commits/master)
[](https://github.com/VLSIDA/OpenRAM/archive/master.zip)

Dev:
[](https://github.com/VLSIDA/OpenRAM/commits/dev)
[](https://github.com/VLSIDA/OpenRAM/archive/dev.zip)

An open-source static random access memory (SRAM) compiler.
@@ -359,7 +359,9 @@ class instance(geometry):
        for offset in range(len(normalized_br_offsets)):
            for port in range(len(br_names)):
                cell_br_meta.append([br_names[offset], row, col, port])

        if normalized_storage_nets == []:
            debug.error("normalized storage nets should not be empty! Check if the GDS labels Q and Q_bar are correctly set on M1 of the cell",1)
        Q_x = normalized_storage_nets[0][0]
        Q_y = normalized_storage_nets[0][1]

@@ -396,9 +396,14 @@ class pin_layout:
        # Add the text in the middle of the pin.
        # This fixes some pin label offsetting when GDS gets
        # imported into Magic.
        try:
            zoom = GDS["zoom"]
        except KeyError:
            zoom = None
        newLayout.addText(text=self.name,
                          layerNumber=layer_num,
                          purposeNumber=label_purpose,
                          magnification=zoom,
                          offsetInMicrons=self.center())

    def compute_overlap(self, other):

@@ -7,8 +7,7 @@
#
import os
import debug
import globals
from globals import OPTS,find_exe,get_tool
from globals import OPTS, find_exe, get_tool
from .lib import *
from .delay import *
from .elmore import *

@@ -21,7 +20,7 @@ from .model_check import *
from .analytical_util import *
from .regression_model import *

debug.info(1,"Initializing characterizer...")
debug.info(1, "Initializing characterizer...")
OPTS.spice_exe = ""

if not OPTS.analytical_delay:

@@ -30,17 +29,17 @@ if not OPTS.analytical_delay:
    if OPTS.spice_name != "":
        OPTS.spice_exe=find_exe(OPTS.spice_name)
        if OPTS.spice_exe=="" or OPTS.spice_exe==None:
            debug.error("{0} not found. Unable to perform characterization.".format(OPTS.spice_name),1)
            debug.error("{0} not found. Unable to perform characterization.".format(OPTS.spice_name), 1)
    else:
        (OPTS.spice_name,OPTS.spice_exe) = get_tool("spice",["hspice", "ngspice", "ngspice.exe", "xa"])
        (OPTS.spice_name, OPTS.spice_exe) = get_tool("spice", ["ngspice", "ngspice.exe", "hspice", "xa"])

    # set the input dir for spice files if using ngspice
    if OPTS.spice_name == "ngspice":
        os.environ["NGSPICE_INPUT_DIR"] = "{0}".format(OPTS.openram_temp)

    if OPTS.spice_exe == "":
        debug.error("No recognizable spice version found. Unable to perform characterization.",1)
        debug.error("No recognizable spice version found. Unable to perform characterization.", 1)
else:
    debug.info(1,"Analytical model enabled.")
    debug.info(1, "Analytical model enabled.")

@@ -5,6 +5,7 @@
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import os
import re
import debug
from globals import OPTS

@@ -20,6 +21,8 @@ def parse_spice_list(filename, key):
    if OPTS.spice_name == "xa" :
        # customsim has a different output file name
        full_filename="{0}xa.meas".format(OPTS.openram_temp)
    elif OPTS.spice_name == "spectre":
        full_filename = os.path.join(OPTS.openram_temp, "delay_stim.measure")
    else:
        # ngspice/hspice using a .lis file
        full_filename="{0}{1}.lis".format(OPTS.openram_temp, filename)
@@ -170,10 +170,10 @@ class delay(simulation):
                                                      meas.targ_name_no_port))
            self.dout_volt_meas[-1].meta_str = meas.meta_str

        if not OPTS.use_pex:
            self.sen_meas = delay_measure("delay_sen", self.clk_frmt, self.sen_name + "{}", "FALL", "RISE", measure_scale=1e9)
        else:
        if OPTS.use_pex and OPTS.pex_exe[0] != 'calibre':
            self.sen_meas = delay_measure("delay_sen", self.clk_frmt, self.sen_name, "FALL", "RISE", measure_scale=1e9)
        else:
            self.sen_meas = delay_measure("delay_sen", self.clk_frmt, self.sen_name + "{}", "FALL", "RISE", measure_scale=1e9)

        self.sen_meas.meta_str = sram_op.READ_ZERO
        self.sen_meas.meta_add_delay = True

@@ -220,13 +220,13 @@ class delay(simulation):
        storage_names = cell_inst.mod.get_storage_net_names()
        debug.check(len(storage_names) == 2, ("Only inverting/non-inverting storage nodes"
                                              "supported for characterization. Storage nets={}").format(storage_names))
        if not OPTS.use_pex:
            q_name = cell_name + '.' + str(storage_names[0])
            qbar_name = cell_name + '.' + str(storage_names[1])
        else:
        if OPTS.use_pex and OPTS.pex_exe[0] != "calibre":
            bank_num = self.sram.get_bank_num(self.sram.name, bit_row, bit_col)
            q_name = "bitcell_Q_b{0}_r{1}_c{2}".format(bank_num, bit_row, bit_col)
            qbar_name = "bitcell_Q_bar_b{0}_r{1}_c{2}".format(bank_num, bit_row, bit_col)
        else:
            q_name = cell_name + '.' + str(storage_names[0])
            qbar_name = cell_name + '.' + str(storage_names[1])

        # Bit measures, measurement times to be defined later. The measurement names must be unique
        # but this is enforced externally. {} added to names to differentiate between ports allow the

@@ -387,6 +387,9 @@ class delay(simulation):
        self.delay_stim_sp = "delay_stim.sp"
        temp_stim = "{0}/{1}".format(OPTS.openram_temp, self.delay_stim_sp)
        self.sf = open(temp_stim, "w")

        if OPTS.spice_name == "spectre":
            self.sf.write("simulator lang=spice\n")
        self.sf.write("* Delay stimulus for period of {0}n load={1}fF slew={2}ns\n\n".format(self.period,
                                                                                             self.load,
                                                                                             self.slew))

@@ -415,7 +418,9 @@ class delay(simulation):
                                     t_rise=self.slew,
                                     t_fall=self.slew)

        # self.load_all_measure_nets()
        self.write_delay_measures()
        # self.write_simulation_saves()

        # run until the end of the cycle time
        self.stim.write_control(self.cycle_times[-1] + self.period)

@@ -593,6 +598,69 @@ class delay(simulation):
            self.sf.write("* Write ports {}\n".format(write_port))
            self.write_delay_measures_write_port(write_port)

    def load_pex_net(self, net: str):
        from subprocess import check_output, CalledProcessError
        prefix = (self.sram_instance_name + ".").lower()
        if not net.lower().startswith(prefix) or not OPTS.use_pex or not OPTS.calibre_pex:
            return net
        original_net = net
        net = net[len(prefix):]
        net = net.replace(".", "_").replace("[", "\[").replace("]", "\]")
        for pattern in ["\sN_{}_[MXmx]\S+_[gsd]".format(net), net]:
            try:
                match = check_output(["grep", "-m1", "-o", "-iE", pattern, self.sp_file])
                return prefix + match.decode().strip()
            except CalledProcessError:
                pass
        return original_net
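load_pex_net shells out to grep to recover the node name a Calibre-extracted netlist actually uses for a hierarchical net, trying a terminal-style alias of the form N_<net>_<device>_<g/s/d> first and the bare net name second. A rough pure-Python sketch of the same lookup with the re module (the netlist line below is hypothetical):

import re

def find_pex_alias(net, netlist_text):
    """Return the first PEX alias of `net` found in the netlist text, or None.

    Mirrors the grep patterns above: first a N_<net>_<device>_<g|s|d> terminal
    name, then the bare (escaped) net name itself.
    """
    escaped = net.replace(".", "_").replace("[", r"\[").replace("]", r"\]")
    for pattern in (r"\sN_{}_[MXmx]\S+_[gsd]".format(escaped), escaped):
        match = re.search(pattern, netlist_text, flags=re.IGNORECASE)
        if match:
            return match.group(0).strip()
    return None

# Hypothetical snippet of an extracted netlist:
text = "RC1 N_Xbank0_bl_0_0_MM12_d gnd 1.2k"
print(find_pex_alias("Xbank0.bl_0_0", text))  # -> N_Xbank0_bl_0_0_MM12_d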

    def load_all_measure_nets(self):
        measurement_nets = set()
        for port, meas in zip(self.targ_read_ports * len(self.read_meas_lists) +
                              self.targ_write_ports * len(self.write_meas_lists),
                              self.read_meas_lists + self.write_meas_lists):
            for measurement in meas:
                visited = getattr(measurement, 'pex_visited', False)
                for prop in ["trig_name_no_port", "targ_name_no_port"]:
                    if hasattr(measurement, prop):
                        net = getattr(measurement, prop).format(port)
                        if not visited:
                            net = self.load_pex_net(net)
                            setattr(measurement, prop, net)
                        measurement_nets.add(net)
                measurement.pex_visited = True
        self.measurement_nets = measurement_nets
        return measurement_nets

    def write_simulation_saves(self):
        for net in self.measurement_nets:
            self.sf.write(".plot V({0}) \n".format(net))
        probe_nets = set()
        sram_name = self.sram_instance_name
        col = self.bitline_column
        row = self.wordline_row
        for port in set(self.targ_read_ports + self.targ_write_ports):
            probe_nets.add("WEB{}".format(port))
            probe_nets.add("{}.w_en{}".format(self.sram_instance_name, port))
            probe_nets.add("{0}.Xbank0.Xport_data{1}.Xwrite_driver_array{1}.Xwrite_driver{2}.en_bar".format(
                self.sram_instance_name, port, self.bitline_column))
            probe_nets.add("{}.Xbank0.br_{}_{}".format(self.sram_instance_name, port,
                                                       self.bitline_column))
            if not OPTS.use_pex:
                continue
            probe_nets.add(
                "{0}.vdd_Xbank0_Xbitcell_array_xbitcell_array_xbit_r{1}_c{2}".format(sram_name, row, col - 1))
            probe_nets.add(
                "{0}.p_en_bar{1}_Xbank0_Xport_data{1}_Xprecharge_array{1}_Xpre_column_{2}".format(sram_name, port, col))
            probe_nets.add(
                "{0}.vdd_Xbank0_Xport_data{1}_Xprecharge_array{1}_xpre_column_{2}".format(sram_name, port, col))
            probe_nets.add("{0}.vdd_Xbank0_Xport_data{1}_Xwrite_driver_array{1}_xwrite_driver{2}".format(sram_name,
                                                                                                         port, col))
        probe_nets.update(self.measurement_nets)
        for net in probe_nets:
            debug.info(2, "Probe: {}".format(net))
            self.sf.write(".plot V({}) \n".format(self.load_pex_net(net)))

    def write_power_measures(self):
        """
        Write the measure statements to quantify the leakage power only.

@@ -236,10 +236,9 @@ class lib:
        self.lib.write(" slew_lower_threshold_pct_rise : 10.0 ;\n")
        self.lib.write(" slew_upper_threshold_pct_rise : 90.0 ;\n\n")

        self.lib.write(" nom_voltage : {};\n".format(tech.spice["nom_supply_voltage"]))
        self.lib.write(" nom_temperature : {};\n".format(tech.spice["nom_temperature"]))
        self.lib.write(" nom_process : {};\n".format(1.0))

        self.lib.write(" nom_voltage : {};\n".format(self.voltage))
        self.lib.write(" nom_temperature : {};\n".format(self.temperature))
        self.lib.write(" nom_process : 1.0;\n")
        self.lib.write(" default_cell_leakage_power : 0.0 ;\n")
        self.lib.write(" default_leakage_power_density : 0.0 ;\n")
        self.lib.write(" default_input_pin_cap : 1.0 ;\n")

@@ -250,7 +249,7 @@ class lib:
        self.lib.write(" default_max_fanout : 4.0 ;\n")
        self.lib.write(" default_connection_class : universal ;\n\n")

        self.lib.write(" voltage_map ( VDD, {} );\n".format(tech.spice["nom_supply_voltage"]))
        self.lib.write(" voltage_map ( VDD, {} );\n".format(self.voltage))
        self.lib.write(" voltage_map ( GND, 0 );\n\n")

    def create_list(self,values):

@@ -467,7 +467,7 @@ class simulation():
        """

        port = self.read_ports[0]
        if not OPTS.use_pex:
        if not OPTS.use_pex or (OPTS.use_pex and OPTS.pex_exe[0] == "calibre"):
            self.graph.get_all_paths('{}{}'.format("clk", port),
                                     '{}{}_{}'.format(self.dout_name, port, self.probe_data))

@@ -523,7 +523,7 @@ class simulation():
        debug.check(len(sa_mods) == 1, "Only expected one type of Sense Amp. Cannot perform s_en checks.")
        enable_name = sa_mods[0].get_enable_name()
        sen_name = self.get_alias_in_path(paths, enable_name, sa_mods[0])
        if OPTS.use_pex:
        if OPTS.use_pex and OPTS.pex_exe[0] != "calibre":
            sen_name = sen_name.split('.')[-1]
        return sen_name

@@ -581,7 +581,7 @@ class simulation():
        exclude_set = self.get_bl_name_search_exclusions()
        for int_net in [cell_bl, cell_br]:
            bl_names.append(self.get_alias_in_path(paths, int_net, cell_mod, exclude_set))
        if OPTS.use_pex:
        if OPTS.use_pex and OPTS.pex_exe[0] != "calibre":
            for i in range(len(bl_names)):
                bl_names[i] = bl_names[i].split('.')[-1]
        return bl_names[0], bl_names[1]

@@ -52,7 +52,7 @@ class stimuli():
    def inst_model(self, pins, model_name):
        """ Function to instantiate a generic model with a set of pins """

        if OPTS.use_pex:
        if OPTS.use_pex and OPTS.pex_exe[0] != "calibre":
            self.inst_pex_model(pins, model_name)
        else:
            self.sf.write("X{0} ".format(model_name))

@@ -246,28 +246,51 @@ class stimuli():
        reltol = 0.001 # 0.1%
        timestep = 10 # ps, was 5ps but ngspice was complaining the timestep was too small in certain tests.

        # UIC is needed for ngspice to converge
        self.sf.write(".TRAN {0}p {1}n UIC\n".format(timestep, end_time))
        self.sf.write(".TEMP {}\n".format(self.temperature))
        if OPTS.spice_name == "ngspice":
            # UIC is needed for ngspice to converge
            self.sf.write(".TRAN {0}p {1}n UIC\n".format(timestep, end_time))
            # ngspice sometimes has convergence problems if not using gear method
            # which is more accurate, but slower than the default trapezoid method
            # Do not remove this or it may not converge due to some "pa_00" nodes
            # unless you figure out what these are.
            self.sf.write(".OPTIONS POST=1 RELTOL={0} PROBE method=gear\n".format(reltol))
        elif OPTS.spice_name == "spectre":
            self.sf.write("simulator lang=spectre\n")
            if OPTS.use_pex:
                nestlvl = 1
                spectre_save = "selected"
            else:
                nestlvl = 10
                spectre_save = "lvlpub"
            self.sf.write('saveOptions options save={} nestlvl={} pwr=total \n'.format(
                spectre_save, nestlvl))
            self.sf.write("simulatorOptions options reltol=1e-3 vabstol=1e-6 iabstol=1e-12 temp={0} try_fast_op=no "
                          "rforce=10m maxnotes=10 maxwarns=10 "
                          " preservenode=all topcheck=fixall "
                          "digits=5 cols=80 dc_pivot_check=yes pivrel=1e-3 "
                          " \n".format(self.temperature))
            self.sf.write('tran tran step={} stop={}n ic=node write=spectre.dc errpreset=moderate '
                          ' annotate=status maxiters=5 \n'.format("5p", end_time))
            self.sf.write("simulator lang=spice\n")
        else:
            self.sf.write(".TRAN {0}p {1}n UIC\n".format(timestep, end_time))
            self.sf.write(".OPTIONS POST=1 RUNLVL={0} PROBE\n".format(runlvl))
            if OPTS.spice_name == "hspice": # for cadence plots
                self.sf.write(".OPTIONS PSF=1 \n")
                self.sf.write(".OPTIONS HIER_DELIM=1 \n")

        # create plots for all signals
        self.sf.write("* probe is used for hspice/xa, while plot is used in ngspice\n")
        if OPTS.verbose_level>0:
            if OPTS.spice_name in ["hspice", "xa"]:
                self.sf.write(".probe V(*)\n")
        if not OPTS.use_pex: # Don't save all for extracted simulations
            self.sf.write("* probe is used for hspice/xa, while plot is used in ngspice\n")
            if OPTS.verbose_level>0:
                if OPTS.spice_name in ["hspice", "xa"]:
                    self.sf.write(".probe V(*)\n")
                else:
                    self.sf.write(".plot V(*)\n")
            else:
                self.sf.write(".plot V(*)\n")
        else:
            self.sf.write("*.probe V(*)\n")
            self.sf.write("*.plot V(*)\n")
            self.sf.write("*.probe V(*)\n")
            self.sf.write("*.plot V(*)\n")

        # end the stimulus file
        self.sf.write(".end\n\n")

@@ -314,6 +337,16 @@ class stimuli():
                                                 OPTS.openram_temp,
                                                 OPTS.num_sim_threads)
            valid_retcode=0
        elif OPTS.spice_name == "spectre":
            if OPTS.use_pex:
                extra_options = " +dcopt +postlayout "
            else:
                extra_options = ""
            cmd = ("{0} -64 {1} -format psfbin -raw {2} {3} -maxwarnstolog 1000 "
                   " +mt={4} -maxnotestolog 1000 "
                   .format(OPTS.spice_exe, temp_stim, OPTS.openram_temp, extra_options,
                           OPTS.num_sim_threads))
            valid_retcode = 0
        elif OPTS.spice_name == "hspice":
            # TODO: Should make multithreading parameter a configuration option
            cmd = "{0} -mt {1} -i {2} -o {3}timing".format(OPTS.spice_exe,

@@ -2,7 +2,7 @@ word_size = 32
num_words = 256
write_size = 8

local_array_size = 16
#local_array_size = 16

num_rw_ports = 1
num_r_ports = 1

@@ -11,9 +11,9 @@ num_w_ports = 0
tech_name = "sky130"
nominal_corner_only = True

route_supplies = False
#route_supplies = False
check_lvsdrc = True
perimeter_pins = False
#perimeter_pins = False
#netlist_only = True
#analytical_delay = False
output_name = "sram_{0}rw{1}r{2}w_{3}_{4}_{5}".format(num_rw_ports,

@@ -19,9 +19,10 @@ import re
import copy
import importlib
import getpass
import subprocess


VERSION = "1.1.9"
VERSION = "1.1.13"
NAME = "OpenRAM v{}".format(VERSION)
USAGE = "openram.py [options] <config file>\nUse -h for help.\n"

@@ -115,10 +116,6 @@ def parse_args():
    if OPTS.tech_name == "s8":
        OPTS.tech_name = "sky130"

    if OPTS.openram_temp:
        # If they define the temp directory, we can only use one thread at a time!
        OPTS.num_threads = 1

    return (options, args)

@@ -161,6 +158,17 @@ def check_versions():
    # or, this could be done in each module (e.g. verify, characterizer, etc.)
    global OPTS

    def cmd_exists(cmd):
        return subprocess.call("type " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0

    if cmd_exists("coverage"):
        OPTS.coverage_exe = "coverage run -p "
    elif cmd_exists("python3-coverage"):
        OPTS.coverage_exe = "python3-coverage run -p "
    else:
        OPTS.coverage_exe = ""
        debug.warning("Failed to find coverage installation. This can be installed with pip3 install coverage")

    try:
        import coverage
        OPTS.coverage = 1

@@ -413,7 +421,7 @@ def setup_paths():
    # Add all of the subdirs to the python path
    # These subdirs are modules and don't need
    # to be added: characterizer, verify
    subdirlist = [ item for item in os.listdir(OPENRAM_HOME) if os.path.isdir(os.path.join(OPENRAM_HOME, item)) ]
    subdirlist = [item for item in os.listdir(OPENRAM_HOME) if os.path.isdir(os.path.join(OPENRAM_HOME, item))]
    for subdir in subdirlist:
        full_path = "{0}/{1}".format(OPENRAM_HOME, subdir)
        debug.check(os.path.isdir(full_path),

@@ -421,10 +429,10 @@ def setup_paths():
        if "__pycache__" not in full_path:
            sys.path.append("{0}".format(full_path))

    # Use a unique temp directory
    if not OPTS.openram_temp:
        OPTS.openram_temp = "/tmp/openram_{0}_{1}_temp/".format(getpass.getuser(),
                                                                os.getpid())
    # Use a unique temp subdirectory
    OPTS.openram_temp += "/openram_{0}_{1}_temp/".format(getpass.getuser(),
                                                         os.getpid())

    if not OPTS.openram_temp.endswith('/'):
        OPTS.openram_temp += "/"
    debug.info(1, "Temporary files saved in " + OPTS.openram_temp)

@@ -280,7 +280,7 @@ class port_data(design.design):
        self.br_names = self.bitcell.get_all_br_names()
        self.wl_names = self.bitcell.get_all_wl_names()
        # used for bl/br names
        self.precharge = factory.create(module_type="precharge",
        self.precharge = factory.create(module_type=OPTS.precharge,
                                        bitcell_bl=self.bl_names[0],
                                        bitcell_br=self.br_names[0])

@@ -72,7 +72,7 @@ class precharge_array(design.design):
        self.DRC_LVS()

    def add_modules(self):
        self.pc_cell = factory.create(module_type="precharge",
        self.pc_cell = factory.create(module_type=OPTS.precharge,
                                      size=self.size,
                                      bitcell_bl=self.bitcell_bl,
                                      bitcell_br=self.bitcell_br)

@@ -146,10 +146,10 @@ class sense_amp_array(design.design):
            inst = self.local_insts[i]

            for gnd_pin in inst.get_pins("gnd"):
                self.copy_power_pin(gnd_pin, directions=("V", "V"))
                self.copy_power_pin(gnd_pin)

            for vdd_pin in inst.get_pins("vdd"):
                self.copy_power_pin(vdd_pin, directions=("V", "V"))
                self.copy_power_pin(vdd_pin)

            bl_pin = inst.get_pin(inst.mod.get_bl_names())
            br_pin = inst.get_pin(inst.mod.get_br_names())

@@ -74,7 +74,7 @@ class options(optparse.Values):
        # If user defined the temporary location in their environment, use it
        openram_temp = os.path.abspath(os.environ.get("OPENRAM_TMP"))
    except:
        openram_temp = None
        openram_temp = "/tmp"

    # This is the verbosity level to control debug information. 0 is none, 1
    # is minimal, etc.

@@ -171,6 +171,7 @@ class options(optparse.Values):
    nand2_dec = "pnand2"
    nand3_dec = "pnand3"
    nand4_dec = "pnand4" # Not available right now
    precharge = "precharge"
    precharge_array = "precharge_array"
    ptx = "ptx"
    replica_bitline = "replica_bitline"

@@ -7,6 +7,8 @@
#
from enum import Enum
from vector3d import vector3d
import debug


class direction(Enum):
    NORTH = 1

@@ -20,31 +22,30 @@ class direction(Enum):
    SOUTHEAST = 9
    SOUTHWEST = 10


    def get_offset(direct):
        """
        Returns the vector offset for a given direction.
        """
        if direct==direction.NORTH:
            offset = vector3d(0,1,0)
            offset = vector3d(0, 1, 0)
        elif direct==direction.SOUTH:
            offset = vector3d(0,-1,0)
            offset = vector3d(0, -1 ,0)
        elif direct==direction.EAST:
            offset = vector3d(1,0,0)
            offset = vector3d(1, 0, 0)
        elif direct==direction.WEST:
            offset = vector3d(-1,0,0)
            offset = vector3d(-1, 0, 0)
        elif direct==direction.UP:
            offset = vector3d(0,0,1)
            offset = vector3d(0, 0, 1)
        elif direct==direction.DOWN:
            offset = vector3d(0,0,-1)
            offset = vector3d(0, 0, -1)
        elif direct==direction.NORTHEAST:
            offset = vector3d(1,1,0)
            offset = vector3d(1, 1, 0)
        elif direct==direction.NORTHWEST:
            offset = vector3d(-1,1,0)
            offset = vector3d(-1, 1, 0)
        elif direct==direction.SOUTHEAST:
            offset = vector3d(1,-1,0)
            offset = vector3d(1, -1, 0)
        elif direct==direction.SOUTHWEST:
            offset = vector3d(-1,-1,0)
            offset = vector3d(-1, -1, 0)
        else:
            debug.error("Invalid direction {}".format(direct))

@@ -67,8 +68,8 @@ class direction(Enum):
        return [direction.get_offset(d) for d in direction.all_directions()]

    def all_neighbors(cell):
        return [cell+x for x in direction.all_offsets()]
        return [cell + x for x in direction.all_offsets()]

    def cardinal_neighbors(cell):
        return [cell+x for x in direction.cardinal_offsets()]
        return [cell + x for x in direction.cardinal_offsets()]
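The helpers above map direction enum values onto unit steps of the 3-D routing grid. A short usage sketch, assuming the module context above (direction, vector3d) and that vector3d adds component-wise, as the list comprehensions imply:

# Assumed to run with the imports of the module above available.
origin = vector3d(0, 0, 0)

offset = direction.get_offset(direction.NORTHEAST)
print(offset)                                  # e.g. vector3d(1, 1, 0)

neighbors = direction.cardinal_neighbors(origin)
print(neighbors)                               # the N, S, E, W neighbors of the origin cell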

@@ -34,7 +34,6 @@ class pin_group:
        # Remove any redundant pins (i.e. contained in other pins)
        self.remove_redundant_pins()

        self.router = router
        # These are the corresponding pin grids for each pin group.
        self.grids = set()

@@ -101,13 +100,11 @@ class pin_group:
        if local_debug:
            debug.info(0, "INITIAL: {}".format(pin_list))

        new_pin_list = pin_list.copy()

        remove_indices = set()
        add_indices = set(range(len(pin_list)))
        # This is n^2, but the number is small
        for index1, pin1 in enumerate(pin_list):
            # If we remove this pin, it can't contain other pins
            if index1 in remove_indices:
            if index1 not in add_indices:
                continue

            for index2, pin2 in enumerate(pin_list):

@@ -117,17 +114,15 @@ class pin_group:
                if index1 == index2:
                    continue
                # If we already removed it, can't remove it again...
                if index2 in remove_indices:
                if index2 not in add_indices:
                    continue

                if pin1.contains(pin2):
                    if local_debug:
                        debug.info(0, "{0} contains {1}".format(pin1, pin2))
                    remove_indices.add(index2)
                    add_indices.remove(index2)

        # Remove them in decreasing order to not invalidate the indices
        for i in sorted(remove_indices, reverse=True):
            del new_pin_list[i]
        new_pin_list = [pin_list[x] for x in add_indices]

        if local_debug:
            debug.info(0, "FINAL : {}".format(new_pin_list))

@@ -423,13 +418,15 @@ class pin_group:
        # We may have started with an empty set
        debug.check(len(self.grids) > 0, "Cannot seed an empty grid set.")

        common_blockages = self.router.get_blocked_grids() & self.grids

        # Start with the ll and make the widest row
        row = [ll]
        # Move in dir1 while we can
        while True:
            next_cell = row[-1] + offset1
            # Can't move if not in the pin shape
            if next_cell in self.grids and next_cell not in self.router.get_blocked_grids():
            if next_cell in self.grids and next_cell not in common_blockages:
                row.append(next_cell)
            else:
                break

@@ -438,7 +435,7 @@ class pin_group:
            next_row = [x + offset2 for x in row]
            for cell in next_row:
                # Can't move if any cell is not in the pin shape
                if cell not in self.grids or cell in self.router.get_blocked_grids():
                if cell not in self.grids or cell in common_blockages:
                    break
            else:
                row = next_row

@@ -619,6 +616,11 @@ class pin_group:
        # Set of tracks adjacent to or partially overlapping a pin (not a full DRC connection)
        partial_set = set()

        # for pin in self.pins:
        #     lx = pin.lx()
        #     ly = pin.by()
        #     if lx > 87.9 and lx < 87.99 and ly > 18.56 and ly < 18.6:
        #         breakpoint()
        for pin in self.pins:
            debug.info(4, " Converting {0}".format(pin))
            # Determine which tracks the pin overlaps

@@ -632,7 +634,8 @@ class pin_group:
            blockage_in_tracks = self.router.convert_blockage(pin)
            # Must include the pins here too because these are computed in a different
            # way than blockages.
            self.blockages.update(sufficient | insufficient | blockage_in_tracks)
            blockages = sufficient | insufficient | blockage_in_tracks
            self.blockages.update(blockages)

            # If we have a blockage, we must remove the grids
            # Remember, this excludes the pin blockages already

@@ -504,14 +504,21 @@ class router(router_tech):
            ll = vector(boundary[0], boundary[1])
            ur = vector(boundary[2], boundary[3])
            rect = [ll, ur]
            new_pin = pin_layout("blockage{}".format(len(self.blockages)),
                                 rect,
                                 lpp)
            new_shape = pin_layout("blockage{}".format(len(self.blockages)),
                                   rect,
                                   lpp)

            # If there is a rectangle that is the same in the pins,
            # it isn't a blockage!
            if new_pin not in self.all_pins:
                self.blockages.append(new_pin)
            if new_shape not in self.all_pins and not self.pin_contains(new_shape):
                self.blockages.append(new_shape)

    def pin_contains(self, shape):
        for pin in self.all_pins:
            if pin.contains(shape):
                return True
        return False

    def convert_point_to_units(self, p):
        """
        Convert a path set of tracks to center line path.

@@ -1048,6 +1055,7 @@ class router(router_tech):
        # Double check that source and target are not the same node; if so, we are done!
        for k, v in self.rg.map.items():
            if v.source and v.target:
                self.paths.append([k])
                return True

        # returns the path in tracks

@@ -37,12 +37,12 @@ class supply_tree_router(router):
        """
        Route the two nets in a single layer
        """
        debug.info(1,"Running supply router on {0} and {1}...".format(vdd_name, gnd_name))
        debug.info(1, "Running supply router on {0} and {1}...".format(vdd_name, gnd_name))
        self.vdd_name = vdd_name
        self.gnd_name = gnd_name

        # Clear the pins if we have previously routed
        if (hasattr(self,'rg')):
        if (hasattr(self, 'rg')):
            self.clear_pins()
        else:
            # Create a routing grid over the entire area

@@ -53,14 +53,14 @@ class supply_tree_router(router):
        # Get the pin shapes
        start_time = datetime.now()
        self.find_pins_and_blockages([self.vdd_name, self.gnd_name])
        print_time("Finding pins and blockages",datetime.now(), start_time, 3)
        print_time("Finding pins and blockages", datetime.now(), start_time, 3)

        # Route the supply pins to the supply rails
        # Route vdd first since we want it to be shorter
        start_time = datetime.now()
        self.route_pins(vdd_name)
        self.route_pins(gnd_name)
        print_time("Maze routing supplies",datetime.now(), start_time, 3)
        print_time("Maze routing supplies", datetime.now(), start_time, 3)

        # self.write_debug_gds("final_tree_router.gds",False)

@@ -79,11 +79,11 @@ class supply_tree_router(router):
        """

        remaining_components = sum(not x.is_routed() for x in self.pin_groups[pin_name])
        debug.info(1,"Routing {0} with {1} pin components to connect.".format(pin_name,
                                                                              remaining_components))
        debug.info(1, "Routing {0} with {1} pin components to connect.".format(pin_name,
                                                                               remaining_components))

        # Create full graph
        debug.info(2,"Creating adjacency matrix")
        debug.info(2, "Creating adjacency matrix")
        pin_size = len(self.pin_groups[pin_name])
        adj_matrix = [[0] * pin_size for i in range(pin_size)]

@@ -95,7 +95,7 @@ class supply_tree_router(router):
                adj_matrix[index1][index2] = dist

        # Find MST
        debug.info(2,"Finding MinimumSpanning Tree")
        debug.info(2, "Finding MinimumSpanning Tree")
        X = csr_matrix(adj_matrix)
        Tcsr = minimum_spanning_tree(X)
        mst = Tcsr.toarray().astype(int)
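The supply router connects the pin groups of each net by filling a pairwise-distance adjacency matrix and extracting a minimum spanning tree with SciPy, then maze-routes each tree edge. A tiny stand-alone sketch of that MST step with made-up distances:

from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

# Hypothetical pairwise distances between four pin groups of one supply net.
adj_matrix = [[0, 3, 9, 7],
              [3, 0, 4, 8],
              [9, 4, 0, 2],
              [7, 8, 2, 0]]

mst = minimum_spanning_tree(csr_matrix(adj_matrix)).toarray().astype(int)

# Each non-zero entry (i, j) is a tree edge: maze-route pin group i to pin group j.
edges = [(i, j) for i in range(len(mst)) for j in range(len(mst)) if mst[i][j]]
print(edges)  # -> [(0, 1), (1, 2), (2, 3)]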

@@ -144,6 +144,7 @@ class supply_tree_router(router):
            self.add_pin_component_source(pin_name, src_idx)

            # Marks all pin components except index as target
            # which unmarks it as a blockage too
            self.add_pin_component_target(pin_name, dest_idx)

            # Actually run the A* router

@@ -325,13 +325,13 @@ class sram_1bank(sram_base):
        # they might create some blockages
        self.add_layout_pins()

        # Route the supplies first since the MST is not blockage aware
        # and signals can route to anywhere on sides (it is flexible)
        self.route_supplies()

        # Route the pins to the perimeter
        if OPTS.perimeter_pins:
            self.route_escape_pins()

        # Route the supplies first since the MST is not blockage aware
        # and signals can route to anywhere on sides (it is flexible)
        self.route_supplies()

    def route_dffs(self, add_routes=True):

@@ -196,12 +196,13 @@ class sram_base(design, verilog, lef):

        self.add_lvs_correspondence_points()

        #self.offset_all_coordinates()
        # self.offset_all_coordinates()

        highest_coord = self.find_highest_coords()
        self.width = highest_coord[0]
        self.height = highest_coord[1]
        if OPTS.use_pex:
        if OPTS.use_pex and OPTS.pex_exe[0] != "calibre":
            debug.info(2, "adding global pex labels")
            self.add_global_pex_labels()
        self.add_boundary(ll=vector(0, 0),
                          ur=vector(self.width, self.height))

@@ -46,12 +46,15 @@ class openram_back_end_test(openram_test):
        if OPTS.spice_name:
            options += " -s {}".format(OPTS.spice_name)

        if OPTS.tech_name:
            options += " -t {}".format(OPTS.tech_name)

        # Always perform code coverage
        if OPTS.coverage == 0:
            debug.warning("Failed to find coverage installation. This can be installed with pip3 install coverage")
            exe_name = "{0}/openram.py ".format(OPENRAM_HOME)
        else:
            exe_name = "coverage run -p {0}/openram.py ".format(OPENRAM_HOME)
        exe_name = "{0}{1}/openram.py ".format(OPTS.coverage_exe, OPENRAM_HOME)
        config_name = "{0}/tests/configs/config_back_end.py".format(OPENRAM_HOME)
        cmd = "{0} -o {1} -p {2} {3} {4} 2>&1 > {5}/output.log".format(exe_name,
                                                                       out_file,

@@ -46,12 +46,15 @@ class openram_front_end_test(openram_test):
        if OPTS.spice_name:
            options += " -s {}".format(OPTS.spice_name)

        if OPTS.tech_name:
            options += " -t {}".format(OPTS.tech_name)

        # Always perform code coverage
        if OPTS.coverage == 0:
            debug.warning("Failed to find coverage installation. This can be installed with pip3 install coverage")
            exe_name = "{0}/openram.py ".format(OPENRAM_HOME)
        else:
            exe_name = "coverage run -p {0}/openram.py ".format(OPENRAM_HOME)
        exe_name = "{0}{1}/openram.py ".format(OPTS.coverage_exe, OPENRAM_HOME)
        config_name = "{0}/tests/configs/config_front_end.py".format(OPENRAM_HOME)
        cmd = "{0} -n -o {1} -p {2} {3} {4} 2>&1 > {5}/output.log".format(exe_name,
                                                                          out_file,

@@ -73,7 +73,7 @@ def fork_tests(num_threads):
            sys.stdin.close()
            test_suite_result = AutoTimingTestResultDecorator(TestProtocolClient(stream))
            test_suite.run(test_suite_result)
        except:
        except EBADF:
            try:
                stream.write(traceback.format_exc())
            finally:

@@ -21,6 +21,7 @@ import os
import shutil
import re
import debug
import utils
from globals import OPTS
from run_script import run_script

@@ -141,7 +142,7 @@ def write_pex_script(cell_name, extract, output, final_verification=False, outpu
    if not output_path:
        output_path = OPTS.openram_temp

    if output == None:
    if not output:
        output = cell_name + ".pex.sp"

    # check if lvs report has been done

@@ -166,6 +167,17 @@
        'pexPexReportFile': cell_name + ".pex.report",
        'pexMaskDBFile': cell_name + ".maskdb",
        'cmnFDIDEFLayoutPath': cell_name + ".def",
        'cmnRunMT': "1",
        'cmnNumTurbo': "16",
        'pexPowerNames': "vdd",
        'pexGroundNames': "gnd",
        'pexPexGroundName': "1",
        'pexPexGroundNameValue': "gnd",
        'pexPexSeparator': "1",
        'pexPexSeparatorValue': "_",
        'pexPexNetlistNameSource': 'SOURCENAMES',
        'pexSVRFCmds': '{SOURCE CASE YES} {LAYOUT CASE YES}',
        'pexIncludeCmdsType': 'SVRF',
    }

    # write the runset file

@@ -174,12 +186,73 @@
        f.write("*{0}: {1}\n".format(k, pex_runset[k]))
    f.close()

    # write the rules file
    f = open(output_path + "pex_rules", "w")
    f.write('// Rules file, created by OpenRAM, (c) Bob Vanhoof\n')
    f.write('\n')
    f.write('LAYOUT PATH "' + output_path + cell_name + '.gds"\n')
    f.write('LAYOUT PRIMARY ' + cell_name + '\n')
    f.write('LAYOUT SYSTEM GDSII\n')
    f.write('\n')
    f.write('SOURCE PATH "' + output_path + cell_name + '.sp"\n')
    f.write('SOURCE PRIMARY ' + cell_name +'\n')
    f.write('SOURCE SYSTEM SPICE\n')
    f.write('SOURCE CASE YES\n')
    f.write('\n')
    f.write('MASK SVDB DIRECTORY "svdb" QUERY XRC\n')
    f.write('\n')
    f.write('LVS REPORT "' + output_path + cell_name + '.pex.report"\n')
    f.write('LVS REPORT OPTION NONE\n')
    f.write('LVS FILTER UNUSED OPTION NONE SOURCE\n')
    f.write('LVS FILTER UNUSED OPTION NONE LAYOUT\n')
    f.write('LVS POWER NAME vdd\n')
    f.write('LVS GROUND NAME gnd\n')
    f.write('LVS RECOGNIZE GATES ALL\n')
    f.write('LVS CELL SUPPLY YES\n')
    f.write('LVS PUSH DEVICES SEPARATE PROPERTIES YES\n')
    f.write('\n')
    f.write('PEX NETLIST "' + output + '" HSPICE 1 SOURCENAMES GROUND gnd\n')
    f.write('PEX REDUCE ANALOG NO\n')
    f.write('PEX NETLIST UPPERCASE KEYWORDS NO\n')
    f.write('PEX NETLIST VIRTUAL CONNECT YES\n')
    f.write('PEX NETLIST NOXREF NET NAMES YES\n')
    f.write('PEX NETLIST MUTUAL RESISTANCE YES\n')
    f.write('PEX NETLIST EXPORT PORTS YES\n')
    f.write('PEX PROBE FILE "probe_file"\n')
    f.write('\n')
    f.write('VIRTUAL CONNECT COLON NO\n')
    f.write('VIRTUAL CONNECT REPORT NO\n')
    f.write('VIRTUAL CONNECT NAME vdd gnd\n')
    f.write('\n')
    f.write('DRC ICSTATION YES\n')
    f.write('\n')
    f.write('INCLUDE "'+ pex_rules +'"\n')
    f.close()

    # write probe file
    # TODO: get from cell name
    f = open(output_path + "probe_file", "w")
    f.write('CELL cell_1rw\n')
    f.write(' Q 0.100 0.510 11\n')
    f.write(' Q_bar 0.520 0.510 11\n')
    f.close()

    # Create an auxiliary script to run calibre with the runset
    run_file = output_path + "run_pex.sh"
    f = open(run_file, "w")
    f.write("#!/bin/sh\n")
    cmd = "{0} -gui -pex pex_runset -batch".format(OPTS.pex_exe[1])

    cmd = "{0} -lvs -hier -genhcells -spice svdb/{1}.sp -turbo -hyper cmp {2}".format(OPTS.pex_exe[1],
                                                                                      cell_name,
                                                                                      'pex_rules')
    f.write(cmd)
    f.write("\n")
    cmd = "sed '/dummy/d' svdb/{0}.hcells | sed '/replica_column/d' | sed '/replica_cell/d' > hcell_file".format(cell_name)
    f.write(cmd)
    f.write("\n")
    cmd = "{0} -xrc -pdb -turbo -xcell hcell_file -full -rc {1}".format(OPTS.pex_exe[1], 'pex_rules')
    f.write(cmd)
    f.write("\n")
    cmd = "{0} -xrc -fmt -full {1}".format(OPTS.pex_exe[1], 'pex_rules')
    f.write(cmd)
    f.write("\n")
    f.close()
Binary file not shown.

@@ -11,5 +11,5 @@ MM5 Q Q_bar vdd vdd PMOS_VTG W=90n L=50n
* Access transistors
MM3 bl_noconn wl Q gnd NMOS_VTG W=135.00n L=50n
MM2 br_noconn wl Q_bar gnd NMOS_VTG W=135.00n L=50n
.ENDS cell_1rw
.ENDS dummy_cell_1rw