mirror of https://github.com/VLSIDA/OpenRAM.git
Fixed various issues with imported code from CACTI, added CACTI as an option for analytical sim, added placeholder names in tech files for CACTI
This commit is contained in:
parent 2c9f755a73
commit ebc91814e5
@@ -428,8 +428,11 @@ class spice():
         c_intrinsic = self.get_intrinsic_capacitance()
         # Calculate tau with provided output load then calc delay
         tf = rd*(c_intrinsic+c_load)
-        this_delay = horowitz(inrisetime, tf, 0.5, 0.5, True)
-        inrisetime = this_delay / (1.0 - 0.5);
+        # FIXME: horowitz disabled until other parameters have been
+        # fixed due to divide by zero issues
+        #this_delay = self.horowitz(inrisetime, tf, 0.5, 0.5, True)
+        this_delay = 0
+        inrisetime = this_delay / (1.0 - 0.5)
         return delay_data(this_delay, inrisetime)

     def analytical_delay(self, corner, slew, load=0.0):
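For reference, the block above is the CACTI-style stage delay: tf is the driver resistance times the intrinsic-plus-load capacitance, and the Horowitz expression now commented out would turn that tau and the input slew into a 50%-to-50% delay. The divide-by-zero mentioned in the FIXME presumably comes from the a = inputramptime / tf term, which blows up while the placeholder resistances added later in this commit leave tf at zero. Below is a minimal standalone sketch of the intended calculation; every number is invented in place of what the characterizer would supply.

import math

# Invented example numbers -- in OpenRAM these come from the
# characterizer and tech file, not constants like these.
rd = 2.0e3             # effective driver resistance (ohm)
c_intrinsic = 2.0e-15  # gate self-loading (F)
c_load = 10.0e-15      # external load (F)
inrisetime = 20.0e-12  # input slew (s)

tf = rd * (c_intrinsic + c_load)          # tau of the stage

# Same form as the rising branch of horowitz() in the next hunk,
# evaluated at the 50% thresholds used above (vs1 = vs2 = 0.5).
vs1 = vs2 = 0.5
a = inrisetime / tf
b = 0.5
this_delay = (tf * math.sqrt(math.log(vs1)**2 + 2*a*b*(1.0 - vs1))
              + tf * (math.log(vs1) - math.log(vs2)))
outslew = this_delay / (1.0 - 0.5)        # slew handed to the next stage
print(this_delay, outslew)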
@@ -495,50 +498,39 @@ class spice():
                                 self.cell_name))
         return 0

-    def horowitz(inputramptime, # input rise time
+    def horowitz(self,
+                 inputramptime, # input rise time
                  tf, # time constant of gate
                  vs1, # threshold voltage
                  vs2, # threshold voltage
                  rise): # whether input rises or falls
-    {
         if inputramptime == 0 and vs1 == vs2:
             return tf * (-math.log(vs1) if vs1 < 1 else math.log(vs1))

         a = inputramptime / tf
-        if rise == RISE:
-            b = 0.5;
-            td = tf * sqrt(math.log(vs1)*math.log(vs1) + 2*a*b*(1.0 - vs1)) + tf*(math.log(vs1) - math.log(vs2))
+        if rise == True:
+            b = 0.5
+            td = tf * math.sqrt(math.log(vs1)*math.log(vs1) + 2*a*b*(1.0 - vs1)) + tf*(math.log(vs1) - math.log(vs2))

         else:
-            b = 0.4;
-            td = tf * sqrt(math.log(1.0 - vs1)*math.log(1.0 - vs1) + 2*a*b*(vs1)) + tf*(math.log(1.0 - vs1) - math.log(1.0 - vs2))
+            b = 0.4
+            td = tf * math.sqrt(math.log(1.0 - vs1)*math.log(1.0 - vs1) + 2*a*b*(vs1)) + tf*(math.log(1.0 - vs1) - math.log(1.0 - vs2))

         return td

-    def tr_r_on(width, is_nchannel, stack, _is_cell):
+    def tr_r_on(self, width, is_nchannel, stack, _is_cell):

-        if _is_cell:
-            dt = tech.sram_cell #SRAM cell access transistor
-        else:
-            dt = tech.peri_global
-
-
-        restrans = dt.R_nch_on if is_nchannel else dt.R_pch_on
+        # FIXME: temp code until parameters have been determined
+        restrans = tech.spice["r_nch_on"] if is_nchannel else tech.spice["r_pch_on"]
         return stack * restrans / width

-    def gate_c(width, wirelength, _is_cell)
+    def gate_c(self, width, wirelength, _is_cell):

-        if _is_cell:
-            dt = tech.sram_cell #SRAM cell access transistor
-        else:
-            dt = tech.peri_global
-
-
-        return (dt.C_g_ideal + dt.C_overlap + 3*dt.C_fringe)*width + dt.l_phy*Cpolywire
+        return (tech.spice["c_g_ideal"] + tech.spice["c_overlap"] + 3*tech.spice["c_fringe"])*width +\
+               tech.spice["l_phy"]*tech.spice["cpolywire"]

-    def drain_c_(width,
+    def drain_c_(self,
+                 width,
                  nchannel,
                  stack,
                  next_arg_thresh_folding_width_or_height_cell,
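To show how the rewritten helpers are meant to combine once the tech files carry real numbers, here is a standalone sketch that mimics tr_r_on() and gate_c() with a plain dictionary standing in for tech.spice. The keys mirror the ones in the diff, but every value and unit below is an invented assumption for illustration only.

# Stand-in for tech.spice -- keys match the diff, numbers are invented.
spice = {
    "r_nch_on": 8.0e3,     # assumed ohm*um: NMOS on-resistance for 1 um width
    "r_pch_on": 16.0e3,    # assumed ohm*um: PMOS on-resistance for 1 um width
    "c_g_ideal": 0.50e-15, # assumed F/um
    "c_overlap": 0.10e-15, # assumed F/um
    "c_fringe": 0.05e-15,  # assumed F/um
    "l_phy": 0.05,         # assumed um
    "cpolywire": 0.20e-15, # assumed F/um
}

def tr_r_on(width, is_nchannel, stack):
    # Same shape as the method above: stacked devices add in series,
    # wider devices conduct proportionally better.
    r = spice["r_nch_on"] if is_nchannel else spice["r_pch_on"]
    return stack * r / width

def gate_c(width):
    # Gate cap per the rewritten gate_c(): (ideal + overlap + 3*fringe)
    # per unit width, plus the poly-wire term.
    return (spice["c_g_ideal"] + spice["c_overlap"] + 3*spice["c_fringe"])*width \
           + spice["l_phy"]*spice["cpolywire"]

rd = tr_r_on(width=1.0, is_nchannel=True, stack=2)  # 16 kohm with these numbers
cl = gate_c(width=4.0)                              # load of a 4 um wide gate
print(rd, cl, rd*cl)                                # rd*cl is the stage tau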
@@ -551,11 +543,11 @@ class spice():
         if _is_cell:
             dt = tech.sram_cell # SRAM cell access transistor

-        else
+        else:
             dt = tech.peri_global


-        c_junc_area = dt.C_junc;
+        c_junc_area = dt.C_junc
         c_junc_sidewall = dt.C_junc_sidewall
         c_fringe = 2*dt.C_fringe
         c_overlap = 2*dt.C_overlap
@@ -583,15 +575,15 @@ class spice():
         num_folded_tr = int(ceil(width / w_folded_tr))

         if num_folded_tr < 2:
-            w_folded_tr = width;
+            w_folded_tr = width

-        total_drain_w = (tech.w_poly_contact + 2 * tech.spacing_poly_to_contact) + # only for drain
+        # only for drain
+        total_drain_w = (tech.w_poly_contact + 2 * tech.spacing_poly_to_contact) +\
                         (stack - 1) * tech.spacing_poly_to_poly
         drain_h_for_sidewall = w_folded_tr
         total_drain_height_for_cap_wrt_gate = w_folded_tr + 2 * w_folded_tr * (stack - 1)
         if num_folded_tr > 1:
-            total_drain_w += (num_folded_tr - 2) * (tech.w_poly_contact + 2 * tech.spacing_poly_to_contact) +
+            total_drain_w += (num_folded_tr - 2) * (tech.w_poly_contact + 2 * tech.spacing_poly_to_contact) +\
                              (num_folded_tr - 1) * ((stack - 1) * tech.spacing_poly_to_poly)

         if num_folded_tr%2 == 0:
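As a quick check of the folding geometry in the last two hunks, the sketch below evaluates the total_drain_w expression for an invented set of design rules; the w_poly_contact and spacing numbers are placeholders, not real tech parameters.

from math import ceil

# Invented design-rule numbers (um), purely for illustration.
w_poly_contact = 0.06
spacing_poly_to_contact = 0.04
spacing_poly_to_poly = 0.10

def total_drain_width(width, stack, w_folded_tr):
    # Mirrors the expression above: one contacted drain region plus the
    # poly spacing of the stacked gates, extended per extra folded finger.
    num_folded_tr = int(ceil(width / w_folded_tr))
    if num_folded_tr < 2:
        w_folded_tr = width
    w = (w_poly_contact + 2 * spacing_poly_to_contact) + \
        (stack - 1) * spacing_poly_to_poly
    if num_folded_tr > 1:
        w += (num_folded_tr - 2) * (w_poly_contact + 2 * spacing_poly_to_contact) + \
             (num_folded_tr - 1) * ((stack - 1) * spacing_poly_to_poly)
    return num_folded_tr, w

print(total_drain_width(width=1.2, stack=2, w_folded_tr=0.4))  # -> (3, ~0.58)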
@@ -636,6 +636,8 @@ class lib:
             from .elmore import elmore as model
         elif model_name_lc == "neural_network":
             from .neural_network import neural_network as model
+        elif model_name_lc == "cacti":
+            from .cacti import cacti as model
         else:
             debug.error("{} model not recognized. See options.py for available models.".format(OPTS.model_name))
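With the new elif branch, the CACTI equations become selectable alongside the existing elmore and neural_network models. A hypothetical configuration snippet is shown below; only model_name comes from this diff, while the other option names are assumptions about the usual OpenRAM config style, so check options.py for the exact spelling and defaults.

# Hypothetical OpenRAM config file (an assumption, not part of this commit).
word_size = 2
num_words = 16
tech_name = "freepdk45"

analytical_delay = True   # assumed flag: use an analytical model instead of SPICE characterization
model_name = "cacti"      # routes lib's model selection to the new cacti branch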
@@ -456,6 +456,11 @@ parameter["sa_inv_pmos_size"] = 0.54 # micro-meters
 parameter["sa_inv_nmos_size"] = 0.27 # micro-meters
 parameter["bitcell_drain_cap"] = 0.1 # In Femto-Farad, approximation of drain capacitance

+# Spice values used to calculate analytical delay based on CACTI equations
+# FIXME: temp values used currently. Need to be derived from simulations or the SPICE model
+spice["r_nch_on"] = 0
+spice["r_pch_on"] = 0
+
 ###################################################
 # Technology Tool Preferences
 ###################################################
@@ -403,6 +403,11 @@ parameter["sa_inv_pmos_size"] = 18 * _lambda_
 parameter["sa_inv_nmos_size"] = 9 * _lambda_
 parameter["bitcell_drain_cap"] = 0.2 # In Femto-Farad, approximation of drain capacitance

+# Spice values used to calculate analytical delay based on CACTI equations
+# FIXME: temp values used currently. Need to be derived from simulations or the SPICE model
+spice["r_nch_on"] = 0
+spice["r_pch_on"] = 0
+
 ###################################################
 # Technology Tool Preferences
 ###################################################
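Both tech files receive the same placeholder block, so until real values are extracted the CACTI helpers above compute zero resistance. A hedged sketch of what a filled-in block might eventually look like is below; the numbers are invented, not characterized values.

# Snippet of a tech.py, reusing the spice dict defined earlier in the file.
# Invented example values only -- per the FIXME above, the real numbers
# still need to be derived from simulation or the SPICE model.
spice["r_nch_on"] = 8.0e3    # assumed ohm*um, consumed by tr_r_on()
spice["r_pch_on"] = 16.0e3   # assumed ohm*um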