mirror of https://github.com/VLSIDA/OpenRAM.git
Added functions for testing the accuracy of the current regression model, plus an associated regression test.
This commit is contained in:
parent
84783bbac5
commit
331e6f8dd5
|
|
@ -19,13 +19,16 @@ class linear_regression(regression_model):
|
|||
def __init__(self, sram, spfile, corner):
    """Delegate all setup to the shared regression_model base class."""
    super().__init__(sram, spfile, corner)
|
||||
|
||||
def get_model(self):
    """Return a fresh, untrained estimator for this model type.

    Ridge (L2-regularized least squares) is the estimator used by this
    linear-regression characterizer; subclasses can override this factory
    to swap in a different sklearn regressor.
    """
    return Ridge()
|
||||
|
||||
def generate_model(self, features, labels):
    """
    Supervised training of the model.

    features: 2-D array-like of scaled input samples.
    labels: array-like of target values aligned with features.
    Returns the fitted estimator produced by self.get_model().
    """
    # Obtain the estimator through the single factory method so subclasses
    # can swap estimators. The previous code also instantiated Ridge()
    # directly (and had a commented-out LinearRegression()); both were dead
    # code immediately overwritten by this call and have been removed.
    model = self.get_model()
    model.fit(features, labels)
    return model
|
||||
|
||||
|
|
|
|||
|
|
@ -150,6 +150,43 @@ class regression_model(simulation):
|
|||
|
||||
return models
|
||||
|
||||
def score_model(self):
    """
    Report the training-set score of a fitted model for every output.

    Prints one "name, score" line per output via debug.info. Scores are
    computed on the same data used for training, so they are optimistic;
    use cross_validation() for a less biased estimate.
    """
    num_inputs = 9  # FIXME - should be defined somewhere else
    self.output_names = get_data_names(data_path)[num_inputs:]
    data = get_scaled_data(data_path)
    features, labels = data[:, :num_inputs], data[:, num_inputs:]

    debug.info(1, "Output name, score")
    # enumerate replaces the original's manual output_num counter; the
    # unused "models = {}" dict from the original has been removed.
    for output_num, o_name in enumerate(self.output_names):
        output_label = labels[:, output_num]
        model = self.generate_model(features, output_label)
        scr = model.score(features, output_label)
        debug.info(1, "{}, {}".format(o_name, scr))
|
||||
|
||||
|
||||
def cross_validation(self):
    """
    Report 5-fold cross-validation accuracy for every output.

    Uses an untrained estimator from get_model() and prints one
    "name, mean_accuracy, std_dev" line per output via debug.info.
    """
    from sklearn.model_selection import cross_val_score
    untrained_model = self.get_model()

    num_inputs = 9  # FIXME - should be defined somewhere else
    self.output_names = get_data_names(data_path)[num_inputs:]
    data = get_scaled_data(data_path)
    features, labels = data[:, :num_inputs], data[:, num_inputs:]

    debug.info(1, "Output name, mean_accuracy, std_dev")
    # enumerate replaces the original's manual output_num counter; the
    # unused "models = {}" dict from the original has been removed.
    for output_num, o_name in enumerate(self.output_names):
        output_label = labels[:, output_num]
        scores = cross_val_score(untrained_model, features, output_label, cv=5)
        debug.info(1, "{}, {}, {}".format(o_name, scores.mean(), scores.std()))
|
||||
|
||||
|
||||
|
||||
# Fixme - only will work for sklearn regression models
|
||||
def save_model(self, model_name, model):
|
||||
try:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,85 @@
|
|||
#!/usr/bin/env python3
|
||||
# See LICENSE for licensing information.
|
||||
#
|
||||
# Copyright (c) 2016-2021 Regents of the University of California and The Board
|
||||
# of Regents for the Oklahoma Agricultural and Mechanical College
|
||||
# (acting for and on behalf of Oklahoma State University)
|
||||
# All rights reserved.
|
||||
#
|
||||
import unittest
|
||||
from testutils import *
|
||||
import sys, os
|
||||
sys.path.append(os.getenv("OPENRAM_HOME"))
|
||||
import globals
|
||||
from globals import OPTS
|
||||
from sram_factory import factory
|
||||
import debug
|
||||
|
||||
# @unittest.skip("SKIPPING 21_regression_model_test")
|
||||
class regression_model_test(openram_test):
    """ Compare the accuracy of the analytical model with a spice simulation. """

    def runTest(self):
        config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
        globals.init_openram(config_file)
        OPTS.analytical_delay = False
        OPTS.netlist_only = True

        # This is a hack to reload the characterizer __init__ with the spice version
        from importlib import reload
        import characterizer
        reload(characterizer)
        from characterizer import linear_regression
        from sram import sram
        from sram_config import sram_config

        # A tiny single-bank SRAM keeps the regression run fast.
        cfg = sram_config(word_size=1,
                          num_words=16,
                          num_banks=1)
        cfg.words_per_row = 1
        cfg.recompute_sizes()
        debug.info(1, "Testing timing for sample 1bit, 16words SRAM with 1 bank")
        sram_inst = factory.create(module_type="sram", sram_config=cfg)

        tempspice = OPTS.openram_temp + "temp.sp"
        sram_inst.sp_write(tempspice)

        probe_address = "1" * sram_inst.s.addr_size
        probe_data = sram_inst.s.word_size - 1
        debug.info(1, "Probe address {0} probe data bit {1}".format(probe_address, probe_data))

        corner = (OPTS.process_corners[0], OPTS.supply_voltages[0], OPTS.temperatures[0])

        regression = linear_regression(sram_inst.s, tempspice, corner)
        regression.cross_validation()

        # NOTE(review): a golden-data comparison of delay outputs
        # (spice vs. model, with a per-tech error_tolerance and
        # check_golden_data) was left commented out in the original
        # commit — only cross_validation() is exercised for now.

        globals.end_openram()
|
||||
|
||||
# run the test from the command line
if __name__ == "__main__":
    (OPTS, args) = globals.parse_args()
    # Clear CLI args so unittest.main does not try to parse OpenRAM options.
    del sys.argv[1:]
    header(__file__, OPTS.tech_name)
    unittest.main(testRunner=debugTestRunner())
|
||||
Loading…
Reference in New Issue