/*
 * Copyright (c) 2012-2020 Stephen Williams (steve@icarus.com)
 *
 * This source code is free software; you can redistribute it
 * and/or modify it in source code form under the terms of the GNU
 * General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

# include  "PExpr.h"
# include  "pform_types.h"
# include  "netlist.h"
# include  "netclass.h"
# include  "netdarray.h"
# include  "netenum.h"
# include  "netqueue.h"
# include  "netparray.h"
# include  "netscalar.h"
# include  "netstruct.h"
# include  "netvector.h"
# include  "netmisc.h"
# include  <typeinfo>
# include  "ivl_assert.h"

using namespace std;

/*
 * Elaborations of types may vary depending on the scope that it is
 * done in, so keep a per-scope cache of the results.
 */
ivl_type_t data_type_t::elaborate_type(Design*des, NetScope*scope)
{
        // User-defined types must be elaborated in the context
        // where they were defined.
      if (!name.nil())
            scope = scope->find_typedef_scope(des, this);
      ivl_assert(*this, scope);

      Definitions*use_definitions = scope;

      map<Definitions*,ivl_type_t>::iterator pos
            = cache_type_elaborate_.lower_bound(use_definitions);

      if (pos != cache_type_elaborate_.end() && pos->first == use_definitions)
            return pos->second;

        // Not yet cached for this scope, so elaborate the type and
        // remember the result.
      ivl_type_t tmp = elaborate_type_raw(des, scope);
      cache_type_elaborate_.insert(pos, pair<Definitions*,ivl_type_t>(scope, tmp));
      return tmp;
}

ivl_type_t data_type_t::elaborate_type_raw(Design*des, NetScope*) const
{
      cerr << get_fileline() << ": internal error: "
           << "Elaborate method not implemented for "
           << typeid(*this).name() << "." << endl;
      des->errors += 1;
      return 0;
}
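/*
 * The atomic integer types all elaborate to fixed-width netvector_t
 * types. For example, in SystemVerilog source:
 *
 *     byte     b;  //  8-bit, 2-state, signed by default
 *     shortint s;  // 16-bit, 2-state, signed by default
 *     int      i;  // 32-bit, 2-state, signed by default
 *     longint  l;  // 64-bit, 2-state, signed by default
 *     integer  n;  // 32-bit, 4-state, signed by default
 *     time     t;  // 64-bit, 4-state, unsigned by default
 */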
ivl_type_t atom_type_t::elaborate_type_raw(Design*des, NetScope*) const
{
      switch (type_code) {
          case INTEGER:
            return netvector_t::integer_type(signed_flag);
          case TIME:
            if (signed_flag)
                  return &netvector_t::time_signed;
            else
                  return &netvector_t::time_unsigned;
          case LONGINT:
            if (signed_flag)
                  return &netvector_t::atom2s64;
            else
                  return &netvector_t::atom2u64;
          case INT:
            if (signed_flag)
                  return &netvector_t::atom2s32;
            else
                  return &netvector_t::atom2u32;
          case SHORTINT:
            if (signed_flag)
                  return &netvector_t::atom2s16;
            else
                  return &netvector_t::atom2u16;
          case BYTE:
            if (signed_flag)
                  return &netvector_t::atom2s8;
            else
                  return &netvector_t::atom2u8;
          default:
            cerr << get_fileline() << ": internal error: "
                 << "atom_type_t type_code=" << type_code << "." << endl;
            des->errors += 1;
            return 0;
      }
}

ivl_type_t class_type_t::elaborate_type_raw(Design*des, NetScope*scope) const
{
      if (save_elaborated_type)
            return save_elaborated_type;

      return scope->find_class(des, name);
}

/*
 * elaborate_type_raw for enumerations is actually mostly performed
 * during scope elaboration so that the enumeration literals are
 * available at the right time. At that time, the netenum_t* object is
 * stashed in the scope so that I can retrieve it here.
 */
ivl_type_t enum_type_t::elaborate_type_raw(Design*, NetScope*scope) const
{
      ivl_assert(*this, scope);

      ivl_type_t tmp = scope->enumeration_for_key(this);
      if (tmp == 0 && scope->unit())
            tmp = scope->unit()->enumeration_for_key(this);

      return tmp;
}

ivl_type_t vector_type_t::elaborate_type_raw(Design*des, NetScope*scope) const
{
      vector<netrange_t> packed;
      if (pdims.get())
            evaluate_ranges(des, scope, this, packed, *pdims);

      netvector_t*tmp = new netvector_t(packed, base_type);
      tmp->set_signed(signed_flag);
      tmp->set_isint(integer_flag);
      tmp->set_implicit(implicit_flag);

      return tmp;
}

ivl_type_t real_type_t::elaborate_type_raw(Design*, NetScope*) const
{
      switch (type_code_) {
          case REAL:
            return &netreal_t::type_real;
          case SHORTREAL:
            return &netreal_t::type_shortreal;
      }
      return 0;
}

ivl_type_t string_type_t::elaborate_type_raw(Design*, NetScope*) const
{
      return &netstring_t::type_string;
}

ivl_type_t parray_type_t::elaborate_type_raw(Design*des, NetScope*scope) const
{
      vector<netrange_t> packed;
      if (dims.get())
            evaluate_ranges(des, scope, this, packed, *dims);

      ivl_type_t etype = base_type->elaborate_type(des, scope);

      if (!etype->packed()) {
            cerr << this->get_fileline() << ": error: Packed array ";
            if (!name.nil())
                  cerr << "`" << name << "` ";
            cerr << "base-type `";
            if (base_type->name.nil())
                  cerr << *base_type;
            else
                  cerr << base_type->name;
            cerr << "` is not packed." << endl;
            des->errors++;
      }

      return new netparray_t(packed, etype);
}

ivl_type_t struct_type_t::elaborate_type_raw(Design*des, NetScope*scope) const
{
      netstruct_t*res = new netstruct_t;
      res->set_line(*this);
      res->packed(packed_flag);
      res->set_signed(signed_flag);

      if (union_flag)
            res->union_flag(true);

      for (list<struct_member_t*>::iterator cur = members->begin()
                 ; cur != members->end() ; ++ cur) {

              // Elaborate the type of the member.
            struct_member_t*curp = *cur;
            ivl_type_t mem_vec = curp->type->elaborate_type(des, scope);
            if (mem_vec == 0)
                  continue;

              // There may be several names that share the same type:
              //   name1, name2, ...;
              // Process all the members, and give each one a type.
            for (list<decl_assignment_t*>::iterator cur_name = curp->names->begin()
                       ; cur_name != curp->names->end() ; ++ cur_name) {
                  decl_assignment_t*namep = *cur_name;

                  netstruct_t::member_t memb;
                  memb.name = namep->name;
                  memb.net_type = elaborate_array_type(des, scope, *this,
                                                       mem_vec, namep->index);
                  res->append_member(des, memb);
            }
      }

      return res;
}
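/*
 * The helpers below elaborate the unpacked dimensions of a declaration.
 * Roughly, the SystemVerilog forms handled are, for example:
 *
 *     int a [7:0];   // fixed-size unpacked array
 *     int d [];      // dynamic array
 *     int q [$];     // unbounded queue
 *     int b [$:8];   // queue with maximum index 8
 *
 * A dynamic array reaches elaborate_array_type() as a dimension with no
 * index expressions at all, and a queue as a [null:max_idx] dimension.
 */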
static ivl_type_t elaborate_darray_check_type(Design *des, const LineInfo &li,
                                              ivl_type_t type,
                                              const char *darray_type)
{
      if (dynamic_cast<const netvector_t*>(type) ||
          dynamic_cast<const netreal_t*>(type) ||
          dynamic_cast<const netstring_t*>(type) ||
          dynamic_cast<const netclass_t*>(type))
            return type;

      cerr << li.get_fileline() << ": sorry: " << darray_type
           << " of type `" << *type << "` is not yet supported." << endl;
      des->errors++;

        // Return something to recover
      return new netvector_t(IVL_VT_LOGIC);
}

static ivl_type_t elaborate_queue_type(Design *des, NetScope *scope,
                                       const LineInfo &li,
                                       ivl_type_t base_type, PExpr *ridx)
{
      base_type = elaborate_darray_check_type(des, li, base_type, "Queue");

      long max_idx = -1;
      if (ridx) {
            NetExpr*tmp = elab_and_eval(des, scope, ridx, -1, true);
            NetEConst*cv = dynamic_cast<NetEConst*>(tmp);
            if (cv == 0) {
                  cerr << li.get_fileline() << ": error: "
                       << "queue bound must be constant." << endl;
                  des->errors++;
            } else {
                  verinum res = cv->value();
                  if (res.is_defined()) {
                        max_idx = res.as_long();
                        if (max_idx < 0) {
                              cerr << li.get_fileline() << ": error: "
                                   << "queue bound must be positive ("
                                   << max_idx << ")." << endl;
                              des->errors++;
                              max_idx = -1;
                        }
                  } else {
                        cerr << li.get_fileline() << ": error: "
                             << "queue bound must be defined." << endl;
                        des->errors++;
                  }
            }
            delete cv;
      }

      return new netqueue_t(base_type, max_idx);
}

// If dims is not empty, create an unpacked array type and clear dims,
// otherwise return the base type. Also check that we actually support
// the base type.
static ivl_type_t elaborate_static_array_type(Design *des, const LineInfo &li,
                                              ivl_type_t base_type,
                                              std::vector<netrange_t> &dims)
{
      if (dims.empty())
            return base_type;

      if (dynamic_cast<const netqueue_t*>(base_type)) {
            cerr << li.get_fileline() << ": sorry: "
                 << "array of queue type is not yet supported." << endl;
            des->errors++;
              // Recover
            base_type = new netvector_t(IVL_VT_LOGIC);
      } else if (dynamic_cast<const netdarray_t*>(base_type)) {
            cerr << li.get_fileline() << ": sorry: "
                 << "array of dynamic array type is not yet supported." << endl;
            des->errors++;
              // Recover
            base_type = new netvector_t(IVL_VT_LOGIC);
      }

      ivl_type_t type = new netuarray_t(dims, base_type);
      dims.clear();

      return type;
}

ivl_type_t elaborate_array_type(Design *des, NetScope *scope,
                                const LineInfo &li, ivl_type_t base_type,
                                const list<pform_range_t> &dims)
{
      const long warn_dimension_size = 1 << 30;

      std::vector<netrange_t> dimensions;
      dimensions.reserve(dims.size());

      ivl_type_t type = base_type;

      for (list<pform_range_t>::const_iterator cur = dims.begin()
                 ; cur != dims.end() ; ++cur) {
            PExpr *lidx = cur->first;
            PExpr *ridx = cur->second;

            if (lidx == 0 && ridx == 0) {
                    // Special case: If we encounter an undefined dimension,
                    // then turn this into a dynamic array, folding any
                    // dimensions collected so far into its element type.
                  type = elaborate_static_array_type(des, li, type, dimensions);
                  type = elaborate_darray_check_type(des, li, type,
                                                     "Dynamic array");
                  type = new netdarray_t(type);
                  continue;
            } else if (dynamic_cast<PENull*>(lidx)) {
                    // Special case: Detect the mark for a QUEUE declaration,
                    // which is the dimensions [null:max_idx].
                  type = elaborate_static_array_type(des, li, type, dimensions);
                  type = elaborate_queue_type(des, scope, li, type, ridx);
                  continue;
            }

            long index_l, index_r;
            evaluate_range(des, scope, &li, *cur, index_l, index_r);

            if (abs(index_r - index_l) > warn_dimension_size) {
                  cerr << li.get_fileline() << ": warning: "
                       << "Array dimension is greater than "
                       << warn_dimension_size << "." << endl;
            }

            dimensions.push_back(netrange_t(index_l, index_r));
      }

      return elaborate_static_array_type(des, li, type, dimensions);
}

ivl_type_t uarray_type_t::elaborate_type_raw(Design*des, NetScope*scope) const
{
      ivl_type_t btype = base_type->elaborate_type(des, scope);
      return elaborate_array_type(des, scope, *this, btype, *dims.get());
}