// OpenSTA/search/PathGroup.cc

// OpenSTA, Static Timing Analyzer
// Copyright (c) 2025, Parallax Software, Inc.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
// The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software.
//
// Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
//
// This notice may not be removed or altered from any source distribution.
#include "PathGroup.hh"
#include <algorithm>
#include <limits>
#include <map>
#include <set>
#include "ContainerHelpers.hh"
#include "StringUtil.hh"
#include "Stats.hh"
#include "Debug.hh"
#include "Mutex.hh"
#include "Fuzzy.hh"
#include "MinMax.hh"
#include "DispatchQueue.hh"
#include "ExceptionPath.hh"
#include "Sdc.hh"
#include "Mode.hh"
#include "Graph.hh"
#include "PathEnd.hh"
#include "Tag.hh"
#include "Scene.hh"
#include "Search.hh"
#include "VisitPathEnds.hh"
#include "PathEnum.hh"
namespace sta {
// Sentinel meaning "no limit" on the number of path ends kept per group.
size_t PathGroup::group_path_count_max = std::numeric_limits<size_t>::max();
// Factory for a group whose path ends are ordered and filtered by slack.
// Caller owns the returned group.
PathGroup *
PathGroup::makePathGroupSlack(const char *name,
                              int group_path_count,
                              int endpoint_path_count,
                              bool unique_pins,
                              bool unique_edges,
                              float slack_min,
                              float slack_max,
                              const StaState *sta)
{
  PathGroup *group = new PathGroup(name, group_path_count,
                                   endpoint_path_count,
                                   unique_pins, unique_edges,
                                   slack_min, slack_max,
                                   true, MinMax::min(), sta);
  return group;
}
// Factory for a group whose path ends are ordered by arrival time
// (used for unconstrained paths).  Caller owns the returned group.
PathGroup *
PathGroup::makePathGroupArrival(const char *name,
                                int group_path_count,
                                int endpoint_path_count,
                                bool unique_pins,
                                bool unique_edges,
                                const MinMax *min_max,
                                const StaState *sta)
{
  PathGroup *group = new PathGroup(name, group_path_count,
                                   endpoint_path_count,
                                   unique_pins, unique_edges,
                                   0.0, 0.0,
                                   false, min_max, sta);
  return group;
}
// Private constructor; use the makePathGroupSlack/makePathGroupArrival
// factories.  cmp_slack selects whether ends are filtered/ordered by
// slack or by data arrival time.
PathGroup::PathGroup(const char *name,
                     size_t group_path_count,
                     size_t endpoint_path_count,
                     bool unique_pins,
                     bool unique_edges,
                     float slack_min,
                     float slack_max,
                     bool cmp_slack,
                     const MinMax *min_max,
                     const StaState *sta) :
  name_(name),
  group_path_count_(group_path_count),
  endpoint_path_count_(endpoint_path_count),
  unique_pins_(unique_pins),
  unique_edges_(unique_edges),
  slack_min_(slack_min),
  slack_max_(slack_max),
  min_max_(min_max),
  compare_slack_(cmp_slack),
  // Worst possible value; tightened by prune() as ends accumulate.
  threshold_(min_max->initValue()),
  sta_(sta)
{
}
PathGroup::~PathGroup()
{
  // The group owns its path ends.
  deleteContents(path_ends_);
}
// True when path_end is bad enough to keep in this group: inside the
// slack window [slack_min_, slack_max_] and at least as bad as the
// current pruning threshold.
bool
PathGroup::saveable(PathEnd *path_end)
{
  // Snapshot the threshold under the lock; concurrent inserts may
  // tighten it while we compare below.
  float threshold;
  {
    LockGuard lock(lock_);
    threshold = threshold_;
  }
  if (compare_slack_) {
    // Crpr increases the slack, so check the slack
    // without crpr first because it is expensive to find.
    Slack slack = path_end->slackNoCrpr(sta_);
    if (!delayIsInitValue(slack, min_max_)
        && delayLessEqual(slack, threshold, sta_)
        && delayLessEqual(slack, slack_max_, sta_)) {
      // Now check with crpr.
      slack = path_end->slack(sta_);
      return delayLessEqual(slack, threshold, sta_)
        && delayLessEqual(slack, slack_max_, sta_)
        && delayGreaterEqual(slack, slack_min_, sta_);
    }
  }
  else {
    // Arrival-based (unconstrained) groups keep ends whose arrival is
    // at least as bad as the threshold for this min/max.
    const Arrival &arrival = path_end->dataArrivalTime(sta_);
    return !delayIsInitValue(arrival, min_max_)
      && delayGreaterEqual(arrival, threshold, min_max_, sta_);
  }
  return false;
}
// endpoint_path_count > 1 with slack_min requires
// saving endpoints with slack > slack_min so that
// path enumeration can find them. Use the path end
// with the min(max) delay to prune ends that cannot
// onion peel down to slack_min.
// Returns true when some sibling path with a matching crpr tag has
// slack above slack_min_, so path enumeration may still peel this
// end's paths down toward slack_min_.
bool
PathGroup::enumMinSlackUnderMin(PathEnd *path_end)
{
  if (compare_slack_
      && endpoint_path_count_ > 1
      && slack_min_ > -INF) {
    const Path *path = path_end->path();
    const Tag *tag = path->tag(sta_);
    VertexPathIterator other_iter(path->vertex(sta_),
                                  path->scene(sta_),
                                  path->tgtClkMinMax(sta_),
                                  path->transition(sta_), sta_);
    while (other_iter.hasNext()) {
      Path *other = other_iter.next();
      if (Tag::matchCrpr(other->tag(sta_), tag)) {
        // Rebind a copy of the end to the sibling path to get its slack.
        PathEnd *end_min = path_end->copy();
        end_min->setPath(other);
        float slack = delayAsFloat(end_min->slackNoCrpr(sta_));
        // Renamed from the misleading "slack_under": this is true when
        // the sibling's slack is ABOVE slack_min_.
        bool above_slack_min = fuzzyGreater(slack, slack_min_);
        delete end_min;
        if (above_slack_min)
          return true;
      }
    }
  }
  return false;
}
// Take ownership of path_end and add it to the group.
// Thread-safe; called from parallel path end visitors.
void
PathGroup::insert(PathEnd *path_end)
{
  LockGuard lock(lock_);
  path_ends_.push_back(path_end);
  path_end->setPathGroup(this);
  // Prune lazily at twice the requested count to amortize sorting.
  if (group_path_count_ != group_path_count_max
      && path_ends_.size() > group_path_count_ * 2)
    prune();
}
void
PathGroup::prune()
{
sort();
VertexPathCountMap path_counts;
size_t end_count = 0;
for (unsigned i = 0; i < path_ends_.size(); i++) {
PathEnd *path_end = path_ends_[i];
Vertex *vertex = path_end->vertex(sta_);
// Squish up to endpoint_path_count path ends per vertex
// up to the front of path_ends_.
if (end_count < group_path_count_
&& path_counts[vertex] < endpoint_path_count_) {
path_ends_[end_count++] = path_end;
path_counts[vertex]++;
}
else
delete path_end;
}
path_ends_.resize(end_count);
// Set a threshold to the bottom of the sorted list that future
// inserts need to beat.
PathEnd *last_end = path_ends_[end_count - 1];
if (compare_slack_)
threshold_ = delayAsFloat(last_end->slack(sta_));
else
threshold_ = delayAsFloat(last_end->dataArrivalTime(sta_));
}
// Append this group's sorted path ends to the caller's sequence.
// Ownership of the ends stays with the group.
void
PathGroup::pushEnds(PathEndSeq &path_ends)
{
  ensureSortedMaxPaths();
  path_ends.insert(path_ends.end(), path_ends_.begin(), path_ends_.end());
}
// Make sure path_ends_ is sorted and within group_path_count_.
// Pruning sorts as a side effect; otherwise sort explicitly.
void
PathGroup::ensureSortedMaxPaths()
{
  if (path_ends_.size() <= group_path_count_)
    sort();
  else
    prune();
}
// Order the path ends worst-first (see PathEndLess).
void
PathGroup::sort()
{
  sta::sort(path_ends_, PathEndLess(sta_));
}
// Forget all path ends WITHOUT deleting them (enumPathEnds transfers
// ownership to the path enumerator first) and reset the threshold.
void
PathGroup::clear()
{
  LockGuard lock(lock_);
  threshold_ = min_max_->initValue();
  path_ends_.clear();
}
////////////////////////////////////////////////////////////////
// Report names for the built-in (non-user-defined) path groups.
const char *PathGroups::path_delay_group_name_ = "path delay";
const char *PathGroups::gated_clk_group_name_ = "gated clock";
const char *PathGroups::async_group_name_ = "asynchronous";
const char *PathGroups::unconstrained_group_name_ = "unconstrained";
// Build the path groups for both min (hold-class) and max
// (setup-class) checks.  group_names filters which groups are made
// (empty means all).
PathGroups::PathGroups(int group_path_count,
                       int endpoint_path_count,
                       bool unique_pins,
                       bool unique_edges,
                       float slack_min,
                       float slack_max,
                       StringSeq &group_names,
                       bool setup,
                       bool hold,
                       bool recovery,
                       bool removal,
                       bool clk_gating_setup,
                       bool clk_gating_hold,
                       bool unconstrained,
                       const Mode *mode) :
  StaState(mode),
  mode_(mode),
  group_path_count_(group_path_count),
  endpoint_path_count_(endpoint_path_count),
  unique_pins_(unique_pins),
  unique_edges_(unique_edges),
  slack_min_(slack_min),
  slack_max_(slack_max)
{
  // Copy the requested names into a set for fast membership tests.
  StringSet groups;
  for (std::string &group_name : group_names)
    groups.insert(group_name);
  // Max corner: setup / recovery / clock-gating-setup checks.
  makeGroups(group_path_count, endpoint_path_count, unique_pins, unique_edges,
             slack_min, slack_max, groups,
             setup, recovery, clk_gating_setup, unconstrained,
             MinMax::max());
  // Min corner: hold / removal / clock-gating-hold checks.
  makeGroups(group_path_count, endpoint_path_count, unique_pins, unique_edges,
             slack_min, slack_max, groups,
             hold, removal, clk_gating_hold, unconstrained,
             MinMax::min());
}
void
PathGroups::makeGroups(int group_path_count,
int endpoint_path_count,
bool unique_pins,
bool unique_edges,
float slack_min,
float slack_max,
StringSet &group_names,
bool setup_hold,
bool async,
bool gated_clk,
bool unconstrained,
const MinMax *min_max)
{
int mm_index = min_max->index();
if (setup_hold) {
const Sdc *sdc = mode_->sdc();
for (const auto& [name, group] : sdc->groupPaths()) {
if (reportGroup(name, group_names)) {
PathGroup *group = PathGroup::makePathGroupSlack(name,
group_path_count,
endpoint_path_count,
unique_pins,
unique_edges,
slack_min, slack_max,
this);
named_map_[mm_index][name] = group;
}
}
for (Clock *clk : sdc->clocks()) {
const char *clk_name = clk->name();
if (reportGroup(clk_name, group_names)) {
PathGroup *group = PathGroup::makePathGroupSlack(clk_name,
group_path_count,
endpoint_path_count,
unique_pins,
unique_edges,
slack_min, slack_max,
this);
clk_map_[mm_index][clk] = group;
}
}
}
if (setup_hold
&& reportGroup(path_delay_group_name_, group_names))
path_delay_[mm_index] = PathGroup::makePathGroupSlack(path_delay_group_name_,
group_path_count,
endpoint_path_count,
unique_pins,
unique_edges,
slack_min, slack_max,
this);
else
path_delay_[mm_index] = nullptr;
if (gated_clk
&& reportGroup(gated_clk_group_name_, group_names))
gated_clk_[mm_index] = PathGroup::makePathGroupSlack(gated_clk_group_name_,
group_path_count,
endpoint_path_count,
unique_pins,
unique_edges,
slack_min, slack_max,
this);
else
gated_clk_[mm_index] = nullptr;
if (async
&& reportGroup(async_group_name_, group_names))
async_[mm_index] = PathGroup::makePathGroupSlack(async_group_name_,
group_path_count,
endpoint_path_count,
unique_pins,
unique_edges,
slack_min, slack_max,
this);
else
async_[mm_index] = nullptr;
if (unconstrained
&& reportGroup(unconstrained_group_name_, group_names))
unconstrained_[mm_index] =
PathGroup::makePathGroupArrival(unconstrained_group_name_,
group_path_count, endpoint_path_count,
unique_pins, unique_edges, min_max, this);
else
unconstrained_[mm_index] = nullptr;
}
PathGroups::~PathGroups()
{
  // Delete the groups for both corners; built-in slots may be null.
  for (auto mm_index : MinMax::rangeIndex()) {
    deleteContents(named_map_[mm_index]);
    deleteContents(clk_map_[mm_index]);
    delete path_delay_[mm_index];
    delete gated_clk_[mm_index];
    delete async_[mm_index];
    delete unconstrained_[mm_index];
  }
}
// Find a named group for the given corner, or nullptr if it was not made.
PathGroup *
PathGroups::findPathGroup(const char *name,
                          const MinMax *min_max) const
{
  const auto &groups = named_map_[min_max->index()];
  auto itr = groups.find(name);
  return (itr == groups.end()) ? nullptr : itr->second;
}
// Find a clock's group for the given corner, or nullptr if it was not made.
PathGroup *
PathGroups::findPathGroup(const Clock *clock,
                          const MinMax *min_max) const
{
  const auto &groups = clk_map_[min_max->index()];
  auto itr = groups.find(clock);
  return (itr == groups.end()) ? nullptr : itr->second;
}
// True when group_name should be reported: either no explicit names
// were requested, or group_name is one of them.
bool
PathGroups::reportGroup(const char *group_name,
                        StringSet &group_names) const
{
  if (group_names.empty())
    return true;
  return group_names.contains(group_name);
}
// The groups that path_end is reported under, in precedence order:
// unconstrained, named group paths, async checks, per-clock checks,
// gated clock, and path delay.  May be empty if no matching group
// was made.
PathGroupSeq
PathGroups::pathGroups(const PathEnd *path_end) const
{
  PathGroupSeq path_groups;
  PathGroup *path_group = nullptr;
  ExceptionPathSeq group_paths = search_->groupPathsTo(path_end);
  const MinMax *min_max = path_end->minMax(this);
  int mm_index = min_max->index();
  if (path_end->isUnconstrained())
    path_group = unconstrained_[mm_index];
  // GroupPaths have precedence.
  else if (!group_paths.empty()) {
    for (ExceptionPath *group_path : group_paths) {
      // The default group path maps to the path delay group.
      if (group_path->isDefault()) {
        if (path_delay_[mm_index])
          path_groups.push_back(path_delay_[mm_index]);
      }
      else {
        const char *group_name = group_path->name();
        PathGroup *group = findPathGroup(group_name, min_max);
        if (group)
          path_groups.push_back(group);
      }
    }
  }
  else if (path_end->isCheck() || path_end->isLatchCheck()) {
    // Recovery/removal checks go to the async group; other checks
    // belong to their target clock's group.
    const TimingRole *check_role = path_end->checkRole(this);
    const Clock *tgt_clk = path_end->targetClk(this);
    if (check_role == TimingRole::removal()
        || check_role == TimingRole::recovery())
      path_group = async_[mm_index];
    else
      path_group = findPathGroup(tgt_clk, min_max);
  }
  else if (path_end->isOutputDelay()
           || path_end->isDataCheck())
    path_group = findPathGroup(path_end->targetClk(this), min_max);
  else if (path_end->isGatedClock())
    path_group = gated_clk_[mm_index];
  else if (path_end->isPathDelay()) {
    // Path delays that end at timing checks are part of the target clk group
    // unless -ignore_clock_latency is true.
    PathDelay *path_delay = path_end->pathDelay();
    const Clock *tgt_clk = path_end->targetClk(this);
    if (tgt_clk
        && !path_delay->ignoreClkLatency())
      path_group = findPathGroup(tgt_clk, min_max);
    else
      path_group = path_delay_[mm_index];
  }
  if (path_group)
    path_groups.push_back(path_group);
  return path_groups;
}
// Mirrors PathGroups::pathGroups, but returns the report names the
// path end falls under instead of the group objects.
StringSeq
PathGroups::pathGroupNames(const PathEnd *path_end,
                           const StaState *sta)
{
  StringSeq group_names;
  const char *group_name = nullptr;
  const Search *search = sta->search();
  ExceptionPathSeq group_paths = search->groupPathsTo(path_end);
  if (path_end->isUnconstrained())
    group_name = unconstrained_group_name_;
  else if (!group_paths.empty()) {
    // GroupPaths have precedence.
    for (ExceptionPath *group_path : group_paths) {
      if (group_path->isDefault())
        group_names.push_back(path_delay_group_name_);
      else
        group_names.push_back(group_path->name());
    }
  }
  else if (path_end->isCheck() || path_end->isLatchCheck()) {
    const TimingRole *check_role = path_end->checkRole(sta);
    const Clock *tgt_clk = path_end->targetClk(sta);
    if (check_role == TimingRole::removal()
        || check_role == TimingRole::recovery())
      group_name = async_group_name_;
    // Guard against a missing target clock, matching the null
    // tolerance of pathGroups() and the output delay case below.
    else if (tgt_clk)
      group_name = tgt_clk->name();
  }
  else if (path_end->isOutputDelay()
           || path_end->isDataCheck()) {
    const Clock *tgt_clk = path_end->targetClk(sta);
    if (tgt_clk)
      group_name = tgt_clk->name();
  }
  else if (path_end->isGatedClock())
    group_name = gated_clk_group_name_;
  else if (path_end->isPathDelay()) {
    // Path delays that end at timing checks are part of the target clk group
    // unless -ignore_clock_latency is true.
    PathDelay *path_delay = path_end->pathDelay();
    const Clock *tgt_clk = path_end->targetClk(sta);
    if (tgt_clk
        && !path_delay->ignoreClkLatency())
      group_name = tgt_clk->name();
    else
      group_name = path_delay_group_name_;
  }
  if (group_name)
    group_names.push_back(group_name);
  return group_names;
}
// Find the group path exception (if any) matching path_end's endpoint.
// Returns nullptr when no group_path exception applies.
GroupPath *
PathGroups::groupPathTo(const PathEnd *path_end,
                        const StaState *sta)
{
  const Path *path = path_end->path();
  const Pin *pin = path->pin(sta);
  ExceptionPath *exception =
    sta->search()->exceptionTo(ExceptionPathType::group_path, path,
                               pin, path->transition(sta),
                               path_end->targetClkEdge(sta),
                               path->minMax(sta), false, false,
                               path->sdc(sta));
  // exceptionTo returns the generic base type; narrow it here.
  return dynamic_cast<GroupPath*>(exception);
}
// Push the ends of all groups in report order: named groups, async,
// gated clock, path delay, then per-clock groups, for each corner.
void
PathGroups::pushEnds(PathEndSeq &path_ends)
{
  // The group names and sorted clocks are identical for both corners,
  // so hoist them out of the min/max loop instead of recomputing them
  // each iteration.
  StringSeq group_names = pathGroupNames();
  const Sdc *sdc = mode_->sdc();
  ClockSeq clks = sdc->sortedClocks();
  for (const MinMax *min_max : MinMax::range()) {
    int mm_index = min_max->index();
    for (std::string &group_name : group_names) {
      PathGroup *path_group = findPathGroup(group_name.c_str(), min_max);
      if (path_group)
        path_group->pushEnds(path_ends);
    }
    if (async_[mm_index])
      async_[mm_index]->pushEnds(path_ends);
    if (gated_clk_[mm_index])
      gated_clk_[mm_index]->pushEnds(path_ends);
    if (path_delay_[mm_index])
      path_delay_[mm_index]->pushEnds(path_ends);
    for (Clock *clk : clks) {
      PathGroup *path_group = findPathGroup(clk, min_max);
      if (path_group)
        path_group->pushEnds(path_ends);
    }
  }
}
// The unique group path names, sorted.  std::set already iterates in
// ascending order, so the previous extra sort pass was redundant.
StringSeq
PathGroups::pathGroupNames()
{
  std::set<std::string> unique_names;
  const Sdc *sdc = mode_->sdc();
  for (const auto& [name, group] : sdc->groupPaths())
    unique_names.insert(name);
  StringSeq names;
  names.reserve(unique_names.size());
  for (const std::string &name : unique_names)
    names.push_back(name);
  return names;
}
// Push the unconstrained path ends for the requested corner(s).
// With multiple scene path APs both corners can share the same group,
// so each group is reported only once.
void
PathGroups::pushUnconstrainedPathEnds(PathEndSeq &path_ends,
                                      const MinMaxAll *min_max)
{
  std::set<PathGroup *> visited;
  for (const MinMax *mm : min_max->range()) {
    PathGroup *group = unconstrained_[mm->index()];
    // insert().second is false when the group was already reported.
    if (group
        && visited.insert(group).second)
      group->pushEnds(path_ends);
  }
}
////////////////////////////////////////////////////////////////
// Worst end per group at the current vertex (endpoint_path_count == 1).
using PathGroupEndMap = std::map<PathGroup*, PathEnd*>;
// All ends per group at the current vertex (endpoint_path_count > 1).
using PathGroupEndsMap = std::map<PathGroup*, PathEndSeq*>;
// Set keyed so that ends with matching crpr tags compare equal.
using PathEndNoCrprSet = std::set<PathEnd*, PathEndNoCrprLess>;
static bool
exceptionToEmpty(ExceptionTo *to);
// Top-level entry: collect the path ends for every group and return
// them in path_ends, optionally sorted by slack, reporting
// unconstrained paths only when nothing constrained was found.
void
PathGroups::makePathEnds(ExceptionTo *to,
                         const SceneSeq &scenes,
                         const MinMaxAll *min_max,
                         bool sort_by_slack,
                         bool unconstrained_paths,
                         // Return value.
                         PathEndSeq &path_ends)
{
  Stats stats(debug_, report_);
  makeGroupPathEnds(to, group_path_count_, endpoint_path_count_,
                    unique_pins_, unique_edges_, scenes, min_max);
  pushEnds(path_ends);
  if (sort_by_slack) {
    sort(path_ends, PathEndLess(this));
  }
  if (unconstrained_paths
      && path_ends.empty())
    // No constrained paths, so report unconstrained paths.
    pushUnconstrainedPathEnds(path_ends, min_max);
  stats.report("Make path ends");
}
////////////////////////////////////////////////////////////////
// Visit each path end for a vertex and add the worst one in each
// path group to the group.
class MakePathEnds1 : public PathEndVisitor
{
public:
  MakePathEnds1(PathGroups *path_groups);
  MakePathEnds1(const MakePathEnds1&) = default;
  virtual PathEndVisitor *copy() const;
  virtual void visit(PathEnd *path_end);
  // Called after all ends of a vertex are visited; flushes ends_.
  virtual void vertexEnd(Vertex *vertex);

private:
  void visitPathEnd(PathEnd *path_end,
                    PathGroup *group);
  PathGroups *path_groups_;
  // Worst end seen so far per group at the current vertex.
  PathGroupEndMap ends_;
  PathEndLess cmp_;
};
MakePathEnds1::MakePathEnds1(PathGroups *path_groups) :
  path_groups_(path_groups),
  cmp_(path_groups)
{
}
// Clone for per-thread use by the dispatch queue.
PathEndVisitor *
MakePathEnds1::copy() const
{
  return new MakePathEnds1(*this);
}
// Offer path_end to every group it belongs to.
void
MakePathEnds1::visit(PathEnd *path_end)
{
  for (PathGroup *group : path_groups_->pathGroups(path_end))
    visitPathEnd(path_end, group);
}
// Track the single worst (smallest slack / latest arrival) end for
// this group at the current vertex.
void
MakePathEnds1::visitPathEnd(PathEnd *path_end,
                            PathGroup *group)
{
  if (group->saveable(path_end)) {
    PathEnd *worst_end = findKey(ends_, group);
    // Replace when there is no contender yet or path_end is worse.
    if (worst_end == nullptr
        || cmp_(path_end, worst_end)) {
      ends_[group] = path_end->copy();
      // delete on nullptr is a no-op.
      delete worst_end;
    }
  }
}
// Save the worst end for each path group.
void
MakePathEnds1::vertexEnd(Vertex *)
{
  // Iterate by reference so the map entry can be reset in place;
  // the previous version copied each pair and re-looked-up the key
  // with ends_[group].
  for (auto &[group, end] : ends_) {
    // visitPathEnd already confirmed slack is saveable.
    if (end) {
      group->insert(end);
      // Clear the entry for the next vertex.
      end = nullptr;
    }
  }
}
////////////////////////////////////////////////////////////////
// Visit each path end and add it to the corresponding path group.
// After collecting the ends do parallel path enumeration to find the
// path ends for the group.
class MakePathEndsAll : public PathEndVisitor
{
public:
  MakePathEndsAll(int endpoint_path_count,
                  PathGroups *path_groups);
  MakePathEndsAll(const MakePathEndsAll&) = default;
  virtual ~MakePathEndsAll();
  virtual PathEndVisitor *copy() const;
  virtual void visit(PathEnd *path_end);
  // Called after all ends of a vertex are visited; flushes ends_.
  virtual void vertexEnd(Vertex *vertex);

private:
  void visitPathEnd(PathEnd *path_end,
                    PathGroup *group);
  int endpoint_path_count_;
  PathGroups *path_groups_;
  const StaState *sta_;
  // All ends per group at the current vertex (owned copies).
  PathGroupEndsMap ends_;
  PathEndSlackLess slack_cmp_;
  PathEndNoCrprLess path_no_crpr_cmp_;
};
MakePathEndsAll::MakePathEndsAll(int endpoint_path_count,
                                 PathGroups *path_groups) :
  endpoint_path_count_(endpoint_path_count),
  path_groups_(path_groups),
  // PathGroups is-a StaState; keep a base pointer for queries.
  sta_(path_groups),
  slack_cmp_(path_groups),
  path_no_crpr_cmp_(path_groups)
{
}
// Clone for per-thread use by the dispatch queue.
PathEndVisitor *
MakePathEndsAll::copy() const
{
  return new MakePathEndsAll(*this);
}
MakePathEndsAll::~MakePathEndsAll()
{
  // Deletes the per-group sequences (the PathEndSeq pointers).
  deleteContents(ends_);
}
// Offer path_end to every group it belongs to.
void
MakePathEndsAll::visit(PathEnd *path_end)
{
  for (PathGroup *group : path_groups_->pathGroups(path_end))
    visitPathEnd(path_end, group);
}
// Collect a copy of every end per group; vertexEnd() filters them.
void
MakePathEndsAll::visitPathEnd(PathEnd *path_end,
                              PathGroup *group)
{
  // Lazily create the per-group sequence on first use.
  PathEndSeq *&group_ends = ends_[group];
  if (group_ends == nullptr)
    group_ends = new PathEndSeq;
  group_ends->push_back(path_end->copy());
}
// Flush the ends collected for the current vertex into their groups,
// keeping at most endpoint_path_count_ worst ends and only one end
// per crpr tag (PathEnum peels the rest later).
void
MakePathEndsAll::vertexEnd(Vertex *)
{
  Debug *debug = sta_->debug();
  for (auto [group, ends] : ends_) {
    if (ends) {
      // Worst (smallest slack) first.
      sort(ends, slack_cmp_);
      PathEndNoCrprSet unique_ends(path_no_crpr_cmp_);
      auto end_iter = ends->begin();
      int n = 0;
      while (end_iter != ends->end()
             && n < endpoint_path_count_) {
        PathEnd *path_end = *end_iter++;
        // Only save the worst path end for each crpr tag.
        // PathEnum will peel the others.
        if (!unique_ends.contains(path_end)) {
          debugPrint(debug, "path_group", 2, "insert %s %s %s %d",
                     path_end->vertex(sta_)->to_string(sta_).c_str(),
                     path_end->typeName(),
                     path_end->transition(sta_)->shortName(),
                     path_end->path()->tag(sta_)->index());
          // Give the group a copy of the path end because
          // it may delete it during pruning.
          if (group->saveable(path_end)
              || group->enumMinSlackUnderMin(path_end)) {
            group->insert(path_end->copy());
            unique_ends.insert(path_end);
            n++;
          }
        }
        else
          debugPrint(debug, "path_group", 3, "prune %s %s %s %d",
                     path_end->vertex(sta_)->to_string(sta_).c_str(),
                     path_end->typeName(),
                     path_end->transition(sta_)->shortName(),
                     path_end->path()->tag(sta_)->index());
      }
      // Clear ends for next vertex.
      // NOTE(review): assumes deleteContents() also empties the
      // sequence, not just deletes the elements — confirm against
      // ContainerHelpers.hh.
      deleteContents(*ends);
    }
  }
}
////////////////////////////////////////////////////////////////
// Collect path ends into the groups.  For endpoint_path_count > 1 a
// follow-up path enumeration pass finds the additional paths per
// endpoint for every group.
void
PathGroups::makeGroupPathEnds(ExceptionTo *to,
                              int group_path_count,
                              int endpoint_path_count,
                              bool unique_pins,
                              bool unique_edges,
                              const SceneSeq &scenes,
                              const MinMaxAll *min_max)
{
  if (endpoint_path_count == 1) {
    // Only the single worst end per vertex/group is needed.
    MakePathEnds1 make_path_ends(this);
    makeGroupPathEnds(to, scenes, min_max, &make_path_ends);
  }
  else {
    MakePathEndsAll make_path_ends(endpoint_path_count, this);
    makeGroupPathEnds(to, scenes, min_max, &make_path_ends);
    // Enumerate each group's paths for both corners.
    for (const MinMax *path_min_max : MinMax::range()) {
      int mm_index = path_min_max->index();
      // Named group paths from every mode in the scenes.
      for (const Mode *mode : Scene::modes(scenes)) {
        const Sdc *sdc = mode->sdc();
        for (const auto& [name, groups] : sdc->groupPaths()) {
          PathGroup *group = findPathGroup(name, path_min_max);
          if (group)
            enumPathEnds(group, group_path_count, endpoint_path_count,
                         unique_pins, unique_edges, true);
        }
      }
      // Per-clock groups.
      const Sdc *sdc = mode_->sdc();
      for (const Clock *clk : sdc->clocks()) {
        PathGroup *group = findPathGroup(clk, path_min_max);
        if (group)
          enumPathEnds(group, group_path_count, endpoint_path_count,
                       unique_pins, unique_edges, true);
      }
      // Built-in groups; unconstrained compares arrivals, not slacks.
      PathGroup *group = unconstrained_[mm_index];
      if (group)
        enumPathEnds(group, group_path_count, endpoint_path_count,
                     unique_pins, unique_edges, false);
      group = path_delay_[mm_index];
      if (group)
        enumPathEnds(group, group_path_count, endpoint_path_count,
                     unique_pins, unique_edges, true);
      group = gated_clk_[mm_index];
      if (group)
        enumPathEnds(group, group_path_count, endpoint_path_count,
                     unique_pins, unique_edges, true);
      group = async_[mm_index];
      if (group)
        enumPathEnds(group, group_path_count, endpoint_path_count,
                     unique_pins, unique_edges, true);
    }
  }
}
// Replace the group's contents with the results of path enumeration.
// Ownership of the existing ends transfers to the enumerator; ends
// returned by the enumerator are owned by the group.
void
PathGroups::enumPathEnds(PathGroup *group,
                         int group_path_count,
                         int endpoint_path_count,
                         bool unique_pins,
                         bool unique_edges,
                         bool cmp_slack)
{
  // Insert the worst max_path path ends in the group into a path
  // enumerator.
  PathEnum path_enum(group_path_count, endpoint_path_count,
                     unique_pins, unique_edges, cmp_slack, this);
  for (PathEnd *end : group->pathEnds()) {
    if (group->saveable(end)
        || group->enumMinSlackUnderMin(end))
      path_enum.insert(end);
  }
  // clear() does not delete the ends; the enumerator now owns them.
  group->clear();
  // Parallel path enumeratation to find the endpoint_path_count/max path ends.
  for (int n = 0; path_enum.hasNext() && n < group_path_count; n++) {
    PathEnd *end = path_enum.next();
    if (group->saveable(end))
      group->insert(end);
    else
      delete end;
  }
}
// Visit the path ends of the relevant endpoints: all endpoints, or,
// when a -to filter is present, only the endpoint vertices of its
// pins/instances.
void
PathGroups::makeGroupPathEnds(ExceptionTo *to,
                              const SceneSeq &scenes,
                              const MinMaxAll *min_max,
                              PathEndVisitor *visitor)
{
  if (exceptionToEmpty(to))
    makeGroupPathEnds(search_->endpoints(), scenes, min_max, visitor);
  else {
    // Only visit -to filter pins.
    ModeSeq modes = Scene::modes(scenes);
    VertexSet endpoints = makeVertexSet(this);
    PinSet pins = to->allPins(network_);
    for (const Pin *pin : pins) {
      // A bidirect pin has both a load and a driver vertex.
      Vertex *vertex, *bidirect_drvr_vertex;
      graph_->pinVertices(pin, vertex, bidirect_drvr_vertex);
      if (vertex
          && search_->isEndpoint(vertex, modes))
        endpoints.insert(vertex);
      if (bidirect_drvr_vertex
          && search_->isEndpoint(bidirect_drvr_vertex, modes))
        endpoints.insert(bidirect_drvr_vertex);
    }
    makeGroupPathEnds(endpoints, scenes, min_max, visitor);
  }
}
// True when the -to filter is absent or names no pins/instances.
static bool
exceptionToEmpty(ExceptionTo *to)
{
  if (to == nullptr)
    return true;
  return to->pins() == nullptr
    && to->instances() == nullptr;
}
////////////////////////////////////////////////////////////////
// Vertex visitor adapter: visiting an endpoint vertex visits all of
// its path ends with the wrapped PathEndVisitor.
class MakeEndpointPathEnds : public VertexVisitor
{
public:
  MakeEndpointPathEnds(PathEndVisitor *path_end_visitor,
                       const SceneSet &scenes,
                       const MinMaxAll *min_max,
                       const StaState *sta);
  MakeEndpointPathEnds(const MakeEndpointPathEnds &make_path_ends);
  ~MakeEndpointPathEnds();
  virtual VertexVisitor *copy() const;
  virtual void visit(Vertex *vertex);

private:
  VisitPathEnds visit_path_ends_;
  // Owned clone of the caller's visitor (deleted in the dtor).
  PathEndVisitor *path_end_visitor_;
  const SceneSet scenes_;
  const MinMaxAll *min_max_;
  const StaState *sta_;
};
MakeEndpointPathEnds::MakeEndpointPathEnds(PathEndVisitor *path_end_visitor,
                                           const SceneSet &scenes,
                                           const MinMaxAll *min_max,
                                           const StaState *sta) :
  visit_path_ends_(sta),
  // Clone so each MakeEndpointPathEnds owns its visitor.
  path_end_visitor_(path_end_visitor->copy()),
  scenes_(scenes),
  min_max_(min_max),
  sta_(sta)
{
}
// Deep-copy constructor; clones the wrapped path end visitor.
MakeEndpointPathEnds::MakeEndpointPathEnds(const MakeEndpointPathEnds &make_path_ends) :
  visit_path_ends_(make_path_ends.sta_),
  path_end_visitor_(make_path_ends.path_end_visitor_->copy()),
  scenes_(make_path_ends.scenes_),
  min_max_(make_path_ends.min_max_),
  sta_(make_path_ends.sta_)
{
}
MakeEndpointPathEnds::~MakeEndpointPathEnds()
{
  // Owns the cloned visitor.
  delete path_end_visitor_;
}
VertexVisitor *
MakeEndpointPathEnds::copy() const
{
  return new MakeEndpointPathEnds(path_end_visitor_, scenes_, min_max_, sta_);
}
// Visit every path end of the endpoint vertex.
void
MakeEndpointPathEnds::visit(Vertex *vertex)
{
  visit_path_ends_.visitPathEnds(vertex, scenes_, min_max_,
                                 true, path_end_visitor_);
}
////////////////////////////////////////////////////////////////
// Visit the path ends of each endpoint, either serially or through
// the dispatch queue with one visitor clone per thread.
void
PathGroups::makeGroupPathEnds(VertexSet &endpoints,
                              const SceneSeq &scenes,
                              const MinMaxAll *min_max,
                              PathEndVisitor *visitor)
{
  if (thread_count_ == 1) {
    MakeEndpointPathEnds end_visitor(visitor, Scene::sceneSet(scenes),
                                     min_max, this);
    for (Vertex *endpoint : endpoints)
      end_visitor.visit(endpoint);
  }
  else {
    // One visitor per thread; dispatch() supplies the thread index i,
    // so each task uses its thread's private visitor.
    std::vector<MakeEndpointPathEnds>
      visitors(thread_count_,
               MakeEndpointPathEnds(visitor, Scene::sceneSet(scenes),
                                    min_max, this));
    for (const auto endpoint : endpoints) {
      dispatch_queue_->dispatch( [endpoint, &visitors](int i)
                                 { visitors[i].visit(endpoint); } );
    }
    dispatch_queue_->finishTasks();
  }
}
} // namespace