From 16c2678e514c1a7c13a2f997ecf04ab40861154a Mon Sep 17 00:00:00 2001 From: dsengupta0628 Date: Tue, 3 Mar 2026 15:20:25 +0000 Subject: [PATCH] handle MT issues in setVertexArrivals, fix non-determ crash Signed-off-by: dsengupta0628 --- graph/Graph.cc | 6 +++++ include/sta/Graph.hh | 3 +++ include/sta/Search.hh | 5 ++++ search/Search.cc | 56 +++++++++++++++++++++++++++++++++++++++---- 4 files changed, 65 insertions(+), 5 deletions(-) diff --git a/graph/Graph.cc b/graph/Graph.cc index 2fe4d934..96323427 100644 --- a/graph/Graph.cc +++ b/graph/Graph.cc @@ -1126,6 +1126,12 @@ Vertex::deletePaths() tag_group_index_ = tag_group_index_max; } +void +Vertex::setPathsDeferred(Path *paths) +{ + paths_ = paths; +} + bool Vertex::hasFanin() const { diff --git a/include/sta/Graph.hh b/include/sta/Graph.hh index 28625854..5adb8b63 100644 --- a/include/sta/Graph.hh +++ b/include/sta/Graph.hh @@ -264,6 +264,9 @@ public: Path *makePaths(uint32_t count); void setPaths(Path *paths); void deletePaths(); + // Set paths_ without deleting the old array. + // Caller is responsible for deferred deletion of the old array. + void setPathsDeferred(Path *paths); TagGroupIndex tagGroupIndex() const; void setTagGroupIndex(TagGroupIndex tag_index); // Slew is annotated by sdc set_annotated_transition cmd. diff --git a/include/sta/Search.hh b/include/sta/Search.hh index 09bc1687..de2ff958 100644 --- a/include/sta/Search.hh +++ b/include/sta/Search.hh @@ -414,6 +414,7 @@ protected: void initVars(); void deleteTags(); void deleteTagsPrev(); + void deletePendingPaths(); void deleteUnusedTagGroups(); void seedInvalidArrivals(); void seedArrivals(); @@ -600,6 +601,10 @@ protected: std::mutex invalid_arrivals_lock_; BfsFwdIterator *arrival_iter_; ArrivalVisitor *arrival_visitor_; + // Old vertex path arrays deferred for deletion until after the parallel + // BFS level completes, preventing use-after-free in concurrent CRPR readers. 
+ std::vector<Path*> paths_pending_delete_; + std::mutex paths_pending_delete_lock_; // Some requireds exist. bool requireds_exist_; diff --git a/search/Search.cc b/search/Search.cc index 248477d4..f8a9c51d 100644 --- a/search/Search.cc +++ b/search/Search.cc @@ -708,6 +708,20 @@ Search::deleteTagsPrev() for (TagGroup** tag_groups: tag_groups_prev_) delete [] tag_groups; tag_groups_prev_.clear(); + + deletePendingPaths(); +} + +// Free old vertex path arrays that were deferred during parallel BFS visits. +// Called after visitParallel completes so no thread can still hold a pointer +// into any of these arrays. +void +Search::deletePendingPaths() +{ + LockGuard lock(paths_pending_delete_lock_); + for (Path *paths : paths_pending_delete_) + delete [] paths; + paths_pending_delete_.clear(); } void @@ -2815,8 +2829,28 @@ void Search::setVertexArrivals(Vertex *vertex, TagGroupBldr *tag_bldr) { - if (tag_bldr->empty()) - deletePathsIncr(vertex); + if (tag_bldr->empty()) { + // Inline the deletePathsIncr logic using deferred deletion so that + // concurrent CRPR/latch readers that hold a pointer into the old path + // array are not left with a dangling pointer. + tnsNotifyBefore(vertex); + if (worst_slacks_) + worst_slacks_->worstSlackNotifyBefore(vertex); + TagGroup *tag_group = tagGroup(vertex); + if (tag_group) { + Path *old_paths = vertex->paths(); + // Clear the tag group index first so concurrent readers observe a + // null tag group (and return early from Path::vertexPath) before + // we touch paths_.
+ vertex->setTagGroupIndex(tag_group_index_max); + vertex->setPathsDeferred(nullptr); + tag_group->decrRefCount(); + if (old_paths) { + LockGuard lock(paths_pending_delete_lock_); + paths_pending_delete_.push_back(old_paths); + } + } + } else { TagGroup *prev_tag_group = tagGroup(vertex); Path *prev_paths = vertex->paths(); @@ -2827,13 +2861,25 @@ Search::setVertexArrivals(Vertex *vertex, } else { if (prev_tag_group) { - vertex->deletePaths(); + // Clear the tag group index before replacing paths so concurrent + // readers see a consistent null-tag-group state during the + // transition and do not mix the old tag group with the new array. + vertex->setTagGroupIndex(tag_group_index_max); prev_tag_group->decrRefCount(); requiredInvalid(vertex); } size_t path_count = tag_group->pathCount(); - Path *paths = vertex->makePaths(path_count); - tag_bldr->copyPaths(tag_group, paths); + // Allocate the new array and switch paths_ directly from old to new + // without creating a null window or freeing the old array immediately. + // This prevents concurrent CRPR/latch readers from observing either a + // null pointer or a freed (dangling) pointer. + Path *new_paths = new Path[path_count]; + vertex->setPathsDeferred(new_paths); + if (prev_paths) { + LockGuard lock(paths_pending_delete_lock_); + paths_pending_delete_.push_back(prev_paths); + } + tag_bldr->copyPaths(tag_group, new_paths); vertex->setTagGroupIndex(tag_group->index()); tag_group->incrRefCount(); }