Internals: Fix typos (#5803)

Signed-off-by: Bartłomiej Chmiel <bchmiel@antmicro.com>
This commit is contained in:
Bartłomiej Chmiel 2025-02-25 18:44:14 +01:00 committed by GitHub
parent 3ab89d5be7
commit 6a8f97e184
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 8 additions and 8 deletions

View File

@ -195,7 +195,7 @@ public:
//
// The simplest thing that could possibly work would be to assume that our
// predictions of task runtimes are precise, and that every thread will
// make progress at an equal rate. Simulate a single "clock", pack the the
// make progress at an equal rate. Simulate a single "clock", pack the
// highest priority ready task into whatever thread becomes ready earliest,
// repeating until no tasks remain.
//
@ -671,7 +671,7 @@ const std::vector<AstCFunc*> createThreadFunctions(const ThreadSchedule& schedul
funcp->entryPoint(true);
funcp->argTypes("void* voidSelf, bool even_cycle");
// Setup vlSelf an vlSyms
// Setup vlSelf and vlSyms
funcp->addStmtsp(new AstCStmt{fl, EmitCBase::voidSelfAssign(modp)});
funcp->addStmtsp(new AstCStmt{fl, EmitCBase::symClassAssign()});
@ -798,7 +798,7 @@ void implementExecGraph(AstExecGraph* const execGraphp, const ThreadSchedule& sc
if (execGraphp->depGraphp()->empty()) return;
// Create a function to be run by each thread. Note this moves all AstMTaskBody nodes from the
// AstExecGrap into the AstCFunc created
// AstExecGraph into the AstCFunc created
const std::vector<AstCFunc*>& funcps = createThreadFunctions(schedule, execGraphp->name());
UASSERT(!funcps.empty(), "Non-empty ExecGraph yields no threads?");
@ -818,7 +818,7 @@ void implement(AstNetlist* netlistp) {
finalizeCosts(execGraphp->depGraphp());
// Schedule the mtasks: statically associate each mtask with a thread,
// and determine the order in which each thread will runs its mtasks.
// and determine the order in which each thread will run its mtasks.
const ThreadSchedule& schedule = PackThreads::apply(*execGraphp->depGraphp());
// Wrap each MTask body into a CFunc for better profiling/debugging

View File

@ -158,7 +158,7 @@ static void partCheckCachedScoreVsActual(uint32_t cached, uint32_t actual) {
// We keep MTaskEdge graph edges in a PairingHeap, sorted by score and id
struct EdgeKey final {
// Node: Structure layout chosen to minimize padding in PairingHeao<*>::Node
// Note: Structure layout chosen to minimize padding in PairingHeap<*>::Node
uint64_t m_id; // Unique ID part of edge score
uint32_t m_score; // Score part of ID
void increase(uint32_t score) {
@ -177,7 +177,7 @@ using EdgeHeap = PairingHeap<EdgeKey>;
// MTask utility classes
struct MergeCandidateKey final {
// Note: Structure layout chosen to minimize padding in PairingHeao<*>::Node
// Note: Structure layout chosen to minimize padding in PairingHeap<*>::Node
uint64_t m_id; // Unique ID part of edge score
uint32_t m_score; // Score part of ID
bool operator<(const MergeCandidateKey& other) const {
@ -1940,7 +1940,7 @@ class FixDataHazards final {
}
// Handle nodes containing DPI calls, we want to serialize those
// by default unless user gave --threads-dpi-concurrent.
// by default unless user gave '--threads-dpi none'.
// Same basic strategy as above to serialize access to SC vars.
if (!v3Global.opt.threadsDpiPure() || !v3Global.opt.threadsDpiUnpure()) {
TasksByRank tasksByRank;
@ -1961,7 +1961,7 @@ class FixDataHazards final {
tasksByRank[writerMtaskp->rank()].insert(writerMtaskp);
}
}
// Not: Find all reader tasks for this variable, group by rank.
// Note: Find all reader tasks for this variable, group by rank.
// There was "broken" code here to find readers, but fixing it to
// work properly harmed performance on some tests, see issue #3360.
}