Bug Summary

File: llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
Warning: line 977, column 7
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name HexagonMachineScheduler.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/Hexagon -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include 
-internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/Hexagon -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp

1//===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// MachineScheduler schedules machine instructions after phi elimination. It
10// preserves LiveIntervals so it can be invoked before register allocation.
11//
12//===----------------------------------------------------------------------===//
13
14#include "HexagonMachineScheduler.h"
15#include "HexagonInstrInfo.h"
16#include "HexagonSubtarget.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/CodeGen/DFAPacketizer.h"
19#include "llvm/CodeGen/MachineBasicBlock.h"
20#include "llvm/CodeGen/MachineFunction.h"
21#include "llvm/CodeGen/MachineInstr.h"
22#include "llvm/CodeGen/MachineLoopInfo.h"
23#include "llvm/CodeGen/RegisterClassInfo.h"
24#include "llvm/CodeGen/RegisterPressure.h"
25#include "llvm/CodeGen/ScheduleDAG.h"
26#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
27#include "llvm/CodeGen/TargetInstrInfo.h"
28#include "llvm/CodeGen/TargetOpcodes.h"
29#include "llvm/CodeGen/TargetRegisterInfo.h"
30#include "llvm/CodeGen/TargetSchedule.h"
31#include "llvm/CodeGen/TargetSubtargetInfo.h"
32#include "llvm/IR/Function.h"
33#include "llvm/Support/CommandLine.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/raw_ostream.h"
36#include <algorithm>
37#include <cassert>
38#include <iomanip>
39#include <limits>
40#include <memory>
41#include <sstream>
42
43using namespace llvm;
44
45#define DEBUG_TYPE"machine-scheduler" "machine-scheduler"
46
47static cl::opt<bool> IgnoreBBRegPressure("ignore-bb-reg-pressure",
48 cl::Hidden, cl::ZeroOrMore, cl::init(false));
49
50static cl::opt<bool> UseNewerCandidate("use-newer-candidate",
51 cl::Hidden, cl::ZeroOrMore, cl::init(true));
52
53static cl::opt<unsigned> SchedDebugVerboseLevel("misched-verbose-level",
54 cl::Hidden, cl::ZeroOrMore, cl::init(1));
55
56// Check if the scheduler should penalize instructions that are available too
57// early due to a zero-latency dependence.
58static cl::opt<bool> CheckEarlyAvail("check-early-avail", cl::Hidden,
59 cl::ZeroOrMore, cl::init(true));
60
61// This value is used to determine if a register class is a high pressure set.
62// We compute the maximum number of registers needed and divide it by the total
63// available. Then, we compare the result to this value.
64static cl::opt<float> RPThreshold("hexagon-reg-pressure", cl::Hidden,
65 cl::init(0.75f), cl::desc("High register pressure threhold."));
66
67/// Return true if there is a dependence between SUd and SUu.
68static bool hasDependence(const SUnit *SUd, const SUnit *SUu,
69 const HexagonInstrInfo &QII) {
70 if (SUd->Succs.size() == 0)
71 return false;
72
73 // Enable .cur formation.
74 if (QII.mayBeCurLoad(*SUd->getInstr()))
75 return false;
76
77 if (QII.canExecuteInBundle(*SUd->getInstr(), *SUu->getInstr()))
78 return false;
79
80 for (const auto &S : SUd->Succs) {
81 // Since we do not add pseudos to packets, might as well
82 // ignore order dependencies.
83 if (S.isCtrl())
84 continue;
85
86 if (S.getSUnit() == SUu && S.getLatency() > 0)
87 return true;
88 }
89 return false;
90}
91
92/// Check if scheduling of this SU is possible
93/// in the current packet.
94/// It is _not_ precise (stateful), it is more like
95/// another heuristic. Many corner cases are figured
96/// empirically.
97bool VLIWResourceModel::isResourceAvailable(SUnit *SU, bool IsTop) {
98 if (!SU || !SU->getInstr())
99 return false;
100
101 // First see if the pipeline could receive this instruction
102 // in the current cycle.
103 switch (SU->getInstr()->getOpcode()) {
104 default:
105 if (!ResourcesModel->canReserveResources(*SU->getInstr()))
106 return false;
107 break;
108 case TargetOpcode::EXTRACT_SUBREG:
109 case TargetOpcode::INSERT_SUBREG:
110 case TargetOpcode::SUBREG_TO_REG:
111 case TargetOpcode::REG_SEQUENCE:
112 case TargetOpcode::IMPLICIT_DEF:
113 case TargetOpcode::COPY:
114 case TargetOpcode::INLINEASM:
115 case TargetOpcode::INLINEASM_BR:
116 break;
117 }
118
119 MachineBasicBlock *MBB = SU->getInstr()->getParent();
120 auto &QST = MBB->getParent()->getSubtarget<HexagonSubtarget>();
121 const auto &QII = *QST.getInstrInfo();
122
123 // Now see if there are no other dependencies to instructions already
124 // in the packet.
125 if (IsTop) {
126 for (unsigned i = 0, e = Packet.size(); i != e; ++i)
127 if (hasDependence(Packet[i], SU, QII))
128 return false;
129 } else {
130 for (unsigned i = 0, e = Packet.size(); i != e; ++i)
131 if (hasDependence(SU, Packet[i], QII))
132 return false;
133 }
134 return true;
135}
136
137/// Keep track of available resources.
138bool VLIWResourceModel::reserveResources(SUnit *SU, bool IsTop) {
139 bool startNewCycle = false;
140 // Artificially reset state.
141 if (!SU) {
142 ResourcesModel->clearResources();
143 Packet.clear();
144 TotalPackets++;
145 return false;
146 }
147 // If this SU does not fit in the packet or the packet is now full
148 // start a new one.
149 if (!isResourceAvailable(SU, IsTop) ||
150 Packet.size() >= SchedModel->getIssueWidth()) {
151 ResourcesModel->clearResources();
152 Packet.clear();
153 TotalPackets++;
154 startNewCycle = true;
155 }
156
157 switch (SU->getInstr()->getOpcode()) {
158 default:
159 ResourcesModel->reserveResources(*SU->getInstr());
160 break;
161 case TargetOpcode::EXTRACT_SUBREG:
162 case TargetOpcode::INSERT_SUBREG:
163 case TargetOpcode::SUBREG_TO_REG:
164 case TargetOpcode::REG_SEQUENCE:
165 case TargetOpcode::IMPLICIT_DEF:
166 case TargetOpcode::KILL:
167 case TargetOpcode::CFI_INSTRUCTION:
168 case TargetOpcode::EH_LABEL:
169 case TargetOpcode::COPY:
170 case TargetOpcode::INLINEASM:
171 case TargetOpcode::INLINEASM_BR:
172 break;
173 }
174 Packet.push_back(SU);
175
176#ifndef NDEBUG1
177 LLVM_DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n")do { } while (false);
178 for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
179 LLVM_DEBUG(dbgs() << "\t[" << i << "] SU(")do { } while (false);
180 LLVM_DEBUG(dbgs() << Packet[i]->NodeNum << ")\t")do { } while (false);
181 LLVM_DEBUG(Packet[i]->getInstr()->dump())do { } while (false);
182 }
183#endif
184
185 return startNewCycle;
186}
187
188/// schedule - Called back from MachineScheduler::runOnMachineFunction
189/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
190/// only includes instructions that have DAG nodes, not scheduling boundaries.
191void VLIWMachineScheduler::schedule() {
192 LLVM_DEBUG(dbgs() << "********** MI Converging Scheduling VLIW "do { } while (false)
193 << printMBBReference(*BB) << " " << BB->getName()do { } while (false)
194 << " in_func " << BB->getParent()->getName()do { } while (false)
195 << " at loop depth " << MLI->getLoopDepth(BB) << " \n")do { } while (false);
196
197 buildDAGWithRegPressure();
198
199 Topo.InitDAGTopologicalSorting();
200
201 // Postprocess the DAG to add platform-specific artificial dependencies.
202 postprocessDAG();
203
204 SmallVector<SUnit*, 8> TopRoots, BotRoots;
205 findRootsAndBiasEdges(TopRoots, BotRoots);
206
207 // Initialize the strategy before modifying the DAG.
208 SchedImpl->initialize(this);
209
210 LLVM_DEBUG(unsigned maxH = 0;do { } while (false)
211 for (unsigned su = 0, e = SUnits.size(); su != e;do { } while (false)
212 ++su) if (SUnits[su].getHeight() > maxH) maxH =do { } while (false)
213 SUnits[su].getHeight();do { } while (false)
214 dbgs() << "Max Height " << maxH << "\n";)do { } while (false);
215 LLVM_DEBUG(unsigned maxD = 0;do { } while (false)
216 for (unsigned su = 0, e = SUnits.size(); su != e;do { } while (false)
217 ++su) if (SUnits[su].getDepth() > maxD) maxD =do { } while (false)
218 SUnits[su].getDepth();do { } while (false)
219 dbgs() << "Max Depth " << maxD << "\n";)do { } while (false);
220 LLVM_DEBUG(dump())do { } while (false);
221
222 initQueues(TopRoots, BotRoots);
223
224 bool IsTopNode = false;
225 while (true) {
226 LLVM_DEBUG(do { } while (false)
227 dbgs() << "** VLIWMachineScheduler::schedule picking next node\n")do { } while (false);
228 SUnit *SU = SchedImpl->pickNode(IsTopNode);
229 if (!SU) break;
230
231 if (!checkSchedLimit())
232 break;
233
234 scheduleMI(SU, IsTopNode);
235
236 // Notify the scheduling strategy after updating the DAG.
237 SchedImpl->schedNode(SU, IsTopNode);
238
239 updateQueues(SU, IsTopNode);
240 }
241 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.")(static_cast<void> (0));
242
243 placeDebugValues();
244
245 LLVM_DEBUG({do { } while (false)
246 dbgs() << "*** Final schedule for "do { } while (false)
247 << printMBBReference(*begin()->getParent()) << " ***\n";do { } while (false)
248 dumpSchedule();do { } while (false)
249 dbgs() << '\n';do { } while (false)
250 })do { } while (false);
251}
252
253void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
254 DAG = static_cast<VLIWMachineScheduler*>(dag);
255 SchedModel = DAG->getSchedModel();
256
257 Top.init(DAG, SchedModel);
258 Bot.init(DAG, SchedModel);
259
260 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
261 // are disabled, then these HazardRecs will be disabled.
262 const InstrItineraryData *Itin = DAG->getSchedModel()->getInstrItineraries();
263 const TargetSubtargetInfo &STI = DAG->MF.getSubtarget();
264 const TargetInstrInfo *TII = STI.getInstrInfo();
265 delete Top.HazardRec;
266 delete Bot.HazardRec;
267 Top.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);
268 Bot.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);
269
270 delete Top.ResourceModel;
271 delete Bot.ResourceModel;
272 Top.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());
273 Bot.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());
274
275 const std::vector<unsigned> &MaxPressure =
276 DAG->getRegPressure().MaxSetPressure;
277 HighPressureSets.assign(MaxPressure.size(), 0);
278 for (unsigned i = 0, e = MaxPressure.size(); i < e; ++i) {
279 unsigned Limit = DAG->getRegClassInfo()->getRegPressureSetLimit(i);
280 HighPressureSets[i] =
281 ((float) MaxPressure[i] > ((float) Limit * RPThreshold));
282 }
283
284 assert((!ForceTopDown || !ForceBottomUp) &&(static_cast<void> (0))
285 "-misched-topdown incompatible with -misched-bottomup")(static_cast<void> (0));
286}
287
288void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
289 if (SU->isScheduled)
290 return;
291
292 for (const SDep &PI : SU->Preds) {
293 unsigned PredReadyCycle = PI.getSUnit()->TopReadyCycle;
294 unsigned MinLatency = PI.getLatency();
295#ifndef NDEBUG1
296 Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
297#endif
298 if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
299 SU->TopReadyCycle = PredReadyCycle + MinLatency;
300 }
301 Top.releaseNode(SU, SU->TopReadyCycle);
302}
303
304void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
305 if (SU->isScheduled)
306 return;
307
308 assert(SU->getInstr() && "Scheduled SUnit must have instr")(static_cast<void> (0));
309
310 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
311 I != E; ++I) {
312 unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
313 unsigned MinLatency = I->getLatency();
314#ifndef NDEBUG1
315 Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
316#endif
317 if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
318 SU->BotReadyCycle = SuccReadyCycle + MinLatency;
319 }
320 Bot.releaseNode(SU, SU->BotReadyCycle);
321}
322
323/// Does this SU have a hazard within the current instruction group.
324///
325/// The scheduler supports two modes of hazard recognition. The first is the
326/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
327/// supports highly complicated in-order reservation tables
328/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
329///
330/// The second is a streamlined mechanism that checks for hazards based on
331/// simple counters that the scheduler itself maintains. It explicitly checks
332/// for instruction dispatch limitations, including the number of micro-ops that
333/// can dispatch per cycle.
334///
335/// TODO: Also check whether the SU must start a new group.
336bool ConvergingVLIWScheduler::VLIWSchedBoundary::checkHazard(SUnit *SU) {
337 if (HazardRec->isEnabled())
338 return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
339
340 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
341 if (IssueCount + uops > SchedModel->getIssueWidth())
342 return true;
343
344 return false;
345}
346
347void ConvergingVLIWScheduler::VLIWSchedBoundary::releaseNode(SUnit *SU,
348 unsigned ReadyCycle) {
349 if (ReadyCycle < MinReadyCycle)
350 MinReadyCycle = ReadyCycle;
351
352 // Check for interlocks first. For the purpose of other heuristics, an
353 // instruction that cannot issue appears as if it's not in the ReadyQueue.
354 if (ReadyCycle > CurrCycle || checkHazard(SU))
355
356 Pending.push(SU);
357 else
358 Available.push(SU);
359}
360
361/// Move the boundary of scheduled code by one cycle.
362void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpCycle() {
363 unsigned Width = SchedModel->getIssueWidth();
364 IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
365
366 assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&(static_cast<void> (0))
367 "MinReadyCycle uninitialized")(static_cast<void> (0));
368 unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);
369
370 if (!HazardRec->isEnabled()) {
371 // Bypass HazardRec virtual calls.
372 CurrCycle = NextCycle;
373 } else {
374 // Bypass getHazardType calls in case of long latency.
375 for (; CurrCycle != NextCycle; ++CurrCycle) {
376 if (isTop())
377 HazardRec->AdvanceCycle();
378 else
379 HazardRec->RecedeCycle();
380 }
381 }
382 CheckPending = true;
383
384 LLVM_DEBUG(dbgs() << "*** Next cycle " << Available.getName() << " cycle "do { } while (false)
385 << CurrCycle << '\n')do { } while (false);
386}
387
388/// Move the boundary of scheduled code by one SUnit.
389void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpNode(SUnit *SU) {
390 bool startNewCycle = false;
391
392 // Update the reservation table.
393 if (HazardRec->isEnabled()) {
394 if (!isTop() && SU->isCall) {
395 // Calls are scheduled with their preceding instructions. For bottom-up
396 // scheduling, clear the pipeline state before emitting.
397 HazardRec->Reset();
398 }
399 HazardRec->EmitInstruction(SU);
400 }
401
402 // Update DFA model.
403 startNewCycle = ResourceModel->reserveResources(SU, isTop());
404
405 // Check the instruction group dispatch limit.
406 // TODO: Check if this SU must end a dispatch group.
407 IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
408 if (startNewCycle) {
409 LLVM_DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n')do { } while (false);
410 bumpCycle();
411 }
412 else
413 LLVM_DEBUG(dbgs() << "*** IssueCount " << IssueCount << " at cycle "do { } while (false)
414 << CurrCycle << '\n')do { } while (false);
415}
416
417/// Release pending ready nodes in to the available queue. This makes them
418/// visible to heuristics.
419void ConvergingVLIWScheduler::VLIWSchedBoundary::releasePending() {
420 // If the available queue is empty, it is safe to reset MinReadyCycle.
421 if (Available.empty())
422 MinReadyCycle = std::numeric_limits<unsigned>::max();
423
424 // Check to see if any of the pending instructions are ready to issue. If
425 // so, add them to the available queue.
426 for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
427 SUnit *SU = *(Pending.begin()+i);
428 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
429
430 if (ReadyCycle < MinReadyCycle)
431 MinReadyCycle = ReadyCycle;
432
433 if (ReadyCycle > CurrCycle)
434 continue;
435
436 if (checkHazard(SU))
437 continue;
438
439 Available.push(SU);
440 Pending.remove(Pending.begin()+i);
441 --i; --e;
442 }
443 CheckPending = false;
444}
445
446/// Remove SU from the ready set for this boundary.
447void ConvergingVLIWScheduler::VLIWSchedBoundary::removeReady(SUnit *SU) {
448 if (Available.isInQueue(SU))
449 Available.remove(Available.find(SU));
450 else {
451 assert(Pending.isInQueue(SU) && "bad ready count")(static_cast<void> (0));
452 Pending.remove(Pending.find(SU));
453 }
454}
455
456/// If this queue only has one ready candidate, return it. As a side effect,
457/// advance the cycle until at least one node is ready. If multiple instructions
458/// are ready, return NULL.
459SUnit *ConvergingVLIWScheduler::VLIWSchedBoundary::pickOnlyChoice() {
460 if (CheckPending)
12
Assuming field 'CheckPending' is false
13
Taking false branch
461 releasePending();
462
463 auto AdvanceCycle = [this]() {
464 if (Available.empty())
465 return true;
466 if (Available.size() == 1 && Pending.size() > 0)
467 return !ResourceModel->isResourceAvailable(*Available.begin(), isTop()) ||
468 getWeakLeft(*Available.begin(), isTop()) != 0;
469 return false;
470 };
471 for (unsigned i = 0; AdvanceCycle(); ++i) {
14
Loop condition is false. Execution continues on line 478
472 assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&(static_cast<void> (0))
473 "permanent hazard")(static_cast<void> (0)); (void)i;
474 ResourceModel->reserveResources(nullptr, isTop());
475 bumpCycle();
476 releasePending();
477 }
478 if (Available.size() == 1)
15
Assuming the condition is false
16
Taking false branch
479 return *Available.begin();
480 return nullptr;
17
Returning null pointer, which participates in a condition later
481}
482
483#ifndef NDEBUG1
484void ConvergingVLIWScheduler::traceCandidate(const char *Label,
485 const ReadyQueue &Q, SUnit *SU, int Cost, PressureChange P) {
486 dbgs() << Label << " " << Q.getName() << " ";
487 if (P.isValid())
488 dbgs() << DAG->TRI->getRegPressureSetName(P.getPSet()) << ":"
489 << P.getUnitInc() << " ";
490 else
491 dbgs() << " ";
492 dbgs() << "cost(" << Cost << ")\t";
493 DAG->dumpNode(*SU);
494}
495
496// Very detailed queue dump, to be used with higher verbosity levels.
497void ConvergingVLIWScheduler::readyQueueVerboseDump(
498 const RegPressureTracker &RPTracker, SchedCandidate &Candidate,
499 ReadyQueue &Q) {
500 RegPressureTracker &TempTracker = const_cast<RegPressureTracker &>(RPTracker);
501
502 dbgs() << ">>> " << Q.getName() << "\n";
503 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
504 RegPressureDelta RPDelta;
505 TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
506 DAG->getRegionCriticalPSets(),
507 DAG->getRegPressure().MaxSetPressure);
508 std::stringstream dbgstr;
509 dbgstr << "SU(" << std::setw(3) << (*I)->NodeNum << ")";
510 dbgs() << dbgstr.str();
511 SchedulingCost(Q, *I, Candidate, RPDelta, true);
512 dbgs() << "\t";
513 (*I)->getInstr()->dump();
514 }
515 dbgs() << "\n";
516}
517#endif
518
519/// isSingleUnscheduledPred - If SU2 is the only unscheduled predecessor
520/// of SU, return true (we may have duplicates)
521static inline bool isSingleUnscheduledPred(SUnit *SU, SUnit *SU2) {
522 if (SU->NumPredsLeft == 0)
523 return false;
524
525 for (auto &Pred : SU->Preds) {
526 // We found an available, but not scheduled, predecessor.
527 if (!Pred.getSUnit()->isScheduled && (Pred.getSUnit() != SU2))
528 return false;
529 }
530
531 return true;
532}
533
534/// isSingleUnscheduledSucc - If SU2 is the only unscheduled successor
535/// of SU, return true (we may have duplicates)
536static inline bool isSingleUnscheduledSucc(SUnit *SU, SUnit *SU2) {
537 if (SU->NumSuccsLeft == 0)
538 return false;
539
540 for (auto &Succ : SU->Succs) {
541 // We found an available, but not scheduled, successor.
542 if (!Succ.getSUnit()->isScheduled && (Succ.getSUnit() != SU2))
543 return false;
544 }
545 return true;
546}
547
548/// Check if the instruction changes the register pressure of a register in the
549/// high pressure set. The function returns a negative value if the pressure
550/// decreases and a positive value is the pressure increases. If the instruction
551/// doesn't use a high pressure register or doesn't change the register
552/// pressure, then return 0.
553int ConvergingVLIWScheduler::pressureChange(const SUnit *SU, bool isBotUp) {
554 PressureDiff &PD = DAG->getPressureDiff(SU);
555 for (auto &P : PD) {
556 if (!P.isValid())
557 continue;
558 // The pressure differences are computed bottom-up, so the comparison for
559 // an increase is positive in the bottom direction, but negative in the
560 // top-down direction.
561 if (HighPressureSets[P.getPSet()])
562 return (isBotUp ? P.getUnitInc() : -P.getUnitInc());
563 }
564 return 0;
565}
566
567// Constants used to denote relative importance of
568// heuristic components for cost computation.
569static const unsigned PriorityOne = 200;
570static const unsigned PriorityTwo = 50;
571static const unsigned PriorityThree = 75;
572static const unsigned ScaleTwo = 10;
573
574/// Single point to compute overall scheduling cost.
575/// TODO: More heuristics will be used soon.
576int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
577 SchedCandidate &Candidate,
578 RegPressureDelta &Delta,
579 bool verbose) {
580 // Initial trivial priority.
581 int ResCount = 1;
582
583 // Do not waste time on a node that is already scheduled.
584 if (!SU || SU->isScheduled)
585 return ResCount;
586
587 LLVM_DEBUG(if (verbose) dbgs()do { } while (false)
588 << ((Q.getID() == TopQID) ? "(top|" : "(bot|"))do { } while (false);
589 // Forced priority is high.
590 if (SU->isScheduleHigh) {
591 ResCount += PriorityOne;
592 LLVM_DEBUG(dbgs() << "H|")do { } while (false);
593 }
594
595 unsigned IsAvailableAmt = 0;
596 // Critical path first.
597 if (Q.getID() == TopQID) {
598 if (Top.isLatencyBound(SU)) {
599 LLVM_DEBUG(if (verbose) dbgs() << "LB|")do { } while (false);
600 ResCount += (SU->getHeight() * ScaleTwo);
601 }
602
603 LLVM_DEBUG(if (verbose) {do { } while (false)
604 std::stringstream dbgstr;do { } while (false)
605 dbgstr << "h" << std::setw(3) << SU->getHeight() << "|";do { } while (false)
606 dbgs() << dbgstr.str();do { } while (false)
607 })do { } while (false);
608
609 // If resources are available for it, multiply the
610 // chance of scheduling.
611 if (Top.ResourceModel->isResourceAvailable(SU, true)) {
612 IsAvailableAmt = (PriorityTwo + PriorityThree);
613 ResCount += IsAvailableAmt;
614 LLVM_DEBUG(if (verbose) dbgs() << "A|")do { } while (false);
615 } else
616 LLVM_DEBUG(if (verbose) dbgs() << " |")do { } while (false);
617 } else {
618 if (Bot.isLatencyBound(SU)) {
619 LLVM_DEBUG(if (verbose) dbgs() << "LB|")do { } while (false);
620 ResCount += (SU->getDepth() * ScaleTwo);
621 }
622
623 LLVM_DEBUG(if (verbose) {do { } while (false)
624 std::stringstream dbgstr;do { } while (false)
625 dbgstr << "d" << std::setw(3) << SU->getDepth() << "|";do { } while (false)
626 dbgs() << dbgstr.str();do { } while (false)
627 })do { } while (false);
628
629 // If resources are available for it, multiply the
630 // chance of scheduling.
631 if (Bot.ResourceModel->isResourceAvailable(SU, false)) {
632 IsAvailableAmt = (PriorityTwo + PriorityThree);
633 ResCount += IsAvailableAmt;
634 LLVM_DEBUG(if (verbose) dbgs() << "A|")do { } while (false);
635 } else
636 LLVM_DEBUG(if (verbose) dbgs() << " |")do { } while (false);
637 }
638
639 unsigned NumNodesBlocking = 0;
640 if (Q.getID() == TopQID) {
641 // How many SUs does it block from scheduling?
642 // Look at all of the successors of this node.
643 // Count the number of nodes that
644 // this node is the sole unscheduled node for.
645 if (Top.isLatencyBound(SU))
646 for (const SDep &SI : SU->Succs)
647 if (isSingleUnscheduledPred(SI.getSUnit(), SU))
648 ++NumNodesBlocking;
649 } else {
650 // How many unscheduled predecessors block this node?
651 if (Bot.isLatencyBound(SU))
652 for (const SDep &PI : SU->Preds)
653 if (isSingleUnscheduledSucc(PI.getSUnit(), SU))
654 ++NumNodesBlocking;
655 }
656 ResCount += (NumNodesBlocking * ScaleTwo);
657
658 LLVM_DEBUG(if (verbose) {do { } while (false)
659 std::stringstream dbgstr;do { } while (false)
660 dbgstr << "blk " << std::setw(2) << NumNodesBlocking << ")|";do { } while (false)
661 dbgs() << dbgstr.str();do { } while (false)
662 })do { } while (false);
663
664 // Factor in reg pressure as a heuristic.
665 if (!IgnoreBBRegPressure) {
666 // Decrease priority by the amount that register pressure exceeds the limit.
667 ResCount -= (Delta.Excess.getUnitInc()*PriorityOne);
668 // Decrease priority if register pressure exceeds the limit.
669 ResCount -= (Delta.CriticalMax.getUnitInc()*PriorityOne);
670 // Decrease priority slightly if register pressure would increase over the
671 // current maximum.
672 ResCount -= (Delta.CurrentMax.getUnitInc()*PriorityTwo);
673 // If there are register pressure issues, then we remove the value added for
674 // the instruction being available. The rationale is that we really don't
675 // want to schedule an instruction that causes a spill.
676 if (IsAvailableAmt && pressureChange(SU, Q.getID() != TopQID) > 0 &&
677 (Delta.Excess.getUnitInc() || Delta.CriticalMax.getUnitInc() ||
678 Delta.CurrentMax.getUnitInc()))
679 ResCount -= IsAvailableAmt;
680 LLVM_DEBUG(if (verbose) {do { } while (false)
681 dbgs() << "RP " << Delta.Excess.getUnitInc() << "/"do { } while (false)
682 << Delta.CriticalMax.getUnitInc() << "/"do { } while (false)
683 << Delta.CurrentMax.getUnitInc() << ")|";do { } while (false)
684 })do { } while (false);
685 }
686
687 // Give a little extra priority to a .cur instruction if there is a resource
688 // available for it.
689 auto &QST = DAG->MF.getSubtarget<HexagonSubtarget>();
690 auto &QII = *QST.getInstrInfo();
691 if (SU->isInstr() && QII.mayBeCurLoad(*SU->getInstr())) {
692 if (Q.getID() == TopQID &&
693 Top.ResourceModel->isResourceAvailable(SU, true)) {
694 ResCount += PriorityTwo;
695 LLVM_DEBUG(if (verbose) dbgs() << "C|")do { } while (false);
696 } else if (Q.getID() == BotQID &&
697 Bot.ResourceModel->isResourceAvailable(SU, false)) {
698 ResCount += PriorityTwo;
699 LLVM_DEBUG(if (verbose) dbgs() << "C|")do { } while (false);
700 }
701 }
702
703 // Give preference to a zero latency instruction if the dependent
704 // instruction is in the current packet.
705 if (Q.getID() == TopQID && getWeakLeft(SU, true) == 0) {
706 for (const SDep &PI : SU->Preds) {
707 if (!PI.getSUnit()->getInstr()->isPseudo() && PI.isAssignedRegDep() &&
708 PI.getLatency() == 0 &&
709 Top.ResourceModel->isInPacket(PI.getSUnit())) {
710 ResCount += PriorityThree;
711 LLVM_DEBUG(if (verbose) dbgs() << "Z|")do { } while (false);
712 }
713 }
714 } else if (Q.getID() == BotQID && getWeakLeft(SU, false) == 0) {
715 for (const SDep &SI : SU->Succs) {
716 if (!SI.getSUnit()->getInstr()->isPseudo() && SI.isAssignedRegDep() &&
717 SI.getLatency() == 0 &&
718 Bot.ResourceModel->isInPacket(SI.getSUnit())) {
719 ResCount += PriorityThree;
720 LLVM_DEBUG(if (verbose) dbgs() << "Z|")do { } while (false);
721 }
722 }
723 }
724
725 // If the instruction has a non-zero latency dependence with an instruction in
726 // the current packet, then it should not be scheduled yet. The case occurs
727 // when the dependent instruction is scheduled in a new packet, so the
728 // scheduler updates the current cycle and pending instructions become
729 // available.
730 if (CheckEarlyAvail) {
731 if (Q.getID() == TopQID) {
732 for (const auto &PI : SU->Preds) {
733 if (PI.getLatency() > 0 &&
734 Top.ResourceModel->isInPacket(PI.getSUnit())) {
735 ResCount -= PriorityOne;
736 LLVM_DEBUG(if (verbose) dbgs() << "D|")do { } while (false);
737 }
738 }
739 } else {
740 for (const auto &SI : SU->Succs) {
741 if (SI.getLatency() > 0 &&
742 Bot.ResourceModel->isInPacket(SI.getSUnit())) {
743 ResCount -= PriorityOne;
744 LLVM_DEBUG(if (verbose) dbgs() << "D|")do { } while (false);
745 }
746 }
747 }
748 }
749
750 LLVM_DEBUG(if (verbose) {do { } while (false)
751 std::stringstream dbgstr;do { } while (false)
752 dbgstr << "Total " << std::setw(4) << ResCount << ")";do { } while (false)
753 dbgs() << dbgstr.str();do { } while (false)
754 })do { } while (false);
755
756 return ResCount;
757}
758
759/// Pick the best candidate from the top queue.
760///
761/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
762/// DAG building. To adjust for the current scheduling location we need to
763/// maintain the number of vreg uses remaining to be top-scheduled.
764ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
765pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
766 SchedCandidate &Candidate) {
767 ReadyQueue &Q = Zone.Available;
768 LLVM_DEBUG(if (SchedDebugVerboseLevel > 1)do { } while (false)
24
Loop condition is false. Exiting loop
769 readyQueueVerboseDump(RPTracker, Candidate, Q);do { } while (false)
770 else Q.dump();)do { } while (false);
771
772 // getMaxPressureDelta temporarily modifies the tracker.
773 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
774
775 // BestSU remains NULL if no top candidates beat the best existing candidate.
776 CandResult FoundCandidate = NoCand;
777 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
25
Loop condition is false. Execution continues on line 875
778 RegPressureDelta RPDelta;
779 TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
780 DAG->getRegionCriticalPSets(),
781 DAG->getRegPressure().MaxSetPressure);
782
783 int CurrentCost = SchedulingCost(Q, *I, Candidate, RPDelta, false);
784
785 // Initialize the candidate if needed.
786 if (!Candidate.SU) {
787 LLVM_DEBUG(traceCandidate("DCAND", Q, *I, CurrentCost))do { } while (false);
788 Candidate.SU = *I;
789 Candidate.RPDelta = RPDelta;
790 Candidate.SCost = CurrentCost;
791 FoundCandidate = NodeOrder;
792 continue;
793 }
794
795 // Choose node order for negative cost candidates. There is no good
796 // candidate in this case.
797 if (CurrentCost < 0 && Candidate.SCost < 0) {
798 if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
799 || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
800 LLVM_DEBUG(traceCandidate("NCAND", Q, *I, CurrentCost))do { } while (false);
801 Candidate.SU = *I;
802 Candidate.RPDelta = RPDelta;
803 Candidate.SCost = CurrentCost;
804 FoundCandidate = NodeOrder;
805 }
806 continue;
807 }
808
809 // Best cost.
810 if (CurrentCost > Candidate.SCost) {
811 LLVM_DEBUG(traceCandidate("CCAND", Q, *I, CurrentCost))do { } while (false);
812 Candidate.SU = *I;
813 Candidate.RPDelta = RPDelta;
814 Candidate.SCost = CurrentCost;
815 FoundCandidate = BestCost;
816 continue;
817 }
818
819 // Choose an instruction that does not depend on an artificial edge.
820 unsigned CurrWeak = getWeakLeft(*I, (Q.getID() == TopQID));
821 unsigned CandWeak = getWeakLeft(Candidate.SU, (Q.getID() == TopQID));
822 if (CurrWeak != CandWeak) {
823 if (CurrWeak < CandWeak) {
824 LLVM_DEBUG(traceCandidate("WCAND", Q, *I, CurrentCost))do { } while (false);
825 Candidate.SU = *I;
826 Candidate.RPDelta = RPDelta;
827 Candidate.SCost = CurrentCost;
828 FoundCandidate = Weak;
829 }
830 continue;
831 }
832
833 if (CurrentCost == Candidate.SCost && Zone.isLatencyBound(*I)) {
834 unsigned CurrSize, CandSize;
835 if (Q.getID() == TopQID) {
836 CurrSize = (*I)->Succs.size();
837 CandSize = Candidate.SU->Succs.size();
838 } else {
839 CurrSize = (*I)->Preds.size();
840 CandSize = Candidate.SU->Preds.size();
841 }
842 if (CurrSize > CandSize) {
843 LLVM_DEBUG(traceCandidate("SPCAND", Q, *I, CurrentCost))do { } while (false);
844 Candidate.SU = *I;
845 Candidate.RPDelta = RPDelta;
846 Candidate.SCost = CurrentCost;
847 FoundCandidate = BestCost;
848 }
849 // Keep the old candidate if it's a better candidate. That is, don't use
850 // the subsequent tie breaker.
851 if (CurrSize != CandSize)
852 continue;
853 }
854
855 // Tie breaker.
856 // To avoid scheduling indeterminism, we need a tie breaker
857 // for the case when cost is identical for two nodes.
858 if (UseNewerCandidate && CurrentCost == Candidate.SCost) {
859 if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
860 || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
861 LLVM_DEBUG(traceCandidate("TCAND", Q, *I, CurrentCost))do { } while (false);
862 Candidate.SU = *I;
863 Candidate.RPDelta = RPDelta;
864 Candidate.SCost = CurrentCost;
865 FoundCandidate = NodeOrder;
866 continue;
867 }
868 }
869
870 // Fall through to original instruction order.
871 // Only consider node order if Candidate was chosen from this Q.
872 if (FoundCandidate == NoCand)
873 continue;
874 }
875 return FoundCandidate;
26
Returning without writing to 'Candidate.SU'
876}
877
878/// Pick the best candidate node from either the top or bottom queue.
879SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
880 // Schedule as far as possible in the direction of no choice. This is most
881 // efficient, but also provides the best heuristics for CriticalPSets.
882 if (SUnit *SU = Bot.pickOnlyChoice()) {
883 LLVM_DEBUG(dbgs() << "Picked only Bottom\n")do { } while (false);
884 IsTopNode = false;
885 return SU;
886 }
887 if (SUnit *SU = Top.pickOnlyChoice()) {
888 LLVM_DEBUG(dbgs() << "Picked only Top\n")do { } while (false);
889 IsTopNode = true;
890 return SU;
891 }
892 SchedCandidate BotCand;
893 // Prefer bottom scheduling when heuristics are silent.
894 CandResult BotResult = pickNodeFromQueue(Bot,
895 DAG->getBotRPTracker(), BotCand);
896 assert(BotResult != NoCand && "failed to find the first candidate")(static_cast<void> (0));
897
898 // If either Q has a single candidate that provides the least increase in
899 // Excess pressure, we can immediately schedule from that Q.
900 //
901 // RegionCriticalPSets summarizes the pressure within the scheduled region and
902 // affects picking from either Q. If scheduling in one direction must
903 // increase pressure for one of the excess PSets, then schedule in that
904 // direction first to provide more freedom in the other direction.
905 if (BotResult == SingleExcess || BotResult == SingleCritical) {
906 LLVM_DEBUG(dbgs() << "Prefered Bottom Node\n")do { } while (false);
907 IsTopNode = false;
908 return BotCand.SU;
909 }
910 // Check if the top Q has a better candidate.
911 SchedCandidate TopCand;
912 CandResult TopResult = pickNodeFromQueue(Top,
913 DAG->getTopRPTracker(), TopCand);
914 assert(TopResult != NoCand && "failed to find the first candidate")(static_cast<void> (0));
915
916 if (TopResult == SingleExcess || TopResult == SingleCritical) {
917 LLVM_DEBUG(dbgs() << "Prefered Top Node\n")do { } while (false);
918 IsTopNode = true;
919 return TopCand.SU;
920 }
921 // If either Q has a single candidate that minimizes pressure above the
922 // original region's pressure pick it.
923 if (BotResult == SingleMax) {
924 LLVM_DEBUG(dbgs() << "Prefered Bottom Node SingleMax\n")do { } while (false);
925 IsTopNode = false;
926 return BotCand.SU;
927 }
928 if (TopResult == SingleMax) {
929 LLVM_DEBUG(dbgs() << "Prefered Top Node SingleMax\n")do { } while (false);
930 IsTopNode = true;
931 return TopCand.SU;
932 }
933 if (TopCand.SCost > BotCand.SCost) {
934 LLVM_DEBUG(dbgs() << "Prefered Top Node Cost\n")do { } while (false);
935 IsTopNode = true;
936 return TopCand.SU;
937 }
938 // Otherwise prefer the bottom candidate in node order.
939 LLVM_DEBUG(dbgs() << "Prefered Bottom in Node order\n")do { } while (false);
940 IsTopNode = false;
941 return BotCand.SU;
942}
943
944/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
945SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
946 if (DAG->top() == DAG->bottom()) {
1
Calling 'operator=='
7
Returning from 'operator=='
8
Taking false branch
947 assert(Top.Available.empty() && Top.Pending.empty() &&(static_cast<void> (0))
948 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage")(static_cast<void> (0));
949 return nullptr;
950 }
951 SUnit *SU;
952 if (ForceTopDown) {
9
Assuming the condition is true
10
Taking true branch
953 SU = Top.pickOnlyChoice();
11
Calling 'VLIWSchedBoundary::pickOnlyChoice'
18
Returning from 'VLIWSchedBoundary::pickOnlyChoice'
954 if (!SU
18.1
'SU' is null
18.1
'SU' is null
18.1
'SU' is null
18.1
'SU' is null
) {
19
Taking true branch
955 SchedCandidate TopCand;
20
Calling defaulted default constructor for 'SchedCandidate'
22
Returning from default constructor for 'SchedCandidate'
956 CandResult TopResult =
957 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
23
Calling 'ConvergingVLIWScheduler::pickNodeFromQueue'
27
Returning from 'ConvergingVLIWScheduler::pickNodeFromQueue'
958 assert(TopResult != NoCand && "failed to find the first candidate")(static_cast<void> (0));
959 (void)TopResult;
960 SU = TopCand.SU;
28
Null pointer value stored to 'SU'
961 }
962 IsTopNode = true;
963 } else if (ForceBottomUp) {
964 SU = Bot.pickOnlyChoice();
965 if (!SU) {
966 SchedCandidate BotCand;
967 CandResult BotResult =
968 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
969 assert(BotResult != NoCand && "failed to find the first candidate")(static_cast<void> (0));
970 (void)BotResult;
971 SU = BotCand.SU;
972 }
973 IsTopNode = false;
974 } else {
975 SU = pickNodeBidrectional(IsTopNode);
976 }
977 if (SU->isTopReady())
29
Called C++ object pointer is null
978 Top.removeReady(SU);
979 if (SU->isBottomReady())
980 Bot.removeReady(SU);
981
982 LLVM_DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")do { } while (false)
983 << " Scheduling instruction in cycle "do { } while (false)
984 << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << " ("do { } while (false)
985 << reportPackets() << ")\n";do { } while (false)
986 DAG->dumpNode(*SU))do { } while (false);
987 return SU;
988}
989
990/// Update the scheduler's state after scheduling a node. This is the same node
991/// that was just returned by pickNode(). However, VLIWMachineScheduler needs
992/// to update it's state based on the current cycle before MachineSchedStrategy
993/// does.
994void ConvergingVLIWScheduler::schedNode(SUnit *SU, bool IsTopNode) {
995 if (IsTopNode) {
996 Top.bumpNode(SU);
997 SU->TopReadyCycle = Top.CurrCycle;
998 } else {
999 Bot.bumpNode(SU);
1000 SU->BotReadyCycle = Bot.CurrCycle;
1001 }
1002}

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/CodeGen/MachineInstrBundleIterator.h

1//===- llvm/CodeGen/MachineInstrBundleIterator.h ----------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Defines an iterator class that bundles MachineInstr.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
14#define LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
15
16#include "llvm/ADT/ilist.h"
17#include "llvm/ADT/simple_ilist.h"
18#include <cassert>
19#include <iterator>
20#include <type_traits>
21
22namespace llvm {
23
24template <class T, bool IsReverse> struct MachineInstrBundleIteratorTraits;
25template <class T> struct MachineInstrBundleIteratorTraits<T, false> {
26 using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
27 using instr_iterator = typename list_type::iterator;
28 using nonconst_instr_iterator = typename list_type::iterator;
29 using const_instr_iterator = typename list_type::const_iterator;
30};
31template <class T> struct MachineInstrBundleIteratorTraits<T, true> {
32 using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
33 using instr_iterator = typename list_type::reverse_iterator;
34 using nonconst_instr_iterator = typename list_type::reverse_iterator;
35 using const_instr_iterator = typename list_type::const_reverse_iterator;
36};
37template <class T> struct MachineInstrBundleIteratorTraits<const T, false> {
38 using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
39 using instr_iterator = typename list_type::const_iterator;
40 using nonconst_instr_iterator = typename list_type::iterator;
41 using const_instr_iterator = typename list_type::const_iterator;
42};
43template <class T> struct MachineInstrBundleIteratorTraits<const T, true> {
44 using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
45 using instr_iterator = typename list_type::const_reverse_iterator;
46 using nonconst_instr_iterator = typename list_type::reverse_iterator;
47 using const_instr_iterator = typename list_type::const_reverse_iterator;
48};
49
// Direction-specific bundle navigation. The forward specialization walks the
// isBundledWithPred/isBundledWithSucc flags directly.
template <bool IsReverse> struct MachineInstrBundleIteratorHelper;
template <> struct MachineInstrBundleIteratorHelper<false> {
  /// Get the beginning of the current bundle.
  template <class Iterator> static Iterator getBundleBegin(Iterator I) {
    if (!I.isEnd())
      while (I->isBundledWithPred())
        --I;
    return I;
  }

  /// Get the final node of the current bundle.
  template <class Iterator> static Iterator getBundleFinal(Iterator I) {
    if (!I.isEnd())
      while (I->isBundledWithSucc())
        ++I;
    return I;
  }

  /// Increment forward ilist iterator: step past the end of the bundle.
  template <class Iterator> static void increment(Iterator &I) {
    I = std::next(getBundleFinal(I));
  }

  /// Decrement forward ilist iterator: step to the head of the previous bundle.
  template <class Iterator> static void decrement(Iterator &I) {
    I = getBundleBegin(std::prev(I));
  }
};
78
79template <> struct MachineInstrBundleIteratorHelper<true> {
80 /// Get the beginning of the current bundle.
81 template <class Iterator> static Iterator getBundleBegin(Iterator I) {
82 return MachineInstrBundleIteratorHelper<false>::getBundleBegin(
83 I.getReverse())
84 .getReverse();
85 }
86
87 /// Get the final node of the current bundle.
88 template <class Iterator> static Iterator getBundleFinal(Iterator I) {
89 return MachineInstrBundleIteratorHelper<false>::getBundleFinal(
90 I.getReverse())
91 .getReverse();
92 }
93
94 /// Increment reverse ilist iterator.
95 template <class Iterator> static void increment(Iterator &I) {
96 I = getBundleBegin(std::next(I));
97 }
98
99 /// Decrement reverse ilist iterator.
100 template <class Iterator> static void decrement(Iterator &I) {
101 I = std::prev(getBundleFinal(I));
102 }
103};
104
105/// MachineBasicBlock iterator that automatically skips over MIs that are
106/// inside bundles (i.e. walk top level MIs only).
107template <typename Ty, bool IsReverse = false>
108class MachineInstrBundleIterator : MachineInstrBundleIteratorHelper<IsReverse> {
109 using Traits = MachineInstrBundleIteratorTraits<Ty, IsReverse>;
110 using instr_iterator = typename Traits::instr_iterator;
111
112 instr_iterator MII;
113
114public:
115 using value_type = typename instr_iterator::value_type;
116 using difference_type = typename instr_iterator::difference_type;
117 using pointer = typename instr_iterator::pointer;
118 using reference = typename instr_iterator::reference;
119 using const_pointer = typename instr_iterator::const_pointer;
120 using const_reference = typename instr_iterator::const_reference;
121 using iterator_category = std::bidirectional_iterator_tag;
122
123private:
124 using nonconst_instr_iterator = typename Traits::nonconst_instr_iterator;
125 using const_instr_iterator = typename Traits::const_instr_iterator;
126 using nonconst_iterator =
127 MachineInstrBundleIterator<typename nonconst_instr_iterator::value_type,
128 IsReverse>;
129 using reverse_iterator = MachineInstrBundleIterator<Ty, !IsReverse>;
130
131public:
132 MachineInstrBundleIterator(instr_iterator MI) : MII(MI) {
133 assert((!MI.getNodePtr() || MI.isEnd() || !MI->isBundledWithPred()) &&(static_cast<void> (0))
134 "It's not legal to initialize MachineInstrBundleIterator with a "(static_cast<void> (0))
135 "bundled MI")(static_cast<void> (0));
136 }
137
138 MachineInstrBundleIterator(reference MI) : MII(MI) {
139 assert(!MI.isBundledWithPred() && "It's not legal to initialize "(static_cast<void> (0))
140 "MachineInstrBundleIterator with a "(static_cast<void> (0))
141 "bundled MI")(static_cast<void> (0));
142 }
143
144 MachineInstrBundleIterator(pointer MI) : MII(MI) {
145 // FIXME: This conversion should be explicit.
146 assert((!MI || !MI->isBundledWithPred()) && "It's not legal to initialize "(static_cast<void> (0))
147 "MachineInstrBundleIterator "(static_cast<void> (0))
148 "with a bundled MI")(static_cast<void> (0));
149 }
150
151 // Template allows conversion from const to nonconst.
152 template <class OtherTy>
153 MachineInstrBundleIterator(
154 const MachineInstrBundleIterator<OtherTy, IsReverse> &I,
155 std::enable_if_t<std::is_convertible<OtherTy *, Ty *>::value, void *> =
156 nullptr)
157 : MII(I.getInstrIterator()) {}
158
159 MachineInstrBundleIterator() : MII(nullptr) {}
160
161 /// Explicit conversion between forward/reverse iterators.
162 ///
163 /// Translate between forward and reverse iterators without changing range
164 /// boundaries. The resulting iterator will dereference (and have a handle)
165 /// to the previous node, which is somewhat unexpected; but converting the
166 /// two endpoints in a range will give the same range in reverse.
167 ///
168 /// This matches std::reverse_iterator conversions.
169 explicit MachineInstrBundleIterator(
170 const MachineInstrBundleIterator<Ty, !IsReverse> &I)
171 : MachineInstrBundleIterator(++I.getReverse()) {}
172
173 /// Get the bundle iterator for the given instruction's bundle.
174 static MachineInstrBundleIterator getAtBundleBegin(instr_iterator MI) {
175 return MachineInstrBundleIteratorHelper<IsReverse>::getBundleBegin(MI);
176 }
177
178 reference operator*() const { return *MII; }
179 pointer operator->() const { return &operator*(); }
180
181 /// Check for null.
182 bool isValid() const { return MII.getNodePtr(); }
183
184 friend bool operator==(const MachineInstrBundleIterator &L,
185 const MachineInstrBundleIterator &R) {
186 return L.MII == R.MII;
2
Calling 'operator=='
5
Returning from 'operator=='
6
Returning zero, which participates in a condition later
187 }
188 friend bool operator==(const MachineInstrBundleIterator &L,
189 const const_instr_iterator &R) {
190 return L.MII == R; // Avoid assertion about validity of R.
191 }
192 friend bool operator==(const const_instr_iterator &L,
193 const MachineInstrBundleIterator &R) {
194 return L == R.MII; // Avoid assertion about validity of L.
195 }
196 friend bool operator==(const MachineInstrBundleIterator &L,
197 const nonconst_instr_iterator &R) {
198 return L.MII == R; // Avoid assertion about validity of R.
199 }
200 friend bool operator==(const nonconst_instr_iterator &L,
201 const MachineInstrBundleIterator &R) {
202 return L == R.MII; // Avoid assertion about validity of L.
203 }
204 friend bool operator==(const MachineInstrBundleIterator &L, const_pointer R) {
205 return L == const_instr_iterator(R); // Avoid assertion about validity of R.
206 }
207 friend bool operator==(const_pointer L, const MachineInstrBundleIterator &R) {
208 return const_instr_iterator(L) == R; // Avoid assertion about validity of L.
209 }
210 friend bool operator==(const MachineInstrBundleIterator &L,
211 const_reference R) {
212 return L == &R; // Avoid assertion about validity of R.
213 }
214 friend bool operator==(const_reference L,
215 const MachineInstrBundleIterator &R) {
216 return &L == R; // Avoid assertion about validity of L.
217 }
218
219 friend bool operator!=(const MachineInstrBundleIterator &L,
220 const MachineInstrBundleIterator &R) {
221 return !(L == R);
222 }
223 friend bool operator!=(const MachineInstrBundleIterator &L,
224 const const_instr_iterator &R) {
225 return !(L == R);
226 }
227 friend bool operator!=(const const_instr_iterator &L,
228 const MachineInstrBundleIterator &R) {
229 return !(L == R);
230 }
231 friend bool operator!=(const MachineInstrBundleIterator &L,
232 const nonconst_instr_iterator &R) {
233 return !(L == R);
234 }
235 friend bool operator!=(const nonconst_instr_iterator &L,
236 const MachineInstrBundleIterator &R) {
237 return !(L == R);
238 }
239 friend bool operator!=(const MachineInstrBundleIterator &L, const_pointer R) {
240 return !(L == R);
241 }
242 friend bool operator!=(const_pointer L, const MachineInstrBundleIterator &R) {
243 return !(L == R);
244 }
245 friend bool operator!=(const MachineInstrBundleIterator &L,
246 const_reference R) {
247 return !(L == R);
248 }
249 friend bool operator!=(const_reference L,
250 const MachineInstrBundleIterator &R) {
251 return !(L == R);
252 }
253
254 // Increment and decrement operators...
255 MachineInstrBundleIterator &operator--() {
256 this->decrement(MII);
257 return *this;
258 }
259 MachineInstrBundleIterator &operator++() {
260 this->increment(MII);
261 return *this;
262 }
263 MachineInstrBundleIterator operator--(int) {
264 MachineInstrBundleIterator Temp = *this;
265 --*this;
266 return Temp;
267 }
268 MachineInstrBundleIterator operator++(int) {
269 MachineInstrBundleIterator Temp = *this;
270 ++*this;
271 return Temp;
272 }
273
274 instr_iterator getInstrIterator() const { return MII; }
275
276 nonconst_iterator getNonConstIterator() const { return MII.getNonConst(); }
277
278 /// Get a reverse iterator to the same node.
279 ///
280 /// Gives a reverse iterator that will dereference (and have a handle) to the
281 /// same node. Converting the endpoint iterators in a range will give a
282 /// different range; for range operations, use the explicit conversions.
283 reverse_iterator getReverse() const { return MII.getReverse(); }
284};
285
286} // end namespace llvm
287
288#endif // LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/ilist_iterator.h

1//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_ADT_ILIST_ITERATOR_H
10#define LLVM_ADT_ILIST_ITERATOR_H
11
12#include "llvm/ADT/ilist_node.h"
13#include <cassert>
14#include <cstddef>
15#include <iterator>
16#include <type_traits>
17
18namespace llvm {
19
20namespace ilist_detail {
21
22/// Find const-correct node types.
23template <class OptionsT, bool IsConst> struct IteratorTraits;
24template <class OptionsT> struct IteratorTraits<OptionsT, false> {
25 using value_type = typename OptionsT::value_type;
26 using pointer = typename OptionsT::pointer;
27 using reference = typename OptionsT::reference;
28 using node_pointer = ilist_node_impl<OptionsT> *;
29 using node_reference = ilist_node_impl<OptionsT> &;
30};
31template <class OptionsT> struct IteratorTraits<OptionsT, true> {
32 using value_type = const typename OptionsT::value_type;
33 using pointer = typename OptionsT::const_pointer;
34 using reference = typename OptionsT::const_reference;
35 using node_pointer = const ilist_node_impl<OptionsT> *;
36 using node_reference = const ilist_node_impl<OptionsT> &;
37};
38
39template <bool IsReverse> struct IteratorHelper;
40template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
41 using Access = ilist_detail::NodeAccess;
42
43 template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
44 template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
45};
46template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
47 using Access = ilist_detail::NodeAccess;
48
49 template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
50 template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
51};
52
53} // end namespace ilist_detail
54
55/// Iterator for intrusive lists based on ilist_node.
56template <class OptionsT, bool IsReverse, bool IsConst>
57class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
58 friend ilist_iterator<OptionsT, IsReverse, !IsConst>;
59 friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
60 friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
61
62 using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
63 using Access = ilist_detail::SpecificNodeAccess<OptionsT>;
64
65public:
66 using value_type = typename Traits::value_type;
67 using pointer = typename Traits::pointer;
68 using reference = typename Traits::reference;
69 using difference_type = ptrdiff_t;
70 using iterator_category = std::bidirectional_iterator_tag;
71 using const_pointer = typename OptionsT::const_pointer;
72 using const_reference = typename OptionsT::const_reference;
73
74private:
75 using node_pointer = typename Traits::node_pointer;
76 using node_reference = typename Traits::node_reference;
77
78 node_pointer NodePtr = nullptr;
79
80public:
81 /// Create from an ilist_node.
82 explicit ilist_iterator(node_reference N) : NodePtr(&N) {}
83
84 explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
85 explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
86 ilist_iterator() = default;
87
88 // This is templated so that we can allow constructing a const iterator from
89 // a nonconst iterator...
90 template <bool RHSIsConst>
91 ilist_iterator(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
92 std::enable_if_t<IsConst || !RHSIsConst, void *> = nullptr)
93 : NodePtr(RHS.NodePtr) {}
94
95 // This is templated so that we can allow assigning to a const iterator from
96 // a nonconst iterator...
97 template <bool RHSIsConst>
98 std::enable_if_t<IsConst || !RHSIsConst, ilist_iterator &>
99 operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
100 NodePtr = RHS.NodePtr;
101 return *this;
102 }
103
104 /// Explicit conversion between forward/reverse iterators.
105 ///
106 /// Translate between forward and reverse iterators without changing range
107 /// boundaries. The resulting iterator will dereference (and have a handle)
108 /// to the previous node, which is somewhat unexpected; but converting the
109 /// two endpoints in a range will give the same range in reverse.
110 ///
111 /// This matches std::reverse_iterator conversions.
112 explicit ilist_iterator(
113 const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS)
114 : ilist_iterator(++RHS.getReverse()) {}
115
116 /// Get a reverse iterator to the same node.
117 ///
118 /// Gives a reverse iterator that will dereference (and have a handle) to the
119 /// same node. Converting the endpoint iterators in a range will give a
120 /// different range; for range operations, use the explicit conversions.
121 ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
122 if (NodePtr)
123 return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);
124 return ilist_iterator<OptionsT, !IsReverse, IsConst>();
125 }
126
127 /// Const-cast.
128 ilist_iterator<OptionsT, IsReverse, false> getNonConst() const {
129 if (NodePtr)
130 return ilist_iterator<OptionsT, IsReverse, false>(
131 const_cast<typename ilist_iterator<OptionsT, IsReverse,
132 false>::node_reference>(*NodePtr));
133 return ilist_iterator<OptionsT, IsReverse, false>();
134 }
135
136 // Accessors...
137 reference operator*() const {
138 assert(!NodePtr->isKnownSentinel())(static_cast<void> (0));
139 return *Access::getValuePtr(NodePtr);
140 }
141 pointer operator->() const { return &operator*(); }
142
143 // Comparison operators
144 friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) {
145 return LHS.NodePtr == RHS.NodePtr;
3
Assuming 'LHS.NodePtr' is not equal to 'RHS.NodePtr'
4
Returning zero, which participates in a condition later
146 }
147 friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) {
148 return LHS.NodePtr != RHS.NodePtr;
149 }
150
151 // Increment and decrement operators...
152 ilist_iterator &operator--() {
153 NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev();
154 return *this;
155 }
156 ilist_iterator &operator++() {
157 NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext();
158 return *this;
159 }
160 ilist_iterator operator--(int) {
161 ilist_iterator tmp = *this;
162 --*this;
163 return tmp;
164 }
165 ilist_iterator operator++(int) {
166 ilist_iterator tmp = *this;
167 ++*this;
168 return tmp;
169 }
170
171 /// Get the underlying ilist_node.
172 node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); }
173
174 /// Check for end. Only valid if ilist_sentinel_tracking<true>.
175 bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; }
176};
177
178template <typename From> struct simplify_type;
179
180/// Allow ilist_iterators to convert into pointers to a node automatically when
181/// used by the dyn_cast, cast, isa mechanisms...
182///
183/// FIXME: remove this, since there is no implicit conversion to NodeTy.
184template <class OptionsT, bool IsConst>
185struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
186 using iterator = ilist_iterator<OptionsT, false, IsConst>;
187 using SimpleType = typename iterator::pointer;
188
189 static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
190};
191template <class OptionsT, bool IsConst>
192struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>>
193 : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {};
194
195} // end namespace llvm
196
197#endif // LLVM_ADT_ILIST_ITERATOR_H

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/Hexagon/HexagonMachineScheduler.h

1//===- HexagonMachineScheduler.h - Custom Hexagon MI scheduler --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Custom Hexagon MI scheduler.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONMACHINESCHEDULER_H
14#define LLVM_LIB_TARGET_HEXAGON_HEXAGONMACHINESCHEDULER_H
15
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/Twine.h"
18#include "llvm/CodeGen/DFAPacketizer.h"
19#include "llvm/CodeGen/MachineScheduler.h"
20#include "llvm/CodeGen/RegisterPressure.h"
21#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
22#include "llvm/CodeGen/TargetInstrInfo.h"
23#include "llvm/CodeGen/TargetSchedule.h"
24#include "llvm/CodeGen/TargetSubtargetInfo.h"
25#include <algorithm>
26#include <cassert>
27#include <limits>
28#include <memory>
29#include <vector>
30
31namespace llvm {
32
33class SUnit;
34
35class VLIWResourceModel {
36 /// ResourcesModel - Represents VLIW state.
37 /// Not limited to VLIW targets per se, but assumes
38 /// definition of DFA by a target.
39 DFAPacketizer *ResourcesModel;
40
41 const TargetSchedModel *SchedModel;
42
43 /// Local packet/bundle model. Purely
44 /// internal to the MI schedulre at the time.
45 std::vector<SUnit *> Packet;
46
47 /// Total packets created.
48 unsigned TotalPackets = 0;
49
50public:
51 VLIWResourceModel(const TargetSubtargetInfo &STI, const TargetSchedModel *SM)
52 : SchedModel(SM) {
53 ResourcesModel = STI.getInstrInfo()->CreateTargetScheduleState(STI);
54
55 // This hard requirement could be relaxed,
56 // but for now do not let it proceed.
57 assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.")(static_cast<void> (0));
58
59 Packet.resize(SchedModel->getIssueWidth());
60 Packet.clear();
61 ResourcesModel->clearResources();
62 }
63
64 ~VLIWResourceModel() {
65 delete ResourcesModel;
66 }
67
68 void resetPacketState() {
69 Packet.clear();
70 }
71
72 void resetDFA() {
73 ResourcesModel->clearResources();
74 }
75
76 void reset() {
77 Packet.clear();
78 ResourcesModel->clearResources();
79 }
80
81 bool isResourceAvailable(SUnit *SU, bool IsTop);
82 bool reserveResources(SUnit *SU, bool IsTop);
83 unsigned getTotalPackets() const { return TotalPackets; }
84 bool isInPacket(SUnit *SU) const { return is_contained(Packet, SU); }
85};
86
87/// Extend the standard ScheduleDAGMI to provide more context and override the
88/// top-level schedule() driver.
89class VLIWMachineScheduler : public ScheduleDAGMILive {
90public:
91 VLIWMachineScheduler(MachineSchedContext *C,
92 std::unique_ptr<MachineSchedStrategy> S)
93 : ScheduleDAGMILive(C, std::move(S)) {}
94
95 /// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
96 /// time to do some work.
97 void schedule() override;
98
99 RegisterClassInfo *getRegClassInfo() { return RegClassInfo; }
100 int getBBSize() { return BB->size(); }
101};
102
103//===----------------------------------------------------------------------===//
104// ConvergingVLIWScheduler - Implementation of the standard
105// MachineSchedStrategy.
106//===----------------------------------------------------------------------===//
107
108/// ConvergingVLIWScheduler shrinks the unscheduled zone using heuristics
109/// to balance the schedule.
110class ConvergingVLIWScheduler : public MachineSchedStrategy {
111 /// Store the state used by ConvergingVLIWScheduler heuristics, required
112 /// for the lifetime of one invocation of pickNode().
113 struct SchedCandidate {
114 // The best SUnit candidate.
115 SUnit *SU = nullptr;
21
Null pointer value stored to 'TopCand.SU'
116
117 // Register pressure values for the best candidate.
118 RegPressureDelta RPDelta;
119
120 // Best scheduling cost.
121 int SCost = 0;
122
123 SchedCandidate() = default;
124 };
125 /// Represent the type of SchedCandidate found within a single queue.
126 enum CandResult {
127 NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure,
128 BestCost, Weak};
129
130 /// Each Scheduling boundary is associated with ready queues. It tracks the
131 /// current cycle in whichever direction at has moved, and maintains the state
132 /// of "hazards" and other interlocks at the current cycle.
133 struct VLIWSchedBoundary {
134 VLIWMachineScheduler *DAG = nullptr;
135 const TargetSchedModel *SchedModel = nullptr;
136
137 ReadyQueue Available;
138 ReadyQueue Pending;
139 bool CheckPending = false;
140
141 ScheduleHazardRecognizer *HazardRec = nullptr;
142 VLIWResourceModel *ResourceModel = nullptr;
143
144 unsigned CurrCycle = 0;
145 unsigned IssueCount = 0;
146 unsigned CriticalPathLength = 0;
147
148 /// MinReadyCycle - Cycle of the soonest available instruction.
149 unsigned MinReadyCycle = std::numeric_limits<unsigned>::max();
150
151 // Remember the greatest min operand latency.
152 unsigned MaxMinLatency = 0;
153
154 /// Pending queues extend the ready queues with the same ID and the
155 /// PendingFlag set.
156 VLIWSchedBoundary(unsigned ID, const Twine &Name)
157 : Available(ID, Name+".A"),
158 Pending(ID << ConvergingVLIWScheduler::LogMaxQID, Name+".P") {}
159
160 ~VLIWSchedBoundary() {
161 delete ResourceModel;
162 delete HazardRec;
163 }
164
165 void init(VLIWMachineScheduler *dag, const TargetSchedModel *smodel) {
166 DAG = dag;
167 SchedModel = smodel;
168 CurrCycle = 0;
169 IssueCount = 0;
170 // Initialize the critical path length limit, which used by the scheduling
171 // cost model to determine the value for scheduling an instruction. We use
172 // a slightly different heuristic for small and large functions. For small
173 // functions, it's important to use the height/depth of the instruction.
174 // For large functions, prioritizing by height or depth increases spills.
175 CriticalPathLength = DAG->getBBSize() / SchedModel->getIssueWidth();
176 if (DAG->getBBSize() < 50)
177 // We divide by two as a cheap and simple heuristic to reduce the
178 // critcal path length, which increases the priority of using the graph
179 // height/depth in the scheduler's cost computation.
180 CriticalPathLength >>= 1;
181 else {
182 // For large basic blocks, we prefer a larger critical path length to
183 // decrease the priority of using the graph height/depth.
184 unsigned MaxPath = 0;
185 for (auto &SU : DAG->SUnits)
186 MaxPath = std::max(MaxPath, isTop() ? SU.getHeight() : SU.getDepth());
187 CriticalPathLength = std::max(CriticalPathLength, MaxPath) + 1;
188 }
189 }
190
191 bool isTop() const {
192 return Available.getID() == ConvergingVLIWScheduler::TopQID;
193 }
194
195 bool checkHazard(SUnit *SU);
196
197 void releaseNode(SUnit *SU, unsigned ReadyCycle);
198
199 void bumpCycle();
200
201 void bumpNode(SUnit *SU);
202
203 void releasePending();
204
205 void removeReady(SUnit *SU);
206
207 SUnit *pickOnlyChoice();
208
209 bool isLatencyBound(SUnit *SU) {
210 if (CurrCycle >= CriticalPathLength)
211 return true;
212 unsigned PathLength = isTop() ? SU->getHeight() : SU->getDepth();
213 return CriticalPathLength - CurrCycle <= PathLength;
214 }
215 };
216
217 VLIWMachineScheduler *DAG = nullptr;
218 const TargetSchedModel *SchedModel = nullptr;
219
220 // State of the top and bottom scheduled instruction boundaries.
221 VLIWSchedBoundary Top;
222 VLIWSchedBoundary Bot;
223
224 /// List of pressure sets that have a high pressure level in the region.
225 std::vector<bool> HighPressureSets;
226
227public:
228 /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
229 enum {
230 TopQID = 1,
231 BotQID = 2,
232 LogMaxQID = 2
233 };
234
235 ConvergingVLIWScheduler() : Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
236
237 void initialize(ScheduleDAGMI *dag) override;
238
239 SUnit *pickNode(bool &IsTopNode) override;
240
241 void schedNode(SUnit *SU, bool IsTopNode) override;
242
243 void releaseTopNode(SUnit *SU) override;
244
245 void releaseBottomNode(SUnit *SU) override;
246
247 unsigned reportPackets() {
248 return Top.ResourceModel->getTotalPackets() +
249 Bot.ResourceModel->getTotalPackets();
250 }
251
252protected:
253 SUnit *pickNodeBidrectional(bool &IsTopNode);
254
255 int pressureChange(const SUnit *SU, bool isBotUp);
256
257 int SchedulingCost(ReadyQueue &Q,
258 SUnit *SU, SchedCandidate &Candidate,
259 RegPressureDelta &Delta, bool verbose);
260
261 CandResult pickNodeFromQueue(VLIWSchedBoundary &Zone,
262 const RegPressureTracker &RPTracker,
263 SchedCandidate &Candidate);
264#ifndef NDEBUG1
265 void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
266 int Cost, PressureChange P = PressureChange());
267
268 void readyQueueVerboseDump(const RegPressureTracker &RPTracker,
269 SchedCandidate &Candidate, ReadyQueue &Q);
270#endif
271};
272
273} // end namespace llvm
274
275#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONMACHINESCHEDULER_H