//===-- GCNSchedStrategy.cpp - GCN Scheduler Strategy ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This contains a MachineSchedStrategy implementation for maximizing wave
/// occupancy on GCN hardware.
///
/// This pass will apply multiple scheduling stages to the same function.
/// Regions are first recorded in GCNScheduleDAGMILive::schedule. The actual
/// entry point for the scheduling of those regions is
/// GCNScheduleDAGMILive::runSchedStages.
///
/// Generally, the reason for having multiple scheduling stages is to account
/// for the kernel-wide effect of register usage on occupancy. Usually, only a
/// few scheduling regions will have register pressure high enough to limit
/// occupancy for the kernel, so constraints can be relaxed to improve ILP in
/// other regions.
///
//===----------------------------------------------------------------------===//

#include "GCNSchedStrategy.h"
#include "AMDGPUIGroupLP.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"

#define DEBUG_TYPE "machine-scheduler"

using namespace llvm;

static cl::opt<bool> DisableUnclusterHighRP(
    "amdgpu-disable-unclustered-high-rp-reschedule", cl::Hidden,
    cl::desc("Disable unclustered high register pressure "
             "reduction scheduling stage."),
    cl::init(false));

static cl::opt<bool> DisableClusteredLowOccupancy(
    "amdgpu-disable-clustered-low-occupancy-reschedule", cl::Hidden,
    cl::desc("Disable clustered low occupancy "
             "rescheduling for ILP scheduling stage."),
    cl::init(false));

static cl::opt<unsigned> ScheduleMetricBias(
    "amdgpu-schedule-metric-bias", cl::Hidden,
    cl::desc(
        "Sets the bias which adds weight to occupancy vs latency. Set it to "
        "100 to chase the occupancy only."),
    cl::init(10));

static cl::opt<bool>
    RelaxedOcc("amdgpu-schedule-relaxed-occupancy", cl::Hidden,
               cl::desc("Relax occupancy targets for kernels which are memory "
                        "bound (amdgpu-membound-threshold), or "
                        "Wave Limited (amdgpu-limit-wave-threshold)."),
               cl::init(false));
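
// Usage note: the options above are registered cl::opt flags (cl::Hidden, so
// they only show under -help-hidden), and can be set on any tool that runs
// this scheduler. A hypothetical debugging invocation (target, CPU, and input
// are illustrative, not taken from this file) might look like:
//
//   llc -mtriple=amdgcn -mcpu=gfx90a kernel.ll \
//       -amdgpu-schedule-metric-bias=50 -amdgpu-schedule-relaxed-occupancy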

const unsigned ScheduleMetrics::ScaleFactor = 100;

GCNSchedStrategy::GCNSchedStrategy(const MachineSchedContext *C)
    : GenericScheduler(C), TargetOccupancy(0), MF(nullptr),
      HasHighPressure(false) {}

void GCNSchedStrategy::initialize(ScheduleDAGMI *DAG) {
  GenericScheduler::initialize(DAG);

  MF = &DAG->MF;

  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();

  SGPRExcessLimit =
      Context->RegClassInfo->getNumAllocatableRegs(&AMDGPU::SGPR_32RegClass);
  VGPRExcessLimit =
      Context->RegClassInfo->getNumAllocatableRegs(&AMDGPU::VGPR_32RegClass);

  SIMachineFunctionInfo &MFI = *MF->getInfo<SIMachineFunctionInfo>();
  // Set the initial TargetOccupancy to the maximum occupancy that we can
  // achieve for this function. This effectively sets a lower bound on the
  // 'Critical' register limits in the scheduler.
  // Allow for lower occupancy targets if the kernel is wave limited or memory
  // bound, and using the relaxed occupancy feature.
  TargetOccupancy =
      RelaxedOcc ? MFI.getMinAllowedOccupancy() : MFI.getOccupancy();
  SGPRCriticalLimit =
      std::min(ST.getMaxNumSGPRs(TargetOccupancy, true), SGPRExcessLimit);

  if (!KnownExcessRP) {
    VGPRCriticalLimit =
        std::min(ST.getMaxNumVGPRs(TargetOccupancy), VGPRExcessLimit);
  } else {
    // This is similar to the ST.getMaxNumVGPRs(TargetOccupancy) result, except
    // it returns a reasonably small number for targets with lots of VGPRs,
    // such as GFX10 and GFX11.
    LLVM_DEBUG(dbgs() << "Region is known to spill, use alternative "
                         "VGPRCriticalLimit calculation method.\n");

    unsigned Granule = AMDGPU::IsaInfo::getVGPRAllocGranule(&ST);
    unsigned Addressable = AMDGPU::IsaInfo::getAddressableNumVGPRs(&ST);
    unsigned VGPRBudget = alignDown(Addressable / TargetOccupancy, Granule);
    VGPRBudget = std::max(VGPRBudget, Granule);
    VGPRCriticalLimit = std::min(VGPRBudget, VGPRExcessLimit);
  }

  // Subtract error margin and bias from register limits and avoid overflow.
  SGPRCriticalLimit -= std::min(SGPRLimitBias + ErrorMargin, SGPRCriticalLimit);
  VGPRCriticalLimit -= std::min(VGPRLimitBias + ErrorMargin, VGPRCriticalLimit);
  SGPRExcessLimit -= std::min(SGPRLimitBias + ErrorMargin, SGPRExcessLimit);
  VGPRExcessLimit -= std::min(VGPRLimitBias + ErrorMargin, VGPRExcessLimit);

  LLVM_DEBUG(dbgs() << "VGPRCriticalLimit = " << VGPRCriticalLimit
                    << ", VGPRExcessLimit = " << VGPRExcessLimit
                    << ", SGPRCriticalLimit = " << SGPRCriticalLimit
                    << ", SGPRExcessLimit = " << SGPRExcessLimit << "\n\n");
}
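
// Illustrative arithmetic for the KnownExcessRP path above, with made-up
// values (not taken from any real subtarget): if Addressable = 256,
// Granule = 4, and TargetOccupancy = 6, then
// VGPRBudget = alignDown(256 / 6, 4) = alignDown(42, 4) = 40, and
// VGPRCriticalLimit = min(40, VGPRExcessLimit).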

void GCNSchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop,
                                     const RegPressureTracker &RPTracker,
                                     const SIRegisterInfo *SRI,
                                     unsigned SGPRPressure,
                                     unsigned VGPRPressure) {
  Cand.SU = SU;
  Cand.AtTop = AtTop;

  if (!DAG->isTrackingPressure())
    return;

  // getDownwardPressure() and getUpwardPressure() make temporary changes to
  // the tracker, so we need to pass those functions a non-const copy.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  Pressure.clear();
  MaxPressure.clear();

  if (AtTop)
    TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure);
  else {
    // FIXME: I think for bottom-up scheduling, the register pressure is
    // cached and can be retrieved by DAG->getPressureDiff(SU).
    TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
  }

  unsigned NewSGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
  unsigned NewVGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];

  // If two instructions increase the pressure of different register sets
  // by the same amount, the generic scheduler will prefer to schedule the
  // instruction that increases the set with the least amount of registers,
  // which in our case would be SGPRs. This is rarely what we want, so
  // when we report excess/critical register pressure, we do it either
  // only for VGPRs or only for SGPRs.

  // FIXME: Better heuristics to determine whether to prefer SGPRs or VGPRs.
  const unsigned MaxVGPRPressureInc = 16;
  bool ShouldTrackVGPRs = VGPRPressure + MaxVGPRPressureInc >= VGPRExcessLimit;
  bool ShouldTrackSGPRs = !ShouldTrackVGPRs && SGPRPressure >= SGPRExcessLimit;

  // FIXME: We have to enter REG-EXCESS before we reach the actual threshold
  // to increase the likelihood we don't go over the limits. We should improve
  // the analysis to look through dependencies to find the path with the least
  // register pressure.

  // We only need to update the RPDelta for instructions that increase register
  // pressure. Instructions that decrease or keep reg pressure the same will be
  // marked as RegExcess in tryCandidate() when they are compared with
  // instructions that increase the register pressure.
  if (ShouldTrackVGPRs && NewVGPRPressure >= VGPRExcessLimit) {
    HasHighPressure = true;
    Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::VGPR_32);
    Cand.RPDelta.Excess.setUnitInc(NewVGPRPressure - VGPRExcessLimit);
  }

  if (ShouldTrackSGPRs && NewSGPRPressure >= SGPRExcessLimit) {
    HasHighPressure = true;
    Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::SReg_32);
    Cand.RPDelta.Excess.setUnitInc(NewSGPRPressure - SGPRExcessLimit);
  }

  // Register pressure is considered 'CRITICAL' if it is approaching a value
  // that would reduce the wave occupancy for the execution unit. When
  // register pressure is 'CRITICAL', increasing SGPR and VGPR pressure both
  // have the same cost, so we don't need to prefer one over the other.

  int SGPRDelta = NewSGPRPressure - SGPRCriticalLimit;
  int VGPRDelta = NewVGPRPressure - VGPRCriticalLimit;

  if (SGPRDelta >= 0 || VGPRDelta >= 0) {
    HasHighPressure = true;
    if (SGPRDelta > VGPRDelta) {
      Cand.RPDelta.CriticalMax =
          PressureChange(AMDGPU::RegisterPressureSets::SReg_32);
      Cand.RPDelta.CriticalMax.setUnitInc(SGPRDelta);
    } else {
      Cand.RPDelta.CriticalMax =
          PressureChange(AMDGPU::RegisterPressureSets::VGPR_32);
      Cand.RPDelta.CriticalMax.setUnitInc(VGPRDelta);
    }
  }
}
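
// Illustrative example of the excess bookkeeping above, with made-up numbers:
// if VGPRExcessLimit is 24 and scheduling SU would raise VGPR pressure to 27,
// Cand.RPDelta.Excess is set to the VGPR_32 pressure set with a unit increment
// of 27 - 24 = 3, which tryCandidate() later weighs under the RegExcess reason.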

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeFromQueue()
void GCNSchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
  ArrayRef<unsigned> Pressure = RPTracker.getRegSetPressureAtPos();
  unsigned SGPRPressure = 0;
  unsigned VGPRPressure = 0;
  if (DAG->isTrackingPressure()) {
    SGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
    VGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
  }
  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {

    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, SU, Zone.isTop(), RPTracker, SRI,
                  SGPRPressure, VGPRPressure);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    tryCandidate(Cand, TryCand, ZoneArg);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(Zone.DAG, SchedModel);
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeBidirectional()
SUnit *GCNSchedStrategy::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  LLVM_DEBUG(dbgs() << "Top Cand: "; traceCandidate(TopCand);
             dbgs() << "Bot Cand: "; traceCandidate(BotCand););
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  tryCandidate(Cand, TopCand, nullptr);
  if (TopCand.Reason != NoCand) {
    Cand.setBest(TopCand);
  }
  LLVM_DEBUG(dbgs() << "Picking: "; traceCandidate(Cand););

  IsTopNode = Cand.AtTop;
  return Cand.SU;
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNode()
SUnit *GCNSchedStrategy::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}
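
// Note on the loop above: a node can still be marked isScheduled while
// sitting in a ready queue after it was scheduled from the other boundary in
// bidirectional mode, so the pick is retried until an unscheduled SU is found.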

GCNSchedStageID GCNSchedStrategy::getCurrentStage() {
  assert(CurrentStage != SchedStages.end());
  return *CurrentStage;
}

bool GCNSchedStrategy::advanceStage() {
  assert(CurrentStage != SchedStages.end());
  if (!CurrentStage)
    CurrentStage = SchedStages.begin();
  else
    CurrentStage++;

  return CurrentStage != SchedStages.end();
}

bool GCNSchedStrategy::hasNextStage() const {
  assert(CurrentStage);
  return std::next(CurrentStage) != SchedStages.end();
}

GCNSchedStageID GCNSchedStrategy::getNextStage() const {
  assert(CurrentStage && std::next(CurrentStage) != SchedStages.end());
  return *std::next(CurrentStage);
}

GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
    const MachineSchedContext *C)
    : GCNSchedStrategy(C) {
  SchedStages.push_back(GCNSchedStageID::OccInitialSchedule);
  SchedStages.push_back(GCNSchedStageID::UnclusteredHighRPReschedule);
  SchedStages.push_back(GCNSchedStageID::ClusteredLowOccupancyReschedule);
  SchedStages.push_back(GCNSchedStageID::PreRARematerialize);
}

GCNMaxILPSchedStrategy::GCNMaxILPSchedStrategy(const MachineSchedContext *C)
    : GCNSchedStrategy(C) {
  SchedStages.push_back(GCNSchedStageID::ILPInitialSchedule);
}
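
// With the stage lists built above, the max-occupancy strategy walks each
// function through up to four stages:
//   OccInitialSchedule -> UnclusteredHighRPReschedule
//     -> ClusteredLowOccupancyReschedule -> PreRARematerialize
// while the max-ILP strategy runs only ILPInitialSchedule. Each stage may
// still be skipped per function (initGCNSchedStage) or per region
// (initGCNRegion).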

bool GCNMaxILPSchedStrategy::tryCandidate(SchedCandidate &Cand,
                                          SchedCandidate &TryCand,
                                          SchedBoundary *Zone) const {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return true;
  }

  // Avoid spilling by exceeding the register limit.
  if (DAG->isTrackingPressure() &&
      tryPressure(TryCand.RPDelta.Excess, Cand.RPDelta.Excess, TryCand, Cand,
                  RegExcess, TRI, DAG->MF))
    return TryCand.Reason != NoCand;

  // Bias PhysReg defs and copies toward their uses and defined registers,
  // respectively.
  if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
                 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
    return TryCand.Reason != NoCand;

  bool SameBoundary = Zone != nullptr;
  if (SameBoundary) {
    // Prioritize instructions that read unbuffered resources by stall cycles.
    if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
                Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
      return TryCand.Reason != NoCand;

    // Avoid critical resource consumption and balance the schedule.
    TryCand.initResourceDelta(DAG, SchedModel);
    if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
                TryCand, Cand, ResourceReduce))
      return TryCand.Reason != NoCand;
    if (tryGreater(TryCand.ResDelta.DemandedResources,
                   Cand.ResDelta.DemandedResources, TryCand, Cand,
                   ResourceDemand))
      return TryCand.Reason != NoCand;

    // Unconditionally try to reduce latency.
    if (tryLatency(TryCand, Cand, *Zone))
      return TryCand.Reason != NoCand;

    // Weak edges are for clustering and other constraints.
    if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
                getWeakLeft(Cand.SU, Cand.AtTop), TryCand, Cand, Weak))
      return TryCand.Reason != NoCand;
  }

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *CandNextClusterSU =
      Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  const SUnit *TryCandNextClusterSU =
      TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == TryCandNextClusterSU,
                 Cand.SU == CandNextClusterSU, TryCand, Cand, Cluster))
    return TryCand.Reason != NoCand;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() &&
      tryPressure(TryCand.RPDelta.CriticalMax, Cand.RPDelta.CriticalMax,
                  TryCand, Cand, RegCritical, TRI, DAG->MF))
    return TryCand.Reason != NoCand;

  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() &&
      tryPressure(TryCand.RPDelta.CurrentMax, Cand.RPDelta.CurrentMax, TryCand,
                  Cand, RegMax, TRI, DAG->MF))
    return TryCand.Reason != NoCand;

  if (SameBoundary) {
    // Fall through to original instruction order.
    if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) ||
        (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
      TryCand.Reason = NodeOrder;
      return true;
    }
  }
  return false;
}

GCNScheduleDAGMILive::GCNScheduleDAGMILive(
    MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S)
    : ScheduleDAGMILive(C, std::move(S)), ST(MF.getSubtarget<GCNSubtarget>()),
      MFI(*MF.getInfo<SIMachineFunctionInfo>()),
      StartingOccupancy(MFI.getOccupancy()), MinOccupancy(StartingOccupancy) {

  LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
  if (RelaxedOcc) {
    MinOccupancy = std::min(MFI.getMinAllowedOccupancy(), StartingOccupancy);
    if (MinOccupancy != StartingOccupancy)
      LLVM_DEBUG(dbgs() << "Allowing Occupancy drops to " << MinOccupancy
                        << ".\n");
  }
}

std::unique_ptr<GCNSchedStage>
GCNScheduleDAGMILive::createSchedStage(GCNSchedStageID SchedStageID) {
  switch (SchedStageID) {
  case GCNSchedStageID::OccInitialSchedule:
    return std::make_unique<OccInitialScheduleStage>(SchedStageID, *this);
  case GCNSchedStageID::UnclusteredHighRPReschedule:
    return std::make_unique<UnclusteredHighRPStage>(SchedStageID, *this);
  case GCNSchedStageID::ClusteredLowOccupancyReschedule:
    return std::make_unique<ClusteredLowOccStage>(SchedStageID, *this);
  case GCNSchedStageID::PreRARematerialize:
    return std::make_unique<PreRARematStage>(SchedStageID, *this);
  case GCNSchedStageID::ILPInitialSchedule:
    return std::make_unique<ILPInitialScheduleStage>(SchedStageID, *this);
  }

  llvm_unreachable("Unknown SchedStageID.");
}

void GCNScheduleDAGMILive::schedule() {
  // Collect all scheduling regions. The actual scheduling is performed in
  // GCNScheduleDAGMILive::finalizeSchedule.
  Regions.push_back(std::pair(RegionBegin, RegionEnd));
}

GCNRegPressure
GCNScheduleDAGMILive::getRealRegPressure(unsigned RegionIdx) const {
  GCNDownwardRPTracker RPTracker(*LIS);
  RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]);
  return RPTracker.moveMaxPressure();
}

void GCNScheduleDAGMILive::computeBlockPressure(unsigned RegionIdx,
                                                const MachineBasicBlock *MBB) {
  GCNDownwardRPTracker RPTracker(*LIS);

  // If the block has only one successor then live-ins of that successor are
  // live-outs of the current block. We can reuse the calculated live set if
  // the successor will be sent to scheduling after the current block.

  // However, due to a bug in LiveInterval analysis it may happen that two
  // predecessors of the same successor block have different lane bitmasks for
  // a live-out register. Work around that by sticking to a one-to-one
  // relationship, i.e. one predecessor with one successor block.
  const MachineBasicBlock *OnlySucc = nullptr;
  if (MBB->succ_size() == 1) {
    auto *Candidate = *MBB->succ_begin();
    if (!Candidate->empty() && Candidate->pred_size() == 1) {
      SlotIndexes *Ind = LIS->getSlotIndexes();
      if (Ind->getMBBStartIdx(MBB) < Ind->getMBBStartIdx(Candidate))
        OnlySucc = Candidate;
    }
  }

  // Scheduler sends regions from the end of the block upwards.
  size_t CurRegion = RegionIdx;
  for (size_t E = Regions.size(); CurRegion != E; ++CurRegion)
    if (Regions[CurRegion].first->getParent() != MBB)
      break;
  --CurRegion;

  auto I = MBB->begin();
  auto LiveInIt = MBBLiveIns.find(MBB);
  auto &Rgn = Regions[CurRegion];
  auto *NonDbgMI = &*skipDebugInstructionsForward(Rgn.first, Rgn.second);
  if (LiveInIt != MBBLiveIns.end()) {
    auto LiveIn = std::move(LiveInIt->second);
    RPTracker.reset(*MBB->begin(), &LiveIn);
    MBBLiveIns.erase(LiveInIt);
  } else {
    I = Rgn.first;
    auto LRS = BBLiveInMap.lookup(NonDbgMI);
#ifdef EXPENSIVE_CHECKS
    assert(isEqual(getLiveRegsBefore(*NonDbgMI, *LIS), LRS));
#endif
    RPTracker.reset(*I, &LRS);
  }

  for (;;) {
    I = RPTracker.getNext();

    if (Regions[CurRegion].first == I || NonDbgMI == I) {
      LiveIns[CurRegion] = RPTracker.getLiveRegs();
      RPTracker.clearMaxPressure();
    }

    if (Regions[CurRegion].second == I) {
      Pressure[CurRegion] = RPTracker.moveMaxPressure();
      if (CurRegion-- == RegionIdx)
        break;
    }
    RPTracker.advanceToNext();
    RPTracker.advanceBeforeNext();
  }

  if (OnlySucc) {
    if (I != MBB->end()) {
      RPTracker.advanceToNext();
      RPTracker.advance(MBB->end());
    }
    RPTracker.advanceBeforeNext();
    MBBLiveIns[OnlySucc] = RPTracker.moveLiveRegs();
  }
}
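
// Illustrative example of the single-successor reuse above: for a chain
// BB0 -> BB1 where BB1's only predecessor is BB0, the tracker's live set at
// the end of BB0 is stored in MBBLiveIns[BB1], so scheduling BB1 can start
// from that cached set instead of recomputing its live-ins from BBLiveInMap.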

DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet>
GCNScheduleDAGMILive::getBBLiveInMap() const {
  assert(!Regions.empty());
  std::vector<MachineInstr *> BBStarters;
  BBStarters.reserve(Regions.size());
  auto I = Regions.rbegin(), E = Regions.rend();
  auto *BB = I->first->getParent();
  do {
    auto *MI = &*skipDebugInstructionsForward(I->first, I->second);
    BBStarters.push_back(MI);
    do {
      ++I;
    } while (I != E && I->first->getParent() == BB);
  } while (I != E);
  return getLiveRegMap(BBStarters, false /*After*/, *LIS);
}

void GCNScheduleDAGMILive::finalizeSchedule() {
  // Start actual scheduling here. This function is called by the base
  // MachineScheduler after all regions have been recorded by
  // GCNScheduleDAGMILive::schedule().
  LiveIns.resize(Regions.size());
  Pressure.resize(Regions.size());
  RescheduleRegions.resize(Regions.size());
  RegionsWithHighRP.resize(Regions.size());
  RegionsWithExcessRP.resize(Regions.size());
  RegionsWithMinOcc.resize(Regions.size());
  RegionsWithIGLPInstrs.resize(Regions.size());
  RescheduleRegions.set();
  RegionsWithHighRP.reset();
  RegionsWithExcessRP.reset();
  RegionsWithMinOcc.reset();
  RegionsWithIGLPInstrs.reset();

  runSchedStages();
}

void GCNScheduleDAGMILive::runSchedStages() {
  LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");

  if (!Regions.empty())
    BBLiveInMap = getBBLiveInMap();

  GCNSchedStrategy &S = static_cast<GCNSchedStrategy &>(*SchedImpl);
  while (S.advanceStage()) {
    auto Stage = createSchedStage(S.getCurrentStage());
    if (!Stage->initGCNSchedStage())
      continue;

    for (auto Region : Regions) {
      RegionBegin = Region.first;
      RegionEnd = Region.second;
      // Set up for scheduling the region and check whether it should be
      // skipped.
      if (!Stage->initGCNRegion()) {
        Stage->advanceRegion();
        exitRegion();
        continue;
      }

      ScheduleDAGMILive::schedule();
      Stage->finalizeGCNRegion();
    }

    Stage->finalizeGCNSchedStage();
  }
}

#ifndef NDEBUG
raw_ostream &llvm::operator<<(raw_ostream &OS, const GCNSchedStageID &StageID) {
  switch (StageID) {
  case GCNSchedStageID::OccInitialSchedule:
    OS << "Max Occupancy Initial Schedule";
    break;
  case GCNSchedStageID::UnclusteredHighRPReschedule:
    OS << "Unclustered High Register Pressure Reschedule";
    break;
  case GCNSchedStageID::ClusteredLowOccupancyReschedule:
    OS << "Clustered Low Occupancy Reschedule";
    break;
  case GCNSchedStageID::PreRARematerialize:
    OS << "Pre-RA Rematerialize";
    break;
  case GCNSchedStageID::ILPInitialSchedule:
    OS << "Max ILP Initial Schedule";
    break;
  }

  return OS;
}
#endif

GCNSchedStage::GCNSchedStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
    : DAG(DAG), S(static_cast<GCNSchedStrategy &>(*DAG.SchedImpl)), MF(DAG.MF),
      MFI(DAG.MFI), ST(DAG.ST), StageID(StageID) {}

bool GCNSchedStage::initGCNSchedStage() {
  if (!DAG.LIS)
    return false;

  LLVM_DEBUG(dbgs() << "Starting scheduling stage: " << StageID << "\n");
  return true;
}

bool UnclusteredHighRPStage::initGCNSchedStage() {
  if (DisableUnclusterHighRP)
    return false;

  if (!GCNSchedStage::initGCNSchedStage())
    return false;

  if (DAG.RegionsWithHighRP.none() && DAG.RegionsWithExcessRP.none())
    return false;

  SavedMutations.swap(DAG.Mutations);
  DAG.addMutation(createIGroupLPDAGMutation(/*IsPostRA=*/false));

  InitialOccupancy = DAG.MinOccupancy;
  // Aggressively try to reduce register pressure in the unclustered high RP
  // stage. Temporarily increase the occupancy target in the region.
  S.SGPRLimitBias = S.HighRPSGPRBias;
  S.VGPRLimitBias = S.HighRPVGPRBias;
  if (MFI.getMaxWavesPerEU() > DAG.MinOccupancy)
    MFI.increaseOccupancy(MF, ++DAG.MinOccupancy);

  LLVM_DEBUG(
      dbgs()
      << "Retrying function scheduling without clustering. "
         "Aggressively try to reduce register pressure to achieve occupancy "
      << DAG.MinOccupancy << ".\n");

  return true;
}
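
// The biases set above feed GCNSchedStrategy::initialize(), where they are
// subtracted from the excess/critical register limits, so this stage
// effectively schedules against artificially tightened register budgets.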

bool ClusteredLowOccStage::initGCNSchedStage() {
  if (DisableClusteredLowOccupancy)
    return false;

  if (!GCNSchedStage::initGCNSchedStage())
    return false;

  // Don't bother trying to improve ILP in lower RP regions if occupancy has
  // not been dropped. All regions will have already been scheduled with the
  // ideal occupancy targets.
  if (DAG.StartingOccupancy <= DAG.MinOccupancy)
    return false;

  LLVM_DEBUG(
      dbgs() << "Retrying function scheduling with lowest recorded occupancy "
             << DAG.MinOccupancy << ".\n");
  return true;
}

bool PreRARematStage::initGCNSchedStage() {
  if (!GCNSchedStage::initGCNSchedStage())
    return false;

  if (DAG.RegionsWithMinOcc.none() || DAG.Regions.size() == 1)
    return false;

  const Function &F = MF.getFunction();
  // Check maximum occupancy
  if (ST.computeOccupancy(F, MFI.getLDSSize()) == DAG.MinOccupancy)
    return false;

  // FIXME: This pass will invalidate cached MBBLiveIns for regions
  // in between the defs and the region we sink the def to. Cached pressure
  // for regions where a def is sunk from will also be invalidated. Will
  // need to be fixed if there is another pass after this pass.
  assert(!S.hasNextStage());

  collectRematerializableInstructions();
  if (RematerializableInsts.empty() || !sinkTriviallyRematInsts(ST, TII))
    return false;

  LLVM_DEBUG(
      dbgs() << "Retrying function scheduling with improved occupancy of "
             << DAG.MinOccupancy << " from rematerializing\n");
  return true;
}

void GCNSchedStage::finalizeGCNSchedStage() {
  DAG.finishBlock();
  LLVM_DEBUG(dbgs() << "Ending scheduling stage: " << StageID << "\n");
}

void UnclusteredHighRPStage::finalizeGCNSchedStage() {
  SavedMutations.swap(DAG.Mutations);
  S.SGPRLimitBias = S.VGPRLimitBias = 0;
  if (DAG.MinOccupancy > InitialOccupancy) {
    for (unsigned IDX = 0; IDX < DAG.Pressure.size(); ++IDX)
      DAG.RegionsWithMinOcc[IDX] =
          DAG.Pressure[IDX].getOccupancy(DAG.ST) == DAG.MinOccupancy;

    LLVM_DEBUG(dbgs() << StageID
                      << " stage successfully increased occupancy to "
                      << DAG.MinOccupancy << '\n');
  }

  GCNSchedStage::finalizeGCNSchedStage();
}

bool GCNSchedStage::initGCNRegion() {
  // Check whether this new region is also a new block.
  if (DAG.RegionBegin->getParent() != CurrentMBB)
    setupNewBlock();

  unsigned NumRegionInstrs = std::distance(DAG.begin(), DAG.end());
  DAG.enterRegion(CurrentMBB, DAG.begin(), DAG.end(), NumRegionInstrs);

  // Skip empty scheduling regions (0 or 1 schedulable instructions).
  if (DAG.begin() == DAG.end() || DAG.begin() == std::prev(DAG.end()))
    return false;

  LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
  LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*CurrentMBB)
                    << " " << CurrentMBB->getName()
                    << "\n  From: " << *DAG.begin() << "    To: ";
             if (DAG.RegionEnd != CurrentMBB->end()) dbgs() << *DAG.RegionEnd;
             else dbgs() << "End";
             dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');

  // Save original instruction order before scheduling for possible revert.
  Unsched.clear();
  Unsched.reserve(DAG.NumRegionInstrs);
  if (StageID == GCNSchedStageID::OccInitialSchedule ||
      StageID == GCNSchedStageID::ILPInitialSchedule) {
    for (auto &I : DAG) {
      Unsched.push_back(&I);
      if (I.getOpcode() == AMDGPU::SCHED_GROUP_BARRIER ||
          I.getOpcode() == AMDGPU::IGLP_OPT)
        DAG.RegionsWithIGLPInstrs[RegionIdx] = true;
    }
  } else {
    for (auto &I : DAG)
      Unsched.push_back(&I);
  }

  PressureBefore = DAG.Pressure[RegionIdx];

  LLVM_DEBUG(
      dbgs() << "Pressure before scheduling:\nRegion live-ins:"
             << print(DAG.LiveIns[RegionIdx], DAG.MRI)
             << "Region live-in pressure: "
             << print(llvm::getRegPressure(DAG.MRI, DAG.LiveIns[RegionIdx]))
             << "Region register pressure: " << print(PressureBefore));

  S.HasHighPressure = false;
  S.KnownExcessRP = isRegionWithExcessRP();

  if (DAG.RegionsWithIGLPInstrs[RegionIdx] &&
      StageID != GCNSchedStageID::UnclusteredHighRPReschedule) {
    SavedMutations.clear();
    SavedMutations.swap(DAG.Mutations);
    bool IsInitialStage = StageID == GCNSchedStageID::OccInitialSchedule ||
                          StageID == GCNSchedStageID::ILPInitialSchedule;
    DAG.addMutation(createIGroupLPDAGMutation(/*IsReentry=*/!IsInitialStage));
  }

  return true;
}

bool UnclusteredHighRPStage::initGCNRegion() {
  // Only reschedule regions with the minimum occupancy or regions that may
  // have spilling (excess register pressure).
  if ((!DAG.RegionsWithMinOcc[RegionIdx] ||
       DAG.MinOccupancy <= InitialOccupancy) &&
      !DAG.RegionsWithExcessRP[RegionIdx])
    return false;

  return GCNSchedStage::initGCNRegion();
}

bool ClusteredLowOccStage::initGCNRegion() {
  // We may need to reschedule this region if it wasn't rescheduled in the last
  // stage, or if we found it was testing critical register pressure limits in
  // the unclustered reschedule stage. The latter is because we may not have
  // been able to raise the min occupancy in the previous stage, so the region
  // may be overly constrained even if it was already rescheduled.
  if (!DAG.RegionsWithHighRP[RegionIdx])
    return false;

  return GCNSchedStage::initGCNRegion();
}

bool PreRARematStage::initGCNRegion() {
  if (!DAG.RescheduleRegions[RegionIdx])
    return false;

  return GCNSchedStage::initGCNRegion();
}

void GCNSchedStage::setupNewBlock() {
  if (CurrentMBB)
    DAG.finishBlock();

  CurrentMBB = DAG.RegionBegin->getParent();
  DAG.startBlock(CurrentMBB);
  // Get real RP for the region if it hasn't been calculated before. After the
  // initial schedule stage real RP will be collected after scheduling.
  if (StageID == GCNSchedStageID::OccInitialSchedule ||
      StageID == GCNSchedStageID::ILPInitialSchedule)
    DAG.computeBlockPressure(RegionIdx, CurrentMBB);
}

void GCNSchedStage::finalizeGCNRegion() {
  DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd);
  DAG.RescheduleRegions[RegionIdx] = false;
  if (S.HasHighPressure)
    DAG.RegionsWithHighRP[RegionIdx] = true;

  // Revert scheduling if we have dropped occupancy or there is some other
  // reason that the original schedule is better.
  checkScheduling();

  if (DAG.RegionsWithIGLPInstrs[RegionIdx] &&
      StageID != GCNSchedStageID::UnclusteredHighRPReschedule)
    SavedMutations.swap(DAG.Mutations);

  DAG.exitRegion();
  RegionIdx++;
}

void GCNSchedStage::checkScheduling() {
  // Check the results of scheduling.
  PressureAfter = DAG.getRealRegPressure(RegionIdx);
  LLVM_DEBUG(dbgs() << "Pressure after scheduling: " << print(PressureAfter));
  LLVM_DEBUG(dbgs() << "Region: " << RegionIdx << ".\n");

  if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
      PressureAfter.getVGPRNum(ST.hasGFX90AInsts()) <= S.VGPRCriticalLimit) {
    DAG.Pressure[RegionIdx] = PressureAfter;
    DAG.RegionsWithMinOcc[RegionIdx] =
        PressureAfter.getOccupancy(ST) == DAG.MinOccupancy;

    // Early out if we have achieved the occupancy target.
    LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
    return;
  }

  unsigned TargetOccupancy =
      std::min(S.getTargetOccupancy(), ST.getOccupancyWithLocalMemSize(MF));
  unsigned WavesAfter =
      std::min(TargetOccupancy, PressureAfter.getOccupancy(ST));
  unsigned WavesBefore =
      std::min(TargetOccupancy, PressureBefore.getOccupancy(ST));
  LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore
                    << ", after " << WavesAfter << ".\n");

  // We may not be able to keep the current target occupancy because of the
  // just scheduled region. We might still be able to revert scheduling if the
  // occupancy before was higher, or if the current schedule has register
  // pressure higher than the excess limits, which could lead to more spilling.
  unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);

  // Allow memory bound functions to drop to 4 waves if not limited by an
  // attribute.
  if (WavesAfter < WavesBefore && WavesAfter < DAG.MinOccupancy &&
      WavesAfter >= MFI.getMinAllowedOccupancy()) {
    LLVM_DEBUG(dbgs() << "Function is memory bound, allow occupancy drop up to "
                      << MFI.getMinAllowedOccupancy() << " waves\n");
    NewOccupancy = WavesAfter;
  }

  if (NewOccupancy < DAG.MinOccupancy) {
    DAG.MinOccupancy = NewOccupancy;
    MFI.limitOccupancy(DAG.MinOccupancy);
    DAG.RegionsWithMinOcc.reset();
    LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
                      << DAG.MinOccupancy << ".\n");
  }

  unsigned MaxVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned MaxSGPRs = ST.getMaxNumSGPRs(MF);
  if (PressureAfter.getVGPRNum(false) > MaxVGPRs ||
      PressureAfter.getAGPRNum() > MaxVGPRs ||
      PressureAfter.getSGPRNum() > MaxSGPRs) {
    DAG.RescheduleRegions[RegionIdx] = true;
    DAG.RegionsWithHighRP[RegionIdx] = true;
    DAG.RegionsWithExcessRP[RegionIdx] = true;
  }

  // Revert if this region's schedule would cause a drop in occupancy or
  // spilling.
  if (shouldRevertScheduling(WavesAfter)) {
    revertScheduling();
  } else {
    DAG.Pressure[RegionIdx] = PressureAfter;
    DAG.RegionsWithMinOcc[RegionIdx] =
        PressureAfter.getOccupancy(ST) == DAG.MinOccupancy;
  }
}
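
// Illustrative example of the early-out above, with made-up limits: if
// S.SGPRCriticalLimit = 98 and S.VGPRCriticalLimit = 24, a region whose
// post-scheduling pressure is 80 SGPRs and 20 VGPRs is accepted immediately,
// while 80 SGPRs and 30 VGPRs falls through to the occupancy comparison and a
// possible revert.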

unsigned
GCNSchedStage::computeSUnitReadyCycle(const SUnit &SU, unsigned CurrCycle,
                                      DenseMap<unsigned, unsigned> &ReadyCycles,
                                      const TargetSchedModel &SM) {
  unsigned ReadyCycle = CurrCycle;
  for (auto &D : SU.Preds) {
    if (D.isAssignedRegDep()) {
      MachineInstr *DefMI = D.getSUnit()->getInstr();
      unsigned Latency = SM.computeInstrLatency(DefMI);
      unsigned DefReady = ReadyCycles[DAG.getSUnit(DefMI)->NodeNum];
      ReadyCycle = std::max(ReadyCycle, DefReady + Latency);
    }
  }
  ReadyCycles[SU.NodeNum] = ReadyCycle;
  return ReadyCycle;
}

#ifndef NDEBUG
struct EarlierIssuingCycle {
  bool operator()(std::pair<MachineInstr *, unsigned> A,
                  std::pair<MachineInstr *, unsigned> B) const {
    return A.second < B.second;
  }
};

static void printScheduleModel(std::set<std::pair<MachineInstr *, unsigned>,
                                        EarlierIssuingCycle> &ReadyCycles) {
  if (ReadyCycles.empty())
    return;
  unsigned BBNum = ReadyCycles.begin()->first->getParent()->getNumber();
  dbgs() << "\n################## Schedule time ReadyCycles for MBB : " << BBNum
         << " ##################\n# Cycle #\t\t\tInstruction\n";
  unsigned IPrev = 1;
  for (auto &I : ReadyCycles) {
    if (I.second > IPrev + 1)
      dbgs() << "****************************** BUBBLE OF " << I.second - IPrev
             << " CYCLES DETECTED ******************************\n\n";
    dbgs() << "[ " << I.second << " ] : " << *I.first << "\n";
    IPrev = I.second;
  }
}
#endif

ScheduleMetrics
GCNSchedStage::getScheduleMetrics(const std::vector<SUnit> &InputSchedule) {
#ifndef NDEBUG
  std::set<std::pair<MachineInstr *, unsigned>, EarlierIssuingCycle>
      ReadyCyclesSorted;
#endif
  const TargetSchedModel &SM = ST.getInstrInfo()->getSchedModel();
  unsigned SumBubbles = 0;
  DenseMap<unsigned, unsigned> ReadyCycles;
  unsigned CurrCycle = 0;
  for (auto &SU : InputSchedule) {
    unsigned ReadyCycle =
        computeSUnitReadyCycle(SU, CurrCycle, ReadyCycles, SM);
    SumBubbles += ReadyCycle - CurrCycle;
#ifndef NDEBUG
    ReadyCyclesSorted.insert(std::make_pair(SU.getInstr(), ReadyCycle));
#endif
    CurrCycle = ++ReadyCycle;
  }
#ifndef NDEBUG
  LLVM_DEBUG(
      printScheduleModel(ReadyCyclesSorted);
      dbgs() << "\n\t"
             << "Metric: "
             << (SumBubbles
                     ? (SumBubbles * ScheduleMetrics::ScaleFactor) / CurrCycle
                     : 1)
             << "\n\n");
#endif

  return ScheduleMetrics(CurrCycle, SumBubbles);
}
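
// Worked example of the bubble metric printed above, with made-up numbers:
// if the schedule finishes at CurrCycle = 200 having accumulated
// SumBubbles = 25 stall cycles, the metric is
// (25 * ScheduleMetrics::ScaleFactor) / 200 = (25 * 100) / 200 = 12;
// a schedule with no bubbles reports 1.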

ScheduleMetrics
GCNSchedStage::getScheduleMetrics(const GCNScheduleDAGMILive &DAG) {
#ifndef NDEBUG
  std::set<std::pair<MachineInstr *, unsigned>, EarlierIssuingCycle>
      ReadyCyclesSorted;
#endif
  const TargetSchedModel &SM = ST.getInstrInfo()->getSchedModel();
  unsigned SumBubbles = 0;
  DenseMap<unsigned, unsigned> ReadyCycles;
  unsigned CurrCycle = 0;
  for (auto &MI : DAG) {
    SUnit *SU = DAG.getSUnit(&MI);
    if (!SU)
      continue;
    unsigned ReadyCycle =
        computeSUnitReadyCycle(*SU, CurrCycle, ReadyCycles, SM);
    SumBubbles += ReadyCycle - CurrCycle;
#ifndef NDEBUG
    ReadyCyclesSorted.insert(std::make_pair(SU->getInstr(), ReadyCycle));
#endif
    CurrCycle = ++ReadyCycle;
  }
#ifndef NDEBUG
  LLVM_DEBUG(
      printScheduleModel(ReadyCyclesSorted);
      dbgs() << "\n\t"
             << "Metric: "
             << (SumBubbles
                     ? (SumBubbles * ScheduleMetrics::ScaleFactor) / CurrCycle
                     : 1)
             << "\n\n");
#endif

  return ScheduleMetrics(CurrCycle, SumBubbles);
}

bool GCNSchedStage::shouldRevertScheduling(unsigned WavesAfter) {
  if (WavesAfter < DAG.MinOccupancy)
    return true;

  return false;
}

bool OccInitialScheduleStage::shouldRevertScheduling(unsigned WavesAfter) {
  if (PressureAfter == PressureBefore)
    return false;

  if (GCNSchedStage::shouldRevertScheduling(WavesAfter))
    return true;

  if (mayCauseSpilling(WavesAfter))
    return true;

  return false;
}

bool UnclusteredHighRPStage::shouldRevertScheduling(unsigned WavesAfter) {
  // If RP is not reduced in the unclustered reschedule stage, revert to the
  // old schedule.
  if ((WavesAfter <= PressureBefore.getOccupancy(ST) &&
       mayCauseSpilling(WavesAfter)) ||
      GCNSchedStage::shouldRevertScheduling(WavesAfter)) {
    LLVM_DEBUG(dbgs() << "Unclustered reschedule did not help.\n");
    return true;
  }

  // Do not attempt to relax the schedule even more if we are already spilling.
  if (isRegionWithExcessRP())
    return false;

  LLVM_DEBUG(
      dbgs()
      << "\n\t      *** In shouldRevertScheduling ***\n"
      << "      *********** BEFORE UnclusteredHighRPStage ***********\n");
  ScheduleMetrics MBefore = getScheduleMetrics(DAG.SUnits);
  LLVM_DEBUG(
      dbgs()
      << "\n      *********** AFTER UnclusteredHighRPStage ***********\n");
  ScheduleMetrics MAfter = getScheduleMetrics(DAG);
  unsigned OldMetric = MBefore.getMetric();
  unsigned NewMetric = MAfter.getMetric();
  unsigned WavesBefore =
      std::min(S.getTargetOccupancy(), PressureBefore.getOccupancy(ST));
  unsigned Profit =
      ((WavesAfter * ScheduleMetrics::ScaleFactor) / WavesBefore *
       ((OldMetric + ScheduleMetricBias) * ScheduleMetrics::ScaleFactor) /
       NewMetric) /
      ScheduleMetrics::ScaleFactor;
  LLVM_DEBUG(dbgs() << "\tMetric before " << MBefore << "\tMetric after "
                    << MAfter << "Profit: " << Profit << "\n");
  return Profit < ScheduleMetrics::ScaleFactor;
}
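
// Worked example of the Profit computation above, with made-up numbers:
// WavesBefore = WavesAfter = 4, OldMetric = 20, NewMetric = 40, and the
// default ScheduleMetricBias of 10 give
//   Profit = ((4 * 100) / 4) * (((20 + 10) * 100) / 40) / 100
//          = (100 * 75) / 100 = 75,
// which is below ScaleFactor (100), so the unclustered schedule is reverted.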

bool ClusteredLowOccStage::shouldRevertScheduling(unsigned WavesAfter) {
  if (PressureAfter == PressureBefore)
    return false;

  if (GCNSchedStage::shouldRevertScheduling(WavesAfter))
    return true;

  if (mayCauseSpilling(WavesAfter))
    return true;

  return false;
}

bool PreRARematStage::shouldRevertScheduling(unsigned WavesAfter) {
  if (GCNSchedStage::shouldRevertScheduling(WavesAfter))
    return true;

  if (mayCauseSpilling(WavesAfter))
    return true;

  return false;
}

bool ILPInitialScheduleStage::shouldRevertScheduling(unsigned WavesAfter) {
  if (mayCauseSpilling(WavesAfter))
    return true;

  return false;
}

bool GCNSchedStage::mayCauseSpilling(unsigned WavesAfter) {
  if (WavesAfter <= MFI.getMinWavesPerEU() && isRegionWithExcessRP() &&
      !PressureAfter.less(MF, PressureBefore)) {
    LLVM_DEBUG(dbgs() << "New pressure will result in more spilling.\n");
    return true;
  }

  return false;
}

void GCNSchedStage::revertScheduling() {
  DAG.RegionsWithMinOcc[RegionIdx] =
      PressureBefore.getOccupancy(ST) == DAG.MinOccupancy;
  LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
  DAG.RescheduleRegions[RegionIdx] =
      S.hasNextStage() &&
      S.getNextStage() != GCNSchedStageID::UnclusteredHighRPReschedule;
  DAG.RegionEnd = DAG.RegionBegin;
  int SkippedDebugInstr = 0;
  for (MachineInstr *MI : Unsched) {
    if (MI->isDebugInstr()) {
      ++SkippedDebugInstr;
      continue;
    }

    if (MI->getIterator() != DAG.RegionEnd) {
      DAG.BB->remove(MI);
      DAG.BB->insert(DAG.RegionEnd, MI);
      if (!MI->isDebugInstr())
        DAG.LIS->handleMove(*MI, true);
    }

    // Reset read-undef flags and update them later.
    for (auto &Op : MI->all_defs())
      Op.setIsUndef(false);
    RegisterOperands RegOpers;
    RegOpers.collect(*MI, *DAG.TRI, DAG.MRI, DAG.ShouldTrackLaneMasks, false);
    if (!MI->isDebugInstr()) {
      if (DAG.ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = DAG.LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*DAG.LIS, DAG.MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *DAG.LIS);
      }
    }
    DAG.RegionEnd = MI->getIterator();
    ++DAG.RegionEnd;
    LLVM_DEBUG(dbgs() << "Scheduling " << *MI);
  }

  // After reverting the schedule, debug instrs will now be at the end of the
  // block and RegionEnd will point to the first debug instr. Increment
  // RegionEnd past debug instrs to the actual end of the scheduling region.
  while (SkippedDebugInstr-- > 0)
    ++DAG.RegionEnd;

  // If the Unsched.front() instruction is a debug instruction, this will
  // actually shrink the region since we moved all debug instructions to the
  // end of the block. Find the first instruction that is not a debug
  // instruction.
  DAG.RegionBegin = Unsched.front()->getIterator();
  if (DAG.RegionBegin->isDebugInstr()) {
    for (MachineInstr *MI : Unsched) {
      if (MI->isDebugInstr())
        continue;
      DAG.RegionBegin = MI->getIterator();
      break;
    }
  }

  // Then move the debug instructions back into their correct place and set
  // RegionBegin and RegionEnd if needed.
  DAG.placeDebugValues();

  DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd);
}

void PreRARematStage::collectRematerializableInstructions() {
  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo *>(DAG.TRI);
  for (unsigned I = 0, E = DAG.MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    if (!DAG.LIS->hasInterval(Reg))
      continue;

    // TODO: Handle AGPR and SGPR rematerialization
    if (!SRI->isVGPRClass(DAG.MRI.getRegClass(Reg)) ||
        !DAG.MRI.hasOneDef(Reg) || !DAG.MRI.hasOneNonDBGUse(Reg))
      continue;

    MachineOperand *Op = DAG.MRI.getOneDef(Reg);
    MachineInstr *Def = Op->getParent();
    if (Op->getSubReg() != 0 || !isTriviallyReMaterializable(*Def))
      continue;

    MachineInstr *UseI = &*DAG.MRI.use_instr_nodbg_begin(Reg);
    if (Def->getParent() == UseI->getParent())
      continue;

    // We are only collecting defs that are defined in another block and are
    // live-through or used inside regions at MinOccupancy. This means that the
    // register must be in the live-in set for the region.
    bool AddedToRematList = false;
    for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) {
      auto It = DAG.LiveIns[I].find(Reg);
      if (It != DAG.LiveIns[I].end() && !It->second.none()) {
        if (DAG.RegionsWithMinOcc[I]) {
          RematerializableInsts[I][Def] = UseI;
          AddedToRematList = true;
        }

        // Collect regions with rematerializable reg as live-in to avoid
        // searching later when updating RP.
        RematDefToLiveInRegions[Def].push_back(I);
      }
    }
    if (!AddedToRematList)
      RematDefToLiveInRegions.erase(Def);
  }
}

bool PreRARematStage::sinkTriviallyRematInsts(const GCNSubtarget &ST,
                                              const TargetInstrInfo *TII) {
  // Temporary copies of cached variables we will be modifying and replacing if
  // sinking succeeds.
  SmallVector<
      std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>, 32>
      NewRegions;
  DenseMap<unsigned, GCNRegPressure> NewPressure;
  DenseMap<unsigned, GCNRPTracker::LiveRegSet> NewLiveIns;
  BitVector NewRescheduleRegions;
  LiveIntervals *LIS = DAG.LIS;

  NewRegions.resize(DAG.Regions.size());
  NewRescheduleRegions.resize(DAG.Regions.size());

  // Collect only regions that have a rematerializable def as a live-in.
  SmallSet<unsigned, 16> ImpactedRegions;
  for (const auto &It : RematDefToLiveInRegions)
    ImpactedRegions.insert(It.second.begin(), It.second.end());

  // Make copies of register pressure and live-ins cache that will be updated
  // as we rematerialize.
  for (auto Idx : ImpactedRegions) {
    NewPressure[Idx] = DAG.Pressure[Idx];
    NewLiveIns[Idx] = DAG.LiveIns[Idx];
  }
  NewRegions = DAG.Regions;
  NewRescheduleRegions.reset();

  DenseMap<MachineInstr *, MachineInstr *> InsertedMIToOldDef;
  bool Improved = false;
  for (auto I : ImpactedRegions) {
    if (!DAG.RegionsWithMinOcc[I])
      continue;

    Improved = false;
    int VGPRUsage = NewPressure[I].getVGPRNum(ST.hasGFX90AInsts());
    int SGPRUsage = NewPressure[I].getSGPRNum();

    // TODO: Handle occupancy drop due to AGPR and SGPR.
    // Check if the cause of the occupancy drop is VGPR usage and not SGPR.
    if (ST.getOccupancyWithNumSGPRs(SGPRUsage) == DAG.MinOccupancy)
      break;

    // The occupancy of this region could have been improved by a previous
    // iteration's sinking of defs.
    if (NewPressure[I].getOccupancy(ST) > DAG.MinOccupancy) {
      NewRescheduleRegions[I] = true;
      Improved = true;
      continue;
    }

    // First check if we have enough trivially rematerializable instructions to
    // improve occupancy. Optimistically assume all instructions we are able to
    // sink decreased RP.
    int TotalSinkableRegs = 0;
    for (const auto &It : RematerializableInsts[I]) {
      MachineInstr *Def = It.first;
      Register DefReg = Def->getOperand(0).getReg();
      TotalSinkableRegs +=
          SIRegisterInfo::getNumCoveredRegs(NewLiveIns[I][DefReg]);
    }
    int VGPRsAfterSink = VGPRUsage - TotalSinkableRegs;
    unsigned OptimisticOccupancy = ST.getOccupancyWithNumVGPRs(VGPRsAfterSink);
    // If, in the most optimistic scenario, we cannot improve occupancy, then
    // do not attempt to sink any instructions.
    if (OptimisticOccupancy <= DAG.MinOccupancy)
      break;

    unsigned ImproveOccupancy = 0;
    SmallVector<MachineInstr *, 4> SinkedDefs;
    for (auto &It : RematerializableInsts[I]) {
      MachineInstr *Def = It.first;
      MachineBasicBlock::iterator InsertPos =
          MachineBasicBlock::iterator(It.second);
      Register Reg = Def->getOperand(0).getReg();
      // Rematerialize MI to its use block. Since we are only rematerializing
      // instructions that do not have any virtual reg uses, we do not need to
      // call LiveRangeEdit::allUsesAvailableAt() and
      // LiveRangeEdit::canRematerializeAt().
      TII->reMaterialize(*InsertPos->getParent(), InsertPos, Reg,
                         Def->getOperand(0).getSubReg(), *Def, *DAG.TRI);
      MachineInstr *NewMI = &*std::prev(InsertPos);
      LIS->InsertMachineInstrInMaps(*NewMI);
      LIS->removeInterval(Reg);
      LIS->createAndComputeVirtRegInterval(Reg);
      InsertedMIToOldDef[NewMI] = Def;

      // Update region boundaries in the scheduling region we sinked from,
      // since we may sink an instruction that was at the beginning or end of
      // its region.
      DAG.updateRegionBoundaries(NewRegions, Def, /*NewMI =*/nullptr,
                                 /*Removing =*/true);

      // Update region boundaries in the region we sinked to.
      DAG.updateRegionBoundaries(NewRegions, InsertPos, NewMI);

      LaneBitmask PrevMask = NewLiveIns[I][Reg];
      // FIXME: Also update cached pressure for where the def was sinked from.
      // Update RP for all regions that have this reg as a live-in and remove
      // the reg from all regions as a live-in.
      for (auto Idx : RematDefToLiveInRegions[Def]) {
        NewLiveIns[Idx].erase(Reg);
        if (InsertPos->getParent() != DAG.Regions[Idx].first->getParent()) {
          // Def is live-through and not used in this block.
          NewPressure[Idx].inc(Reg, PrevMask, LaneBitmask::getNone(), DAG.MRI);
        } else {
          // Def is used and rematerialized into this block.
          GCNDownwardRPTracker RPT(*LIS);
          auto *NonDbgMI = &*skipDebugInstructionsForward(
              NewRegions[Idx].first, NewRegions[Idx].second);
          RPT.reset(*NonDbgMI, &NewLiveIns[Idx]);
          RPT.advance(NewRegions[Idx].second);
          NewPressure[Idx] = RPT.moveMaxPressure();
        }
      }

      SinkedDefs.push_back(Def);
      ImproveOccupancy = NewPressure[I].getOccupancy(ST);
      if (ImproveOccupancy > DAG.MinOccupancy)
        break;
    }

    // Remove defs we just sinked from all regions' lists of sinkable defs.
    for (auto &Def : SinkedDefs)
      for (auto TrackedIdx : RematDefToLiveInRegions[Def])
        RematerializableInsts[TrackedIdx].erase(Def);

    if (ImproveOccupancy <= DAG.MinOccupancy)
      break;

    NewRescheduleRegions[I] = true;
    Improved = true;
  }

  if (!Improved) {
    // Occupancy was not improved for all regions that were at MinOccupancy.
    // Undo sinking and remove newly rematerialized instructions.
    for (auto &Entry : InsertedMIToOldDef) {
      MachineInstr *MI = Entry.first;
      MachineInstr *OldMI = Entry.second;
      Register Reg = MI->getOperand(0).getReg();
      LIS->RemoveMachineInstrFromMaps(*MI);
      MI->eraseFromParent();
      OldMI->clearRegisterDeads(Reg);
      LIS->removeInterval(Reg);
      LIS->createAndComputeVirtRegInterval(Reg);
    }
    return false;
  }

  // Occupancy was improved for all regions.
  for (auto &Entry : InsertedMIToOldDef) {
    MachineInstr *MI = Entry.first;
    MachineInstr *OldMI = Entry.second;

    // Remove OldMI from BBLiveInMap since we are sinking it from its MBB.
    DAG.BBLiveInMap.erase(OldMI);

    // Remove OldMI and update LIS.
    Register Reg = MI->getOperand(0).getReg();
    LIS->RemoveMachineInstrFromMaps(*OldMI);
    OldMI->eraseFromParent();
    LIS->removeInterval(Reg);
    LIS->createAndComputeVirtRegInterval(Reg);
  }

  // Update live-ins, register pressure, and regions caches.
  for (auto Idx : ImpactedRegions) {
    DAG.LiveIns[Idx] = NewLiveIns[Idx];
    DAG.Pressure[Idx] = NewPressure[Idx];
    DAG.MBBLiveIns.erase(DAG.Regions[Idx].first->getParent());
  }
  DAG.Regions = NewRegions;
  DAG.RescheduleRegions = NewRescheduleRegions;

  MFI.increaseOccupancy(MF, ++DAG.MinOccupancy);

  return true;
}

// Copied from MachineLICM
bool PreRARematStage::isTriviallyReMaterializable(const MachineInstr &MI) {
  if (!DAG.TII->isTriviallyReMaterializable(MI))
    return false;

  for (const MachineOperand &MO : MI.all_uses())
    if (MO.getReg().isVirtual())
      return false;

  return true;
}
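
// A typical candidate accepted by the check above is an immediate move such
// as V_MOV_B32 with no virtual register uses: its single result can simply be
// recomputed in the use block rather than staying live across regions.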

// When removing, we will have to check both beginning and ending of the region.
// When inserting, we will only have to check if we are inserting NewMI in front
// of a scheduling region and do not need to check the ending since we will only
// ever be inserting before an already existing MI.
void GCNScheduleDAGMILive::updateRegionBoundaries(
    SmallVectorImpl<std::pair<MachineBasicBlock::iterator,
                              MachineBasicBlock::iterator>> &RegionBoundaries,
    MachineBasicBlock::iterator MI, MachineInstr *NewMI, bool Removing) {
  unsigned I = 0, E = RegionBoundaries.size();
  // Search for the first region of the block where MI is located.
  while (I != E && MI->getParent() != RegionBoundaries[I].first->getParent())
    ++I;

  for (; I != E; ++I) {
    if (MI->getParent() != RegionBoundaries[I].first->getParent())
      return;

    if (Removing && MI == RegionBoundaries[I].first &&
        MI == RegionBoundaries[I].second) {
      // MI is in a region of size 1. After removing, the region will have
      // size 0, so set RegionBegin and RegionEnd to the past-the-end-of-block
      // iterator.
      RegionBoundaries[I] =
          std::pair(MI->getParent()->end(), MI->getParent()->end());
      return;
    }
    if (MI == RegionBoundaries[I].first) {
      if (Removing)
        RegionBoundaries[I] =
            std::pair(std::next(MI), RegionBoundaries[I].second);
      else
        // Inserted NewMI in front of region, set new RegionBegin to NewMI
        RegionBoundaries[I] = std::pair(MachineBasicBlock::iterator(NewMI),
                                        RegionBoundaries[I].second);
      return;
    }
    if (Removing && MI == RegionBoundaries[I].second) {
      RegionBoundaries[I] = std::pair(RegionBoundaries[I].first, std::prev(MI));
      return;
    }
  }
}

static bool hasIGLPInstrs(ScheduleDAGInstrs *DAG) {
  return std::any_of(
      DAG->begin(), DAG->end(), [](MachineBasicBlock::iterator MI) {
        unsigned Opc = MI->getOpcode();
        return Opc == AMDGPU::SCHED_GROUP_BARRIER || Opc == AMDGPU::IGLP_OPT;
      });
}

GCNPostScheduleDAGMILive::GCNPostScheduleDAGMILive(
    MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
    bool RemoveKillFlags)
    : ScheduleDAGMI(C, std::move(S), RemoveKillFlags) {}

void GCNPostScheduleDAGMILive::schedule() {
  HasIGLPInstrs = hasIGLPInstrs(this);
  if (HasIGLPInstrs) {
    SavedMutations.clear();
    SavedMutations.swap(Mutations);
    addMutation(createIGroupLPDAGMutation(/*IsReentry=*/true));
  }

  ScheduleDAGMI::schedule();
}

void GCNPostScheduleDAGMILive::finalizeSchedule() {
  if (HasIGLPInstrs)
    SavedMutations.swap(Mutations);

  ScheduleDAGMI::finalizeSchedule();
}
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock & MBB
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
static cl::opt< bool > DisableClusteredLowOccupancy("amdgpu-disable-clustered-low-occupancy-reschedule", cl::Hidden, cl::desc("Disable clustered low occupancy " "rescheduling for ILP scheduling stage."), cl::init(false))
static cl::opt< bool > RelaxedOcc("amdgpu-schedule-relaxed-occupancy", cl::Hidden, cl::desc("Relax occupancy targets for kernels which are memory " "bound (amdgpu-membound-threshold), or " "Wave Limited (amdgpu-limit-wave-threshold)."), cl::init(false))
static cl::opt< bool > DisableUnclusterHighRP("amdgpu-disable-unclustered-high-rp-reschedule", cl::Hidden, cl::desc("Disable unclustered high register pressure " "reduction scheduling stage."), cl::init(false))
static void printScheduleModel(std::set< std::pair< MachineInstr *, unsigned >, EarlierIssuingCycle > &ReadyCycles)
static bool hasIGLPInstrs(ScheduleDAGInstrs *DAG)
static cl::opt< unsigned > ScheduleMetricBias("amdgpu-schedule-metric-bias", cl::Hidden, cl::desc("Sets the bias which adds weight to occupancy vs latency. Set it to " "100 to chase the occupancy only."), cl::init(10))
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
if(VerifyEach)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
unsigned getOccupancyWithLocalMemSize(uint32_t Bytes, const Function &) const
Inverse of getMaxLocalMemWithWaveCount.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
BitVector & reset()
Definition: BitVector.h:392
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:341
BitVector & set()
Definition: BitVector.h:351
bool none() const
none - Returns true if none of the bits are set.
Definition: BitVector.h:188
bool shouldRevertScheduling(unsigned WavesAfter) override
This class represents an Operation in the Expression.
bool erase(const KeyT &Val)
Definition: DenseMap.h:329
GCNMaxILPSchedStrategy(const MachineSchedContext *C)
bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand, SchedBoundary *Zone) const override
Apply a set of heuristics to a new candidate.
GCNMaxOccupancySchedStrategy(const MachineSchedContext *C)
void finalizeSchedule() override
Allow targets to perform final scheduling actions at the level of the whole MachineFunction.
void schedule() override
Implement ScheduleDAGInstrs interface for scheduling a sequence of reorderable instructions.
GCNPostScheduleDAGMILive(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S, bool RemoveKillFlags)
virtual bool initGCNRegion()
GCNSchedStrategy & S
GCNRegPressure PressureBefore
bool isRegionWithExcessRP() const
bool mayCauseSpilling(unsigned WavesAfter)
ScheduleMetrics getScheduleMetrics(const std::vector< SUnit > &InputSchedule)
GCNScheduleDAGMILive & DAG
const GCNSchedStageID StageID
std::vector< MachineInstr * > Unsched
GCNRegPressure PressureAfter
MachineFunction & MF
SIMachineFunctionInfo & MFI
unsigned computeSUnitReadyCycle(const SUnit &SU, unsigned CurrCycle, DenseMap< unsigned, unsigned > &ReadyCycles, const TargetSchedModel &SM)
virtual void finalizeGCNSchedStage()
virtual bool initGCNSchedStage()
virtual bool shouldRevertScheduling(unsigned WavesAfter)
std::vector< std::unique_ptr< ScheduleDAGMutation > > SavedMutations
GCNSchedStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
MachineBasicBlock * CurrentMBB
const GCNSubtarget & ST
This is a minimal scheduler strategy.
const unsigned HighRPSGPRBias
GCNSchedStrategy(const MachineSchedContext *C)
SmallVector< GCNSchedStageID, 4 > SchedStages
void pickNodeFromQueue(SchedBoundary &Zone, const CandPolicy &ZonePolicy, const RegPressureTracker &RPTracker, SchedCandidate &Cand)
SUnit * pickNodeBidirectional(bool &IsTopNode)
std::vector< unsigned > MaxPressure
GCNSchedStageID getCurrentStage()
MachineFunction * MF
SmallVectorImpl< GCNSchedStageID >::iterator CurrentStage
void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop, const RegPressureTracker &RPTracker, const SIRegisterInfo *SRI, unsigned SGPRPressure, unsigned VGPRPressure)
std::vector< unsigned > Pressure
void initialize(ScheduleDAGMI *DAG) override
Initialize the strategy after building the DAG for a new region.
const unsigned HighRPVGPRBias
SUnit * pickNode(bool &IsTopNode) override
Pick the best node to balance the schedule. Implements MachineSchedStrategy.
GCNSchedStageID getNextStage() const
void finalizeSchedule() override
Allow targets to perform final scheduling actions at the level of the whole MachineFunction.
void schedule() override
Implement ScheduleDAGInstrs interface for scheduling a sequence of reorderable instructions.
GCNScheduleDAGMILive(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S)
bool hasGFX90AInsts() const
unsigned computeOccupancy(const Function &F, unsigned LDSSize=0, unsigned NumSGPRs=0, unsigned NumVGPRs=0) const
Return occupancy for the given function.
const SIInstrInfo * getInstrInfo() const override
Definition: GCNSubtarget.h:250
unsigned getMaxNumVGPRs(unsigned WavesPerEU) const
unsigned getOccupancyWithNumVGPRs(unsigned VGPRs) const
Return the maximum number of waves per SIMD for kernels using VGPRs VGPRs.
unsigned getOccupancyWithNumSGPRs(unsigned SGPRs) const
Return the maximum number of waves per SIMD for kernels using SGPRs SGPRs.
unsigned getMaxNumSGPRs(unsigned WavesPerEU, bool Addressable) const
void traceCandidate(const SchedCandidate &Cand)
void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone, SchedBoundary *OtherZone)
Set the CandPolicy given a scheduling zone given the current resources and latencies inside and outsi...
const TargetSchedModel * SchedModel
const MachineSchedContext * Context
const TargetRegisterInfo * TRI
GenericScheduler shrinks the unscheduled zone using heuristics to balance the schedule.
SchedCandidate BotCand
Candidate last picked from Bot boundary.
SchedCandidate TopCand
Candidate last picked from Top boundary.
MachineSchedPolicy RegionPolicy
virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand, SchedBoundary *Zone) const
Apply a set of heuristics to a new candidate.
ScheduleDAGMILive * DAG
void initialize(ScheduleDAGMI *dag) override
Initialize the strategy after building the DAG for a new region.
bool shouldRevertScheduling(unsigned WavesAfter) override
bool hasInterval(Register Reg) const
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
void handleMove(MachineInstr &MI, bool UpdateFlags=false)
Call this method to notify LiveIntervals that instruction MI has been moved within a basic block.
SlotIndexes * getSlotIndexes() const
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
void RemoveMachineInstrFromMaps(MachineInstr &MI)
void removeInterval(Register Reg)
Interval removal.
LiveInterval & createAndComputeVirtRegInterval(Register Reg)
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
unsigned succ_size() const
MachineInstrBundleIterator< MachineInstr > iterator
StringRef getName() const
Return the name of the corresponding LLVM basic block, or an empty string.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
Definition: MachineInstr.h:68
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:326
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
MachineOperand class - Representation of each machine instruction operand.
bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
MachineOperand * getOneDef(Register Reg) const
Returns the defining operand if there is exactly one operand defining the specified register, otherwise nullptr.
use_instr_nodbg_iterator use_instr_nodbg_begin(Register RegNo) const
bool hasOneDef(Register RegNo) const
Return true if there is exactly one operand defining the specified register.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
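These queries combine into the standard scan over all virtual registers that many pre-RA passes use to find values that are trivial to rewrite (one def, one non-debug use). A hedged sketch, with MRI assumed in scope:

    for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
      Register Reg = Register::index2VirtReg(I);   // 0-based index -> vreg
      if (!MRI.hasOneDef(Reg) || !MRI.hasOneNonDBGUse(Reg))
        continue;                                  // keep only trivial def/use
      MachineOperand *DefOp = MRI.getOneDef(Reg);  // the unique defining operand
      MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(Reg); // the unique user
      // ... DefOp->getParent() and UseMI may now be rewritten safely ...
    }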
bool shouldRevertScheduling(unsigned WavesAfter) override
bool shouldRevertScheduling(unsigned WavesAfter) override
bool initGCNRegion() override
bool initGCNSchedStage() override
Capture a change in pressure for a single pressure set.
void setUnitInc(int Inc)
Helpers for implementing custom MachineSchedStrategy classes.
bool empty() const
Track the current register pressure at some position in the instruction stream, and remember the high water mark.
void advance()
Advance across the current instruction.
void getDownwardPressure(const MachineInstr *MI, std::vector< unsigned > &PressureResult, std::vector< unsigned > &MaxPressureResult)
Get the pressure of each PSet after traversing this instruction top-down.
const std::vector< unsigned > & getRegSetPressureAtPos() const
Get the register set pressure at the current position, which may be less than the pressure across the traversed region.
void getUpwardPressure(const MachineInstr *MI, std::vector< unsigned > &PressureResult, std::vector< unsigned > &MaxPressureResult)
Get the pressure of each PSet after traversing this instruction bottom-up.
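A pressure-aware strategy typically probes a candidate in whichever direction it would be scheduled before committing to it. A sketch of how these hooks compose, assuming Tracker is a mutable RegPressureTracker, MI a candidate instruction, and FromTop a hypothetical flag; both result vectors are indexed by pressure-set ID:

    std::vector<unsigned> Pressure, MaxPressure;
    if (FromTop)                                    // scheduling top-down?
      Tracker.getDownwardPressure(MI, Pressure, MaxPressure);
    else
      Tracker.getUpwardPressure(MI, Pressure, MaxPressure);
    // Pressure at the tracker's current position, per pressure set.
    const std::vector<unsigned> &AtPos = Tracker.getRegSetPressureAtPos();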
unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const
getNumAllocatableRegs - Returns the number of actually allocatable registers in RC in the current function.
List of registers defined and used by a machine instruction.
void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, bool TrackLaneMasks, bool IgnoreDead)
Analyze the given instruction MI and fill in the Uses, Defs and DeadDefs list based on the MachineOperand flags.
void adjustLaneLiveness(const LiveIntervals &LIS, const MachineRegisterInfo &MRI, SlotIndex Pos, MachineInstr *AddFlagsMI=nullptr)
Use liveness information to find out which uses/defs are partially undefined/dead and adjust the RegisterOperands accordingly.
void detectDeadDefs(const MachineInstr &MI, const LiveIntervals &LIS)
Use liveness information to find dead defs not marked with a dead flag and move them to the DeadDefs vector.
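RegisterOperands is normally filled per instruction and then refined with liveness, roughly as follows (MI, TRI, MRI, and LIS assumed in scope as references):

    RegisterOperands RegOpers;
    RegOpers.collect(MI, TRI, MRI, /*TrackLaneMasks=*/true, /*IgnoreDead=*/false);
    RegOpers.detectDeadDefs(MI, LIS);   // flag defs that liveness proves dead
    RegOpers.adjustLaneLiveness(LIS, MRI,
                                LIS.getInstructionIndex(MI).getRegSlot());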
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
Definition: Register.h:84
const TargetSchedModel & getSchedModel() const
Definition: SIInstrInfo.h:1374
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which interpolation parameters to load.
void increaseOccupancy(const MachineFunction &MF, unsigned Limit)
void limitOccupancy(const MachineFunction &MF)
static unsigned getNumCoveredRegs(LaneBitmask LM)
static bool isVGPRClass(const TargetRegisterClass *RC)
Scheduling unit. This is a node in the scheduling DAG.
Definition: ScheduleDAG.h:242
unsigned NodeNum
Entry # of node in the node vector.
Definition: ScheduleDAG.h:264
bool isScheduled
True once scheduled.
Definition: ScheduleDAG.h:284
bool isBottomReady() const
Definition: ScheduleDAG.h:449
bool isTopReady() const
Definition: ScheduleDAG.h:446
SmallVector< SDep, 4 > Preds
All sunit predecessors.
Definition: ScheduleDAG.h:256
MachineInstr * getInstr() const
Returns the representative MachineInstr for this SUnit.
Definition: ScheduleDAG.h:373
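A common idiom is to walk a node's predecessor edges and inspect the underlying instructions; for example (SU assumed in scope; boundary nodes have no instruction, so getInstr may return nullptr):

    for (const SDep &Pred : SU->Preds) {
      SUnit *PredSU = Pred.getSUnit();
      if (PredSU->isScheduled)
        continue;                       // only unscheduled predecessors
      if (MachineInstr *MI = PredSU->getInstr())
        LLVM_DEBUG(dbgs() << "pred of SU(" << SU->NodeNum << "): " << *MI);
    }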
Each Scheduling boundary is associated with ready queues.
unsigned getLatencyStallCycles(SUnit *SU)
Get the difference between the given SUnit's ready time and the current cycle.
ScheduleDAGMI * DAG
SUnit * pickOnlyChoice()
Call this before applying any other heuristics to the Available queue.
void removeReady(SUnit *SU)
Remove SU from the ready set for this boundary.
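pickOnlyChoice and removeReady usually bracket a strategy's pick: take the cheap single-choice fast path if it applies, and once a node is chosen, clear it from whichever boundary still lists it as ready. A schematic fragment in the GenericScheduler style (Top and Bot are the strategy's SchedBoundary members; pickBest stands in for a hypothetical heuristic scan of the Available queue):

    SUnit *SU = Top.pickOnlyChoice();   // fast path: exactly one ready node
    if (!SU)
      SU = pickBest(Top.Available);     // hypothetical heuristic scan
    if (SU->isTopReady())
      Top.removeReady(SU);              // keep both boundaries consistent
    if (SU->isBottomReady())
      Bot.removeReady(SU);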
A ScheduleDAG for scheduling lists of MachineInstr.
MachineBasicBlock::iterator end() const
Returns an iterator to the bottom of the current scheduling region.
MachineBasicBlock * BB
The block in which to insert instructions.
MachineBasicBlock::iterator RegionEnd
The end of the range to be scheduled.
virtual void finalizeSchedule()
Allow targets to perform final scheduling actions at the level of the whole MachineFunction.
MachineBasicBlock::iterator begin() const
Returns an iterator to the top of the current scheduling region.
SUnit * getSUnit(MachineInstr *MI) const
Returns an existing SUnit for this MI, or nullptr.
virtual void exitRegion()
Called when the scheduler has finished scheduling the current region.
MachineBasicBlock::iterator RegionBegin
The beginning of the range to be scheduled.
unsigned NumRegionInstrs
Instructions in this region (distance(RegionBegin, RegionEnd)).
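At scheduling time a region is just the half-open range [RegionBegin, RegionEnd) inside BB, so walking it while skipping debug instructions looks like this (DAG assumed to be a ScheduleDAGInstrs*):

    for (MachineBasicBlock::iterator I = DAG->begin(), E = DAG->end(); I != E;) {
      I = skipDebugInstructionsForward(I, E);
      if (I == E)
        break;
      MachineInstr &MI = *I++;
      // ... DAG->getSUnit(&MI) maps the instruction back to its node ...
    }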
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while updating LiveIntervals and tracking register pressure.
void schedule() override
Implement ScheduleDAGInstrs interface for scheduling a sequence of reorderable instructions.
void enterRegion(MachineBasicBlock *bb, MachineBasicBlock::iterator begin, MachineBasicBlock::iterator end, unsigned regioninstrs) override
Implement the ScheduleDAGInstrs interface for handling the next scheduling region.
const RegPressureTracker & getBotRPTracker() const
bool isTrackingPressure() const
Return true if register pressure tracking is enabled.
const RegPressureTracker & getTopRPTracker() const
RegPressureTracker RPTracker
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions according to the given MachineSchedStrategy without much extra book-keeping.
void startBlock(MachineBasicBlock *bb) override
Prepares to perform scheduling in the given block.
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
MachineBasicBlock::iterator top() const
void schedule() override
Implement ScheduleDAGInstrs interface for scheduling a sequence of reorderable instructions.
MachineBasicBlock::iterator bottom() const
void finishBlock() override
Cleans up after scheduling in the given block.
LiveIntervals * LIS
const SUnit * getNextClusterPred() const
void placeDebugValues()
Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
const SUnit * getNextClusterSucc() const
std::vector< std::unique_ptr< ScheduleDAGMutation > > Mutations
Ordered list of DAG postprocessing steps.
MachineRegisterInfo & MRI
Virtual/real register map.
Definition: ScheduleDAG.h:560
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:557
std::vector< SUnit > SUnits
The scheduling units.
Definition: ScheduleDAG.h:561
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:558
MachineFunction & MF
Machine function.
Definition: ScheduleDAG.h:559
static const unsigned ScaleFactor
unsigned getMetric() const
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:68
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:240
SlotIndexes pass.
Definition: SlotIndexes.h:300
SlotIndex getMBBStartIdx(unsigned Num) const
Returns the first index in the given basic block number.
Definition: SlotIndexes.h:452
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
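The returned pair makes SmallSet convenient for visit-once loops; a small illustrative example:

    SmallSet<unsigned, 4> Seen;
    for (unsigned V : {3u, 7u, 3u, 9u})
      if (Seen.insert(V).second)        // .second is true on first insertion
        dbgs() << "first occurrence of " << V << "\n";  // prints 3, 7, 9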
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:586
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
TargetInstrInfo - Interface to description of machine instruction set.
bool isTriviallyReMaterializable(const MachineInstr &MI) const
Return true if the instruction is trivially rematerializable, meaning it has no side effects and requires no operands that aren't always available.
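Combined with the MachineRegisterInfo queries above, this is the usual shape of a rematerialization-candidate test; a hedged sketch, not the exact criteria of any particular stage (TII, MRI, and DefMI assumed in scope):

    Register DefReg = DefMI.getOperand(0).getReg(); // assumed: operand 0 is the def
    bool CanRemat = TII->isTriviallyReMaterializable(DefMI) &&
                    MRI.hasOneDef(DefReg) && MRI.hasOneNonDBGUse(DefReg);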
Provide an instruction scheduling machine model to CodeGen passes.
virtual const TargetInstrInfo * getInstrInfo() const
bool shouldRevertScheduling(unsigned WavesAfter) override
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI)
unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI, std::optional< bool > EnableWavefrontSize32)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool isEqual(const GCNRPTracker::LiveRegSet &S1, const GCNRPTracker::LiveRegSet &S2)
unsigned getWeakLeft(const SUnit *SU, bool isTop)
GCNRegPressure getRegPressure(const MachineRegisterInfo &MRI, Range &&LiveRegs)
cl::opt< bool > VerifyScheduling
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr)
IterT skipDebugInstructionsForward(IterT It, IterT End, bool SkipPseudoOp=true)
Increment It until it points to a non-debug instruction or to End and return the resulting iterator.
bool tryPressure(const PressureChange &TryP, const PressureChange &CandP, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason, const TargetRegisterInfo *TRI, const MachineFunction &MF)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, SchedBoundary &Zone)
bool tryGreater(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:293
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1858
DenseMap< MachineInstr *, GCNRPTracker::LiveRegSet > getLiveRegMap(Range &&R, bool After, LiveIntervals &LIS)
creates a map MachineInstr -> LiveRegSet; R - a range of iterators over instructions; After - whether the live set is taken upon entry to or exit from each instruction.
uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the largest uint64_t less than or equal to Value that is congruent to Skew modulo Align.
Definition: MathExtras.h:428
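For example, rounding a 93-register budget down to an 8-register allocation granule (Skew left at its default of 0):

    unsigned Budget = alignDown(93, 8);  // == 88, the largest multiple of 8 <= 93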
GCNRPTracker::LiveRegSet getLiveRegsBefore(const MachineInstr &MI, const LiveIntervals &LIS)
bool tryLess(int TryVal, int CandVal, GenericSchedulerBase::SchedCandidate &TryCand, GenericSchedulerBase::SchedCandidate &Cand, GenericSchedulerBase::CandReason Reason)
Return true if this heuristic determines order.
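These helpers are meant to be chained inside a tryCandidate override: each either decides the order (recording its CandReason in TryCand and returning true) or defers to the next. A schematic fragment, not any scheduler's exact policy (Cand, TryCand, and Zone are the override's parameters; Zone assumed non-null here):

    if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
                getWeakLeft(Cand.SU, Cand.AtTop),
                TryCand, Cand, GenericSchedulerBase::Weak))
      return TryCand.Reason != GenericSchedulerBase::NoCand;
    if (tryLatency(TryCand, Cand, *Zone))
      return TryCand.Reason != GenericSchedulerBase::NoCand;
    return false;  // no heuristic fired; keep the incumbent candidate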
Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
int biasPhysReg(const SUnit *SU, bool isTop)
Minimize physical register live ranges.
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(bool IsReentry)
IsReentry specifies whether this is a reentry into the IGroupLPDAGMutation.
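A scheduler installs the mutation as a DAG postprocessing step via ScheduleDAGMI::addMutation; schematically (DAG assumed to be a ScheduleDAGMI*; the right IsReentry value depends on whether an earlier stage already ran the mutation):

    DAG->addMutation(createIGroupLPDAGMutation(/*IsReentry=*/false));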
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
bool operator()(std::pair< MachineInstr *, unsigned > A, std::pair< MachineInstr *, unsigned > B) const
unsigned getOccupancy(const GCNSubtarget &ST) const
unsigned getVGPRNum(bool UnifiedVGPRFile) const
unsigned getAGPRNum() const
unsigned getSGPRNum() const
bool less(const GCNSubtarget &ST, const GCNRegPressure &O, unsigned MaxOccupancy=std::numeric_limits< unsigned >::max()) const
Policy for scheduling the next instruction in the candidate's zone.
Store the state used by GenericScheduler heuristics, required for the lifetime of one invocation of pickNode().
void reset(const CandPolicy &NewPolicy)
void initResourceDelta(const ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel)
Status of an instruction's critical resource consumption.
static constexpr LaneBitmask getNone()
Definition: LaneBitmask.h:81
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instantiate a scheduler.
RegisterClassInfo * RegClassInfo
PressureChange CriticalMax