LLVM 23.0.0git
GCNSchedStrategy.h
Go to the documentation of this file.
1//===-- GCNSchedStrategy.h - GCN Scheduler Strategy -*- C++ -*-------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_AMDGPU_GCNSCHEDSTRATEGY_H
14#define LLVM_LIB_TARGET_AMDGPU_GCNSCHEDSTRATEGY_H
15
16#include "GCNRegPressure.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/MapVector.h"
24
25namespace llvm {
26
28class SIRegisterInfo;
29class GCNSubtarget;
30class GCNSchedStage;
31
41
42#ifndef NDEBUG
43raw_ostream &operator<<(raw_ostream &OS, const GCNSchedStageID &StageID);
44#endif
45
46/// This is a minimal scheduler strategy. The main difference between this
47/// and the GenericScheduler is that GCNSchedStrategy uses different
48/// heuristics to determine excess/critical pressure sets.
50protected:
51 SUnit *pickNodeBidirectional(bool &IsTopNode, bool &PickedPending);
52
53 void pickNodeFromQueue(SchedBoundary &Zone, const CandPolicy &ZonePolicy,
54 const RegPressureTracker &RPTracker,
55 SchedCandidate &Cand, bool &IsPending,
56 bool IsBottomUp);
57
58 void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
59 const RegPressureTracker &RPTracker,
60 const SIRegisterInfo *SRI, unsigned SGPRPressure,
61 unsigned VGPRPressure, bool IsBottomUp);
62
63 /// Evaluates instructions in the pending queue using a subset of scheduling
64 /// heuristics.
65 ///
66 /// Instructions that cannot be issued due to hardware constraints are placed
67 /// in the pending queue rather than the available queue, making them normally
68 /// invisible to scheduling heuristics. However, in certain scenarios (such as
69 /// avoiding register spilling), it may be beneficial to consider scheduling
70 /// these not-yet-ready instructions.
72 SchedBoundary *Zone) const;
73
74 void printCandidateDecision(const SchedCandidate &Current,
75 const SchedCandidate &Preferred);
76
77 std::vector<unsigned> Pressure;
78
79 std::vector<unsigned> MaxPressure;
80
82
84
86
88
89 // Scheduling stages for this strategy.
91
92 // Pointer to the current SchedStageID.
94
95 // GCN RP Tracker for top-down scheduling
97
98 // GCN RP Tracker for bottom-up scheduling
100
101public:
102 // schedule() has seen register pressure over the critical limits and had to
103 // track register pressure for actual scheduling heuristics.
105
106 // Schedule known to have excess register pressure. Be more conservative in
107 // increasing ILP and preserving VGPRs.
108 bool KnownExcessRP = false;
109
110 // An error margin is necessary because of poor performance of the generic RP
111 // tracker and can be adjusted up for tuning heuristics to try and more
112 // aggressively reduce register pressure.
113 unsigned ErrorMargin = 3;
114
115 // Bias for SGPR limits under a high register pressure.
116 const unsigned HighRPSGPRBias = 7;
117
118 // Bias for VGPR limits under a high register pressure.
119 const unsigned HighRPVGPRBias = 7;
120
122
124
125 unsigned SGPRLimitBias = 0;
126
127 unsigned VGPRLimitBias = 0;
128
130
131 SUnit *pickNode(bool &IsTopNode) override;
132
133 void schedNode(SUnit *SU, bool IsTopNode) override;
134
135 void initialize(ScheduleDAGMI *DAG) override;
136
  // Returns the occupancy (waves per SIMD) the strategy currently targets.
  unsigned getTargetOccupancy() { return TargetOccupancy; }
138
  // Sets the occupancy target used by subsequent scheduling decisions.
  void setTargetOccupancy(unsigned Occ) { TargetOccupancy = Occ; }
140
142
143 // Advances stage. Returns true if there are remaining stages.
144 bool advanceStage();
145
146 bool hasNextStage() const;
147
149
151
153};
154
155/// The goal of this scheduling strategy is to maximize kernel occupancy (i.e.
156/// maximum number of waves per simd).
158public:
160 bool IsLegacyScheduler = false);
161};
162
163/// The goal of this scheduling strategy is to maximize ILP for a single wave
164/// (i.e. latency hiding).
166protected:
167 bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
168 SchedBoundary *Zone) const override;
169
170public:
172};
173
174/// The goal of this scheduling strategy is to maximize memory clause for a
175/// single wave.
177protected:
178 bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
179 SchedBoundary *Zone) const override;
180
181public:
183};
184
186 unsigned ScheduleLength;
187 unsigned BubbleCycles;
188
189public:
  // Default construction leaves ScheduleLength/BubbleCycles uninitialized;
  // only use such an instance after assigning a fully constructed
  // ScheduleMetrics to it.
  ScheduleMetrics() = default;
  // Creates metrics for a schedule of length \p L with \p BC bubble cycles.
  ScheduleMetrics(unsigned L, unsigned BC)
      : ScheduleLength(L), BubbleCycles(BC) {}
  // Total length of the measured schedule.
  unsigned getLength() const { return ScheduleLength; }
  // Number of bubble cycles in the measured schedule.
  unsigned getBubbles() const { return BubbleCycles; }
195 unsigned getMetric() const {
196 unsigned Metric = (BubbleCycles * ScaleFactor) / ScheduleLength;
197 // Metric is zero if the amount of bubbles is less than 1% which is too
198 // small. So, return 1.
199 return Metric ? Metric : 1;
200 }
201 static const unsigned ScaleFactor;
202};
203
205 dbgs() << "\n Schedule Metric (scaled by "
207 << " ) is: " << Sm.getMetric() << " [ " << Sm.getBubbles() << "/"
208 << Sm.getLength() << " ]\n";
209 return OS;
210}
211
212class GCNScheduleDAGMILive;
215 // The live in/out pressure as indexed by the first or last MI in the region
216 // before scheduling.
218 // The mapping of RegionIDx to key instruction
219 DenseMap<unsigned, MachineInstr *> IdxToInstruction;
220 // Whether we are calculating LiveOuts or LiveIns
221 bool IsLiveOut;
222
223public:
224 RegionPressureMap() = default;
226 : DAG(GCNDAG), IsLiveOut(LiveOut) {}
227 // Build the Instr->LiveReg and RegionIdx->Instr maps
228 void buildLiveRegMap();
229
230 // Retrieve the LiveReg for a given RegionIdx
232 assert(IdxToInstruction.contains(RegionIdx));
233 MachineInstr *Key = IdxToInstruction[RegionIdx];
234 return RegionLiveRegMap[Key];
235 }
236};
237
238/// A region's boundaries i.e. a pair of instruction bundle iterators. The lower
239/// boundary is inclusive, the upper boundary is exclusive.
241 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>;
242
244 friend class GCNSchedStage;
249 friend class PreRARematStage;
251 friend class RegionPressureMap;
252
253 const GCNSubtarget &ST;
254
256
257 // Occupancy target at the beginning of function scheduling cycle.
258 unsigned StartingOccupancy;
259
260 // Minimal real occupancy recorded for the function.
261 unsigned MinOccupancy;
262
263 // Vector of regions recorded for later rescheduling
265
266 // Record regions with high register pressure.
267 BitVector RegionsWithHighRP;
268
269 // Record regions with excess register pressure over the physical register
270 // limit. Register pressure in these regions usually will result in spilling.
271 BitVector RegionsWithExcessRP;
272
273 // Regions that have IGLP instructions (SCHED_GROUP_BARRIER or IGLP_OPT).
274 BitVector RegionsWithIGLPInstrs;
275
276 // Region live-in cache.
278
279 // Region pressure cache.
281
282 // Temporary basic block live-in cache.
284
285 // The map of the initial first region instruction to region live in registers
287
288 // Calculate the map of the initial first region instruction to region live in
289 // registers
291
292 // Calculate the map of the initial last region instruction to region live out
293 // registers
295 getRegionLiveOutMap() const;
296
297 // The live out registers per region. These are internally stored as a map of
298 // the initial last region instruction to region live out registers, but can
299 // be retrieved with the RegionIdx by calls to getLiveRegsForRegionIdx.
300 RegionPressureMap RegionLiveOuts;
301
302 // Return current region pressure.
303 GCNRegPressure getRealRegPressure(unsigned RegionIdx) const;
304
305 // Compute and cache live-ins and pressure for all regions in block.
306 void computeBlockPressure(unsigned RegionIdx, const MachineBasicBlock *MBB);
307
308 /// If necessary, updates a region's boundaries following insertion ( \p NewMI
309 /// != nullptr) or removal ( \p NewMI == nullptr) of a \p MI in the region.
310 /// For an MI removal, this must be called before the MI is actually erased
311 /// from its parent MBB.
312 void updateRegionBoundaries(RegionBoundaries &RegionBounds,
314 MachineInstr *NewMI);
315
316 void runSchedStages();
317
318 std::unique_ptr<GCNSchedStage> createSchedStage(GCNSchedStageID SchedStageID);
319
320public:
322 std::unique_ptr<MachineSchedStrategy> S);
323
324 void schedule() override;
325
326 void finalizeSchedule() override;
327};
328
329// GCNSchedStrategy applies multiple scheduling stages to a function.
331protected:
333
335
337
339
341
343
344 // The current block being scheduled.
346
347 // Current region index.
348 unsigned RegionIdx = 0;
349
350 // Record the original order of instructions before scheduling.
351 std::vector<MachineInstr *> Unsched;
352
353 // RP before scheduling the current region.
355
356 // RP after scheduling the current region.
358
359 std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations;
360
362
363public:
364 // Initialize state for a scheduling stage. Returns false if the current stage
365 // should be skipped.
366 virtual bool initGCNSchedStage();
367
368 // Finalize state after finishing a scheduling pass on the function.
369 virtual void finalizeGCNSchedStage();
370
371 // Setup for scheduling a region. Returns false if the current region should
372 // be skipped.
373 virtual bool initGCNRegion();
374
375 // Finalize state after scheduling a region.
376 virtual void finalizeGCNRegion();
377
378 // Track whether a new region is also a new MBB.
379 void setupNewBlock();
380
381 // Check result of scheduling.
382 void checkScheduling();
383
384 // Computes the given schedule's virtual execution time in clock cycles.
385 ScheduleMetrics getScheduleMetrics(const std::vector<SUnit> &InputSchedule);
387 unsigned computeSUnitReadyCycle(const SUnit &SU, unsigned CurrCycle,
388 DenseMap<unsigned, unsigned> &ReadyCycles,
389 const TargetSchedModel &SM);
390
391 // Returns true if scheduling should be reverted.
392 virtual bool shouldRevertScheduling(unsigned WavesAfter);
393
  // Returns true if the region currently being scheduled was recorded by the
  // DAG as having register pressure in excess of the physical register limit
  // (such pressure usually results in spilling).
  bool isRegionWithExcessRP() const {
    return DAG.RegionsWithExcessRP[RegionIdx];
  }
398
  // Returns the index of the region this stage is currently working on.
  unsigned getRegionIdx() { return RegionIdx; }
401
402 // Returns true if the new schedule may result in more spilling.
403 bool mayCauseSpilling(unsigned WavesAfter);
404
405 /// Sets the schedule of region \p RegionIdx in block \p MBB to \p MIOrder.
406 /// The MIs in \p MIOrder must be exactly the same as the ones currently
407 /// existing inside the region, only in a different order that honors def-use
408 /// chains.
411
413
414 virtual ~GCNSchedStage() = default;
415};
416
424
426private:
427 // Record regions with excess ArchVGPR register pressure over the physical
428 // register limit. Register pressure in these regions usually will result in
429 // spilling.
430 BitVector RegionsWithExcessArchVGPR;
431
432 const SIInstrInfo *TII;
433 const SIRegisterInfo *SRI;
434
435 /// Do a speculative rewrite and collect copy locations. The speculative
436 /// rewrite allows us to calculate the RP of the code after the rewrite, and
437 /// the copy locations allow us to calculate the total cost of copies required
438 /// for the rewrite. Stores the rewritten instructions in \p RewriteCands ,
439 /// the copy locations for uses (of the MFMA result) in \p CopyForUse and the
440 /// copy locations for defs (of the MFMA operands) in \p CopyForDef
441 bool
442 initHeuristics(std::vector<std::pair<MachineInstr *, unsigned>> &RewriteCands,
443 DenseMap<MachineBasicBlock *, std::set<Register>> &CopyForUse,
445
446 /// Calculate the rewrite cost and undo the state change (e.g. rewriting) done
447 /// in initHeuristics. Uses \p CopyForUse and \p CopyForDef to calculate copy
448 /// costs, and \p RewriteCands to undo rewriting.
449 int64_t getRewriteCost(
450 const std::vector<std::pair<MachineInstr *, unsigned>> &RewriteCands,
451 const DenseMap<MachineBasicBlock *, std::set<Register>> &CopyForUse,
452 const SmallPtrSetImpl<MachineInstr *> &CopyForDef);
453
454 /// Do the final rewrite on \p RewriteCands and insert any needed copies.
455 bool
456 rewrite(const std::vector<std::pair<MachineInstr *, unsigned>> &RewriteCands);
457
458 /// \returns true if this MI is a rewrite candidate.
459 bool isRewriteCandidate(MachineInstr *MI) const;
460
461 /// Finds all the reaching defs of \p UseMO and stores the SlotIndexes into \p
462 /// DefIdxs
463 void findReachingDefs(MachineOperand &UseMO, LiveIntervals *LIS,
465
466 /// Finds all the reaching uses of \p DefMI and stores the use operands in \p
467 /// ReachingUses
468 void findReachingUses(MachineInstr *DefMI, LiveIntervals *LIS,
470
471public:
472 bool initGCNSchedStage() override;
473
476};
477
479private:
480 // Save the initial occupancy before starting this stage.
481 unsigned InitialOccupancy;
482 // Save the temporary target occupancy before starting this stage.
483 unsigned TempTargetOccupancy;
484 // Track whether any region was scheduled by this stage.
485 bool IsAnyRegionScheduled;
486
487public:
488 bool initGCNSchedStage() override;
489
490 void finalizeGCNSchedStage() override;
491
492 bool initGCNRegion() override;
493
494 bool shouldRevertScheduling(unsigned WavesAfter) override;
495
498};
499
500// Retry function scheduling if we found resulting occupancy and it is
501// lower than used for other scheduling passes. This will give more freedom
502// to schedule low register pressure blocks.
504public:
505 bool initGCNSchedStage() override;
506
507 bool initGCNRegion() override;
508
509 bool shouldRevertScheduling(unsigned WavesAfter) override;
510
513};
514
515/// Attempts to reduce function spilling or, if there is no spilling, to
516/// increase function occupancy by one with respect to ArchVGPR usage by sinking
517/// rematerializable instructions to their use. When the stage
518/// estimates reducing spilling or increasing occupancy is possible, as few
519/// instructions as possible are rematerialized to reduce potential negative
520/// effects on function latency.
522private:
523 /// Useful information about a rematerializable instruction.
524 struct RematInstruction {
525 /// Single use of the rematerializable instruction's defined register,
526 /// located in a different block.
528 /// Rematerialized version of \p DefMI, set in
529 /// PreRARematStage::rematerialize. Used for reverting rematerializations.
530 MachineInstr *RematMI;
531 /// Set of regions in which the rematerializable instruction's defined
532 /// register is a live-in.
533 SmallDenseSet<unsigned, 4> LiveInRegions;
534
535 RematInstruction(MachineInstr *UseMI) : UseMI(UseMI) {}
536 };
537
538 /// Maps all MIs to their parent region. MI terminators are considered to be
539 /// outside the region they delimitate, and as such are not stored in the map.
541 /// Parent MBB to each region, in region order.
543 /// Collects instructions to rematerialize.
545 /// Collects regions whose live-ins or register pressure will change due to
546 /// rematerializations.
548 /// In case we need to rollback rematerializations, save lane masks for all
549 /// rematerialized registers in all regions in which they are live-ins.
551 /// After successful stage initialization, indicates which regions should be
552 /// rescheduled.
553 BitVector RescheduleRegions;
554 /// The target occupancy the stage is trying to achieve. Empty when the
555 /// objective is spilling reduction.
556 std::optional<unsigned> TargetOcc;
557 /// Achieved occupancy *only* through rematerializations (pre-rescheduling).
558 /// Smaller than or equal to the target occupancy.
559 unsigned AchievedOcc;
560
561 /// Returns whether remat can reduce spilling or increase function occupancy
562 /// by 1 through rematerialization. If it can do one, collects instructions in
563 /// PreRARematStage::Rematerializations and sets the target occupancy in
564 /// PreRARematStage::TargetOccupancy.
565 bool canIncreaseOccupancyOrReduceSpill();
566
567 /// Whether the MI is rematerializable
568 bool isReMaterializable(const MachineInstr &MI);
569
570 /// Rematerializes all instructions in PreRARematStage::Rematerializations
571 /// and stores the achieved occupancy after remat in
572 /// PreRARematStage::AchievedOcc.
573 void rematerialize();
574
575 /// If remat alone did not increase occupancy to the target one, rollbacks all
576 /// rematerializations and resets live-ins/RP in all regions impacted by the
577 /// stage to their pre-stage values.
578 void finalizeGCNSchedStage() override;
579
580public:
581 bool initGCNSchedStage() override;
582
583 bool initGCNRegion() override;
584
585 bool shouldRevertScheduling(unsigned WavesAfter) override;
586
589};
590
598
607
609private:
610 std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations;
611
612 bool HasIGLPInstrs = false;
613
614public:
615 void schedule() override;
616
617 void finalizeSchedule() override;
618
620 std::unique_ptr<MachineSchedStrategy> S,
621 bool RemoveKillFlags);
622};
623
624} // End namespace llvm
625
626#endif // LLVM_LIB_TARGET_AMDGPU_GCNSCHEDSTRATEGY_H
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
This file defines the DenseMap class.
This file defines the GCNRegPressure class, which tracks registry pressure by bookkeeping number of S...
IRTranslator LLVM IR MI
This file implements a map that provides insertion order iteration.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
bool shouldRevertScheduling(unsigned WavesAfter) override
ClusteredLowOccStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
GCNMaxILPSchedStrategy(const MachineSchedContext *C)
bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand, SchedBoundary *Zone) const override
Apply a set of heuristics to a new candidate.
bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand, SchedBoundary *Zone) const override
GCNMaxMemoryClauseSchedStrategy tries best to clause memory instructions as much as possible.
GCNMaxMemoryClauseSchedStrategy(const MachineSchedContext *C)
GCNMaxOccupancySchedStrategy(const MachineSchedContext *C, bool IsLegacyScheduler=false)
void finalizeSchedule() override
Allow targets to perform final scheduling actions at the level of the whole MachineFunction.
void schedule() override
Orders nodes according to selected style.
GCNPostScheduleDAGMILive(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S, bool RemoveKillFlags)
DenseMap< unsigned, LaneBitmask > LiveRegSet
GCNSchedStrategy & S
GCNRegPressure PressureBefore
bool isRegionWithExcessRP() const
bool mayCauseSpilling(unsigned WavesAfter)
ScheduleMetrics getScheduleMetrics(const std::vector< SUnit > &InputSchedule)
GCNScheduleDAGMILive & DAG
const GCNSchedStageID StageID
std::vector< MachineInstr * > Unsched
GCNRegPressure PressureAfter
MachineFunction & MF
virtual void finalizeGCNRegion()
SIMachineFunctionInfo & MFI
unsigned computeSUnitReadyCycle(const SUnit &SU, unsigned CurrCycle, DenseMap< unsigned, unsigned > &ReadyCycles, const TargetSchedModel &SM)
virtual ~GCNSchedStage()=default
virtual void finalizeGCNSchedStage()
virtual bool initGCNSchedStage()
virtual bool shouldRevertScheduling(unsigned WavesAfter)
std::vector< std::unique_ptr< ScheduleDAGMutation > > SavedMutations
GCNSchedStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
void modifyRegionSchedule(unsigned RegionIdx, MachineBasicBlock *MBB, ArrayRef< MachineInstr * > MIOrder)
Sets the schedule of region RegionIdx in block MBB to MIOrder.
MachineBasicBlock * CurrentMBB
const GCNSubtarget & ST
This is a minimal scheduler strategy.
const unsigned HighRPSGPRBias
GCNDownwardRPTracker DownwardTracker
GCNSchedStrategy(const MachineSchedContext *C)
SmallVector< GCNSchedStageID, 4 > SchedStages
std::vector< unsigned > MaxPressure
SUnit * pickNodeBidirectional(bool &IsTopNode, bool &PickedPending)
GCNSchedStageID getCurrentStage()
bool tryPendingCandidate(SchedCandidate &Cand, SchedCandidate &TryCand, SchedBoundary *Zone) const
Evaluates instructions in the pending queue using a subset of scheduling heuristics.
SmallVectorImpl< GCNSchedStageID >::iterator CurrentStage
void schedNode(SUnit *SU, bool IsTopNode) override
Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an instruction and updated scheduled/rem...
GCNDownwardRPTracker * getDownwardTracker()
std::vector< unsigned > Pressure
void initialize(ScheduleDAGMI *DAG) override
Initialize the strategy after building the DAG for a new region.
GCNUpwardRPTracker UpwardTracker
void printCandidateDecision(const SchedCandidate &Current, const SchedCandidate &Preferred)
const unsigned HighRPVGPRBias
void pickNodeFromQueue(SchedBoundary &Zone, const CandPolicy &ZonePolicy, const RegPressureTracker &RPTracker, SchedCandidate &Cand, bool &IsPending, bool IsBottomUp)
void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop, const RegPressureTracker &RPTracker, const SIRegisterInfo *SRI, unsigned SGPRPressure, unsigned VGPRPressure, bool IsBottomUp)
void setTargetOccupancy(unsigned Occ)
SUnit * pickNode(bool &IsTopNode) override
Pick the next node to schedule, or return NULL.
GCNUpwardRPTracker * getUpwardTracker()
GCNSchedStageID getNextStage() const
void finalizeSchedule() override
Allow targets to perform final scheduling actions at the level of the whole MachineFunction.
void schedule() override
Orders nodes according to selected style.
GCNScheduleDAGMILive(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S)
ScheduleDAGMILive * DAG
GenericScheduler(const MachineSchedContext *C)
bool shouldRevertScheduling(unsigned WavesAfter) override
ILPInitialScheduleStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
MachineInstrBundleIterator< MachineInstr > iterator
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
bool shouldRevertScheduling(unsigned WavesAfter) override
MemoryClauseInitialScheduleStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
bool shouldRevertScheduling(unsigned WavesAfter) override
OccInitialScheduleStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
PreRARematStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
bool shouldRevertScheduling(unsigned WavesAfter) override
bool initGCNSchedStage() override
Track the current register pressure at some position in the instruction stream, and remember the high...
GCNRPTracker::LiveRegSet & getLiveRegsForRegionIdx(unsigned RegionIdx)
RegionPressureMap(GCNScheduleDAGMILive *GCNDAG, bool LiveOut)
RewriteMFMAFormStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
Scheduling unit. This is a node in the scheduling DAG.
Each Scheduling boundary is associated with ready queues.
bool RemoveKillFlags
True if the DAG builder should remove kill flags (in preparation for rescheduling).
ScheduleDAGMILive(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S)
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr< MachineSchedStrategy > S, bool RemoveKillFlags)
unsigned getBubbles() const
ScheduleMetrics(unsigned L, unsigned BC)
unsigned getLength() const
static const unsigned ScaleFactor
unsigned getMetric() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition DenseSet.h:291
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
typename SuperClass::iterator iterator
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Provide an instruction scheduling machine model to CodeGen passes.
UnclusteredHighRPStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
bool shouldRevertScheduling(unsigned WavesAfter) override
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
std::pair< MachineBasicBlock::iterator, MachineBasicBlock::iterator > RegionBoundaries
A region's boundaries i.e.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Policy for scheduling the next instruction in the candidate's zone.
Store the state used by GenericScheduler heuristics, required for the lifetime of one invocation of p...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...