MachineScheduler.h (LLVM 17.0.0git)
//===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an interface for customizing the standard MachineScheduler
// pass. Note that the entire pass may be replaced as follows:
//
// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
//   PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
//   ...}
//
// The MachineScheduler pass is only responsible for choosing the regions to be
// scheduled. Targets can override the DAG builder and scheduler without
// replacing the pass as follows:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   return new CustomMachineScheduler(C);
// }
//
// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
// scheduling while updating the instruction stream, register pressure, and live
// intervals. Most targets don't need to override the DAG builder and list
// scheduler, but subtargets that require custom scheduling heuristics may
// plug in an alternate MachineSchedStrategy. The strategy is responsible for
// selecting the highest priority node from the list:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   return new ScheduleDAGMILive(C, CustomStrategy(C));
// }
//
// The DAG builder can also be customized by adding DAG mutations
// that will run after DAG building and before list scheduling. DAG mutations
// can adjust dependencies based on target-specific knowledge or add weak edges
// to aid heuristics:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   ScheduleDAGMI *DAG = createGenericSchedLive(C);
//   DAG->addMutation(new CustomDAGMutation(...));
//   return DAG;
// }
//
// A target that supports alternative schedulers can use the
// MachineSchedRegistry to allow command line selection. This can be done by
// implementing the following boilerplate:
//
// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
//   return new CustomMachineScheduler(C);
// }
// static MachineSchedRegistry
// SchedCustomRegistry("custom", "Run my target's custom scheduler",
//                     createCustomMachineSched);
//
//
// Finally, subtargets that don't need to implement custom heuristics but would
// like to configure the GenericScheduler's policy for a given scheduler region,
// including scheduling direction and register pressure tracking policy, can do
// this:
//
// void <SubTarget>Subtarget::
// overrideSchedPolicy(MachineSchedPolicy &Policy,
//                     unsigned NumRegionInstrs) const {
//   Policy.<Flag> = true;
// }
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
#define LLVM_CODEGEN_MACHINESCHEDULER_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <memory>
#include <string>
#include <vector>

namespace llvm {

extern cl::opt<bool> ForceTopDown;
extern cl::opt<bool> ForceBottomUp;
extern cl::opt<bool> VerifyScheduling;
#ifndef NDEBUG
extern cl::opt<bool> ViewMISchedDAGs;
extern cl::opt<bool> PrintDAGs;
#else
extern const bool ViewMISchedDAGs;
extern const bool PrintDAGs;
#endif

class AAResults;
class LiveIntervals;
class MachineDominatorTree;
class MachineFunction;
class MachineInstr;
class MachineLoopInfo;
class RegisterClassInfo;
class SchedDFSResult;
class ScheduleHazardRecognizer;
class TargetInstrInfo;
class TargetPassConfig;
class TargetRegisterInfo;

/// MachineSchedContext provides enough context from the MachineScheduler pass
/// for the target to instantiate a scheduler.
struct MachineSchedContext {
  MachineFunction *MF = nullptr;
  const MachineLoopInfo *MLI = nullptr;
  const MachineDominatorTree *MDT = nullptr;
  const TargetPassConfig *PassConfig = nullptr;
  AAResults *AA = nullptr;
  LiveIntervals *LIS = nullptr;

  RegisterClassInfo *RegClassInfo;

  MachineSchedContext();
  virtual ~MachineSchedContext();
};

/// MachineSchedRegistry provides a selection of available machine instruction
/// schedulers.
class MachineSchedRegistry
    : public MachinePassRegistryNode<
          ScheduleDAGInstrs *(*)(MachineSchedContext *)> {
public:
  using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *);

  // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
  using FunctionPassCtor = ScheduleDAGCtor;

  static MachinePassRegistry<ScheduleDAGCtor> Registry;

  MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
      : MachinePassRegistryNode(N, D, C) {
    Registry.Add(this);
  }

  ~MachineSchedRegistry() { Registry.Remove(this); }

  // Accessors.
  //
  MachineSchedRegistry *getNext() const {
    return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
  }

  static MachineSchedRegistry *getList() {
    return (MachineSchedRegistry *)Registry.getList();
  }

  static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
    Registry.setListener(L);
  }
};

class ScheduleDAGMI;

/// Define a generic scheduling policy for targets that don't provide their own
/// MachineSchedStrategy. This can be overridden for each scheduling region
/// before building the DAG.
struct MachineSchedPolicy {
  // Allow the scheduler to disable register pressure tracking.
  bool ShouldTrackPressure = false;
  /// Track LaneMasks to allow reordering of independent subregister writes
  /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
  bool ShouldTrackLaneMasks = false;

  // Allow the scheduler to force top-down or bottom-up scheduling. If neither
  // is true, the scheduler runs in both directions and converges.
  bool OnlyTopDown = false;
  bool OnlyBottomUp = false;

  // Disable heuristic that tries to fetch nodes from long dependency chains
  // first.
  bool DisableLatencyHeuristic = false;

  // Compute DFSResult for use in scheduling heuristics.
  bool ComputeDFSResult = false;

  MachineSchedPolicy() = default;
};

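// Example (illustrative sketch, not part of the upstream header): a subtarget
// can use the NumRegionInstrs argument of overrideSchedPolicy to apply a
// policy flag only to small regions. The subtarget name is hypothetical.
//
//   void MySubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
//                                         unsigned NumRegionInstrs) const {
//     if (NumRegionInstrs <= 16)
//       Policy.OnlyBottomUp = true;
//   }
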
/// MachineSchedStrategy - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Initialization sequence:
///   initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
class MachineSchedStrategy {
  virtual void anchor();

public:
  virtual ~MachineSchedStrategy() = default;

  /// Optionally override the per-region scheduling policy.
  virtual void initPolicy(MachineBasicBlock::iterator Begin,
                          MachineBasicBlock::iterator End,
                          unsigned NumRegionInstrs) {}

  virtual void dumpPolicy() const {}

  /// Check if pressure tracking is needed before building the DAG and
  /// initializing this strategy. Called after initPolicy.
  virtual bool shouldTrackPressure() const { return true; }

  /// Returns true if lanemasks should be tracked. LaneMask tracking is
  /// necessary to reorder independent subregister defs for the same vreg.
  /// This has to be enabled in combination with shouldTrackPressure().
  virtual bool shouldTrackLaneMasks() const { return false; }

  // If this method returns true, handling of the scheduling regions
  // themselves (in case of a scheduling boundary in MBB) will be done
  // beginning with the topmost region of MBB.
  virtual bool doMBBSchedRegionsTopDown() const { return false; }

  /// Initialize the strategy after building the DAG for a new region.
  virtual void initialize(ScheduleDAGMI *DAG) = 0;

  /// Tell the strategy that MBB is about to be processed.
  virtual void enterMBB(MachineBasicBlock *MBB) {};

  /// Tell the strategy that the current MBB is done.
  virtual void leaveMBB() {};

  /// Notify this strategy that all roots have been released (including those
  /// that depend on EntrySU or ExitSU).
  virtual void registerRoots() {}

  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
  /// schedule the node at the top of the unscheduled region. Otherwise it will
  /// be scheduled at the bottom.
  virtual SUnit *pickNode(bool &IsTopNode) = 0;

  /// Scheduler callback to notify that a new subtree is scheduled.
  virtual void scheduleTree(unsigned SubtreeID) {}

  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
  /// instruction and updated scheduled/remaining flags in the DAG nodes.
  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;

  /// When all predecessor dependencies have been resolved, free this node for
  /// top-down scheduling.
  virtual void releaseTopNode(SUnit *SU) = 0;

  /// When all successor dependencies have been resolved, free this node for
  /// bottom-up scheduling.
  virtual void releaseBottomNode(SUnit *SU) = 0;
};

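// Example (illustrative sketch, not part of the upstream header): a minimal
// bottom-up strategy that issues nodes in the order they become ready. The
// class name is hypothetical; only the MachineSchedStrategy overrides are
// real API.
//
//   class InOrderBottomUpStrategy : public MachineSchedStrategy {
//     std::vector<SUnit *> ReadyQ;
//
//   public:
//     void initialize(ScheduleDAGMI *DAG) override { ReadyQ.clear(); }
//
//     SUnit *pickNode(bool &IsTopNode) override {
//       if (ReadyQ.empty())
//         return nullptr;            // region is fully scheduled
//       IsTopNode = false;           // always place at the bottom of the zone
//       SUnit *SU = ReadyQ.back();
//       ReadyQ.pop_back();
//       return SU;
//     }
//
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//     void releaseTopNode(SUnit *SU) override {}   // unused: bottom-up only
//     void releaseBottomNode(SUnit *SU) override { ReadyQ.push_back(SU); }
//   };
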
/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
/// schedules machine instructions according to the given MachineSchedStrategy
/// without much extra book-keeping. This is the common functionality between
/// PreRA and PostRA MachineScheduler.
class ScheduleDAGMI : public ScheduleDAGInstrs {
protected:
  AAResults *AA;
  LiveIntervals *LIS;
  std::unique_ptr<MachineSchedStrategy> SchedImpl;

  /// Ordered list of DAG postprocessing steps.
  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

  /// The top of the unscheduled zone.
  MachineBasicBlock::iterator CurrentTop;

  /// The bottom of the unscheduled zone.
  MachineBasicBlock::iterator CurrentBottom;

  /// Record the next node in a scheduled cluster.
  const SUnit *NextClusterPred = nullptr;
  const SUnit *NextClusterSucc = nullptr;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  /// The number of instructions scheduled so far. Used to cut off the
  /// scheduler at the point determined by misched-cutoff.
  unsigned NumInstrsScheduled = 0;
#endif

public:
  ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
                bool RemoveKillFlags)
      : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
        LIS(C->LIS), SchedImpl(std::move(S)) {}

  // Provide a vtable anchor
  ~ScheduleDAGMI() override;

  /// If this method returns true, handling of the scheduling regions
  /// themselves (in case of a scheduling boundary in MBB) will be done
  /// beginning with the topmost region of MBB.
  bool doMBBSchedRegionsTopDown() const override {
    return SchedImpl->doMBBSchedRegionsTopDown();
  }

  // Returns LiveIntervals instance for use in DAG mutators and such.
  LiveIntervals *getLIS() const { return LIS; }

  /// Return true if this DAG supports VReg liveness and RegPressure.
  virtual bool hasVRegLiveness() const { return false; }

  /// Add a postprocessing step to the DAG builder.
  /// Mutations are applied in the order that they are added after normal DAG
  /// building and before MachineSchedStrategy initialization.
  ///
  /// ScheduleDAGMI takes ownership of the Mutation object.
  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
    if (Mutation)
      Mutations.push_back(std::move(Mutation));
  }

  MachineBasicBlock::iterator top() const { return CurrentTop; }
  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  void startBlock(MachineBasicBlock *bb) override;
  void finishBlock() override;

  /// Change the position of an instruction within the basic block and update
  /// live ranges and region boundary iterators.
  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);

  const SUnit *getNextClusterPred() const { return NextClusterPred; }

  const SUnit *getNextClusterSucc() const { return NextClusterSucc; }

  void viewGraph(const Twine &Name, const Twine &Title) override;
  void viewGraph() override;

protected:
  // Top-Level entry points for the schedule() driver...

  /// Apply each ScheduleDAGMutation step in order. This allows different
  /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
  void postProcessDAG();

  /// Release ExitSU predecessors and setup scheduler queues.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Update scheduler DAG and queues after scheduling an instruction.
  void updateQueues(SUnit *SU, bool IsTopNode);

  /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
  void placeDebugValues();

  /// Dump the scheduled sequence.
  void dumpSchedule() const;
  /// Print execution trace of the schedule top-down or bottom-up.
  void dumpScheduleTraceTopDown() const;
  void dumpScheduleTraceBottomUp() const;

  // Lesser helpers...
  bool checkSchedLimit();

  void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                             SmallVectorImpl<SUnit*> &BotRoots);

  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSuccessors(SUnit *SU);
  void releasePred(SUnit *SU, SDep *PredEdge);
  void releasePredecessors(SUnit *SU);
};

/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
/// machine instructions while updating LiveIntervals and tracking regpressure.
class ScheduleDAGMILive : public ScheduleDAGMI {
protected:
  RegisterClassInfo *RegClassInfo;

  /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
  /// will be empty.
  SchedDFSResult *DFSResult = nullptr;
  BitVector ScheduledTrees;

  MachineBasicBlock::iterator LiveRegionEnd;

  /// Maps vregs to the SUnits of their uses in the current scheduling region.
  VReg2SUnitMultiMap VRegUses;

  // Map each SU to its summary of pressure changes. This array is updated for
  // liveness during bottom-up scheduling. Top-down scheduling may proceed but
  // has no effect on the pressure diffs.
  PressureDiffs SUPressureDiffs;

  /// Register pressure in this region computed by initRegPressure.
  bool ShouldTrackPressure = false;
  bool ShouldTrackLaneMasks = false;
  IntervalPressure RegPressure;
  RegPressureTracker RPTracker;

  /// List of pressure sets that exceed the target's pressure limit before
  /// scheduling, listed in increasing set ID order. Each pressure set is paired
  /// with its max pressure in the currently scheduled regions.
  std::vector<PressureChange> RegionCriticalPSets;

  /// The top of the unscheduled zone.
  IntervalPressure TopPressure;
  RegPressureTracker TopRPTracker;

  /// The bottom of the unscheduled zone.
  IntervalPressure BotPressure;
  RegPressureTracker BotRPTracker;

public:
  ScheduleDAGMILive(MachineSchedContext *C,
                    std::unique_ptr<MachineSchedStrategy> S)
      : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
        RegClassInfo(C->RegClassInfo), RPTracker(RegPressure),
        TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}

  ~ScheduleDAGMILive() override;

  /// Return true if this DAG supports VReg liveness and RegPressure.
  bool hasVRegLiveness() const override { return true; }

  /// Return true if register pressure tracking is enabled.
  bool isTrackingPressure() const { return ShouldTrackPressure; }

  /// Get current register pressure for the top scheduled instructions.
  const IntervalPressure &getTopPressure() const { return TopPressure; }
  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }

  /// Get current register pressure for the bottom scheduled instructions.
  const IntervalPressure &getBotPressure() const { return BotPressure; }
  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }

  /// Get register pressure for the entire scheduling region before scheduling.
  const IntervalPressure &getRegPressure() const { return RegPressure; }

  const std::vector<PressureChange> &getRegionCriticalPSets() const {
    return RegionCriticalPSets;
  }

  PressureDiff &getPressureDiff(const SUnit *SU) {
    return SUPressureDiffs[SU->NodeNum];
  }
  const PressureDiff &getPressureDiff(const SUnit *SU) const {
    return SUPressureDiffs[SU->NodeNum];
  }

  /// Compute a DFSResult after DAG building is complete, and before any
  /// queue comparisons.
  void computeDFSResult();

  /// Return a non-null DFS result if the scheduling strategy initialized it.
  const SchedDFSResult *getDFSResult() const { return DFSResult; }

  BitVector &getScheduledTrees() { return ScheduledTrees; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  /// Compute the cyclic critical path through the DAG.
  unsigned computeCyclicCriticalPath();

  void dump() const override;

protected:
  // Top-Level entry points for the schedule() driver...

  /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
  /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
  /// region, TopTracker and BottomTracker will be initialized to the top and
  /// bottom of the DAG region without covering any unscheduled instruction.
  void buildDAGWithRegPressure();

  /// Release ExitSU predecessors and setup scheduler queues. Re-position
  /// the Top RP tracker in case the region beginning has changed.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Move an instruction and update register pressure.
  void scheduleMI(SUnit *SU, bool IsTopNode);

  // Lesser helpers...

  void initRegPressure();

  void updatePressureDiffs(ArrayRef<RegisterMaskPair> LiveUses);

  void updateScheduledPressure(const SUnit *SU,
                               const std::vector<unsigned> &NewMaxPressure);

  void collectVRegUses(SUnit &SU);
};

//===----------------------------------------------------------------------===//
///
/// Helpers for implementing custom MachineSchedStrategy classes. These take
/// care of the book-keeping associated with list scheduling heuristics.
///
//===----------------------------------------------------------------------===//

/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
///
/// This is a convenience class that may be used by implementations of
/// MachineSchedStrategy.
class ReadyQueue {
  unsigned ID;
  std::string Name;
  std::vector<SUnit*> Queue;

public:
  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}

  unsigned getID() const { return ID; }

  StringRef getName() const { return Name; }

  // SU is in this queue if its NodeQueueId is a superset of this ID.
  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }

  bool empty() const { return Queue.empty(); }

  void clear() { Queue.clear(); }

  unsigned size() const { return Queue.size(); }

  using iterator = std::vector<SUnit*>::iterator;

  iterator begin() { return Queue.begin(); }

  iterator end() { return Queue.end(); }

  ArrayRef<SUnit*> elements() { return Queue; }

  iterator find(SUnit *SU) { return llvm::find(Queue, SU); }

  void push(SUnit *SU) {
    Queue.push_back(SU);
    SU->NodeQueueId |= ID;
  }

  iterator remove(iterator I) {
    (*I)->NodeQueueId &= ~ID;
    *I = Queue.back();
    unsigned idx = I - Queue.begin();
    Queue.pop_back();
    return Queue.begin() + idx;
  }

  void dump() const;
};

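// Example (illustrative sketch, not part of the upstream header): a strategy
// can keep released nodes in a ReadyQueue and pull the chosen one out in
// pickNode(). "Available" is a hypothetical member; the begin() pick stands
// in for any real heuristic.
//
//   ReadyQueue Available(1, "MyQ");
//
//   void releaseBottomNode(SUnit *SU) override { Available.push(SU); }
//
//   SUnit *pickNode(bool &IsTopNode) override {
//     if (Available.empty())
//       return nullptr;
//     IsTopNode = false;
//     ReadyQueue::iterator Best = Available.begin();
//     SUnit *SU = *Best;
//     Available.remove(Best);       // also clears the queue bit in NodeQueueId
//     return SU;
//   }
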
/// Summarize the unscheduled region.
struct SchedRemainder {
  // Critical path through the DAG in expected latency.
  unsigned CriticalPath;
  unsigned CyclicCritPath;

  // Scaled count of micro-ops left to schedule.
  unsigned RemIssueCount;

  bool IsAcyclicLatencyLimited;

  // Unscheduled resources
  SmallVector<unsigned, 16> RemainingCounts;

  SchedRemainder() { reset(); }

  void reset() {
    CriticalPath = 0;
    CyclicCritPath = 0;
    RemIssueCount = 0;
    IsAcyclicLatencyLimited = false;
    RemainingCounts.clear();
  }

  void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
};

/// Each scheduling boundary is associated with ready queues. It tracks the
/// current cycle in the direction of movement, and maintains the state
/// of "hazards" and other interlocks at the current cycle.
class SchedBoundary {
public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ScheduleDAGMI *DAG = nullptr;
  const TargetSchedModel *SchedModel = nullptr;
  SchedRemainder *Rem = nullptr;

  ReadyQueue Available;
  ReadyQueue Pending;

  ScheduleHazardRecognizer *HazardRec = nullptr;

private:
  /// True if the pending Q should be checked/updated before scheduling another
  /// instruction.
  bool CheckPending;

  /// Number of cycles it takes to issue the instructions scheduled in this
  /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
  /// See getStalls().
  unsigned CurrCycle;

  /// Micro-ops issued in the current cycle
  unsigned CurrMOps;

  /// MinReadyCycle - Cycle of the soonest available instruction.
  unsigned MinReadyCycle;

  // The expected latency of the critical path in this scheduled zone.
  unsigned ExpectedLatency;

  // The latency of dependence chains leading into this zone.
  // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
  // For each cycle scheduled: DLat -= 1.
  unsigned DependentLatency;

  /// Count the scheduled (issued) micro-ops that can be retired by
  /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
  unsigned RetiredMOps;

  // Count scheduled resources that have been executed. Resources are
  // considered executed if they become ready in the time that it takes to
  // saturate any resource including the one in question. Counts are scaled
  // for direct comparison with other resources. Counts can be compared with
  // MOps * getMicroOpFactor and Latency * getLatencyFactor.
  SmallVector<unsigned, 16> ExecutedResCounts;

  /// Cache the max count for a single resource.
  unsigned MaxExecutedResCount;

  // Cache the critical resource's ID in this scheduled zone.
  unsigned ZoneCritResIdx;

  // Is the scheduled region resource limited vs. latency limited.
  bool IsResourceLimited;

  // Record the highest cycle at which each resource has been reserved by a
  // scheduled instruction.
  SmallVector<unsigned, 16> ReservedCycles;

  /// For each PIdx, stores first index into ReservedCycles that corresponds to
  /// it.
  ///
  /// For example, consider the following 3 resources (ResourceCount =
  /// 3):
  ///
  ///   +------------+--------+
  ///   |ResourceName|NumUnits|
  ///   +------------+--------+
  ///   |      X     |    2   |
  ///   +------------+--------+
  ///   |      Y     |    3   |
  ///   +------------+--------+
  ///   |      Z     |    1   |
  ///   +------------+--------+
  ///
  /// In this case, the total number of resource instances is 6. The
  /// vector \ref ReservedCycles will have a slot for each instance. The
  /// vector \ref ReservedCyclesIndex will track at what index the first
  /// instance of the resource is found in the vector of \ref
  /// ReservedCycles:
  ///
  ///   Indexes of instances in ReservedCycles
  ///   0  1  2  3  4  5
  ///   ReservedCyclesIndex[0] = 0; [X0, X1,
  ///   ReservedCyclesIndex[1] = 2;          Y0, Y1, Y2
  ///   ReservedCyclesIndex[2] = 5;                      Z
  SmallVector<unsigned, 16> ReservedCyclesIndex;

  // For each PIdx, stores the resource group IDs of its subunits
  SmallVector<APInt, 16> ResourceGroupSubUnitMasks;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // Remember the greatest possible stall as an upper bound on the number of
  // times we should retry the pending queue because of a hazard.
  unsigned MaxObservedStall;
#endif

public:
  /// Pending queues extend the ready queues with the same ID and the
  /// PendingFlag set.
  SchedBoundary(unsigned ID, const Twine &Name):
    Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
    reset();
  }
  SchedBoundary &operator=(const SchedBoundary &other) = delete;
  SchedBoundary(const SchedBoundary &other) = delete;
  ~SchedBoundary();

  void reset();

  void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
            SchedRemainder *rem);

  bool isTop() const {
    return Available.getID() == TopQID;
  }

  /// Number of cycles to issue the instructions scheduled in this zone.
  unsigned getCurrCycle() const { return CurrCycle; }

  /// Micro-ops issued in the current cycle
  unsigned getCurrMOps() const { return CurrMOps; }

  // The latency of dependence chains leading into this zone.
  unsigned getDependentLatency() const { return DependentLatency; }

  /// Get the number of latency cycles "covered" by the scheduled
  /// instructions. This is the larger of the critical path within the zone
  /// and the number of cycles required to issue the instructions.
  unsigned getScheduledLatency() const {
    return std::max(ExpectedLatency, CurrCycle);
  }

  unsigned getUnscheduledLatency(SUnit *SU) const {
    return isTop() ? SU->getHeight() : SU->getDepth();
  }

  unsigned getResourceCount(unsigned ResIdx) const {
    return ExecutedResCounts[ResIdx];
  }

  /// Get the scaled count of scheduled micro-ops and resources, including
  /// executed resources.
  unsigned getCriticalCount() const {
    if (!ZoneCritResIdx)
      return RetiredMOps * SchedModel->getMicroOpFactor();
    return getResourceCount(ZoneCritResIdx);
  }

  /// Get a scaled count for the minimum execution time of the scheduled
  /// micro-ops that are ready to execute by getExecutedCount. Notice the
  /// feedback loop.
  unsigned getExecutedCount() const {
    return std::max(CurrCycle * SchedModel->getLatencyFactor(),
                    MaxExecutedResCount);
  }

  unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }

  // Is the scheduled region resource limited vs. latency limited.
  bool isResourceLimited() const { return IsResourceLimited; }

  /// Get the difference between the given SUnit's ready time and the current
  /// cycle.
  unsigned getLatencyStallCycles(SUnit *SU);

  unsigned getNextResourceCycleByInstance(unsigned InstanceIndex,
                                          unsigned Cycles);

  std::pair<unsigned, unsigned> getNextResourceCycle(const MCSchedClassDesc *SC,
                                                     unsigned PIdx,
                                                     unsigned Cycles);

  bool isUnbufferedGroup(unsigned PIdx) const {
    return SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin &&
           !SchedModel->getProcResource(PIdx)->BufferSize;
  }

  bool checkHazard(SUnit *SU);

  unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);

  unsigned getOtherResourceCount(unsigned &OtherCritIdx);

  /// Release SU to make it ready. If it's not in hazard, remove it from
  /// pending queue (if already in) and push into available queue.
  /// Otherwise, push the SU into pending queue.
  ///
  /// @param SU The unit to be released.
  /// @param ReadyCycle Until which cycle the unit is ready.
  /// @param InPQueue Whether SU is already in pending queue.
  /// @param Idx Position offset in pending queue (if in it).
  void releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
                   unsigned Idx = 0);

  void bumpCycle(unsigned NextCycle);

  void incExecutedResources(unsigned PIdx, unsigned Count);

  unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx,
                         unsigned Cycles, unsigned ReadyCycle);

  void bumpNode(SUnit *SU);

  void releasePending();

  void removeReady(SUnit *SU);

  /// Call this before applying any other heuristics to the Available queue.
  /// Updates the Available/Pending Q's if necessary and returns the single
  /// available instruction, or NULL if there are multiple candidates.
  SUnit *pickOnlyChoice();

  /// Dump the state of the information that tracks resource usage.
  void dumpReservedCycles() const;
  void dumpScheduledState() const;
};

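// Example (illustrative sketch, not part of the upstream header): strategies
// typically consult the zone before committing a pick. A hypothetical
// top-down check might prefer nodes that neither stall nor trigger a hazard,
// then advance the zone past the chosen node:
//
//   SchedBoundary Top(SchedBoundary::TopQID, "TopQ");
//   ...
//   if (Top.getLatencyStallCycles(SU) == 0 && !Top.checkHazard(SU)) {
//     // SU can issue in cycle Top.getCurrCycle(); commit it to the zone.
//     Top.bumpNode(SU);
//   }
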
/// Base class for GenericScheduler. This class maintains information about
/// scheduling candidates based on TargetSchedModel, making it easy to implement
/// heuristics for either preRA or postRA scheduling.
class GenericSchedulerBase : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason : uint8_t {
    NoCand, Only1, PhysReg, RegExcess, RegCritical, Stall, Cluster, Weak,
    RegMax, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency = false;
    unsigned ReduceResIdx = 0;
    unsigned DemandResIdx = 0;

    CandPolicy() = default;

    bool operator==(const CandPolicy &RHS) const {
      return ReduceLatency == RHS.ReduceLatency &&
             ReduceResIdx == RHS.ReduceResIdx &&
             DemandResIdx == RHS.DemandResIdx;
    }
    bool operator!=(const CandPolicy &RHS) const {
      return !(*this == RHS);
    }
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources = 0;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources = 0;

    SchedResourceDelta() = default;

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
          && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by GenericScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Whether this candidate should be scheduled at top/bottom.
    bool AtTop;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate() { reset(CandPolicy()); }
    SchedCandidate(const CandPolicy &Policy) { reset(Policy); }

    void reset(const CandPolicy &NewPolicy) {
      Policy = NewPolicy;
      SU = nullptr;
      Reason = NoCand;
      AtTop = false;
      RPDelta = RegPressureDelta();
      ResDelta = SchedResourceDelta();
    }

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      AtTop = Best.AtTop;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

protected:
  const MachineSchedContext *Context;
  const TargetSchedModel *SchedModel = nullptr;
  const TargetRegisterInfo *TRI = nullptr;

  SchedRemainder Rem;

  GenericSchedulerBase(const MachineSchedContext *C) : Context(C) {}

  void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
                 SchedBoundary *OtherZone);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand);
#endif

private:
  bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
                           bool ComputeRemLatency, unsigned &RemLatency) const;
};

// Utility functions used by heuristics in tryCandidate().
bool tryLess(int TryVal, int CandVal,
             GenericSchedulerBase::SchedCandidate &TryCand,
             GenericSchedulerBase::SchedCandidate &Cand,
             GenericSchedulerBase::CandReason Reason);
bool tryGreater(int TryVal, int CandVal,
                GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                GenericSchedulerBase::CandReason Reason);
bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                SchedBoundary &Zone);
bool tryPressure(const PressureChange &TryP,
                 const PressureChange &CandP,
                 GenericSchedulerBase::SchedCandidate &TryCand,
                 GenericSchedulerBase::SchedCandidate &Cand,
                 GenericSchedulerBase::CandReason Reason,
                 const TargetRegisterInfo *TRI,
                 const MachineFunction &MF);
unsigned getWeakLeft(const SUnit *SU, bool isTop);
int biasPhysReg(const SUnit *SU, bool isTop);

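// Example (illustrative sketch, not part of the upstream header): inside a
// tryCandidate() implementation these helpers set TryCand.Reason and report
// whether the comparison decided the order. A hypothetical tie-breaker on
// remaining weak edges could read:
//
//   if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
//               getWeakLeft(Cand.SU, Cand.AtTop),
//               TryCand, Cand, GenericSchedulerBase::Weak))
//     return TryCand.Reason != GenericSchedulerBase::NoCand;
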
/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class GenericScheduler : public GenericSchedulerBase {
public:
  GenericScheduler(const MachineSchedContext *C):
    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
    Bot(SchedBoundary::BotQID, "BotQ") {}

  void initPolicy(MachineBasicBlock::iterator Begin,
                  MachineBasicBlock::iterator End,
                  unsigned NumRegionInstrs) override;

  void dumpPolicy() const override;

  bool shouldTrackPressure() const override {
    return RegionPolicy.ShouldTrackPressure;
  }

  bool shouldTrackLaneMasks() const override {
    return RegionPolicy.ShouldTrackLaneMasks;
  }

  void initialize(ScheduleDAGMI *dag) override;

  SUnit *pickNode(bool &IsTopNode) override;

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Top.releaseNode(SU, SU->TopReadyCycle, false);
    TopCand.SU = nullptr;
  }

  void releaseBottomNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Bot.releaseNode(SU, SU->BotReadyCycle, false);
    BotCand.SU = nullptr;
  }

  void registerRoots() override;

protected:
  ScheduleDAGMILive *DAG = nullptr;

  MachineSchedPolicy RegionPolicy;

  // State of the top and bottom scheduled instruction boundaries.
  SchedBoundary Top;
  SchedBoundary Bot;

  /// Candidate last picked from Top boundary.
  SchedCandidate TopCand;
  /// Candidate last picked from Bot boundary.
  SchedCandidate BotCand;

  void checkAcyclicLatency();

  void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
                     const RegPressureTracker &RPTracker,
                     RegPressureTracker &TempTracker);

  virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
                            SchedBoundary *Zone) const;

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const CandPolicy &ZonePolicy,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

  void reschedulePhysReg(SUnit *SU, bool isTop);
};

/// PostGenericScheduler - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Callbacks from ScheduleDAGMI:
///   initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
class PostGenericScheduler : public GenericSchedulerBase {
protected:
  ScheduleDAGMI *DAG = nullptr;
  SchedBoundary Top;
  SmallVector<SUnit*, 8> BotRoots;

public:
  PostGenericScheduler(const MachineSchedContext *C):
    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}

  ~PostGenericScheduler() override = default;

  void initPolicy(MachineBasicBlock::iterator Begin,
                  MachineBasicBlock::iterator End,
                  unsigned NumRegionInstrs) override {
    /* no configurable policy */
  }

  /// PostRA scheduling does not track pressure.
  bool shouldTrackPressure() const override { return false; }

  void initialize(ScheduleDAGMI *Dag) override;

  void registerRoots() override;

  SUnit *pickNode(bool &IsTopNode) override;

  void scheduleTree(unsigned SubtreeID) override {
    llvm_unreachable("PostRA scheduler does not support subtree analysis.");
  }

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;
    Top.releaseNode(SU, SU->TopReadyCycle, false);
  }

  // Only called for roots.
  void releaseBottomNode(SUnit *SU) override {
    BotRoots.push_back(SU);
  }

protected:
  virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);

  void pickNodeFromQueue(SchedCandidate &Cand);
};

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
/// Adds default DAG mutations.
ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI);

std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI);

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI);

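// Example (illustrative sketch, not part of the upstream header): a target's
// createMachineScheduler() hook can combine the generic live scheduler with
// the mutations declared above. The pass-config class name is hypothetical;
// TII and TRI are public members inherited from ScheduleDAG.
//
//   ScheduleDAGInstrs *MyTargetPassConfig::
//   createMachineScheduler(MachineSchedContext *C) {
//     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//     DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//     return DAG;
//   }
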
} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINESCHEDULER_H