MachineScheduler.h (LLVM 10.0.0svn)
//===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an interface for customizing the standard MachineScheduler
// pass. Note that the entire pass may be replaced as follows:
//
// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
//   PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
//   ...}
//
// The MachineScheduler pass is only responsible for choosing the regions to be
// scheduled. Targets can override the DAG builder and scheduler without
// replacing the pass as follows:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   return new CustomMachineScheduler(C);
// }
//
// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
// scheduling while updating the instruction stream, register pressure, and live
// intervals. Most targets don't need to override the DAG builder and list
// scheduler, but subtargets that require custom scheduling heuristics may
// plug in an alternate MachineSchedStrategy. The strategy is responsible for
// selecting the highest priority node from the list:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   return new ScheduleDAGMILive(C, std::make_unique<CustomStrategy>(C));
// }
//
// The DAG builder can also be customized by adding DAG mutations that will run
// after DAG building and before list scheduling. DAG mutations can adjust
// dependencies based on target-specific knowledge or add weak edges to aid
// heuristics:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   ScheduleDAGMI *DAG = createGenericSchedLive(C);
//   DAG->addMutation(std::make_unique<CustomDAGMutation>(...));
//   return DAG;
// }
//
// A target that supports alternative schedulers can use the
// MachineSchedRegistry to allow command line selection. This can be done by
// implementing the following boilerplate:
//
// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
//   return new CustomMachineScheduler(C);
// }
// static MachineSchedRegistry
// SchedCustomRegistry("custom", "Run my target's custom scheduler",
//                     createCustomMachineSched);
//
//
// Finally, subtargets that don't need to implement custom heuristics but would
// like to configure the GenericScheduler's policy for a given scheduler region,
// including scheduling direction and register pressure tracking policy, can do
// this:
//
// void <SubTarget>Subtarget::
// overrideSchedPolicy(MachineSchedPolicy &Policy,
//                     unsigned NumRegionInstrs) const {
//   Policy.<Flag> = true;
// }
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
#define LLVM_CODEGEN_MACHINESCHEDULER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <memory>
#include <string>
#include <vector>

namespace llvm {

extern cl::opt<bool> ForceTopDown;
extern cl::opt<bool> ForceBottomUp;

class LiveIntervals;
class MachineDominatorTree;
class MachineFunction;
class MachineInstr;
class MachineLoopInfo;
class RegisterClassInfo;
class SchedDFSResult;
class ScheduleHazardRecognizer;
class TargetInstrInfo;
class TargetPassConfig;
class TargetRegisterInfo;

/// MachineSchedContext provides enough context from the MachineScheduler pass
/// for the target to instantiate a scheduler.
struct MachineSchedContext {
  MachineFunction *MF = nullptr;
  const MachineLoopInfo *MLI = nullptr;
  const MachineDominatorTree *MDT = nullptr;
  const TargetPassConfig *PassConfig = nullptr;
  AliasAnalysis *AA = nullptr;
  LiveIntervals *LIS = nullptr;

  RegisterClassInfo *RegClassInfo;

  MachineSchedContext();
  virtual ~MachineSchedContext();
};

/// MachineSchedRegistry provides a selection of available machine instruction
/// schedulers.
class MachineSchedRegistry
    : public MachinePassRegistryNode<
          ScheduleDAGInstrs *(*)(MachineSchedContext *)> {
public:
  using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *);

  // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
  using FunctionPassCtor = ScheduleDAGCtor;

  static MachinePassRegistry<ScheduleDAGCtor> Registry;

  MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
      : MachinePassRegistryNode(N, D, C) {
    Registry.Add(this);
  }

  ~MachineSchedRegistry() { Registry.Remove(this); }

  // Accessors.
  //
  MachineSchedRegistry *getNext() const {
    return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
  }

  static MachineSchedRegistry *getList() {
    return (MachineSchedRegistry *)Registry.getList();
  }

  static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
    Registry.setListener(L);
  }
};

class ScheduleDAGMI;

/// Define a generic scheduling policy for targets that don't provide their own
/// MachineSchedStrategy. This can be overridden for each scheduling region
/// before building the DAG.
struct MachineSchedPolicy {
  // Allow the scheduler to disable register pressure tracking.
  bool ShouldTrackPressure = false;
  /// Track LaneMasks to allow reordering of independent subregister writes
  /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
  bool ShouldTrackLaneMasks = false;

  // Allow the scheduler to force top-down or bottom-up scheduling. If neither
  // is true, the scheduler runs in both directions and converges.
  bool OnlyTopDown = false;
  bool OnlyBottomUp = false;

  // Disable heuristic that tries to fetch nodes from long dependency chains
  // first.
  bool DisableLatencyHeuristic = false;

  MachineSchedPolicy() = default;
};

/// MachineSchedStrategy - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Initialization sequence:
///   initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
class MachineSchedStrategy {
  virtual void anchor();

public:
  virtual ~MachineSchedStrategy() = default;

  /// Optionally override the per-region scheduling policy.
  virtual void initPolicy(MachineBasicBlock::iterator Begin,
                          MachineBasicBlock::iterator End,
                          unsigned NumRegionInstrs) {}

  virtual void dumpPolicy() const {}

  /// Check if pressure tracking is needed before building the DAG and
  /// initializing this strategy. Called after initPolicy.
  virtual bool shouldTrackPressure() const { return true; }

  /// Returns true if lanemasks should be tracked. LaneMask tracking is
  /// necessary to reorder independent subregister defs for the same vreg.
  /// This has to be enabled in combination with shouldTrackPressure().
  virtual bool shouldTrackLaneMasks() const { return false; }

  // If this method returns true, handling of the scheduling regions
  // themselves (in case of a scheduling boundary in MBB) will be done
  // beginning with the topmost region of MBB.
  virtual bool doMBBSchedRegionsTopDown() const { return false; }

  /// Initialize the strategy after building the DAG for a new region.
  virtual void initialize(ScheduleDAGMI *DAG) = 0;

  /// Tell the strategy that MBB is about to be processed.
  virtual void enterMBB(MachineBasicBlock *MBB) {};

  /// Tell the strategy that current MBB is done.
  virtual void leaveMBB() {};

  /// Notify this strategy that all roots have been released (including those
  /// that depend on EntrySU or ExitSU).
  virtual void registerRoots() {}

  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
  /// schedule the node at the top of the unscheduled region. Otherwise it will
  /// be scheduled at the bottom.
  virtual SUnit *pickNode(bool &IsTopNode) = 0;

  /// Scheduler callback to notify that a new subtree is scheduled.
  virtual void scheduleTree(unsigned SubtreeID) {}

  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
  /// instruction and updated scheduled/remaining flags in the DAG nodes.
  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;

  /// When all predecessor dependencies have been resolved, free this node for
  /// top-down scheduling.
  virtual void releaseTopNode(SUnit *SU) = 0;

  /// When all successor dependencies have been resolved, free this node for
  /// bottom-up scheduling.
  virtual void releaseBottomNode(SUnit *SU) = 0;
};

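// Illustrative sketch (not part of the original header): the smallest useful
// MachineSchedStrategy is a plain bottom-up list scheduler that keeps every
// released node in a ready list and always picks the most recently released
// one. The class name TrivialBottomUpStrategy is hypothetical.
//
//   class TrivialBottomUpStrategy : public MachineSchedStrategy {
//     std::vector<SUnit *> Ready; // nodes whose successors are all scheduled
//
//   public:
//     void initialize(ScheduleDAGMI *DAG) override { Ready.clear(); }
//     void releaseTopNode(SUnit *SU) override {}           // bottom-up only
//     void releaseBottomNode(SUnit *SU) override { Ready.push_back(SU); }
//     SUnit *pickNode(bool &IsTopNode) override {
//       if (Ready.empty())
//         return nullptr;                                  // region is done
//       IsTopNode = false;                                 // schedule at the bottom
//       SUnit *SU = Ready.back();
//       Ready.pop_back();
//       return SU;
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//   };
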
/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
/// schedules machine instructions according to the given MachineSchedStrategy
/// without much extra book-keeping. This is the common functionality between
/// PreRA and PostRA MachineScheduler.
class ScheduleDAGMI : public ScheduleDAGInstrs {
protected:
  AliasAnalysis *AA;
  LiveIntervals *LIS;
  std::unique_ptr<MachineSchedStrategy> SchedImpl;

  /// Ordered list of DAG postprocessing steps.
  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

  /// The top of the unscheduled zone.
  MachineBasicBlock::iterator CurrentTop;

  /// The bottom of the unscheduled zone.
  MachineBasicBlock::iterator CurrentBottom;

  /// Record the next node in a scheduled cluster.
  const SUnit *NextClusterPred = nullptr;
  const SUnit *NextClusterSucc = nullptr;

#ifndef NDEBUG
  /// The number of instructions scheduled so far. Used to cut off the
  /// scheduler at the point determined by misched-cutoff.
  unsigned NumInstrsScheduled = 0;
#endif

public:
  ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
                bool RemoveKillFlags)
      : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
        LIS(C->LIS), SchedImpl(std::move(S)) {}

  // Provide a vtable anchor
  ~ScheduleDAGMI() override;

  /// If this method returns true, handling of the scheduling regions
  /// themselves (in case of a scheduling boundary in MBB) will be done
  /// beginning with the topmost region of MBB.
  bool doMBBSchedRegionsTopDown() const override {
    return SchedImpl->doMBBSchedRegionsTopDown();
  }

  // Returns LiveIntervals instance for use in DAG mutators and such.
  LiveIntervals *getLIS() const { return LIS; }

  /// Return true if this DAG supports VReg liveness and RegPressure.
  virtual bool hasVRegLiveness() const { return false; }

  /// Add a postprocessing step to the DAG builder.
  /// Mutations are applied in the order that they are added after normal DAG
  /// building and before MachineSchedStrategy initialization.
  ///
  /// ScheduleDAGMI takes ownership of the Mutation object.
  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
    if (Mutation)
      Mutations.push_back(std::move(Mutation));
  }

  MachineBasicBlock::iterator top() const { return CurrentTop; }
  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  void startBlock(MachineBasicBlock *bb) override;
  void finishBlock() override;

  /// Change the position of an instruction within the basic block and update
  /// live ranges and region boundary iterators.
  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);

  const SUnit *getNextClusterPred() const { return NextClusterPred; }

  const SUnit *getNextClusterSucc() const { return NextClusterSucc; }

  void viewGraph(const Twine &Name, const Twine &Title) override;
  void viewGraph() override;

protected:
  // Top-Level entry points for the schedule() driver...

  /// Apply each ScheduleDAGMutation step in order. This allows different
  /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
  void postprocessDAG();

  /// Release ExitSU predecessors and setup scheduler queues.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Update scheduler DAG and queues after scheduling an instruction.
  void updateQueues(SUnit *SU, bool IsTopNode);

  /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
  void placeDebugValues();

  /// dump the scheduled Sequence.
  void dumpSchedule() const;

  // Lesser helpers...
  bool checkSchedLimit();

  void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                             SmallVectorImpl<SUnit*> &BotRoots);

  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSuccessors(SUnit *SU);
  void releasePred(SUnit *SU, SDep *PredEdge);
  void releasePredecessors(SUnit *SU);
};

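// Illustrative sketch (not part of the original header): a postprocessing step
// handed to addMutation() is a ScheduleDAGMutation subclass whose apply() runs
// after DAG building and before the strategy is initialized. The class name
// ChainConsecutiveSUnits is hypothetical; addEdge() and SDep::Cluster come from
// ScheduleDAGInstrs and ScheduleDAG.
//
//   class ChainConsecutiveSUnits : public ScheduleDAGMutation {
//     void apply(ScheduleDAGInstrs *DAG) override {
//       // Add a weak cluster edge between each pair of consecutive nodes so
//       // the strategy prefers to keep their original order.
//       for (unsigned I = 1, E = DAG->SUnits.size(); I < E; ++I)
//         DAG->addEdge(&DAG->SUnits[I], SDep(&DAG->SUnits[I - 1], SDep::Cluster));
//     }
//   };
//
//   // Registered e.g. inside <Target>PassConfig::createMachineScheduler():
//   //   DAG->addMutation(std::make_unique<ChainConsecutiveSUnits>());
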
/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
/// machine instructions while updating LiveIntervals and tracking regpressure.
class ScheduleDAGMILive : public ScheduleDAGMI {
protected:
  RegisterClassInfo *RegClassInfo;

  /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
  /// will be empty.
  SchedDFSResult *DFSResult = nullptr;
  BitVector ScheduledTrees;

  MachineBasicBlock::iterator LiveRegionEnd;

  /// Maps vregs to the SUnits of their uses in the current scheduling region.
  VReg2SUnitMultiMap VRegUses;

  // Map each SU to its summary of pressure changes. This array is updated for
  // liveness during bottom-up scheduling. Top-down scheduling may proceed but
  // has no effect on the pressure diffs.
  PressureDiffs SUPressureDiffs;

  /// Register pressure in this region computed by initRegPressure.
  bool ShouldTrackPressure = false;
  bool ShouldTrackLaneMasks = false;
  IntervalPressure RegPressure;
  RegPressureTracker RPTracker;

  /// List of pressure sets that exceed the target's pressure limit before
  /// scheduling, listed in increasing set ID order. Each pressure set is paired
  /// with its max pressure in the currently scheduled regions.
  std::vector<PressureChange> RegionCriticalPSets;

  /// The top of the unscheduled zone.
  IntervalPressure TopPressure;
  RegPressureTracker TopRPTracker;

  /// The bottom of the unscheduled zone.
  IntervalPressure BotPressure;
  RegPressureTracker BotRPTracker;

  /// True if disconnected subregister components are already renamed.
  /// The renaming is only done on demand if lane masks are tracked.
  bool DisconnectedComponentsRenamed = false;

public:
  ScheduleDAGMILive(MachineSchedContext *C,
                    std::unique_ptr<MachineSchedStrategy> S)
      : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
        RegClassInfo(C->RegClassInfo), RPTracker(RegPressure),
        TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}

  ~ScheduleDAGMILive() override;

  /// Return true if this DAG supports VReg liveness and RegPressure.
  bool hasVRegLiveness() const override { return true; }

  /// Return true if register pressure tracking is enabled.
  bool isTrackingPressure() const { return ShouldTrackPressure; }

  /// Get current register pressure for the top scheduled instructions.
  const IntervalPressure &getTopPressure() const { return TopPressure; }
  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }

  /// Get current register pressure for the bottom scheduled instructions.
  const IntervalPressure &getBotPressure() const { return BotPressure; }
  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }

  /// Get register pressure for the entire scheduling region before scheduling.
  const IntervalPressure &getRegPressure() const { return RegPressure; }

  const std::vector<PressureChange> &getRegionCriticalPSets() const {
    return RegionCriticalPSets;
  }

  PressureDiff &getPressureDiff(const SUnit *SU) {
    return SUPressureDiffs[SU->NodeNum];
  }
  const PressureDiff &getPressureDiff(const SUnit *SU) const {
    return SUPressureDiffs[SU->NodeNum];
  }

  /// Compute a DFSResult after DAG building is complete, and before any
  /// queue comparisons.
  void computeDFSResult();

  /// Return a non-null DFS result if the scheduling strategy initialized it.
  const SchedDFSResult *getDFSResult() const { return DFSResult; }

  BitVector &getScheduledTrees() { return ScheduledTrees; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  /// Compute the cyclic critical path through the DAG.
  unsigned computeCyclicCriticalPath();

  void dump() const override;

protected:
  // Top-Level entry points for the schedule() driver...

  /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
  /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
  /// region, TopTracker and BottomTracker will be initialized to the top and
  /// bottom of the DAG region without covering any unscheduled instruction.
  void buildDAGWithRegPressure();

  /// Release ExitSU predecessors and setup scheduler queues. Re-position
  /// the Top RP tracker in case the region beginning has changed.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Move an instruction and update register pressure.
  void scheduleMI(SUnit *SU, bool IsTopNode);

  // Lesser helpers...

  void initRegPressure();

  void updatePressureDiffs(ArrayRef<RegisterMaskPair> LiveUses);

  void updateScheduledPressure(const SUnit *SU,
                               const std::vector<unsigned> &NewMaxPressure);

  void collectVRegUses(SUnit &SU);
};

//===----------------------------------------------------------------------===//
///
/// Helpers for implementing custom MachineSchedStrategy classes. These take
/// care of the book-keeping associated with list scheduling heuristics.
///
//===----------------------------------------------------------------------===//

/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
///
/// This is a convenience class that may be used by implementations of
/// MachineSchedStrategy.
class ReadyQueue {
  unsigned ID;
  std::string Name;
  std::vector<SUnit*> Queue;

public:
  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}

  unsigned getID() const { return ID; }

  StringRef getName() const { return Name; }

  // SU is in this queue if its NodeQueueId is a superset of this ID.
  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }

  bool empty() const { return Queue.empty(); }

  void clear() { Queue.clear(); }

  unsigned size() const { return Queue.size(); }

  using iterator = std::vector<SUnit*>::iterator;

  iterator begin() { return Queue.begin(); }

  iterator end() { return Queue.end(); }

  ArrayRef<SUnit*> elements() { return Queue; }

  iterator find(SUnit *SU) { return llvm::find(Queue, SU); }

  void push(SUnit *SU) {
    Queue.push_back(SU);
    SU->NodeQueueId |= ID;
  }

  iterator remove(iterator I) {
    (*I)->NodeQueueId &= ~ID;
    *I = Queue.back();
    unsigned idx = I - Queue.begin();
    Queue.pop_back();
    return Queue.begin() + idx;
  }

  void dump() const;
};

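// Illustrative sketch (not part of the original header): typical ReadyQueue
// maintenance inside a strategy, where SU is an arbitrary SUnit pointer.
// remove() swaps the last element into the erased slot, so the returned
// iterator is reused rather than incremented past.
//
//   ReadyQueue Available(/*id=*/1, "MyQ");
//   Available.push(SU);                      // sets the ID bit in SU->NodeQueueId
//   for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
//     if ((*I)->isScheduled)
//       I = Available.remove(I);             // do not advance; slot was refilled
//     else
//       ++I;
//   }
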
/// Summarize the unscheduled region.
struct SchedRemainder {
  // Critical path through the DAG in expected latency.
  unsigned CriticalPath;
  unsigned CyclicCritPath;

  // Scaled count of micro-ops left to schedule.
  unsigned RemIssueCount;

  bool IsAcyclicLatencyLimited;

  // Unscheduled resources
  SmallVector<unsigned, 16> RemainingCounts;

  SchedRemainder() { reset(); }

  void reset() {
    CriticalPath = 0;
    CyclicCritPath = 0;
    RemIssueCount = 0;
    IsAcyclicLatencyLimited = false;
    RemainingCounts.clear();
  }

  void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
};

/// Each Scheduling boundary is associated with ready queues. It tracks the
/// current cycle in the direction of movement, and maintains the state
/// of "hazards" and other interlocks at the current cycle.
class SchedBoundary {
public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ScheduleDAGMI *DAG = nullptr;
  const TargetSchedModel *SchedModel = nullptr;
  SchedRemainder *Rem = nullptr;

  ReadyQueue Available;
  ReadyQueue Pending;

  ScheduleHazardRecognizer *HazardRec = nullptr;

private:
  /// True if the pending Q should be checked/updated before scheduling another
  /// instruction.
  bool CheckPending;

  /// Number of cycles it takes to issue the instructions scheduled in this
  /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
  /// See getStalls().
  unsigned CurrCycle;

  /// Micro-ops issued in the current cycle
  unsigned CurrMOps;

  /// MinReadyCycle - Cycle of the soonest available instruction.
  unsigned MinReadyCycle;

  // The expected latency of the critical path in this scheduled zone.
  unsigned ExpectedLatency;

  // The latency of dependence chains leading into this zone.
  // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
  // For each cycle scheduled: DLat -= 1.
  unsigned DependentLatency;

  /// Count the scheduled (issued) micro-ops that can be retired by
  /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
  unsigned RetiredMOps;

  // Count scheduled resources that have been executed. Resources are
  // considered executed if they become ready in the time that it takes to
  // saturate any resource including the one in question. Counts are scaled
  // for direct comparison with other resources. Counts can be compared with
  // MOps * getMicroOpFactor and Latency * getLatencyFactor.
  SmallVector<unsigned, 16> ExecutedResCounts;

  /// Cache the max count for a single resource.
  unsigned MaxExecutedResCount;

  // Cache the critical resources ID in this scheduled zone.
  unsigned ZoneCritResIdx;

  // Is the scheduled region resource limited vs. latency limited.
  bool IsResourceLimited;

  // Record the highest cycle at which each resource has been reserved by a
  // scheduled instruction.
  SmallVector<unsigned, 16> ReservedCycles;

  // For each PIdx, stores first index into ReservedCycles that corresponds to
  // it.
  SmallVector<unsigned, 16> ReservedCyclesIndex;

#ifndef NDEBUG
  // Remember the greatest possible stall as an upper bound on the number of
  // times we should retry the pending queue because of a hazard.
  unsigned MaxObservedStall;
#endif

public:
  /// Pending queues extend the ready queues with the same ID and the
  /// PendingFlag set.
  SchedBoundary(unsigned ID, const Twine &Name):
    Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
    reset();
  }

  ~SchedBoundary();

  void reset();

  void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
            SchedRemainder *rem);

  bool isTop() const {
    return Available.getID() == TopQID;
  }

  /// Number of cycles to issue the instructions scheduled in this zone.
  unsigned getCurrCycle() const { return CurrCycle; }

  /// Micro-ops issued in the current cycle
  unsigned getCurrMOps() const { return CurrMOps; }

  // The latency of dependence chains leading into this zone.
  unsigned getDependentLatency() const { return DependentLatency; }

  /// Get the number of latency cycles "covered" by the scheduled
  /// instructions. This is the larger of the critical path within the zone
  /// and the number of cycles required to issue the instructions.
  unsigned getScheduledLatency() const {
    return std::max(ExpectedLatency, CurrCycle);
  }

  unsigned getUnscheduledLatency(SUnit *SU) const {
    return isTop() ? SU->getHeight() : SU->getDepth();
  }

  unsigned getResourceCount(unsigned ResIdx) const {
    return ExecutedResCounts[ResIdx];
  }

  /// Get the scaled count of scheduled micro-ops and resources, including
  /// executed resources.
  unsigned getCriticalCount() const {
    if (!ZoneCritResIdx)
      return RetiredMOps * SchedModel->getMicroOpFactor();
    return getResourceCount(ZoneCritResIdx);
  }

  /// Get a scaled count for the minimum execution time of the scheduled
  /// micro-ops that are ready to execute by getExecutedCount. Notice the
  /// feedback loop.
  unsigned getExecutedCount() const {
    return std::max(CurrCycle * SchedModel->getLatencyFactor(),
                    MaxExecutedResCount);
  }

  unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }

  // Is the scheduled region resource limited vs. latency limited.
  bool isResourceLimited() const { return IsResourceLimited; }

  /// Get the difference between the given SUnit's ready time and the current
  /// cycle.
  unsigned getLatencyStallCycles(SUnit *SU);

  unsigned getNextResourceCycleByInstance(unsigned InstanceIndex,
                                          unsigned Cycles);

  std::pair<unsigned, unsigned> getNextResourceCycle(unsigned PIdx,
                                                     unsigned Cycles);

  bool checkHazard(SUnit *SU);

  unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);

  unsigned getOtherResourceCount(unsigned &OtherCritIdx);

  void releaseNode(SUnit *SU, unsigned ReadyCycle);

  void bumpCycle(unsigned NextCycle);

  void incExecutedResources(unsigned PIdx, unsigned Count);

  unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);

  void bumpNode(SUnit *SU);

  void releasePending();

  void removeReady(SUnit *SU);

  /// Call this before applying any other heuristics to the Available queue.
  /// Updates the Available/Pending Q's if necessary and returns the single
  /// available instruction, or NULL if there are multiple candidates.
  SUnit *pickOnlyChoice();

  void dumpScheduledState() const;
};

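// Illustrative sketch (not part of the original header): how a strategy's
// pickNode() typically consults one zone. The Top boundary and the surrounding
// selection loop are assumed context.
//
//   if (SUnit *OnlySU = Top.pickOnlyChoice())
//     return OnlySU;                       // single candidate, skip heuristics
//   for (SUnit *SU : Top.Available) {
//     if (Top.checkHazard(SU))
//       continue;                          // would stall in the current cycle
//     // ...compare remaining candidates with tryCandidate()-style heuristics.
//   }
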
/// Base class for GenericScheduler. This class maintains information about
/// scheduling candidates based on TargetSchedModel making it easy to implement
/// heuristics for either preRA or postRA scheduling.
class GenericSchedulerBase : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason : uint8_t {
    NoCand, Only1, PhysReg, RegExcess, RegCritical, Stall, Cluster, Weak,
    RegMax, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency = false;
    unsigned ReduceResIdx = 0;
    unsigned DemandResIdx = 0;

    CandPolicy() = default;

    bool operator==(const CandPolicy &RHS) const {
      return ReduceLatency == RHS.ReduceLatency &&
             ReduceResIdx == RHS.ReduceResIdx &&
             DemandResIdx == RHS.DemandResIdx;
    }
    bool operator!=(const CandPolicy &RHS) const {
      return !(*this == RHS);
    }
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources = 0;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources = 0;

    SchedResourceDelta() = default;

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
          && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by GenericScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Whether this candidate should be scheduled at top/bottom.
    bool AtTop;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate() { reset(CandPolicy()); }
    SchedCandidate(const CandPolicy &Policy) { reset(Policy); }

    void reset(const CandPolicy &NewPolicy) {
      Policy = NewPolicy;
      SU = nullptr;
      Reason = NoCand;
      AtTop = false;
      RPDelta = RegPressureDelta();
      ResDelta = SchedResourceDelta();
    }

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      AtTop = Best.AtTop;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

protected:
  const MachineSchedContext *Context;
  const TargetSchedModel *SchedModel = nullptr;
  const TargetRegisterInfo *TRI = nullptr;

  SchedRemainder Rem;

  GenericSchedulerBase(const MachineSchedContext *C) : Context(C) {}

  void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
                 SchedBoundary *OtherZone);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand);
#endif

private:
  bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
                           bool ComputeRemLatency, unsigned &RemLatency) const;
};

// Utility functions used by heuristics in tryCandidate().
bool tryLess(int TryVal, int CandVal,
             GenericSchedulerBase::SchedCandidate &TryCand,
             GenericSchedulerBase::SchedCandidate &Cand,
             GenericSchedulerBase::CandReason Reason);
bool tryGreater(int TryVal, int CandVal,
                GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                GenericSchedulerBase::CandReason Reason);
bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                SchedBoundary &Zone);
bool tryPressure(const PressureChange &TryP,
                 const PressureChange &CandP,
                 GenericSchedulerBase::SchedCandidate &TryCand,
                 GenericSchedulerBase::SchedCandidate &Cand,
                 GenericSchedulerBase::CandReason Reason,
                 const TargetRegisterInfo *TRI,
                 const MachineFunction &MF);
unsigned getWeakLeft(const SUnit *SU, bool isTop);
int biasPhysReg(const SUnit *SU, bool isTop);

/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class GenericScheduler : public GenericSchedulerBase {
public:
  GenericScheduler(const MachineSchedContext *C):
    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
    Bot(SchedBoundary::BotQID, "BotQ") {}

  void initPolicy(MachineBasicBlock::iterator Begin,
                  MachineBasicBlock::iterator End,
                  unsigned NumRegionInstrs) override;

  void dumpPolicy() const override;

  bool shouldTrackPressure() const override {
    return RegionPolicy.ShouldTrackPressure;
  }

  bool shouldTrackLaneMasks() const override {
    return RegionPolicy.ShouldTrackLaneMasks;
  }

  void initialize(ScheduleDAGMI *dag) override;

  SUnit *pickNode(bool &IsTopNode) override;

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Top.releaseNode(SU, SU->TopReadyCycle);
    TopCand.SU = nullptr;
  }

  void releaseBottomNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Bot.releaseNode(SU, SU->BotReadyCycle);
    BotCand.SU = nullptr;
  }

  void registerRoots() override;

protected:
  ScheduleDAGMILive *DAG = nullptr;

  MachineSchedPolicy RegionPolicy;

  // State of the top and bottom scheduled instruction boundaries.
  SchedBoundary Top;
  SchedBoundary Bot;

  /// Candidate last picked from Top boundary.
  SchedCandidate TopCand;
  /// Candidate last picked from Bot boundary.
  SchedCandidate BotCand;

  void checkAcyclicLatency();

  void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
                     const RegPressureTracker &RPTracker,
                     RegPressureTracker &TempTracker);

  virtual void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
                            SchedBoundary *Zone) const;

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const CandPolicy &ZonePolicy,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

  void reschedulePhysReg(SUnit *SU, bool isTop);
};

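// Illustrative sketch (not part of the original header): targets commonly reuse
// GenericScheduler and refine only its candidate comparison by overriding the
// virtual tryCandidate() hook above. The class name MyTargetSchedStrategy and
// the extra heuristic are hypothetical.
//
//   class MyTargetSchedStrategy : public GenericScheduler {
//   public:
//     MyTargetSchedStrategy(const MachineSchedContext *C) : GenericScheduler(C) {}
//
//     void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
//                       SchedBoundary *Zone) const override {
//       GenericScheduler::tryCandidate(Cand, TryCand, Zone);
//       // Only apply a target heuristic when the generic rules did not decide
//       // for a stronger reason than plain node order.
//       if (TryCand.Reason != NodeOrder && TryCand.Reason != NoCand)
//         return;
//       // ...compare Cand.SU and TryCand.SU here and set TryCand.Reason.
//     }
//   };
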
/// PostGenericScheduler - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Callbacks from ScheduleDAGMI:
///   initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
class PostGenericScheduler : public GenericSchedulerBase {
protected:
  ScheduleDAGMI *DAG;
  SchedBoundary Top;
  SmallVector<SUnit*, 8> BotRoots;

public:
  PostGenericScheduler(const MachineSchedContext *C):
    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}

  ~PostGenericScheduler() override = default;

  void initPolicy(MachineBasicBlock::iterator Begin,
                  MachineBasicBlock::iterator End,
                  unsigned NumRegionInstrs) override {
    /* no configurable policy */
  }

  /// PostRA scheduling does not track pressure.
  bool shouldTrackPressure() const override { return false; }

  void initialize(ScheduleDAGMI *Dag) override;

  void registerRoots() override;

  SUnit *pickNode(bool &IsTopNode) override;

  void scheduleTree(unsigned SubtreeID) override {
    llvm_unreachable("PostRA scheduler does not support subtree analysis.");
  }

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;
    Top.releaseNode(SU, SU->TopReadyCycle);
  }

  // Only called for roots.
  void releaseBottomNode(SUnit *SU) override {
    BotRoots.push_back(SU);
  }

protected:
  void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);

  void pickNodeFromQueue(SchedCandidate &Cand);
};

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
/// Adds default DAG mutations.
ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI);

std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI);

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI);

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINESCHEDULER_H