LLVM 3.7.0
PostRASchedulerList.cpp
//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
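//
// A minimal sketch of the loop described above, using illustrative
// placeholder names (Available, Pending, pickLegalHighestPriority) rather
// than the actual types and calls used below:
//
//   unsigned Cycle = 0;
//   while (!Available.empty() || !Pending.empty()) {
//     movePendingToAvailable(Pending, Available, Cycle); // latency elapsed
//     if (SUnit *SU = pickLegalHighestPriority(Available)) {
//       emit(SU);               // schedule in priority order
//       releaseSuccessors(SU);  // successors become pending
//     } else {
//       advanceCycle(++Cycle);  // structural hazard or latency stall
//     }
//   }
//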

#include "llvm/CodeGen/Passes.h"
#include "AggressiveAntiDepBreaker.h"
#include "AntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "post-RA-sched"

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
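
// Both flags are hidden command-line options; an illustrative invocation
// that forces post-RA scheduling with critical-path anti-dependency
// breaking would be:
//
//   llc -O2 -post-RA-scheduler -break-anti-dependencies=critical foo.ll
//
// Passing -post-RA-scheduler explicitly overrides the subtarget hook;
// otherwise enablePostRAScheduler() below consults the target.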

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
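
// Worked example: with -postra-sched-debugdiv=2 -postra-sched-debugmod=1,
// only blocks where the running counter satisfies bbcnt % 2 == 1 are
// scheduled, which makes it possible to bisect a misschedule down to a
// single basic block (see the #ifndef NDEBUG guard in runOnMachineFunction
// below).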

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn) override;

    bool enablePostRAScheduler(
        const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
        TargetSubtargetInfo::AntiDepBreakMode &Mode,
        TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// The schedule. Null SUnit*'s represent noop instructions.
    std::vector<SUnit*> Sequence;

    /// The index in BB of RegionEnd.
    ///
    /// This is the instruction number from the top of the current block, not
    /// the SlotIndex. It is only used by the AntiDepBreaker.
    unsigned EndIndex;

  public:
    SchedulePostRATDList(
        MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
        const RegisterClassInfo &,
        TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
        SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs);

    ~SchedulePostRATDList() override;

    /// startBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void startBlock(MachineBasicBlock *BB) override;

    // Set the index of RegionEnd within the current BB.
    void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }

    /// Initialize the scheduler state for the next scheduling region.
    void enterRegion(MachineBasicBlock *bb,
                     MachineBasicBlock::iterator begin,
                     MachineBasicBlock::iterator end,
                     unsigned regioninstrs) override;

    /// Notify that the scheduler has finished scheduling the current region.
    void exitRegion() override;

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void schedule() override;

    void EmitSchedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// finishBlock - Clean up register live-range state.
    ///
    void finishBlock() override;

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();

    void dumpSchedule() const;
    void emitNoop(unsigned CurCycle);
  };
}
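
// A worked example of the AvailableQueue/PendingQueue interplay documented
// above, assuming an instruction A with latency 2 and a dependent
// instruction B:
//
//   cycle 0: A has no unscheduled predecessors, so it sits in
//            AvailableQueue and is scheduled; ReleaseSucc moves B to
//            PendingQueue because its result is not ready until cycle 2.
//   cycle 1: B's depth (2) is still greater than the current cycle, so it
//            stays pending and the scheduler stalls or emits a noop.
//   cycle 2: B's depth is now <= CurCycle, so it migrates to
//            AvailableQueue and can be issued.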

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
    MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
    const RegisterClassInfo &RCI,
    TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
    SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs)
    : ScheduleDAGInstrs(MF, &MLI, /*IsPostRA=*/true), AA(AA), EndIndex(0) {

  const InstrItineraryData *InstrItins =
      MF.getSubtarget().getInstrItineraryData();
  HazardRec =
      MF.getSubtarget().getInstrInfo()->CreateTargetPostRAHazardRecognizer(
          InstrItins, this);

  assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
          MRI.tracksLiveness()) &&
         "Live-ins must be accurate for anti-dependency breaking");
  AntiDepBreak =
      ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
       (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
       ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
        (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : nullptr));
}
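
// In summary, the nested conditional above selects the breaker as follows
// (see AggressiveAntiDepBreaker.h and CriticalAntiDepBreaker.h):
//
//   ANTIDEP_ALL      -> AggressiveAntiDepBreaker, which renames registers
//                       broadly, guided by CriticalPathRCs
//   ANTIDEP_CRITICAL -> CriticalAntiDepBreaker, critical path only
//   ANTIDEP_NONE     -> AntiDepBreak stays null and no renaming happens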

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// Initialize state associated with the next scheduling region.
void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
                                       MachineBasicBlock::iterator begin,
                                       MachineBasicBlock::iterator end,
                                       unsigned regioninstrs) {
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
  Sequence.clear();
}

/// Print the schedule before exiting the region.
void SchedulePostRATDList::exitRegion() {
  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
  ScheduleDAGInstrs::exitRegion();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

bool PostRAScheduler::enablePostRAScheduler(
    const TargetSubtargetInfo &ST,
    CodeGenOpt::Level OptLevel,
    TargetSubtargetInfo::AntiDepBreakMode &Mode,
    TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
  Mode = ST.getAntiDepBreakMode();
  ST.getCriticalPathRCs(CriticalPathRCs);
  return ST.enablePostRAScheduler() &&
         OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  if (skipOptnoneFunction(*Fn.getFunction()))
    return false;

  TII = Fn.getSubtarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
      TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    if (!enablePostRAScheduler(Fn.getSubtarget(), PassConfig->getOptLevel(),
                               AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.startBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = std::prev(I);
      --Count;
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.enterRegion(MBB, I, Current, CurrentCount - Count);
        Scheduler.setEndIndex(CurrentCount);
        Scheduler.schedule();
        Scheduler.exitRegion();
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.setEndIndex(CurrentCount);
    Scheduler.schedule();
    Scheduler.exitRegion();
    Scheduler.EmitSchedule();
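
    // Worked example: for a block [I0, I1, CALL, I2, I3], the backwards
    // walk above first cuts a region at the CALL, scheduling and emitting
    // [I2, I3]; the CALL itself is only Observe()'d, and the
    // enterRegion/schedule calls just above handle the leading [I0, I1]
    // prefix as the final region.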

    // Clean up register live-range state.
    Scheduler.finishBlock();

    // Update register kills
    Scheduler.fixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::startBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::schedule() {
  // Build the scheduling graph.
  buildSchedGraph(AA);

  if (AntiDepBreak) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
                                          EndIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      ScheduleDAG::clearDAG();
      buildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak)
    AntiDepBreak->Observe(MI, Count, EndIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::finishBlock() {
  if (AntiDepBreak)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::finishBlock();
}

//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node which causes
  // all descendants to be marked dirty. Setting the successor depth explicitly
  // here would cause depth to be recomputed for all its ancestors. If the
  // successor is not yet ready (because of a transitively redundant edge) then
  // this causes depth computation to be quadratic in the size of the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}
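
// Illustrative case for the lazy-depth comment above: given edges a->b,
// b->c plus a redundant edge a->c, scheduling `a` releases `c` before `c`
// is actually ready; eagerly calling setDepthToAtLeast on `c` at that
// point would walk and recompute its ancestors, and repeating that walk
// for every redundant release is what would make eager depth updates
// quadratic in the size of the DAG.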

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.scheduledNode(SU);
}

/// emitNoop - Add a noop to the current instruction sequence.
void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
  DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
  HazardRec->EmitNoop();
  Sequence.push_back(nullptr);   // NULL here means noop
  ++NumNoops;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        if (HazardRec->ShouldPreferAnother(CurSUnit)) {
          if (!NotPreferredSUnit) {
            // If this is the first non-preferred node for this cycle, then
            // record it and continue searching for a preferred node. If this
            // is not the first non-preferred node, then treat it as though
            // there had been a hazard.
            NotPreferredSUnit = CurSUnit;
            continue;
          }
        } else {
          FoundSUnit = CurSUnit;
          break;
        }
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // If we have a non-preferred node, push it back onto the available list.
    // If we did not find a preferred node, then schedule this first
    // non-preferred node.
    if (NotPreferredSUnit) {
      if (!FoundSUnit) {
        DEBUG(dbgs() << "*** Will schedule a non-preferred instruction...\n");
        FoundSUnit = NotPreferredSUnit;
      } else {
        AvailableQueue.push(NotPreferredSUnit);
      }

      NotPreferredSUnit = nullptr;
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we need to emit noops prior to this instruction, then do so.
      unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
      for (unsigned i = 0; i != NumPreNoops; ++i)
        emitNoop(CurCycle);

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        emitNoop(CurCycle);
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}
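
// The loop above resolves each cycle in one of three ways; schematically:
//
//   FoundSUnit != nullptr     -> issue it; the cycle only advances once
//                                HazardRec->atIssueLimit() is reached.
//   no node, !HasNoopHazards  -> an ordinary pipeline stall: advance the
//                                cycle and bump NumStalls.
//   no node, HasNoopHazards   -> the target has no interlocks and needs
//                                explicit filler: emitNoop().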

// EmitSchedule - Emit the machine code in scheduled order.
void SchedulePostRATDList::EmitSchedule() {
  RegionBegin = RegionEnd;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(RegionEnd, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(RegionEnd, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      TII->insertNoop(*BB, RegionEnd);

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      RegionBegin = std::prev(RegionEnd);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}
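
// Worked example: if a three-instruction region [A, B, C] was scheduled as
// B, A, noop, C, the loop above splices B before RegionEnd (re-anchoring
// RegionBegin at it), then A, asks TII->insertNoop() for the null Sequence
// entry, and finally splices C, leaving the block in scheduled order with
// RegionBegin and RegionEnd still bracketing the region.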