LLVM  6.0.0svn
ScheduleDAG.cpp
Go to the documentation of this file.
1 //===- ScheduleDAG.cpp - Implement the ScheduleDAG class ------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file Implements the ScheduleDAG class, which is a base class used by
11 /// scheduling implementation classes.
12 //
13 //===----------------------------------------------------------------------===//
14 
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "pre-RA-sched"
39 
40 #ifndef NDEBUG
42  "stress-sched", cl::Hidden, cl::init(false),
43  cl::desc("Stress test instruction scheduling"));
44 #endif
45 
46 void SchedulingPriorityQueue::anchor() {}
47 
49  : TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
50  TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
51  MRI(mf.getRegInfo()) {
52 #ifndef NDEBUG
54 #endif
55 }
56 
57 ScheduleDAG::~ScheduleDAG() = default;
58 
60  SUnits.clear();
61  EntrySU = SUnit();
62  ExitSU = SUnit();
63 }
64 
65 const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
66  if (!Node || !Node->isMachineOpcode()) return nullptr;
67  return &TII->get(Node->getMachineOpcode());
68 }
69 
72  switch (getKind()) {
73  case Data: OS << "Data"; break;
74  case Anti: OS << "Anti"; break;
75  case Output: OS << "Out "; break;
76  case Order: OS << "Ord "; break;
77  }
78 
79  switch (getKind()) {
80  case Data:
81  OS << " Latency=" << getLatency();
82  if (TRI && isAssignedRegDep())
83  OS << " Reg=" << PrintReg(getReg(), TRI);
84  break;
85  case Anti:
86  case Output:
87  OS << " Latency=" << getLatency();
88  break;
89  case Order:
90  OS << " Latency=" << getLatency();
91  switch(Contents.OrdKind) {
92  case Barrier: OS << " Barrier"; break;
93  case MayAliasMem:
94  case MustAliasMem: OS << " Memory"; break;
95  case Artificial: OS << " Artificial"; break;
96  case Weak: OS << " Weak"; break;
97  case Cluster: OS << " Cluster"; break;
98  }
99  break;
100  }
101 
102  return OS;
103 }
104 
105 bool SUnit::addPred(const SDep &D, bool Required) {
106  // If this node already has this dependence, don't add a redundant one.
107  for (SDep &PredDep : Preds) {
108  // Zero-latency weak edges may be added purely for heuristic ordering. Don't
109  // add them if another kind of edge already exists.
110  if (!Required && PredDep.getSUnit() == D.getSUnit())
111  return false;
112  if (PredDep.overlaps(D)) {
113  // Extend the latency if needed. Equivalent to
114  // removePred(PredDep) + addPred(D).
115  if (PredDep.getLatency() < D.getLatency()) {
116  SUnit *PredSU = PredDep.getSUnit();
117  // Find the corresponding successor in N.
118  SDep ForwardD = PredDep;
119  ForwardD.setSUnit(this);
120  for (SDep &SuccDep : PredSU->Succs) {
121  if (SuccDep == ForwardD) {
122  SuccDep.setLatency(D.getLatency());
123  break;
124  }
125  }
126  PredDep.setLatency(D.getLatency());
127  }
128  return false;
129  }
130  }
131  // Now add a corresponding succ to N.
132  SDep P = D;
133  P.setSUnit(this);
134  SUnit *N = D.getSUnit();
135  // Update the bookkeeping.
136  if (D.getKind() == SDep::Data) {
138  "NumPreds will overflow!");
140  "NumSuccs will overflow!");
141  ++NumPreds;
142  ++N->NumSuccs;
143  }
144  if (!N->isScheduled) {
145  if (D.isWeak()) {
146  ++WeakPredsLeft;
147  }
148  else {
149  assert(NumPredsLeft < std::numeric_limits<unsigned>::max() &&
150  "NumPredsLeft will overflow!");
151  ++NumPredsLeft;
152  }
153  }
154  if (!isScheduled) {
155  if (D.isWeak()) {
156  ++N->WeakSuccsLeft;
157  }
158  else {
160  "NumSuccsLeft will overflow!");
161  ++N->NumSuccsLeft;
162  }
163  }
164  Preds.push_back(D);
165  N->Succs.push_back(P);
166  if (P.getLatency() != 0) {
167  this->setDepthDirty();
168  N->setHeightDirty();
169  }
170  return true;
171 }
172 
173 void SUnit::removePred(const SDep &D) {
174  // Find the matching predecessor.
176  if (I == Preds.end())
177  return;
178  // Find the corresponding successor in N.
179  SDep P = D;
180  P.setSUnit(this);
181  SUnit *N = D.getSUnit();
183  assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
184  N->Succs.erase(Succ);
185  Preds.erase(I);
186  // Update the bookkeeping.
187  if (P.getKind() == SDep::Data) {
188  assert(NumPreds > 0 && "NumPreds will underflow!");
189  assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
190  --NumPreds;
191  --N->NumSuccs;
192  }
193  if (!N->isScheduled) {
194  if (D.isWeak())
195  --WeakPredsLeft;
196  else {
197  assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
198  --NumPredsLeft;
199  }
200  }
201  if (!isScheduled) {
202  if (D.isWeak())
203  --N->WeakSuccsLeft;
204  else {
205  assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
206  --N->NumSuccsLeft;
207  }
208  }
209  if (P.getLatency() != 0) {
210  this->setDepthDirty();
211  N->setHeightDirty();
212  }
213 }
214 
216  if (!isDepthCurrent) return;
217  SmallVector<SUnit*, 8> WorkList;
218  WorkList.push_back(this);
219  do {
220  SUnit *SU = WorkList.pop_back_val();
221  SU->isDepthCurrent = false;
222  for (SDep &SuccDep : SU->Succs) {
223  SUnit *SuccSU = SuccDep.getSUnit();
224  if (SuccSU->isDepthCurrent)
225  WorkList.push_back(SuccSU);
226  }
227  } while (!WorkList.empty());
228 }
229 
231  if (!isHeightCurrent) return;
232  SmallVector<SUnit*, 8> WorkList;
233  WorkList.push_back(this);
234  do {
235  SUnit *SU = WorkList.pop_back_val();
236  SU->isHeightCurrent = false;
237  for (SDep &PredDep : SU->Preds) {
238  SUnit *PredSU = PredDep.getSUnit();
239  if (PredSU->isHeightCurrent)
240  WorkList.push_back(PredSU);
241  }
242  } while (!WorkList.empty());
243 }
244 
245 void SUnit::setDepthToAtLeast(unsigned NewDepth) {
246  if (NewDepth <= getDepth())
247  return;
248  setDepthDirty();
249  Depth = NewDepth;
250  isDepthCurrent = true;
251 }
252 
253 void SUnit::setHeightToAtLeast(unsigned NewHeight) {
254  if (NewHeight <= getHeight())
255  return;
256  setHeightDirty();
257  Height = NewHeight;
258  isHeightCurrent = true;
259 }
260 
/// Recomputes Depth: the length of the longest latency-weighted path from
/// any node with no predecessors down to this node (computed over Preds).
/// Iterative fixpoint: a node is finalized only once every predecessor's
/// depth is current; stale predecessors are pushed and resolved first.
void SUnit::ComputeDepth() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    // Peek (don't pop) so the node is revisited after its preds resolve.
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxPredDepth = 0;
    for (const SDep &PredDep : Cur->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isDepthCurrent)
        MaxPredDepth = std::max(MaxPredDepth,
                                PredSU->Depth + PredDep.getLatency());
      else {
        // Predecessor's depth is stale; compute it before finalizing Cur.
        Done = false;
        WorkList.push_back(PredSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxPredDepth != Cur->Depth) {
        // Depth changed, so anything cached downstream is stale too.
        Cur->setDepthDirty();
        Cur->Depth = MaxPredDepth;
      }
      Cur->isDepthCurrent = true;
    }
  } while (!WorkList.empty());
}
291 
/// Recomputes Height: the length of the longest latency-weighted path from
/// this node down to any node with no successors (computed over Succs).
/// Mirror image of ComputeDepth.
void SUnit::ComputeHeight() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    // Peek (don't pop) so the node is revisited after its succs resolve.
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxSuccHeight = 0;
    for (const SDep &SuccDep : Cur->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isHeightCurrent)
        MaxSuccHeight = std::max(MaxSuccHeight,
                                 SuccSU->Height + SuccDep.getLatency());
      else {
        // Successor's height is stale; compute it before finalizing Cur.
        Done = false;
        WorkList.push_back(SuccSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxSuccHeight != Cur->Height) {
        // Height changed, so anything cached upstream is stale too.
        Cur->setHeightDirty();
        Cur->Height = MaxSuccHeight;
      }
      Cur->isHeightCurrent = true;
    }
  } while (!WorkList.empty());
}
322 
324  if (NumPreds < 2)
325  return;
326 
327  SUnit::pred_iterator BestI = Preds.begin();
328  unsigned MaxDepth = BestI->getSUnit()->getDepth();
329  for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
330  ++I) {
331  if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth)
332  BestI = I;
333  }
334  if (BestI != Preds.begin())
335  std::swap(*Preds.begin(), *BestI);
336 }
337 
338 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
341  const SUnit *Entry, const SUnit *Exit) const {
342  if (this == Entry)
343  OS << "EntrySU";
344  else if (this == Exit)
345  OS << "ExitSU";
346  else
347  OS << "SU(" << NodeNum << ")";
348  return OS;
349 }
350 
353  return print(OS, &G->EntrySU, &G->ExitSU);
354 }
355 
357 void SUnit::dump(const ScheduleDAG *G) const {
358  print(dbgs(), G);
359  dbgs() << ": ";
360  G->dumpNode(this);
361 }
362 
364  dump(G);
365 
366  dbgs() << " # preds left : " << NumPredsLeft << "\n";
367  dbgs() << " # succs left : " << NumSuccsLeft << "\n";
368  if (WeakPredsLeft)
369  dbgs() << " # weak preds left : " << WeakPredsLeft << "\n";
370  if (WeakSuccsLeft)
371  dbgs() << " # weak succs left : " << WeakSuccsLeft << "\n";
372  dbgs() << " # rdefs left : " << NumRegDefsLeft << "\n";
373  dbgs() << " Latency : " << Latency << "\n";
374  dbgs() << " Depth : " << getDepth() << "\n";
375  dbgs() << " Height : " << getHeight() << "\n";
376 
377  if (Preds.size() != 0) {
378  dbgs() << " Predecessors:\n";
379  for (const SDep &Dep : Preds) {
380  dbgs() << " ";
381  Dep.getSUnit()->print(dbgs(), G); dbgs() << ": ";
382  Dep.print(dbgs(), G->TRI); dbgs() << '\n';
383  }
384  }
385  if (Succs.size() != 0) {
386  dbgs() << " Successors:\n";
387  for (const SDep &Dep : Succs) {
388  dbgs() << " ";
389  Dep.getSUnit()->print(dbgs(), G); dbgs() << ": ";
390  Dep.print(dbgs(), G->TRI); dbgs() << '\n';
391  }
392  }
393 }
394 #endif
395 
396 #ifndef NDEBUG
397 unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
398  bool AnyNotSched = false;
399  unsigned DeadNodes = 0;
400  for (const SUnit &SUnit : SUnits) {
401  if (!SUnit.isScheduled) {
402  if (SUnit.NumPreds == 0 && SUnit.NumSuccs == 0) {
403  ++DeadNodes;
404  continue;
405  }
406  if (!AnyNotSched)
407  dbgs() << "*** Scheduling failed! ***\n";
408  SUnit.dump(this);
409  dbgs() << "has not been scheduled!\n";
410  AnyNotSched = true;
411  }
412  if (SUnit.isScheduled &&
413  (isBottomUp ? SUnit.getHeight() : SUnit.getDepth()) >
414  unsigned(std::numeric_limits<int>::max())) {
415  if (!AnyNotSched)
416  dbgs() << "*** Scheduling failed! ***\n";
417  SUnit.dump(this);
418  dbgs() << "has an unexpected "
419  << (isBottomUp ? "Height" : "Depth") << " value!\n";
420  AnyNotSched = true;
421  }
422  if (isBottomUp) {
423  if (SUnit.NumSuccsLeft != 0) {
424  if (!AnyNotSched)
425  dbgs() << "*** Scheduling failed! ***\n";
426  SUnit.dump(this);
427  dbgs() << "has successors left!\n";
428  AnyNotSched = true;
429  }
430  } else {
431  if (SUnit.NumPredsLeft != 0) {
432  if (!AnyNotSched)
433  dbgs() << "*** Scheduling failed! ***\n";
434  SUnit.dump(this);
435  dbgs() << "has predecessors left!\n";
436  AnyNotSched = true;
437  }
438  }
439  }
440  assert(!AnyNotSched);
441  return SUnits.size() - DeadNodes;
442 }
443 #endif
444 
446  // The idea of the algorithm is taken from
447  // "Online algorithms for managing the topological order of
448  // a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
449  // This is the MNR algorithm, which was first introduced by
450  // A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
451  // "Maintaining a topological order under edge insertions".
452  //
453  // Short description of the algorithm:
454  //
455  // Topological ordering, ord, of a DAG maps each node to a topological
456  // index so that for all edges X->Y it is the case that ord(X) < ord(Y).
457  //
458  // This means that if there is a path from the node X to the node Z,
459  // then ord(X) < ord(Z).
460  //
461  // This property can be used to check for reachability of nodes:
462  // if Z is reachable from X, then an insertion of the edge Z->X would
463  // create a cycle.
464  //
465  // The algorithm first computes a topological ordering for the DAG by
466  // initializing the Index2Node and Node2Index arrays and then tries to keep
467  // the ordering up-to-date after edge insertions by reordering the DAG.
468  //
469  // On insertion of the edge X->Y, the algorithm first marks by calling DFS
470  // the nodes reachable from Y, and then shifts them using Shift to lie
471  // immediately after X in Index2Node.
472  unsigned DAGSize = SUnits.size();
473  std::vector<SUnit*> WorkList;
474  WorkList.reserve(DAGSize);
475 
476  Index2Node.resize(DAGSize);
477  Node2Index.resize(DAGSize);
478 
479  // Initialize the data structures.
480  if (ExitSU)
481  WorkList.push_back(ExitSU);
482  for (SUnit &SU : SUnits) {
483  int NodeNum = SU.NodeNum;
484  unsigned Degree = SU.Succs.size();
485  // Temporarily use the Node2Index array as scratch space for degree counts.
486  Node2Index[NodeNum] = Degree;
487 
488  // Is it a node without dependencies?
489  if (Degree == 0) {
490  assert(SU.Succs.empty() && "SUnit should have no successors");
491  // Collect leaf nodes.
492  WorkList.push_back(&SU);
493  }
494  }
495 
496  int Id = DAGSize;
497  while (!WorkList.empty()) {
498  SUnit *SU = WorkList.back();
499  WorkList.pop_back();
500  if (SU->NodeNum < DAGSize)
501  Allocate(SU->NodeNum, --Id);
502  for (const SDep &PredDep : SU->Preds) {
503  SUnit *SU = PredDep.getSUnit();
504  if (SU->NodeNum < DAGSize && !--Node2Index[SU->NodeNum])
505  // If all dependencies of the node are processed already,
506  // then the node can be computed now.
507  WorkList.push_back(SU);
508  }
509  }
510 
511  Visited.resize(DAGSize);
512 
513 #ifndef NDEBUG
514  // Check correctness of the ordering
515  for (SUnit &SU : SUnits) {
516  for (const SDep &PD : SU.Preds) {
517  assert(Node2Index[SU.NodeNum] > Node2Index[PD.getSUnit()->NodeNum] &&
518  "Wrong topological sorting");
519  }
520  }
521 #endif
522 }
523 
525  int UpperBound, LowerBound;
526  LowerBound = Node2Index[Y->NodeNum];
527  UpperBound = Node2Index[X->NodeNum];
528  bool HasLoop = false;
529  // Is Ord(X) < Ord(Y) ?
530  if (LowerBound < UpperBound) {
531  // Update the topological order.
532  Visited.reset();
533  DFS(Y, UpperBound, HasLoop);
534  assert(!HasLoop && "Inserted edge creates a loop!");
535  // Recompute topological indexes.
536  Shift(Visited, LowerBound, UpperBound);
537  }
538 }
539 
541  // InitDAGTopologicalSorting();
542 }
543 
544 void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
545  bool &HasLoop) {
546  std::vector<const SUnit*> WorkList;
547  WorkList.reserve(SUnits.size());
548 
549  WorkList.push_back(SU);
550  do {
551  SU = WorkList.back();
552  WorkList.pop_back();
553  Visited.set(SU->NodeNum);
554  for (const SDep &SuccDep
555  : make_range(SU->Succs.rbegin(), SU->Succs.rend())) {
556  unsigned s = SuccDep.getSUnit()->NodeNum;
557  // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
558  if (s >= Node2Index.size())
559  continue;
560  if (Node2Index[s] == UpperBound) {
561  HasLoop = true;
562  return;
563  }
564  // Visit successors if not already and in affected region.
565  if (!Visited.test(s) && Node2Index[s] < UpperBound) {
566  WorkList.push_back(SuccDep.getSUnit());
567  }
568  }
569  } while (!WorkList.empty());
570 }
571 
/// Returns the node numbers that lie both in the successor subtree of
/// StartSU and in the predecessor subtree of TargetSU (exclusive of the
/// endpoints). Sets Success=false if TargetSU is not reachable from
/// StartSU in the current topological order.
std::vector<int> ScheduleDAGTopologicalSort::GetSubGraph(const SUnit &StartSU,
                                                         const SUnit &TargetSU,
                                                         bool &Success) {
  std::vector<const SUnit*> WorkList;
  int LowerBound = Node2Index[StartSU.NodeNum];
  int UpperBound = Node2Index[TargetSU.NodeNum];
  bool Found = false;
  BitVector VisitedBack;
  std::vector<int> Nodes;

  // If Start already orders after Target, no forward path can exist.
  if (LowerBound > UpperBound) {
    Success = false;
    return Nodes;
  }

  WorkList.reserve(SUnits.size());
  Visited.reset();

  // Pass 1 (forward): starting from StartSU, visit all successors up
  // to UpperBound, marking them in Visited.
  WorkList.push_back(&StartSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      const SUnit *Succ = SU->Succs[I].getSUnit();
      unsigned s = Succ->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (Succ->isBoundaryNode())
        continue;
      if (Node2Index[s] == UpperBound) {
        // Reached TargetSU itself; don't descend past it.
        Found = true;
        continue;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        Visited.set(s);
        WorkList.push_back(Succ);
      }
    }
  } while (!WorkList.empty());

  if (!Found) {
    Success = false;
    return Nodes;
  }

  WorkList.clear();
  VisitedBack.resize(SUnits.size());
  Found = false;

  // Pass 2 (backward): starting from TargetSU, visit all predecessors up
  // to LowerBound. SUs that are visited by the two
  // passes are added to Nodes.
  WorkList.push_back(&TargetSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Preds.size()-1; I >= 0; --I) {
      const SUnit *Pred = SU->Preds[I].getSUnit();
      unsigned s = Pred->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. EntrySU).
      if (Pred->isBoundaryNode())
        continue;
      if (Node2Index[s] == LowerBound) {
        // Reached StartSU itself; confirms the backward connection.
        Found = true;
        continue;
      }
      // Only keep nodes also marked in the forward pass.
      if (!VisitedBack.test(s) && Visited.test(s)) {
        VisitedBack.set(s);
        WorkList.push_back(Pred);
        Nodes.push_back(s);
      }
    }
  } while (!WorkList.empty());

  assert(Found && "Error in SUnit Graph!");
  Success = true;
  return Nodes;
}
652 
/// Renumbers the window [LowerBound, UpperBound] of the topological order:
/// nodes marked in Visited are collected (and unmarked), unmarked nodes are
/// slid down to fill the gaps, and the collected nodes are reassigned the
/// indexes at the top of the window, preserving relative order of each group.
void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
                                       int UpperBound) {
  std::vector<int> L;   // Marked nodes, in ascending original index order.
  int shift = 0;        // Count of marked nodes passed so far.
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      // Slide this unmarked node down past the marked ones skipped so far.
      Allocate(w, i - shift);
    }
  }

  // Place the marked nodes in the freed indexes just past the window's
  // compacted unmarked block (i is UpperBound+1 here, so the first goes to
  // UpperBound-shift+1).
  for (unsigned LI : L) {
    Allocate(LI, i - shift);
    i = i + 1;
  }
}
677 
679  // Is SU reachable from TargetSU via successor edges?
680  if (IsReachable(SU, TargetSU))
681  return true;
682  for (const SDep &PredDep : TargetSU->Preds)
683  if (PredDep.isAssignedRegDep() &&
684  IsReachable(SU, PredDep.getSUnit()))
685  return true;
686  return false;
687 }
688 
690  const SUnit *TargetSU) {
691  // If insertion of the edge SU->TargetSU would create a cycle
692  // then there is a path from TargetSU to SU.
693  int UpperBound, LowerBound;
694  LowerBound = Node2Index[TargetSU->NodeNum];
695  UpperBound = Node2Index[SU->NodeNum];
696  bool HasLoop = false;
697  // Is Ord(TargetSU) < Ord(SU) ?
698  if (LowerBound < UpperBound) {
699  Visited.reset();
700  // There may be a path from TargetSU to SU. Check for it.
701  DFS(TargetSU, UpperBound, HasLoop);
702  }
703  return HasLoop;
704 }
705 
706 void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
707  Node2Index[n] = index;
708  Index2Node[index] = n;
709 }
710 
712 ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
713  : SUnits(sunits), ExitSU(exitsu) {}
714 
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:372
BitVector & set()
Definition: BitVector.h:398
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned NumPreds
of SDep::Data preds.
Definition: ScheduleDAG.h:271
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds...
Definition: Compiler.h:449
void dump(const ScheduleDAG *G) const
unsigned getDepth() const
Returns the depth of this node, which is the length of the maximum path up to any node which has no p...
Definition: ScheduleDAG.h:403
This provides a very simple, boring adaptor for a begin and end iterator into a range type...
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
bool test(unsigned Idx) const
Definition: BitVector.h:502
void setSUnit(SUnit *SU)
Definition: ScheduleDAG.h:493
void removePred(const SDep &D)
Removes the specified edge as a pred of the current node if it exists.
bool isAssignedRegDep() const
Tests if this is a Data dependence that is associated with a register.
Definition: ScheduleDAG.h:212
SmallVector< SDep, 4 > Preds
All sunit predecessors.
Definition: ScheduleDAG.h:261
bool isScheduled
True once scheduled.
Definition: ScheduleDAG.h:289
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
raw_ostream & print(raw_ostream &O, const TargetRegisterInfo *TRI=nullptr) const
Definition: ScheduleDAG.cpp:71
SmallVectorImpl< SDep >::iterator pred_iterator
Definition: ScheduleDAG.h:264
unsigned NumSuccs
of SDep::Data sucss.
Definition: ScheduleDAG.h:272
unsigned getLatency() const
Returns the latency value for this edge, which roughly means the minimum number of cycles that must e...
Definition: ScheduleDAG.h:143
unsigned NumSuccsLeft
of succs not scheduled.
Definition: ScheduleDAG.h:274
const HexagonInstrInfo * TII
Regular data dependence (aka true-dependence).
Definition: ScheduleDAG.h:54
void InitDAGTopologicalSorting()
Creates the initial topological ordering from the DAG to be scheduled.
std::vector< int > GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU, bool &Success)
Returns an array of SUs that are both in the successor subtree of StartSU and in the predecessor subt...
static cl::opt< bool > StressSchedOpt("stress-sched", cl::Hidden, cl::init(false), cl::desc("Stress test instruction scheduling"))
unsigned WeakSuccsLeft
of weak succs not scheduled.
Definition: ScheduleDAG.h:276
unsigned NumPredsLeft
of preds not scheduled.
Definition: ScheduleDAG.h:273
SUnit * getSUnit() const
Definition: ScheduleDAG.h:490
Printable PrintReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubRegIdx=0)
Prints virtual and physical registers with or without a TRI instance.
static int getLatency(LLVMDisasmContext *DC, const MCInst &Inst)
Gets latency information for Inst, based on DC information.
bool isBoundaryNode() const
Boundary nodes are placeholders for the boundary of the scheduling region.
Definition: ScheduleDAG.h:349
void setDepthToAtLeast(unsigned NewDepth)
If NewDepth is greater than this node&#39;s depth value, sets it to be the new depth value.
Scheduling dependency.
Definition: ScheduleDAG.h:50
void setHeightToAtLeast(unsigned NewHeight)
If NewDepth is greater than this node&#39;s depth value, set it to be the new height value.
#define P(N)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:404
void setDepthDirty()
Sets a flag in this node to indicate that its stored Depth value will require recomputation the next ...
bool WillCreateCycle(SUnit *TargetSU, SUnit *SU)
Returns true if addPred(TargetSU, SU) creates a cycle.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
unsigned const MachineRegisterInfo * MRI
void clearDAG()
Clears the DAG state (between regions).
Definition: ScheduleDAG.cpp:59
raw_ostream & print(raw_ostream &O, const SUnit *N=nullptr, const SUnit *X=nullptr) const
BitVector & reset()
Definition: BitVector.h:439
void RemovePred(SUnit *M, SUnit *N)
Updates the topological ordering to accommodate an an edge to be removed from the specified node N fr...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
const unsigned MaxDepth
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
#define E
Definition: LargeTest.cpp:27
auto find(R &&Range, const T &Val) -> decltype(std::begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:839
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
const DataFlowGraph & G
Definition: RDFGraph.cpp:206
const size_t N
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:385
Represents one node in the SelectionDAG.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:923
bool IsReachable(const SUnit *SU, const SUnit *TargetSU)
Checks if SU is reachable from TargetSU.
void biasCriticalPath()
Orders this node&#39;s predecessor edges such that the critical path edge occurs first.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
ScheduleDAG(MachineFunction &mf)
Definition: ScheduleDAG.cpp:48
typename SuperClass::iterator iterator
Definition: SmallVector.h:328
static void DFS(BasicBlock *Root, SetVector< BasicBlock *> &Set)
void setLatency(unsigned Lat)
Sets the latency for this edge.
Definition: ScheduleDAG.h:148
#define Success
SUnit EntrySU
Special node for the region entry.
Definition: ScheduleDAG.h:573
unsigned VerifyScheduledDAG(bool isBottomUp)
Verifies that all SUnits were scheduled and that their state is consistent.
unsigned getHeight() const
Returns the height of this node, which is the length of the maximum path down to any node which has n...
Definition: ScheduleDAG.h:411
SUnit ExitSU
Special node for the region exit.
Definition: ScheduleDAG.h:574
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:120
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:569
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:61
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
#define I(x, y, z)
Definition: MD5.cpp:58
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:568
Kind getKind() const
Returns an enum value representing the kind of the dependence.
Definition: ScheduleDAG.h:496
unsigned NodeNum
Entry # of node in the node vector.
Definition: ScheduleDAG.h:269
void setHeightDirty()
Sets a flag in this node to indicate that its stored Height value will require recomputation the next...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
virtual void dumpNode(const SUnit *SU) const =0
bool addPred(const SDep &D, bool Required=true)
Adds the specified edge as a pred of the current node if not already.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SmallVector< SDep, 4 > Succs
All sunit successors.
Definition: ScheduleDAG.h:262
bool isWeak() const
Tests if this a weak dependence.
Definition: ScheduleDAG.h:195
This class implements an extremely fast bulk output stream that can only output to a stream...
Definition: raw_ostream.h:44
void AddPred(SUnit *Y, SUnit *X)
Updates the topological ordering to accommodate an edge to be added from SUnit X to SUnit Y...
virtual ~ScheduleDAG()
void dumpAll(const ScheduleDAG *G) const
std::vector< SUnit > SUnits
The scheduling units.
Definition: ScheduleDAG.h:572
ScheduleDAGTopologicalSort(std::vector< SUnit > &SUnits, SUnit *ExitSU)
#define D
Definition: LargeTest.cpp:26
Scheduling unit. This is a node in the scheduling DAG.
Definition: ScheduleDAG.h:247