LLVM  9.0.0svn
ScheduleDAG.cpp
Go to the documentation of this file.
1 //===- ScheduleDAG.cpp - Implement the ScheduleDAG class ------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file Implements the ScheduleDAG class, which is a base class used by
10 /// scheduling implementation classes.
11 //
12 //===----------------------------------------------------------------------===//
13 
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Config/llvm-config.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/Support/Debug.h"
29 #include <algorithm>
30 #include <cassert>
31 #include <iterator>
32 #include <limits>
33 #include <utility>
34 #include <vector>
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "pre-RA-sched"
39 
40 #ifndef NDEBUG
42  "stress-sched", cl::Hidden, cl::init(false),
43  cl::desc("Stress test instruction scheduling"));
44 #endif
45 
46 void SchedulingPriorityQueue::anchor() {}
47 
49  : TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
50  TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
51  MRI(mf.getRegInfo()) {
52 #ifndef NDEBUG
54 #endif
55 }
56 
57 ScheduleDAG::~ScheduleDAG() = default;
58 
60  SUnits.clear();
61  EntrySU = SUnit();
62  ExitSU = SUnit();
63 }
64 
65 const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
66  if (!Node || !Node->isMachineOpcode()) return nullptr;
67  return &TII->get(Node->getMachineOpcode());
68 }
69 
71  switch (getKind()) {
72  case Data: dbgs() << "Data"; break;
73  case Anti: dbgs() << "Anti"; break;
74  case Output: dbgs() << "Out "; break;
75  case Order: dbgs() << "Ord "; break;
76  }
77 
78  switch (getKind()) {
79  case Data:
80  dbgs() << " Latency=" << getLatency();
81  if (TRI && isAssignedRegDep())
82  dbgs() << " Reg=" << printReg(getReg(), TRI);
83  break;
84  case Anti:
85  case Output:
86  dbgs() << " Latency=" << getLatency();
87  break;
88  case Order:
89  dbgs() << " Latency=" << getLatency();
90  switch(Contents.OrdKind) {
91  case Barrier: dbgs() << " Barrier"; break;
92  case MayAliasMem:
93  case MustAliasMem: dbgs() << " Memory"; break;
94  case Artificial: dbgs() << " Artificial"; break;
95  case Weak: dbgs() << " Weak"; break;
96  case Cluster: dbgs() << " Cluster"; break;
97  }
98  break;
99  }
100 }
101 
102 bool SUnit::addPred(const SDep &D, bool Required) {
103  // If this node already has this dependence, don't add a redundant one.
104  for (SDep &PredDep : Preds) {
105  // Zero-latency weak edges may be added purely for heuristic ordering. Don't
106  // add them if another kind of edge already exists.
107  if (!Required && PredDep.getSUnit() == D.getSUnit())
108  return false;
109  if (PredDep.overlaps(D)) {
110  // Extend the latency if needed. Equivalent to
111  // removePred(PredDep) + addPred(D).
112  if (PredDep.getLatency() < D.getLatency()) {
113  SUnit *PredSU = PredDep.getSUnit();
114  // Find the corresponding successor in N.
115  SDep ForwardD = PredDep;
116  ForwardD.setSUnit(this);
117  for (SDep &SuccDep : PredSU->Succs) {
118  if (SuccDep == ForwardD) {
119  SuccDep.setLatency(D.getLatency());
120  break;
121  }
122  }
123  PredDep.setLatency(D.getLatency());
124  }
125  return false;
126  }
127  }
128  // Now add a corresponding succ to N.
129  SDep P = D;
130  P.setSUnit(this);
131  SUnit *N = D.getSUnit();
132  // Update the bookkeeping.
133  if (D.getKind() == SDep::Data) {
135  "NumPreds will overflow!");
137  "NumSuccs will overflow!");
138  ++NumPreds;
139  ++N->NumSuccs;
140  }
141  if (!N->isScheduled) {
142  if (D.isWeak()) {
143  ++WeakPredsLeft;
144  }
145  else {
146  assert(NumPredsLeft < std::numeric_limits<unsigned>::max() &&
147  "NumPredsLeft will overflow!");
148  ++NumPredsLeft;
149  }
150  }
151  if (!isScheduled) {
152  if (D.isWeak()) {
153  ++N->WeakSuccsLeft;
154  }
155  else {
157  "NumSuccsLeft will overflow!");
158  ++N->NumSuccsLeft;
159  }
160  }
161  Preds.push_back(D);
162  N->Succs.push_back(P);
163  if (P.getLatency() != 0) {
164  this->setDepthDirty();
165  N->setHeightDirty();
166  }
167  return true;
168 }
169 
170 void SUnit::removePred(const SDep &D) {
171  // Find the matching predecessor.
173  if (I == Preds.end())
174  return;
175  // Find the corresponding successor in N.
176  SDep P = D;
177  P.setSUnit(this);
178  SUnit *N = D.getSUnit();
180  assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
181  N->Succs.erase(Succ);
182  Preds.erase(I);
183  // Update the bookkeeping.
184  if (P.getKind() == SDep::Data) {
185  assert(NumPreds > 0 && "NumPreds will underflow!");
186  assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
187  --NumPreds;
188  --N->NumSuccs;
189  }
190  if (!N->isScheduled) {
191  if (D.isWeak())
192  --WeakPredsLeft;
193  else {
194  assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
195  --NumPredsLeft;
196  }
197  }
198  if (!isScheduled) {
199  if (D.isWeak())
200  --N->WeakSuccsLeft;
201  else {
202  assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
203  --N->NumSuccsLeft;
204  }
205  }
206  if (P.getLatency() != 0) {
207  this->setDepthDirty();
208  N->setHeightDirty();
209  }
210 }
211 
213  if (!isDepthCurrent) return;
214  SmallVector<SUnit*, 8> WorkList;
215  WorkList.push_back(this);
216  do {
217  SUnit *SU = WorkList.pop_back_val();
218  SU->isDepthCurrent = false;
219  for (SDep &SuccDep : SU->Succs) {
220  SUnit *SuccSU = SuccDep.getSUnit();
221  if (SuccSU->isDepthCurrent)
222  WorkList.push_back(SuccSU);
223  }
224  } while (!WorkList.empty());
225 }
226 
228  if (!isHeightCurrent) return;
229  SmallVector<SUnit*, 8> WorkList;
230  WorkList.push_back(this);
231  do {
232  SUnit *SU = WorkList.pop_back_val();
233  SU->isHeightCurrent = false;
234  for (SDep &PredDep : SU->Preds) {
235  SUnit *PredSU = PredDep.getSUnit();
236  if (PredSU->isHeightCurrent)
237  WorkList.push_back(PredSU);
238  }
239  } while (!WorkList.empty());
240 }
241 
242 void SUnit::setDepthToAtLeast(unsigned NewDepth) {
243  if (NewDepth <= getDepth())
244  return;
245  setDepthDirty();
246  Depth = NewDepth;
247  isDepthCurrent = true;
248 }
249 
250 void SUnit::setHeightToAtLeast(unsigned NewHeight) {
251  if (NewHeight <= getHeight())
252  return;
253  setHeightDirty();
254  Height = NewHeight;
255  isHeightCurrent = true;
256 }
257 
/// Recomputes this node's depth: the length (sum of edge latencies) of the
/// maximal path reaching this node through its predecessors.
///
/// Iterative post-order traversal: a node's depth is finalized only once
/// every predecessor's depth is current, avoiding recursion on deep DAGs.
void SUnit::ComputeDepth() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    // Peek rather than pop: Cur stays on the stack until all of its
    // predecessors have current depths.
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxPredDepth = 0;
    for (const SDep &PredDep : Cur->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isDepthCurrent)
        MaxPredDepth = std::max(MaxPredDepth,
                                PredSU->Depth + PredDep.getLatency());
      else {
        // Pred not computed yet; revisit Cur after the pred is done.
        Done = false;
        WorkList.push_back(PredSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxPredDepth != Cur->Depth) {
        // Depth changed: successors' cached depths are now stale.
        Cur->setDepthDirty();
        Cur->Depth = MaxPredDepth;
      }
      Cur->isDepthCurrent = true;
    }
  } while (!WorkList.empty());
}
288 
/// Recomputes this node's height: the length (sum of edge latencies) of the
/// maximal path leaving this node through its successors.
///
/// Mirror image of ComputeDepth: iterative post-order over successors so a
/// node's height is finalized only once every successor's height is current.
void SUnit::ComputeHeight() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    // Peek rather than pop: Cur stays on the stack until all of its
    // successors have current heights.
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxSuccHeight = 0;
    for (const SDep &SuccDep : Cur->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isHeightCurrent)
        MaxSuccHeight = std::max(MaxSuccHeight,
                                 SuccSU->Height + SuccDep.getLatency());
      else {
        // Succ not computed yet; revisit Cur after the succ is done.
        Done = false;
        WorkList.push_back(SuccSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxSuccHeight != Cur->Height) {
        // Height changed: predecessors' cached heights are now stale.
        Cur->setHeightDirty();
        Cur->Height = MaxSuccHeight;
      }
      Cur->isHeightCurrent = true;
    }
  } while (!WorkList.empty());
}
319 
321  if (NumPreds < 2)
322  return;
323 
324  SUnit::pred_iterator BestI = Preds.begin();
325  unsigned MaxDepth = BestI->getSUnit()->getDepth();
326  for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
327  ++I) {
328  if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth)
329  BestI = I;
330  }
331  if (BestI != Preds.begin())
332  std::swap(*Preds.begin(), *BestI);
333 }
334 
335 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
337  dbgs() << " # preds left : " << NumPredsLeft << "\n";
338  dbgs() << " # succs left : " << NumSuccsLeft << "\n";
339  if (WeakPredsLeft)
340  dbgs() << " # weak preds left : " << WeakPredsLeft << "\n";
341  if (WeakSuccsLeft)
342  dbgs() << " # weak succs left : " << WeakSuccsLeft << "\n";
343  dbgs() << " # rdefs left : " << NumRegDefsLeft << "\n";
344  dbgs() << " Latency : " << Latency << "\n";
345  dbgs() << " Depth : " << getDepth() << "\n";
346  dbgs() << " Height : " << getHeight() << "\n";
347 }
348 
350  if (&SU == &EntrySU)
351  dbgs() << "EntrySU";
352  else if (&SU == &ExitSU)
353  dbgs() << "ExitSU";
354  else
355  dbgs() << "SU(" << SU.NodeNum << ")";
356 }
357 
359  dumpNode(SU);
360  SU.dumpAttributes();
361  if (SU.Preds.size() > 0) {
362  dbgs() << " Predecessors:\n";
363  for (const SDep &Dep : SU.Preds) {
364  dbgs() << " ";
365  dumpNodeName(*Dep.getSUnit());
366  dbgs() << ": ";
367  Dep.dump(TRI);
368  dbgs() << '\n';
369  }
370  }
371  if (SU.Succs.size() > 0) {
372  dbgs() << " Successors:\n";
373  for (const SDep &Dep : SU.Succs) {
374  dbgs() << " ";
375  dumpNodeName(*Dep.getSUnit());
376  dbgs() << ": ";
377  Dep.dump(TRI);
378  dbgs() << '\n';
379  }
380  }
381 }
382 #endif
383 
384 #ifndef NDEBUG
385 unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
386  bool AnyNotSched = false;
387  unsigned DeadNodes = 0;
388  for (const SUnit &SUnit : SUnits) {
389  if (!SUnit.isScheduled) {
390  if (SUnit.NumPreds == 0 && SUnit.NumSuccs == 0) {
391  ++DeadNodes;
392  continue;
393  }
394  if (!AnyNotSched)
395  dbgs() << "*** Scheduling failed! ***\n";
396  dumpNode(SUnit);
397  dbgs() << "has not been scheduled!\n";
398  AnyNotSched = true;
399  }
400  if (SUnit.isScheduled &&
401  (isBottomUp ? SUnit.getHeight() : SUnit.getDepth()) >
402  unsigned(std::numeric_limits<int>::max())) {
403  if (!AnyNotSched)
404  dbgs() << "*** Scheduling failed! ***\n";
405  dumpNode(SUnit);
406  dbgs() << "has an unexpected "
407  << (isBottomUp ? "Height" : "Depth") << " value!\n";
408  AnyNotSched = true;
409  }
410  if (isBottomUp) {
411  if (SUnit.NumSuccsLeft != 0) {
412  if (!AnyNotSched)
413  dbgs() << "*** Scheduling failed! ***\n";
414  dumpNode(SUnit);
415  dbgs() << "has successors left!\n";
416  AnyNotSched = true;
417  }
418  } else {
419  if (SUnit.NumPredsLeft != 0) {
420  if (!AnyNotSched)
421  dbgs() << "*** Scheduling failed! ***\n";
422  dumpNode(SUnit);
423  dbgs() << "has predecessors left!\n";
424  AnyNotSched = true;
425  }
426  }
427  }
428  assert(!AnyNotSched);
429  return SUnits.size() - DeadNodes;
430 }
431 #endif
432 
434  // The idea of the algorithm is taken from
435  // "Online algorithms for managing the topological order of
436  // a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
437  // This is the MNR algorithm, which was first introduced by
438  // A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
439  // "Maintaining a topological order under edge insertions".
440  //
441  // Short description of the algorithm:
442  //
443  // Topological ordering, ord, of a DAG maps each node to a topological
444  // index so that for all edges X->Y it is the case that ord(X) < ord(Y).
445  //
446  // This means that if there is a path from the node X to the node Z,
447  // then ord(X) < ord(Z).
448  //
449  // This property can be used to check for reachability of nodes:
450  // if Z is reachable from X, then an insertion of the edge Z->X would
451  // create a cycle.
452  //
453  // The algorithm first computes a topological ordering for the DAG by
454  // initializing the Index2Node and Node2Index arrays and then tries to keep
455  // the ordering up-to-date after edge insertions by reordering the DAG.
456  //
457  // On insertion of the edge X->Y, the algorithm first marks by calling DFS
458  // the nodes reachable from Y, and then shifts them using Shift to lie
459  // immediately after X in Index2Node.
460  unsigned DAGSize = SUnits.size();
461  std::vector<SUnit*> WorkList;
462  WorkList.reserve(DAGSize);
463 
464  Index2Node.resize(DAGSize);
465  Node2Index.resize(DAGSize);
466 
467  // Initialize the data structures.
468  if (ExitSU)
469  WorkList.push_back(ExitSU);
470  for (SUnit &SU : SUnits) {
471  int NodeNum = SU.NodeNum;
472  unsigned Degree = SU.Succs.size();
473  // Temporarily use the Node2Index array as scratch space for degree counts.
474  Node2Index[NodeNum] = Degree;
475 
476  // Is it a node without dependencies?
477  if (Degree == 0) {
478  assert(SU.Succs.empty() && "SUnit should have no successors");
479  // Collect leaf nodes.
480  WorkList.push_back(&SU);
481  }
482  }
483 
484  int Id = DAGSize;
485  while (!WorkList.empty()) {
486  SUnit *SU = WorkList.back();
487  WorkList.pop_back();
488  if (SU->NodeNum < DAGSize)
489  Allocate(SU->NodeNum, --Id);
490  for (const SDep &PredDep : SU->Preds) {
491  SUnit *SU = PredDep.getSUnit();
492  if (SU->NodeNum < DAGSize && !--Node2Index[SU->NodeNum])
493  // If all dependencies of the node are processed already,
494  // then the node can be computed now.
495  WorkList.push_back(SU);
496  }
497  }
498 
499  Visited.resize(DAGSize);
500 
501 #ifndef NDEBUG
502  // Check correctness of the ordering
503  for (SUnit &SU : SUnits) {
504  for (const SDep &PD : SU.Preds) {
505  assert(Node2Index[SU.NodeNum] > Node2Index[PD.getSUnit()->NodeNum] &&
506  "Wrong topological sorting");
507  }
508  }
509 #endif
510 }
511 
513  int UpperBound, LowerBound;
514  LowerBound = Node2Index[Y->NodeNum];
515  UpperBound = Node2Index[X->NodeNum];
516  bool HasLoop = false;
517  // Is Ord(X) < Ord(Y) ?
518  if (LowerBound < UpperBound) {
519  // Update the topological order.
520  Visited.reset();
521  DFS(Y, UpperBound, HasLoop);
522  assert(!HasLoop && "Inserted edge creates a loop!");
523  // Recompute topological indexes.
524  Shift(Visited, LowerBound, UpperBound);
525  }
526 }
527 
529  // InitDAGTopologicalSorting();
530 }
531 
/// Marks, in Visited, every node reachable from \p SU via successor edges
/// whose topological index is strictly below \p UpperBound. Sets \p HasLoop
/// and aborts early if a node with index exactly UpperBound is reached,
/// i.e. the candidate edge would close a cycle.
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
                                     bool &HasLoop) {
  std::vector<const SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  do {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    // Push successors in reverse so they pop from the stack in forward order.
    for (const SDep &SuccDep
         : make_range(SU->Succs.rbegin(), SU->Succs.rend())) {
      unsigned s = SuccDep.getSUnit()->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (s >= Node2Index.size())
        continue;
      if (Node2Index[s] == UpperBound) {
        // Reached the edge's source: inserting the edge would form a cycle.
        HasLoop = true;
        return;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SuccDep.getSUnit());
      }
    }
  } while (!WorkList.empty());
}
559 
/// Returns the node numbers that lie both in the successor subtree of
/// \p StartSU and in the predecessor subtree of \p TargetSU (exclusive of
/// the endpoints themselves). \p Success is set to false when no path from
/// StartSU to TargetSU exists.
///
/// Two passes: a forward walk from StartSU marking Visited, then a backward
/// walk from TargetSU collecting nodes that both passes touched.
std::vector<int> ScheduleDAGTopologicalSort::GetSubGraph(const SUnit &StartSU,
                                                         const SUnit &TargetSU,
                                                         bool &Success) {
  std::vector<const SUnit*> WorkList;
  int LowerBound = Node2Index[StartSU.NodeNum];
  int UpperBound = Node2Index[TargetSU.NodeNum];
  bool Found = false;
  BitVector VisitedBack;
  std::vector<int> Nodes;

  // If Target precedes Start in the topological order, no forward path can
  // exist.
  if (LowerBound > UpperBound) {
    Success = false;
    return Nodes;
  }

  WorkList.reserve(SUnits.size());
  Visited.reset();

  // Starting from StartSU, visit all successors up
  // to UpperBound.
  WorkList.push_back(&StartSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      const SUnit *Succ = SU->Succs[I].getSUnit();
      unsigned s = Succ->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (Succ->isBoundaryNode())
        continue;
      if (Node2Index[s] == UpperBound) {
        // Reached TargetSU itself: a forward path exists.
        Found = true;
        continue;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        Visited.set(s);
        WorkList.push_back(Succ);
      }
    }
  } while (!WorkList.empty());

  if (!Found) {
    Success = false;
    return Nodes;
  }

  WorkList.clear();
  VisitedBack.resize(SUnits.size());
  Found = false;

  // Starting from TargetSU, visit all predecessors up
  // to LowerBound. SUs that are visited by the two
  // passes are added to Nodes.
  WorkList.push_back(&TargetSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Preds.size()-1; I >= 0; --I) {
      const SUnit *Pred = SU->Preds[I].getSUnit();
      unsigned s = Pred->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. EntrySU).
      if (Pred->isBoundaryNode())
        continue;
      if (Node2Index[s] == LowerBound) {
        // Reached StartSU itself: the backward walk reconnected.
        Found = true;
        continue;
      }
      // Only keep nodes the forward pass also reached (the intersection).
      if (!VisitedBack.test(s) && Visited.test(s)) {
        VisitedBack.set(s);
        WorkList.push_back(Pred);
        Nodes.push_back(s);
      }
    }
  } while (!WorkList.empty());

  assert(Found && "Error in SUnit Graph!");
  Success = true;
  return Nodes;
}
640 
641 void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
642  int UpperBound) {
643  std::vector<int> L;
644  int shift = 0;
645  int i;
646 
647  for (i = LowerBound; i <= UpperBound; ++i) {
648  // w is node at topological index i.
649  int w = Index2Node[i];
650  if (Visited.test(w)) {
651  // Unmark.
652  Visited.reset(w);
653  L.push_back(w);
654  shift = shift + 1;
655  } else {
656  Allocate(w, i - shift);
657  }
658  }
659 
660  for (unsigned LI : L) {
661  Allocate(LI, i - shift);
662  i = i + 1;
663  }
664 }
665 
667  // Is SU reachable from TargetSU via successor edges?
668  if (IsReachable(SU, TargetSU))
669  return true;
670  for (const SDep &PredDep : TargetSU->Preds)
671  if (PredDep.isAssignedRegDep() &&
672  IsReachable(SU, PredDep.getSUnit()))
673  return true;
674  return false;
675 }
676 
678  const SUnit *TargetSU) {
679  // If insertion of the edge SU->TargetSU would create a cycle
680  // then there is a path from TargetSU to SU.
681  int UpperBound, LowerBound;
682  LowerBound = Node2Index[TargetSU->NodeNum];
683  UpperBound = Node2Index[SU->NodeNum];
684  bool HasLoop = false;
685  // Is Ord(TargetSU) < Ord(SU) ?
686  if (LowerBound < UpperBound) {
687  Visited.reset();
688  // There may be a path from TargetSU to SU. Check for it.
689  DFS(TargetSU, UpperBound, HasLoop);
690  }
691  return HasLoop;
692 }
693 
/// Assigns topological index \p index to node \p n, keeping the forward
/// (Node2Index) and reverse (Index2Node) maps in sync.
void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}
698 
700 ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
701  : SUnits(sunits), ExitSU(exitsu) {}
702 
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition: BitVector.h:371
BitVector & set()
Definition: BitVector.h:397
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned NumPreds
of SDep::Data preds.
Definition: ScheduleDAG.h:266
This class represents lattice values for constants.
Definition: AllocatorList.h:23
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds...
Definition: Compiler.h:464
unsigned getDepth() const
Returns the depth of this node, which is the length of the maximum path up to any node which has no p...
Definition: ScheduleDAG.h:398
This provides a very simple, boring adaptor for a begin and end iterator into a range type...
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
virtual void dumpNode(const SUnit &SU) const =0
bool test(unsigned Idx) const
Definition: BitVector.h:501
void setSUnit(SUnit *SU)
Definition: ScheduleDAG.h:483
unsigned const TargetRegisterInfo * TRI
void removePred(const SDep &D)
Removes the specified edge as a pred of the current node if it exists.
bool isAssignedRegDep() const
Tests if this is a Data dependence that is associated with a register.
Definition: ScheduleDAG.h:211
SmallVector< SDep, 4 > Preds
All sunit predecessors.
Definition: ScheduleDAG.h:256
bool isScheduled
True once scheduled.
Definition: ScheduleDAG.h:284
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
SmallVectorImpl< SDep >::iterator pred_iterator
Definition: ScheduleDAG.h:259
unsigned NumSuccs
of SDep::Data sucss.
Definition: ScheduleDAG.h:267
unsigned getLatency() const
Returns the latency value for this edge, which roughly means the minimum number of cycles that must e...
Definition: ScheduleDAG.h:142
unsigned NumSuccsLeft
of succs not scheduled.
Definition: ScheduleDAG.h:269
const HexagonInstrInfo * TII
Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
Regular data dependence (aka true-dependence).
Definition: ScheduleDAG.h:53
void InitDAGTopologicalSorting()
Creates the initial topological ordering from the DAG to be scheduled.
std::vector< int > GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU, bool &Success)
Returns an array of SUs that are both in the successor subtree of StartSU and in the predecessor subt...
static cl::opt< bool > StressSchedOpt("stress-sched", cl::Hidden, cl::init(false), cl::desc("Stress test instruction scheduling"))
unsigned WeakSuccsLeft
of weak succs not scheduled.
Definition: ScheduleDAG.h:271
unsigned NumPredsLeft
of preds not scheduled.
Definition: ScheduleDAG.h:268
SUnit * getSUnit() const
Definition: ScheduleDAG.h:480
static int getLatency(LLVMDisasmContext *DC, const MCInst &Inst)
Gets latency information for Inst, based on DC information.
bool isBoundaryNode() const
Boundary nodes are placeholders for the boundary of the scheduling region.
Definition: ScheduleDAG.h:344
void dumpNodeAll(const SUnit &SU) const
void setDepthToAtLeast(unsigned NewDepth)
If NewDepth is greater than this node's depth value, sets it to be the new depth value.
Scheduling dependency.
Definition: ScheduleDAG.h:49
void setHeightToAtLeast(unsigned NewHeight)
If NewHeight is greater than this node's height value, sets it to be the new height value.
#define P(N)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:423
void setDepthDirty()
Sets a flag in this node to indicate that its stored Depth value will require recomputation the next ...
bool WillCreateCycle(SUnit *TargetSU, SUnit *SU)
Returns true if addPred(TargetSU, SU) creates a cycle.
unsigned const MachineRegisterInfo * MRI
void clearDAG()
Clears the DAG state (between regions).
Definition: ScheduleDAG.cpp:59
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
BitVector & reset()
Definition: BitVector.h:438
void RemovePred(SUnit *M, SUnit *N)
Updates the topological ordering to accommodate an edge to be removed from the specified node N fr
void dumpNodeName(const SUnit &SU) const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1206
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
void push_back(bool Val)
Definition: BitVector.h:506
void dump(const TargetRegisterInfo *TRI=nullptr) const
Definition: ScheduleDAG.cpp:70
const unsigned MaxDepth
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:841
static unsigned getDepth(const SmallVectorImpl< const MachineBasicBlock *> &Stack, const MachineBasicBlock *MBB)
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:374
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Represents one node in the SelectionDAG.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:940
bool IsReachable(const SUnit *SU, const SUnit *TargetSU)
Checks if SU is reachable from TargetSU.
void biasCriticalPath()
Orders this node&#39;s predecessor edges such that the critical path edge occurs first.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
ScheduleDAG(MachineFunction &mf)
Definition: ScheduleDAG.cpp:48
typename SuperClass::iterator iterator
Definition: SmallVector.h:319
static void DFS(BasicBlock *Root, SetVector< BasicBlock *> &Set)
void setLatency(unsigned Lat)
Sets the latency for this edge.
Definition: ScheduleDAG.h:147
#define Success
SUnit EntrySU
Special node for the region entry.
Definition: ScheduleDAG.h:563
unsigned VerifyScheduledDAG(bool isBottomUp)
Verifies that all SUnits were scheduled and that their state is consistent.
unsigned getHeight() const
Returns the height of this node, which is the length of the maximum path down to any node which has n...
Definition: ScheduleDAG.h:406
SUnit ExitSU
Special node for the region exit.
Definition: ScheduleDAG.h:564
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:559
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:44
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:558
Kind getKind() const
Returns an enum value representing the kind of the dependence.
Definition: ScheduleDAG.h:486
unsigned NodeNum
Entry # of node in the node vector.
Definition: ScheduleDAG.h:264
void setHeightDirty()
Sets a flag in this node to indicate that its stored Height value will require recomputation the next...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool addPred(const SDep &D, bool Required=true)
Adds the specified edge as a pred of the current node if not already.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SmallVector< SDep, 4 > Succs
All sunit successors.
Definition: ScheduleDAG.h:257
bool isWeak() const
Tests if this a weak dependence.
Definition: ScheduleDAG.h:194
void AddPred(SUnit *Y, SUnit *X)
Updates the topological ordering to accommodate an edge to be added from SUnit X to SUnit Y...
virtual ~ScheduleDAG()
std::vector< SUnit > SUnits
The scheduling units.
Definition: ScheduleDAG.h:562
void dumpAttributes() const
ScheduleDAGTopologicalSort(std::vector< SUnit > &SUnits, SUnit *ExitSU)
Scheduling unit. This is a node in the scheduling DAG.
Definition: ScheduleDAG.h:242