//===- ScheduleDAG.cpp - Implement the ScheduleDAG class ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Implements the ScheduleDAG class, which is a base class used by
/// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(NumNewPredsAdded, "Number of times a single predecessor was added");
STATISTIC(NumTopoInits,
          "Number of times the topological order has been recomputed");

#ifndef NDEBUG
static cl::opt<bool> StressSchedOpt(
    "stress-sched", cl::Hidden, cl::init(false),
    cl::desc("Stress test instruction scheduling"));
#endif

void SchedulingPriorityQueue::anchor() {}

ScheduleDAG::ScheduleDAG(MachineFunction &mf)
    : TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
      TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
      MRI(mf.getRegInfo()) {
#ifndef NDEBUG
  StressSched = StressSchedOpt;
#endif
}

ScheduleDAG::~ScheduleDAG() = default;

void ScheduleDAG::clearDAG() {
  SUnits.clear();
  EntrySU = SUnit();
  ExitSU = SUnit();
}

const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
  if (!Node || !Node->isMachineOpcode()) return nullptr;
  return &TII->get(Node->getMachineOpcode());
}

LLVM_DUMP_METHOD void SDep::dump(const TargetRegisterInfo *TRI) const {
  switch (getKind()) {
  case Data:   dbgs() << "Data"; break;
  case Anti:   dbgs() << "Anti"; break;
  case Output: dbgs() << "Out "; break;
  case Order:  dbgs() << "Ord "; break;
  }

  switch (getKind()) {
  case Data:
    dbgs() << " Latency=" << getLatency();
    if (TRI && isAssignedRegDep())
      dbgs() << " Reg=" << printReg(getReg(), TRI);
    break;
  case Anti:
  case Output:
    dbgs() << " Latency=" << getLatency();
    break;
  case Order:
    dbgs() << " Latency=" << getLatency();
    switch(Contents.OrdKind) {
    case Barrier:      dbgs() << " Barrier"; break;
    case MayAliasMem:
    case MustAliasMem: dbgs() << " Memory"; break;
    case Artificial:   dbgs() << " Artificial"; break;
    case Weak:         dbgs() << " Weak"; break;
    case Cluster:      dbgs() << " Cluster"; break;
    }
    break;
  }
}

bool SUnit::addPred(const SDep &D, bool Required) {
  // If this node already has this dependence, don't add a redundant one.
  for (SDep &PredDep : Preds) {
    // Zero-latency weak edges may be added purely for heuristic ordering. Don't
    // add them if another kind of edge already exists.
    if (!Required && PredDep.getSUnit() == D.getSUnit())
      return false;
    if (PredDep.overlaps(D)) {
      // Extend the latency if needed. Equivalent to
      // removePred(PredDep) + addPred(D).
      if (PredDep.getLatency() < D.getLatency()) {
        SUnit *PredSU = PredDep.getSUnit();
        // Find the corresponding successor in N.
        SDep ForwardD = PredDep;
        ForwardD.setSUnit(this);
        for (SDep &SuccDep : PredSU->Succs) {
          if (SuccDep == ForwardD) {
            SuccDep.setLatency(D.getLatency());
            break;
          }
        }
        PredDep.setLatency(D.getLatency());
      }
      return false;
    }
  }
  // Now add a corresponding succ to N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  // Update the bookkeeping.
  if (D.getKind() == SDep::Data) {
    assert(NumPreds < std::numeric_limits<unsigned>::max() &&
           "NumPreds will overflow!");
    assert(N->NumSuccs < std::numeric_limits<unsigned>::max() &&
           "NumSuccs will overflow!");
    ++NumPreds;
    ++N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak()) {
      ++WeakPredsLeft;
    } else {
      assert(NumPredsLeft < std::numeric_limits<unsigned>::max() &&
             "NumPredsLeft will overflow!");
      ++NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak()) {
      ++N->WeakSuccsLeft;
    } else {
      assert(N->NumSuccsLeft < std::numeric_limits<unsigned>::max() &&
             "NumSuccsLeft will overflow!");
      ++N->NumSuccsLeft;
    }
  }
  Preds.push_back(D);
  N->Succs.push_back(P);
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
  return true;
}

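// Usage sketch: a scheduler that wants node B to wait on node A records the
// edge on B. The SUnit names A and B and the latency are hypothetical.
//
//   SDep Dep(&A, SDep::Data, /*Reg=*/0);
//   Dep.setLatency(2);
//   if (B.addPred(Dep)) {
//     // A new edge was recorded; A gained B as a successor.
//   } else {
//     // An overlapping edge already existed; at most its latency was raised.
//   }
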
void SUnit::removePred(const SDep &D) {
  // Find the matching predecessor.
  SmallVectorImpl<SDep>::iterator I = llvm::find(Preds, D);
  if (I == Preds.end())
    return;
  // Find the corresponding successor in N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  SmallVectorImpl<SDep>::iterator Succ = llvm::find(N->Succs, P);
  assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
  // Update the bookkeeping.
  if (P.getKind() == SDep::Data) {
    assert(NumPreds > 0 && "NumPreds will underflow!");
    assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
    --NumPreds;
    --N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak()) {
      assert(WeakPredsLeft > 0 && "WeakPredsLeft will underflow!");
      --WeakPredsLeft;
    } else {
      assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
      --NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak()) {
      assert(N->WeakSuccsLeft > 0 && "WeakSuccsLeft will underflow!");
      --N->WeakSuccsLeft;
    } else {
      assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
      --N->NumSuccsLeft;
    }
  }
  N->Succs.erase(Succ);
  Preds.erase(I);
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
}

void SUnit::setDepthDirty() {
  if (!isDepthCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isDepthCurrent = false;
    for (SDep &SuccDep : SU->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isDepthCurrent)
        WorkList.push_back(SuccSU);
    }
  } while (!WorkList.empty());
}

void SUnit::setHeightDirty() {
  if (!isHeightCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isHeightCurrent = false;
    for (SDep &PredDep : SU->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isHeightCurrent)
        WorkList.push_back(PredSU);
    }
  } while (!WorkList.empty());
}

void SUnit::setDepthToAtLeast(unsigned NewDepth) {
  if (NewDepth <= getDepth())
    return;
  setDepthDirty();
  Depth = NewDepth;
  isDepthCurrent = true;
}

void SUnit::setHeightToAtLeast(unsigned NewHeight) {
  if (NewHeight <= getHeight())
    return;
  setHeightDirty();
  Height = NewHeight;
  isHeightCurrent = true;
}

/// Calculates the maximal latency path from any entry node to this node.
void SUnit::ComputeDepth() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxPredDepth = 0;
    for (const SDep &PredDep : Cur->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isDepthCurrent)
        MaxPredDepth = std::max(MaxPredDepth,
                                PredSU->Depth + PredDep.getLatency());
      else {
        Done = false;
        WorkList.push_back(PredSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxPredDepth != Cur->Depth) {
        Cur->setDepthDirty();
        Cur->Depth = MaxPredDepth;
      }
      Cur->isDepthCurrent = true;
    }
  } while (!WorkList.empty());
}

/// Calculates the maximal latency path from this node to any exit node.
void SUnit::ComputeHeight() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxSuccHeight = 0;
    for (const SDep &SuccDep : Cur->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isHeightCurrent)
        MaxSuccHeight = std::max(MaxSuccHeight,
                                 SuccSU->Height + SuccDep.getLatency());
      else {
        Done = false;
        WorkList.push_back(SuccSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxSuccHeight != Cur->Height) {
        Cur->setHeightDirty();
        Cur->Height = MaxSuccHeight;
      }
      Cur->isHeightCurrent = true;
    }
  } while (!WorkList.empty());
}
void SUnit::biasCriticalPath() {
  if (NumPreds < 2)
    return;

  SUnit::pred_iterator BestI = Preds.begin();
  unsigned MaxDepth = BestI->getSUnit()->getDepth();
  for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
       ++I) {
    if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth) {
      MaxDepth = I->getSUnit()->getDepth();
      BestI = I;
    }
  }
  if (BestI != Preds.begin())
    std::swap(*Preds.begin(), *BestI);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SUnit::dumpAttributes() const {
  dbgs() << "  # preds left       : " << NumPredsLeft << "\n";
  dbgs() << "  # succs left       : " << NumSuccsLeft << "\n";
  if (WeakPredsLeft)
    dbgs() << "  # weak preds left  : " << WeakPredsLeft << "\n";
  if (WeakSuccsLeft)
    dbgs() << "  # weak succs left  : " << WeakSuccsLeft << "\n";
  dbgs() << "  # rdefs left       : " << NumRegDefsLeft << "\n";
  dbgs() << "  Latency            : " << Latency << "\n";
  dbgs() << "  Depth              : " << getDepth() << "\n";
  dbgs() << "  Height             : " << getHeight() << "\n";
}

LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeName(const SUnit &SU) const {
  if (&SU == &EntrySU)
    dbgs() << "EntrySU";
  else if (&SU == &ExitSU)
    dbgs() << "ExitSU";
  else
    dbgs() << "SU(" << SU.NodeNum << ")";
}

LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeAll(const SUnit &SU) const {
  dumpNode(SU);
  SU.dumpAttributes();
  if (SU.Preds.size() > 0) {
    dbgs() << "  Predecessors:\n";
    for (const SDep &Dep : SU.Preds) {
      dbgs() << "    ";
      dumpNodeName(*Dep.getSUnit());
      dbgs() << ": ";
      Dep.dump(TRI);
      dbgs() << '\n';
    }
  }
  if (SU.Succs.size() > 0) {
    dbgs() << "  Successors:\n";
    for (const SDep &Dep : SU.Succs) {
      dbgs() << "    ";
      dumpNodeName(*Dep.getSUnit());
      dbgs() << ": ";
      Dep.dump(TRI);
      dbgs() << '\n';
    }
  }
}
#endif

#ifndef NDEBUG
unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  for (const SUnit &SUnit : SUnits) {
    if (!SUnit.isScheduled) {
      if (SUnit.NumPreds == 0 && SUnit.NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SUnit);
      dbgs() << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnit.isScheduled &&
        (isBottomUp ? SUnit.getHeight() : SUnit.getDepth()) >
          unsigned(std::numeric_limits<int>::max())) {
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SUnit);
      dbgs() << "has an unexpected "
             << (isBottomUp ? "Height" : "Depth") << " value!\n";
      AnyNotSched = true;
    }
    if (isBottomUp) {
      if (SUnit.NumSuccsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SUnit);
        dbgs() << "has successors left!\n";
        AnyNotSched = true;
      }
    } else {
      if (SUnit.NumPredsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SUnit);
        dbgs() << "has predecessors left!\n";
        AnyNotSched = true;
      }
    }
  }
  assert(!AnyNotSched);
  return SUnits.size() - DeadNodes;
}
#endif

void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
  // The idea of the algorithm is taken from
  // "Online algorithms for managing the topological order of
  // a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
  // This is the MNR algorithm, which was first introduced by
  // A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
  // "Maintaining a topological order under edge insertions".
  //
  // Short description of the algorithm:
  //
  // A topological ordering, ord, of a DAG maps each node to a topological
  // index so that for all edges X->Y it is the case that ord(X) < ord(Y).
  //
  // This means that if there is a path from the node X to the node Z,
  // then ord(X) < ord(Z).
  //
  // This property can be used to check for reachability of nodes:
  // if Z is reachable from X, then an insertion of the edge Z->X would
  // create a cycle.
  //
  // The algorithm first computes a topological ordering for the DAG by
  // initializing the Index2Node and Node2Index arrays and then tries to keep
  // the ordering up-to-date after edge insertions by reordering the DAG.
  //
  // On insertion of the edge X->Y, the algorithm first marks the nodes
  // reachable from Y by calling DFS, and then shifts them using Shift to lie
  // immediately after X in Index2Node.

  // Cancel pending updates, mark as valid.
  Dirty = false;
  Updates.clear();

  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);

  // Initialize the data structures.
  if (ExitSU)
    WorkList.push_back(ExitSU);
  for (SUnit &SU : SUnits) {
    int NodeNum = SU.NodeNum;
    unsigned Degree = SU.Succs.size();
    // Temporarily use the Node2Index array as scratch space for degree counts.
    Node2Index[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU.Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(&SU);
    }
  }

  int Id = DAGSize;
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    if (SU->NodeNum < DAGSize)
      Allocate(SU->NodeNum, --Id);
    for (const SDep &PredDep : SU->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->NodeNum < DAGSize && !--Node2Index[PredSU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(PredSU);
    }
  }

  Visited.resize(DAGSize);
  NumTopoInits++;

#ifndef NDEBUG
  // Check correctness of the ordering.
  for (SUnit &SU : SUnits) {
    for (const SDep &PD : SU.Preds) {
      assert(Node2Index[SU.NodeNum] > Node2Index[PD.getSUnit()->NodeNum] &&
             "Wrong topological sorting");
    }
  }
#endif
}
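
// Worked example of the order maintenance, with hypothetical nodes: assume
// ord(A)=0, ord(B)=1, ord(C)=2. Adding the edge B->C needs no reordering,
// since ord(B) < ord(C) already holds. Adding C->B instead gives
// LowerBound = ord(B) = 1 and UpperBound = ord(C) = 2, so DFS marks the
// nodes reachable from B with an index below 2, and Shift renumbers them to
// sit after C: the order becomes A, C, B.
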
void ScheduleDAGTopologicalSort::FixOrder() {
  // Recompute the order from scratch after new nodes have been added.
  if (Dirty) {
    InitDAGTopologicalSorting();
    return;
  }

  // Otherwise, apply the queued updates one by one.
  for (auto &U : Updates)
    AddPred(U.first, U.second);
  Updates.clear();
}

void ScheduleDAGTopologicalSort::AddPredQueued(SUnit *Y, SUnit *X) {
  // Recomputing the order from scratch is likely more efficient than applying
  // a long queue of updates one by one. The current cut-off is arbitrarily
  // chosen.
  Dirty = Dirty || Updates.size() > 10;

  if (Dirty)
    return;

  Updates.emplace_back(Y, X);
}
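
// Usage sketch: callers that add edges in bursts queue them and let the next
// ordering query batch the work (Topo and the SUnits are hypothetical):
//
//   Topo.AddPredQueued(Y, X);            // record edge X->Y, maybe set Dirty
//   ...
//   if (Topo.IsReachable(SU, TargetSU))  // FixOrder() runs here first
//     ...
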
void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(Y) < Ord(X), i.e. does the new edge X->Y violate the order?
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }

  NumNewPredsAdded++;
}

void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
  // InitDAGTopologicalSorting();
}

void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
                                     bool &HasLoop) {
  std::vector<const SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  do {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (const SDep &SuccDep : llvm::reverse(SU->Succs)) {
      unsigned s = SuccDep.getSUnit()->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (s >= Node2Index.size())
        continue;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors that have not been visited yet and that lie in the
      // affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SuccDep.getSUnit());
      }
    }
  } while (!WorkList.empty());
}

std::vector<int> ScheduleDAGTopologicalSort::GetSubGraph(const SUnit &StartSU,
                                                         const SUnit &TargetSU,
                                                         bool &Success) {
  std::vector<const SUnit*> WorkList;
  int LowerBound = Node2Index[StartSU.NodeNum];
  int UpperBound = Node2Index[TargetSU.NodeNum];
  bool Found = false;
  BitVector VisitedBack;
  std::vector<int> Nodes;

  if (LowerBound > UpperBound) {
    Success = false;
    return Nodes;
  }

  WorkList.reserve(SUnits.size());
  Visited.reset();

  // Starting from StartSU, visit all successors up
  // to UpperBound.
  WorkList.push_back(&StartSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (const SDep &SD : llvm::reverse(SU->Succs)) {
      const SUnit *Succ = SD.getSUnit();
      unsigned s = Succ->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (Succ->isBoundaryNode())
        continue;
      if (Node2Index[s] == UpperBound) {
        Found = true;
        continue;
      }
      // Visit successors that have not been visited yet and that lie in the
      // affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        Visited.set(s);
        WorkList.push_back(Succ);
      }
    }
  } while (!WorkList.empty());

  if (!Found) {
    Success = false;
    return Nodes;
  }

  WorkList.clear();
  VisitedBack.resize(SUnits.size());
  Found = false;

  // Starting from TargetSU, visit all predecessors up
  // to LowerBound. SUs that are visited by the two
  // passes are added to Nodes.
  WorkList.push_back(&TargetSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (const SDep &SD : llvm::reverse(SU->Preds)) {
      const SUnit *Pred = SD.getSUnit();
      unsigned s = Pred->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. EntrySU).
      if (Pred->isBoundaryNode())
        continue;
      if (Node2Index[s] == LowerBound) {
        Found = true;
        continue;
      }
      if (!VisitedBack.test(s) && Visited.test(s)) {
        VisitedBack.set(s);
        WorkList.push_back(Pred);
        Nodes.push_back(s);
      }
    }
  } while (!WorkList.empty());

  assert(Found && "Error in SUnit Graph!");
  Success = true;
  return Nodes;
}
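
// Usage sketch (Topo, StartSU and TargetSU are hypothetical):
//
//   bool Success;
//   std::vector<int> Path = Topo.GetSubGraph(StartSU, TargetSU, Success);
//   // On Success, Path holds the NodeNums of the SUnits lying strictly
//   // between StartSU and TargetSU on paths connecting them.
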
void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
                                       int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      Allocate(w, i - shift);
    }
  }

  for (unsigned LI : L) {
    Allocate(LI, i - shift);
    i = i + 1;
  }
}
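
// Worked trace with hypothetical nodes: suppose indexes [2..5] hold
// [w2, w3, w4, w5] and the DFS visited {w2, w4}. Shift(Visited, 2, 5)
// compacts the unvisited nodes downward (w3 -> index 2, w5 -> index 3) and
// reappends the visited ones after them (w2 -> index 4, w4 -> index 5),
// preserving the relative order within each group.
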
bool ScheduleDAGTopologicalSort::WillCreateCycle(SUnit *TargetSU, SUnit *SU) {
  FixOrder();
  // Is SU reachable from TargetSU via successor edges?
  if (IsReachable(SU, TargetSU))
    return true;
  for (const SDep &PredDep : TargetSU->Preds)
    if (PredDep.isAssignedRegDep() &&
        IsReachable(SU, PredDep.getSUnit()))
      return true;
  return false;
}
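
// Usage sketch: guard a speculative edge insertion with a cycle check
// (Topo, SU and TargetSU are hypothetical):
//
//   if (!Topo.WillCreateCycle(TargetSU, SU)) {
//     TargetSU->addPred(SDep(SU, SDep::Artificial));
//     Topo.AddPredQueued(TargetSU, SU);
//   }
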
void ScheduleDAGTopologicalSort::AddSUnitWithoutPredecessors(const SUnit *SU) {
  assert(SU->NodeNum == Index2Node.size() &&
         "Node can only be added at the end");
  assert(SU->NumPreds == 0 && "Can only add SU's with no predecessors");
  Node2Index.push_back(Index2Node.size());
  Index2Node.push_back(SU->NodeNum);
  Visited.resize(Node2Index.size());
}

bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
                                             const SUnit *TargetSU) {
  assert(TargetSU != nullptr && "Invalid target SUnit");
  assert(SU != nullptr && "Invalid SUnit");
  FixOrder();
  // If insertion of the edge SU->TargetSU would create a cycle,
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU)?
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU. Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}

void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}

ScheduleDAGTopologicalSort::
ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
    : SUnits(sunits), ExitSU(exitsu) {}