LLVM 19.0.0git
ScheduleDAGFast.cpp
Go to the documentation of this file.
1//===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements a fast scheduler.
10//
11//===----------------------------------------------------------------------===//
12
#include "InstrEmitter.h"
#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
26using namespace llvm;
27
28#define DEBUG_TYPE "pre-RA-sched"
29
30STATISTIC(NumUnfolds, "Number of nodes unfolded");
31STATISTIC(NumDups, "Number of duplicated nodes");
32STATISTIC(NumPRCopies, "Number of physical copies");
33
35 fastDAGScheduler("fast", "Fast suboptimal list scheduling",
38 linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
40
41
42namespace {
43 /// FastPriorityQueue - A degenerate priority queue that considers
44 /// all nodes to have the same priority.
45 ///
46 struct FastPriorityQueue {
48
49 bool empty() const { return Queue.empty(); }
50
51 void push(SUnit *U) {
52 Queue.push_back(U);
53 }
54
55 SUnit *pop() {
56 if (empty()) return nullptr;
57 return Queue.pop_back_val();
58 }
59 };
60
61//===----------------------------------------------------------------------===//
62/// ScheduleDAGFast - The actual "fast" list scheduler implementation.
63///
64class ScheduleDAGFast : public ScheduleDAGSDNodes {
65private:
66 /// AvailableQueue - The priority queue to use for the available SUnits.
67 FastPriorityQueue AvailableQueue;
68
69 /// LiveRegDefs - A set of physical registers and their definition
70 /// that are "live". These nodes must be scheduled before any other nodes that
71 /// modifies the registers can be scheduled.
72 unsigned NumLiveRegs = 0u;
73 std::vector<SUnit*> LiveRegDefs;
74 std::vector<unsigned> LiveRegCycles;
75
76public:
77 ScheduleDAGFast(MachineFunction &mf)
78 : ScheduleDAGSDNodes(mf) {}
79
80 void Schedule() override;
81
82 /// AddPred - adds a predecessor edge to SUnit SU.
83 /// This returns true if this is a new predecessor.
84 void AddPred(SUnit *SU, const SDep &D) {
85 SU->addPred(D);
86 }
87
88 /// RemovePred - removes a predecessor edge from SUnit SU.
89 /// This returns true if an edge was removed.
90 void RemovePred(SUnit *SU, const SDep &D) {
91 SU->removePred(D);
92 }
93
94private:
95 void ReleasePred(SUnit *SU, SDep *PredEdge);
96 void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
97 void ScheduleNodeBottomUp(SUnit*, unsigned);
98 SUnit *CopyAndMoveSuccessors(SUnit*);
99 void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
100 const TargetRegisterClass*,
101 const TargetRegisterClass*,
103 bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
104 void ListScheduleBottomUp();
105
106 /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
107 bool forceUnitLatencies() const override { return true; }
108};
109} // end anonymous namespace
110
111
112/// Schedule - Schedule the DAG using list scheduling.
113void ScheduleDAGFast::Schedule() {
114 LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");
115
116 NumLiveRegs = 0;
117 LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
118 LiveRegCycles.resize(TRI->getNumRegs(), 0);
119
120 // Build the scheduling graph.
121 BuildSchedGraph(nullptr);
122
123 LLVM_DEBUG(dump());
124
125 // Execute the actual scheduling loop.
126 ListScheduleBottomUp();
127}
128
129//===----------------------------------------------------------------------===//
130// Bottom-Up Scheduling
131//===----------------------------------------------------------------------===//
132
133/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
134/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
135void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
136 SUnit *PredSU = PredEdge->getSUnit();
137
138#ifndef NDEBUG
139 if (PredSU->NumSuccsLeft == 0) {
140 dbgs() << "*** Scheduling failed! ***\n";
141 dumpNode(*PredSU);
142 dbgs() << " has been released too many times!\n";
143 llvm_unreachable(nullptr);
144 }
145#endif
146 --PredSU->NumSuccsLeft;
147
148 // If all the node's successors are scheduled, this node is ready
149 // to be scheduled. Ignore the special EntrySU node.
150 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
151 PredSU->isAvailable = true;
152 AvailableQueue.push(PredSU);
153 }
154}
155
156void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
157 // Bottom up: release predecessors
158 for (SDep &Pred : SU->Preds) {
159 ReleasePred(SU, &Pred);
160 if (Pred.isAssignedRegDep()) {
161 // This is a physical register dependency and it's impossible or
162 // expensive to copy the register. Make sure nothing that can
163 // clobber the register is scheduled between the predecessor and
164 // this node.
165 if (!LiveRegDefs[Pred.getReg()]) {
166 ++NumLiveRegs;
167 LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
168 LiveRegCycles[Pred.getReg()] = CurCycle;
169 }
170 }
171 }
172}
173
174/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
175/// count of its predecessors. If a predecessor pending count is zero, add it to
176/// the Available queue.
177void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
178 LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
179 LLVM_DEBUG(dumpNode(*SU));
180
181 assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
182 SU->setHeightToAtLeast(CurCycle);
183 Sequence.push_back(SU);
184
185 ReleasePredecessors(SU, CurCycle);
186
187 // Release all the implicit physical register defs that are live.
188 for (SDep &Succ : SU->Succs) {
189 if (Succ.isAssignedRegDep()) {
190 if (LiveRegCycles[Succ.getReg()] == Succ.getSUnit()->getHeight()) {
191 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
192 assert(LiveRegDefs[Succ.getReg()] == SU &&
193 "Physical register dependency violated?");
194 --NumLiveRegs;
195 LiveRegDefs[Succ.getReg()] = nullptr;
196 LiveRegCycles[Succ.getReg()] = 0;
197 }
198 }
199 }
200
201 SU->isScheduled = true;
202}
203
204/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
205/// successors to the newly created node.
206SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
207 if (SU->getNode()->getGluedNode())
208 return nullptr;
209
210 SDNode *N = SU->getNode();
211 if (!N)
212 return nullptr;
213
214 SUnit *NewSU;
215 bool TryUnfold = false;
216 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
217 MVT VT = N->getSimpleValueType(i);
218 if (VT == MVT::Glue)
219 return nullptr;
220 else if (VT == MVT::Other)
221 TryUnfold = true;
222 }
223 for (const SDValue &Op : N->op_values()) {
224 MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
225 if (VT == MVT::Glue)
226 return nullptr;
227 }
228
229 if (TryUnfold) {
231 if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
232 return nullptr;
233
234 LLVM_DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
235 assert(NewNodes.size() == 2 && "Expected a load folding node!");
236
237 N = NewNodes[1];
238 SDNode *LoadNode = NewNodes[0];
239 unsigned NumVals = N->getNumValues();
240 unsigned OldNumVals = SU->getNode()->getNumValues();
241 for (unsigned i = 0; i != NumVals; ++i)
242 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
243 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
244 SDValue(LoadNode, 1));
245
246 SUnit *NewSU = newSUnit(N);
247 assert(N->getNodeId() == -1 && "Node already inserted!");
248 N->setNodeId(NewSU->NodeNum);
249
250 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
251 for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
252 if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
253 NewSU->isTwoAddress = true;
254 break;
255 }
256 }
257 if (MCID.isCommutable())
258 NewSU->isCommutable = true;
259
260 // LoadNode may already exist. This can happen when there is another
261 // load from the same location and producing the same type of value
262 // but it has different alignment or volatileness.
263 bool isNewLoad = true;
264 SUnit *LoadSU;
265 if (LoadNode->getNodeId() != -1) {
266 LoadSU = &SUnits[LoadNode->getNodeId()];
267 isNewLoad = false;
268 } else {
269 LoadSU = newSUnit(LoadNode);
270 LoadNode->setNodeId(LoadSU->NodeNum);
271 }
272
273 SDep ChainPred;
274 SmallVector<SDep, 4> ChainSuccs;
275 SmallVector<SDep, 4> LoadPreds;
276 SmallVector<SDep, 4> NodePreds;
277 SmallVector<SDep, 4> NodeSuccs;
278 for (SDep &Pred : SU->Preds) {
279 if (Pred.isCtrl())
280 ChainPred = Pred;
281 else if (Pred.getSUnit()->getNode() &&
282 Pred.getSUnit()->getNode()->isOperandOf(LoadNode))
283 LoadPreds.push_back(Pred);
284 else
285 NodePreds.push_back(Pred);
286 }
287 for (SDep &Succ : SU->Succs) {
288 if (Succ.isCtrl())
289 ChainSuccs.push_back(Succ);
290 else
291 NodeSuccs.push_back(Succ);
292 }
293
294 if (ChainPred.getSUnit()) {
295 RemovePred(SU, ChainPred);
296 if (isNewLoad)
297 AddPred(LoadSU, ChainPred);
298 }
299 for (const SDep &Pred : LoadPreds) {
300 RemovePred(SU, Pred);
301 if (isNewLoad) {
302 AddPred(LoadSU, Pred);
303 }
304 }
305 for (const SDep &Pred : NodePreds) {
306 RemovePred(SU, Pred);
307 AddPred(NewSU, Pred);
308 }
309 for (SDep D : NodeSuccs) {
310 SUnit *SuccDep = D.getSUnit();
311 D.setSUnit(SU);
312 RemovePred(SuccDep, D);
313 D.setSUnit(NewSU);
314 AddPred(SuccDep, D);
315 }
316 for (SDep D : ChainSuccs) {
317 SUnit *SuccDep = D.getSUnit();
318 D.setSUnit(SU);
319 RemovePred(SuccDep, D);
320 if (isNewLoad) {
321 D.setSUnit(LoadSU);
322 AddPred(SuccDep, D);
323 }
324 }
325 if (isNewLoad) {
326 SDep D(LoadSU, SDep::Barrier);
327 D.setLatency(LoadSU->Latency);
328 AddPred(NewSU, D);
329 }
330
331 ++NumUnfolds;
332
333 if (NewSU->NumSuccsLeft == 0) {
334 NewSU->isAvailable = true;
335 return NewSU;
336 }
337 SU = NewSU;
338 }
339
340 LLVM_DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
341 NewSU = Clone(SU);
342
343 // New SUnit has the exact same predecessors.
344 for (SDep &Pred : SU->Preds)
345 if (!Pred.isArtificial())
346 AddPred(NewSU, Pred);
347
348 // Only copy scheduled successors. Cut them from old node's successor
349 // list and move them over.
351 for (SDep &Succ : SU->Succs) {
352 if (Succ.isArtificial())
353 continue;
354 SUnit *SuccSU = Succ.getSUnit();
355 if (SuccSU->isScheduled) {
356 SDep D = Succ;
357 D.setSUnit(NewSU);
358 AddPred(SuccSU, D);
359 D.setSUnit(SU);
360 DelDeps.push_back(std::make_pair(SuccSU, D));
361 }
362 }
363 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
364 RemovePred(DelDeps[i].first, DelDeps[i].second);
365
366 ++NumDups;
367 return NewSU;
368}
369
370/// InsertCopiesAndMoveSuccs - Insert register copies and move all
371/// scheduled successors of the given SUnit to the last copy.
372void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
373 const TargetRegisterClass *DestRC,
374 const TargetRegisterClass *SrcRC,
376 SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
377 CopyFromSU->CopySrcRC = SrcRC;
378 CopyFromSU->CopyDstRC = DestRC;
379
380 SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
381 CopyToSU->CopySrcRC = DestRC;
382 CopyToSU->CopyDstRC = SrcRC;
383
384 // Only copy scheduled successors. Cut them from old node's successor
385 // list and move them over.
387 for (SDep &Succ : SU->Succs) {
388 if (Succ.isArtificial())
389 continue;
390 SUnit *SuccSU = Succ.getSUnit();
391 if (SuccSU->isScheduled) {
392 SDep D = Succ;
393 D.setSUnit(CopyToSU);
394 AddPred(SuccSU, D);
395 DelDeps.push_back(std::make_pair(SuccSU, Succ));
396 }
397 }
398 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
399 RemovePred(DelDeps[i].first, DelDeps[i].second);
400 }
401 SDep FromDep(SU, SDep::Data, Reg);
402 FromDep.setLatency(SU->Latency);
403 AddPred(CopyFromSU, FromDep);
404 SDep ToDep(CopyFromSU, SDep::Data, 0);
405 ToDep.setLatency(CopyFromSU->Latency);
406 AddPred(CopyToSU, ToDep);
407
408 Copies.push_back(CopyFromSU);
409 Copies.push_back(CopyToSU);
410
411 ++NumPRCopies;
412}
413
414/// getPhysicalRegisterVT - Returns the ValueType of the physical register
415/// definition of the specified node.
416/// FIXME: Move to SelectionDAG?
417static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
418 const TargetInstrInfo *TII) {
419 unsigned NumRes;
420 if (N->getOpcode() == ISD::CopyFromReg) {
421 // CopyFromReg has: "chain, Val, glue" so operand 1 gives the type.
422 NumRes = 1;
423 } else {
424 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
425 assert(!MCID.implicit_defs().empty() &&
426 "Physical reg def must be in implicit def list!");
427 NumRes = MCID.getNumDefs();
428 for (MCPhysReg ImpDef : MCID.implicit_defs()) {
429 if (Reg == ImpDef)
430 break;
431 ++NumRes;
432 }
433 }
434 return N->getSimpleValueType(NumRes);
435}
436
437/// CheckForLiveRegDef - Return true and update live register vector if the
438/// specified register def of the specified SUnit clobbers any "live" registers.
439static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
440 std::vector<SUnit *> &LiveRegDefs,
441 SmallSet<unsigned, 4> &RegAdded,
443 const TargetRegisterInfo *TRI,
444 const SDNode *Node = nullptr) {
445 bool Added = false;
446 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
447 // Check if Ref is live.
448 if (!LiveRegDefs[*AI])
449 continue;
450
451 // Allow multiple uses of the same def.
452 if (LiveRegDefs[*AI] == SU)
453 continue;
454
455 // Allow multiple uses of same def
456 if (Node && LiveRegDefs[*AI]->getNode() == Node)
457 continue;
458
459 // Add Reg to the set of interfering live regs.
460 if (RegAdded.insert(*AI).second) {
461 LRegs.push_back(*AI);
462 Added = true;
463 }
464 }
465 return Added;
466}
467
468/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
469/// scheduling of the given node to satisfy live physical register dependencies.
470/// If the specific node is the last one that's available to schedule, do
471/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
472bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
474 if (NumLiveRegs == 0)
475 return false;
476
477 SmallSet<unsigned, 4> RegAdded;
478 // If this node would clobber any "live" register, then it's not ready.
479 for (SDep &Pred : SU->Preds) {
480 if (Pred.isAssignedRegDep()) {
481 CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs,
482 RegAdded, LRegs, TRI);
483 }
484 }
485
486 for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
487 if (Node->getOpcode() == ISD::INLINEASM ||
488 Node->getOpcode() == ISD::INLINEASM_BR) {
489 // Inline asm can clobber physical defs.
490 unsigned NumOps = Node->getNumOperands();
491 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
492 --NumOps; // Ignore the glue operand.
493
494 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
495 unsigned Flags = Node->getConstantOperandVal(i);
496 const InlineAsm::Flag F(Flags);
497 unsigned NumVals = F.getNumOperandRegisters();
498
499 ++i; // Skip the ID value.
500 if (F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
501 F.isClobberKind()) {
502 // Check for def of register or earlyclobber register.
503 for (; NumVals; --NumVals, ++i) {
504 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
506 CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
507 }
508 } else
509 i += NumVals;
510 }
511 continue;
512 }
513
514 if (Node->getOpcode() == ISD::CopyToReg) {
515 Register Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
516 if (Reg.isPhysical()) {
517 SDNode *SrcNode = Node->getOperand(2).getNode();
518 CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI, SrcNode);
519 }
520 }
521
522 if (!Node->isMachineOpcode())
523 continue;
524 const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
525 for (MCPhysReg Reg : MCID.implicit_defs())
526 CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
527 }
528 return !LRegs.empty();
529}
530
531
532/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
533/// schedulers.
534void ScheduleDAGFast::ListScheduleBottomUp() {
535 unsigned CurCycle = 0;
536
537 // Release any predecessors of the special Exit node.
538 ReleasePredecessors(&ExitSU, CurCycle);
539
540 // Add root to Available queue.
541 if (!SUnits.empty()) {
542 SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
543 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
544 RootSU->isAvailable = true;
545 AvailableQueue.push(RootSU);
546 }
547
548 // While Available queue is not empty, grab the node with the highest
549 // priority. If it is not ready put it back. Schedule the node.
550 SmallVector<SUnit*, 4> NotReady;
552 Sequence.reserve(SUnits.size());
553 while (!AvailableQueue.empty()) {
554 bool Delayed = false;
555 LRegsMap.clear();
556 SUnit *CurSU = AvailableQueue.pop();
557 while (CurSU) {
559 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
560 break;
561 Delayed = true;
562 LRegsMap.insert(std::make_pair(CurSU, LRegs));
563
564 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
565 NotReady.push_back(CurSU);
566 CurSU = AvailableQueue.pop();
567 }
568
569 // All candidates are delayed due to live physical reg dependencies.
570 // Try code duplication or inserting cross class copies
571 // to resolve it.
572 if (Delayed && !CurSU) {
573 if (!CurSU) {
574 // Try duplicating the nodes that produces these
575 // "expensive to copy" values to break the dependency. In case even
576 // that doesn't work, insert cross class copies.
577 SUnit *TrySU = NotReady[0];
578 SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
579 assert(LRegs.size() == 1 && "Can't handle this yet!");
580 unsigned Reg = LRegs[0];
581 SUnit *LRDef = LiveRegDefs[Reg];
582 MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
583 const TargetRegisterClass *RC =
584 TRI->getMinimalPhysRegClass(Reg, VT);
585 const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
586
587 // If cross copy register class is the same as RC, then it must be
588 // possible copy the value directly. Do not try duplicate the def.
589 // If cross copy register class is not the same as RC, then it's
590 // possible to copy the value but it require cross register class copies
591 // and it is expensive.
592 // If cross copy register class is null, then it's not possible to copy
593 // the value at all.
594 SUnit *NewDef = nullptr;
595 if (DestRC != RC) {
596 NewDef = CopyAndMoveSuccessors(LRDef);
597 if (!DestRC && !NewDef)
598 report_fatal_error("Can't handle live physical "
599 "register dependency!");
600 }
601 if (!NewDef) {
602 // Issue copies, these can be expensive cross register class copies.
604 InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
605 LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
606 << " to SU #" << Copies.front()->NodeNum << "\n");
607 AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
608 NewDef = Copies.back();
609 }
610
611 LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
612 << " to SU #" << TrySU->NodeNum << "\n");
613 LiveRegDefs[Reg] = NewDef;
614 AddPred(NewDef, SDep(TrySU, SDep::Artificial));
615 TrySU->isAvailable = false;
616 CurSU = NewDef;
617 }
618
619 if (!CurSU) {
620 llvm_unreachable("Unable to resolve live physical register dependencies!");
621 }
622 }
623
624 // Add the nodes that aren't ready back onto the available list.
625 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
626 NotReady[i]->isPending = false;
627 // May no longer be available due to backtracking.
628 if (NotReady[i]->isAvailable)
629 AvailableQueue.push(NotReady[i]);
630 }
631 NotReady.clear();
632
633 if (CurSU)
634 ScheduleNodeBottomUp(CurSU, CurCycle);
635 ++CurCycle;
636 }
637
638 // Reverse the order since it is bottom up.
639 std::reverse(Sequence.begin(), Sequence.end());
640
641#ifndef NDEBUG
642 VerifyScheduledSequence(/*isBottomUp=*/true);
643#endif
644}
645
646
647namespace {
648//===----------------------------------------------------------------------===//
649// ScheduleDAGLinearize - No scheduling scheduler, it simply linearize the
650// DAG in topological order.
651// IMPORTANT: this may not work for targets with phyreg dependency.
652//
653class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
654public:
655 ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}
656
657 void Schedule() override;
658
660 EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;
661
662private:
663 std::vector<SDNode*> Sequence;
664 DenseMap<SDNode*, SDNode*> GluedMap; // Cache glue to its user
665
666 void ScheduleNode(SDNode *N);
667};
668} // end anonymous namespace
669
670void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
671 if (N->getNodeId() != 0)
672 llvm_unreachable(nullptr);
673
674 if (!N->isMachineOpcode() &&
675 (N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
676 // These nodes do not need to be translated into MIs.
677 return;
678
679 LLVM_DEBUG(dbgs() << "\n*** Scheduling: ");
680 LLVM_DEBUG(N->dump(DAG));
681 Sequence.push_back(N);
682
683 unsigned NumOps = N->getNumOperands();
684 if (unsigned NumLeft = NumOps) {
685 SDNode *GluedOpN = nullptr;
686 do {
687 const SDValue &Op = N->getOperand(NumLeft-1);
688 SDNode *OpN = Op.getNode();
689
690 if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
691 // Schedule glue operand right above N.
692 GluedOpN = OpN;
693 assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
694 OpN->setNodeId(0);
695 ScheduleNode(OpN);
696 continue;
697 }
698
699 if (OpN == GluedOpN)
700 // Glue operand is already scheduled.
701 continue;
702
704 if (DI != GluedMap.end() && DI->second != N)
705 // Users of glues are counted against the glued users.
706 OpN = DI->second;
707
708 unsigned Degree = OpN->getNodeId();
709 assert(Degree > 0 && "Predecessor over-released!");
710 OpN->setNodeId(--Degree);
711 if (Degree == 0)
712 ScheduleNode(OpN);
713 } while (--NumLeft);
714 }
715}
716
717/// findGluedUser - Find the representative use of a glue value by walking
718/// the use chain.
720 while (SDNode *Glued = N->getGluedUser())
721 N = Glued;
722 return N;
723}
724
725void ScheduleDAGLinearize::Schedule() {
726 LLVM_DEBUG(dbgs() << "********** DAG Linearization **********\n");
727
729 unsigned DAGSize = 0;
730 for (SDNode &Node : DAG->allnodes()) {
731 SDNode *N = &Node;
732
733 // Use node id to record degree.
734 unsigned Degree = N->use_size();
735 N->setNodeId(Degree);
736 unsigned NumVals = N->getNumValues();
737 if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
738 N->hasAnyUseOfValue(NumVals-1)) {
740 if (User) {
741 Glues.push_back(N);
742 GluedMap.insert(std::make_pair(N, User));
743 }
744 }
745
746 if (N->isMachineOpcode() ||
747 (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
748 ++DAGSize;
749 }
750
751 for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
752 SDNode *Glue = Glues[i];
753 SDNode *GUser = GluedMap[Glue];
754 unsigned Degree = Glue->getNodeId();
755 unsigned UDegree = GUser->getNodeId();
756
757 // Glue user must be scheduled together with the glue operand. So other
758 // users of the glue operand must be treated as its users.
759 SDNode *ImmGUser = Glue->getGluedUser();
760 for (const SDNode *U : Glue->uses())
761 if (U == ImmGUser)
762 --Degree;
763 GUser->setNodeId(UDegree + Degree);
764 Glue->setNodeId(1);
765 }
766
767 Sequence.reserve(DAGSize);
768 ScheduleNode(DAG->getRoot().getNode());
769}
770
772ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
773 InstrEmitter Emitter(DAG->getTarget(), BB, InsertPos);
775
776 LLVM_DEBUG({ dbgs() << "\n*** Final schedule ***\n"; });
777
778 unsigned NumNodes = Sequence.size();
779 MachineBasicBlock *BB = Emitter.getBlock();
780 for (unsigned i = 0; i != NumNodes; ++i) {
781 SDNode *N = Sequence[NumNodes-i-1];
782 LLVM_DEBUG(N->dump(DAG));
783 Emitter.EmitNode(N, false, false, VRBaseMap);
784
785 // Emit any debug values associated with the node.
786 if (N->getHasDebugValue()) {
787 MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
788 for (auto *DV : DAG->GetDbgValues(N)) {
789 if (!DV->isEmitted())
790 if (auto *DbgMI = Emitter.EmitDbgValue(DV, VRBaseMap))
791 BB->insert(InsertPos, DbgMI);
792 }
793 }
794 }
795
796 LLVM_DEBUG(dbgs() << '\n');
797
798 InsertPos = Emitter.getInsertPos();
799 return Emitter.getBlock();
800}
801
802//===----------------------------------------------------------------------===//
803// Public Constructor Functions
804//===----------------------------------------------------------------------===//
805
808 return new ScheduleDAGFast(*IS->MF);
809}
810
813 return new ScheduleDAGLinearize(*IS->MF);
814}
static void push(SmallVectorImpl< uint64_t > &R, StringRef Str)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
dxil DXContainer Global Emitter
#define LLVM_DEBUG(X)
Definition: Debug.h:101
const HexagonInstrInfo * TII
#define F(x, y, z)
Definition: MD5.cpp:55
unsigned const TargetRegisterInfo * TRI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Lower i1 Copies
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg, const TargetInstrInfo *TII)
getPhysicalRegisterVT - Returns the ValueType of the physical register definition of the specified no...
static RegisterScheduler fastDAGScheduler("fast", "Fast suboptimal list scheduling", createFastDAGScheduler)
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg, std::vector< SUnit * > &LiveRegDefs, SmallSet< unsigned, 4 > &RegAdded, SmallVectorImpl< unsigned > &LRegs, const TargetRegisterInfo *TRI, const SDNode *Node=nullptr)
CheckForLiveRegDef - Return true and update live register vector if the specified register def of the...
static RegisterScheduler linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling", createDAGLinearizer)
static SDNode * findGluedUser(SDNode *N)
findGluedUser - Find the representative use of a glue value by walking the use chain.
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
Definition: MCInstrDesc.h:579
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z,...
Definition: MCInstrDesc.h:481
MCRegAliasIterator enumerates all registers aliasing Reg.
Machine Value Type.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:65
Represents one node in the SelectionDAG.
int getNodeId() const
Return the unique node id.
SDNode * getGluedUser() const
If this node has a glue value with a user, return the user (there is at most one).
iterator_range< use_iterator > uses()
void setNodeId(int Id)
Set unique node id.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
SDNode * getGluedNode() const
If this node has a glue operand, return the node to which the glue operand points.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
Scheduling dependency.
Definition: ScheduleDAG.h:49
SUnit * getSUnit() const
Definition: ScheduleDAG.h:480
@ Data
Regular data dependence (aka true-dependence).
Definition: ScheduleDAG.h:53
@ Barrier
An unknown scheduling barrier.
Definition: ScheduleDAG.h:69
@ Artificial
Arbitrary strong DAG edge (no real dependence).
Definition: ScheduleDAG.h:72
bool isAssignedRegDep() const
Tests if this is a Data dependence that is associated with a register.
Definition: ScheduleDAG.h:211
bool isArtificial() const
Tests if this is an Order dependence that is marked as "artificial", meaning it isn't necessary for c...
Definition: ScheduleDAG.h:200
bool isCtrl() const
Shorthand for getKind() != SDep::Data.
Definition: ScheduleDAG.h:161
unsigned getReg() const
Returns the register associated with this edge.
Definition: ScheduleDAG.h:218
Scheduling unit. This is a node in the scheduling DAG.
Definition: ScheduleDAG.h:242
void setHeightToAtLeast(unsigned NewHeight)
If NewHeight is greater than this node's height value, set it to be the new height value.
unsigned NodeNum
Entry # of node in the node vector.
Definition: ScheduleDAG.h:264
unsigned NumSuccsLeft
Definition: ScheduleDAG.h:269
const TargetRegisterClass * CopyDstRC
Is a special copy node if != nullptr.
Definition: ScheduleDAG.h:302
unsigned getHeight() const
Returns the height of this node, which is the length of the maximum path down to any node which has n...
Definition: ScheduleDAG.h:406
void removePred(const SDep &D)
Removes the specified edge as a pred of the current node if it exists.
unsigned short Latency
Node latency.
Definition: ScheduleDAG.h:273
bool isPending
True once pending.
Definition: ScheduleDAG.h:282
bool isScheduled
True once scheduled.
Definition: ScheduleDAG.h:284
bool isAvailable
True once available.
Definition: ScheduleDAG.h:283
SmallVector< SDep, 4 > Succs
All sunit successors.
Definition: ScheduleDAG.h:257
const TargetRegisterClass * CopySrcRC
Definition: ScheduleDAG.h:304
SDNode * getNode() const
Returns the representative SDNode for this SUnit.
Definition: ScheduleDAG.h:355
bool isTwoAddress
Is a two-address instruction.
Definition: ScheduleDAG.h:277
bool isCommutable
Is a commutable instruction.
Definition: ScheduleDAG.h:278
SmallVector< SDep, 4 > Preds
All sunit predecessors.
Definition: ScheduleDAG.h:256
bool addPred(const SDep &D, bool Required=true)
Adds the specified edge as a pred of the current node if not already.
ScheduleDAGSDNodes - A ScheduleDAG for scheduling SDNode-based DAGs.
virtual void Schedule()=0
Schedule - Order nodes according to selected style, filling in the Sequence member.
virtual MachineBasicBlock * EmitSchedule(MachineBasicBlock::iterator &InsertPos)
EmitSchedule - Insert MachineInstrs into the MachineBasicBlock according to the order specified in Se...
virtual bool forceUnitLatencies() const
ForceUnitLatencies - Return true if all scheduling edges should be given a latency value of one.
std::vector< SUnit * > Sequence
The schedule. Null SUnit*'s represent noop instructions.
SelectionDAGISel - This is the common base class used for SelectionDAG-based pattern-matching instruc...
MachineFunction * MF
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
TargetInstrInfo - Interface to description of machine instruction set.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:208
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
Definition: ISDOpcodes.h:47
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition: ISDOpcodes.h:203
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:1097
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1094
Reg
All possible values of the reg field in the ModR/M byte.
Sequence
A sequence of states that a pointer may go through in which an objc_retain and objc_release are actua...
Definition: PtrState.h:41
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
ScheduleDAGSDNodes * createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOptLevel OptLevel)
createFastDAGScheduler - This creates a "fast" scheduler.
ScheduleDAGSDNodes * createDAGLinearizer(SelectionDAGISel *IS, CodeGenOptLevel OptLevel)
createDAGLinearizer - This creates a "no-scheduling" scheduler which linearize the DAG using topologi...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
#define N