LLVM  6.0.0svn
ScheduleDAGFast.cpp
Go to the documentation of this file.
1 //===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements a fast scheduler.
11 //
12 //===----------------------------------------------------------------------===//
13 
#include "InstrEmitter.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
28 using namespace llvm;
29 
30 #define DEBUG_TYPE "pre-RA-sched"
31 
32 STATISTIC(NumUnfolds, "Number of nodes unfolded");
33 STATISTIC(NumDups, "Number of duplicated nodes");
34 STATISTIC(NumPRCopies, "Number of physical copies");
35 
36 static RegisterScheduler
37  fastDAGScheduler("fast", "Fast suboptimal list scheduling",
39 static RegisterScheduler
40  linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
42 
43 
44 namespace {
45  /// FastPriorityQueue - A degenerate priority queue that considers
46  /// all nodes to have the same priority.
47  ///
48  struct FastPriorityQueue {
50 
51  bool empty() const { return Queue.empty(); }
52 
53  void push(SUnit *U) {
54  Queue.push_back(U);
55  }
56 
57  SUnit *pop() {
58  if (empty()) return nullptr;
59  SUnit *V = Queue.back();
60  Queue.pop_back();
61  return V;
62  }
63  };
64 
65 //===----------------------------------------------------------------------===//
66 /// ScheduleDAGFast - The actual "fast" list scheduler implementation.
67 ///
68 class ScheduleDAGFast : public ScheduleDAGSDNodes {
69 private:
70  /// AvailableQueue - The priority queue to use for the available SUnits.
71  FastPriorityQueue AvailableQueue;
72 
73  /// LiveRegDefs - A set of physical registers and their definition
74  /// that are "live". These nodes must be scheduled before any other nodes that
75  /// modifies the registers can be scheduled.
76  unsigned NumLiveRegs;
77  std::vector<SUnit*> LiveRegDefs;
78  std::vector<unsigned> LiveRegCycles;
79 
80 public:
81  ScheduleDAGFast(MachineFunction &mf)
82  : ScheduleDAGSDNodes(mf) {}
83 
84  void Schedule() override;
85 
86  /// AddPred - adds a predecessor edge to SUnit SU.
87  /// This returns true if this is a new predecessor.
88  void AddPred(SUnit *SU, const SDep &D) {
89  SU->addPred(D);
90  }
91 
92  /// RemovePred - removes a predecessor edge from SUnit SU.
93  /// This returns true if an edge was removed.
94  void RemovePred(SUnit *SU, const SDep &D) {
95  SU->removePred(D);
96  }
97 
98 private:
99  void ReleasePred(SUnit *SU, SDep *PredEdge);
100  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
101  void ScheduleNodeBottomUp(SUnit*, unsigned);
102  SUnit *CopyAndMoveSuccessors(SUnit*);
103  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
104  const TargetRegisterClass*,
105  const TargetRegisterClass*,
107  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
108  void ListScheduleBottomUp();
109 
110  /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
111  bool forceUnitLatencies() const override { return true; }
112 };
113 } // end anonymous namespace
114 
115 
116 /// Schedule - Schedule the DAG using list scheduling.
117 void ScheduleDAGFast::Schedule() {
118  DEBUG(dbgs() << "********** List Scheduling **********\n");
119 
120  NumLiveRegs = 0;
121  LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
122  LiveRegCycles.resize(TRI->getNumRegs(), 0);
123 
124  // Build the scheduling graph.
125  BuildSchedGraph(nullptr);
126 
127  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
128  SUnits[su].dumpAll(this));
129 
130  // Execute the actual scheduling loop.
131  ListScheduleBottomUp();
132 }
133 
134 //===----------------------------------------------------------------------===//
135 // Bottom-Up Scheduling
136 //===----------------------------------------------------------------------===//
137 
138 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
139 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
140 void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
141  SUnit *PredSU = PredEdge->getSUnit();
142 
143 #ifndef NDEBUG
144  if (PredSU->NumSuccsLeft == 0) {
145  dbgs() << "*** Scheduling failed! ***\n";
146  PredSU->dump(this);
147  dbgs() << " has been released too many times!\n";
148  llvm_unreachable(nullptr);
149  }
150 #endif
151  --PredSU->NumSuccsLeft;
152 
153  // If all the node's successors are scheduled, this node is ready
154  // to be scheduled. Ignore the special EntrySU node.
155  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
156  PredSU->isAvailable = true;
157  AvailableQueue.push(PredSU);
158  }
159 }
160 
161 void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
162  // Bottom up: release predecessors
163  for (SDep &Pred : SU->Preds) {
164  ReleasePred(SU, &Pred);
165  if (Pred.isAssignedRegDep()) {
166  // This is a physical register dependency and it's impossible or
167  // expensive to copy the register. Make sure nothing that can
168  // clobber the register is scheduled between the predecessor and
169  // this node.
170  if (!LiveRegDefs[Pred.getReg()]) {
171  ++NumLiveRegs;
172  LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
173  LiveRegCycles[Pred.getReg()] = CurCycle;
174  }
175  }
176  }
177 }
178 
179 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
180 /// count of its predecessors. If a predecessor pending count is zero, add it to
181 /// the Available queue.
182 void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
183  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
184  DEBUG(SU->dump(this));
185 
186  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
187  SU->setHeightToAtLeast(CurCycle);
188  Sequence.push_back(SU);
189 
190  ReleasePredecessors(SU, CurCycle);
191 
192  // Release all the implicit physical register defs that are live.
193  for (SDep &Succ : SU->Succs) {
194  if (Succ.isAssignedRegDep()) {
195  if (LiveRegCycles[Succ.getReg()] == Succ.getSUnit()->getHeight()) {
196  assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
197  assert(LiveRegDefs[Succ.getReg()] == SU &&
198  "Physical register dependency violated?");
199  --NumLiveRegs;
200  LiveRegDefs[Succ.getReg()] = nullptr;
201  LiveRegCycles[Succ.getReg()] = 0;
202  }
203  }
204  }
205 
206  SU->isScheduled = true;
207 }
208 
209 /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
210 /// successors to the newly created node.
211 SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
212  if (SU->getNode()->getGluedNode())
213  return nullptr;
214 
215  SDNode *N = SU->getNode();
216  if (!N)
217  return nullptr;
218 
219  SUnit *NewSU;
220  bool TryUnfold = false;
221  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
222  MVT VT = N->getSimpleValueType(i);
223  if (VT == MVT::Glue)
224  return nullptr;
225  else if (VT == MVT::Other)
226  TryUnfold = true;
227  }
228  for (const SDValue &Op : N->op_values()) {
229  MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
230  if (VT == MVT::Glue)
231  return nullptr;
232  }
233 
234  if (TryUnfold) {
235  SmallVector<SDNode*, 2> NewNodes;
236  if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
237  return nullptr;
238 
239  DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
240  assert(NewNodes.size() == 2 && "Expected a load folding node!");
241 
242  N = NewNodes[1];
243  SDNode *LoadNode = NewNodes[0];
244  unsigned NumVals = N->getNumValues();
245  unsigned OldNumVals = SU->getNode()->getNumValues();
246  for (unsigned i = 0; i != NumVals; ++i)
247  DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
248  DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
249  SDValue(LoadNode, 1));
250 
251  SUnit *NewSU = newSUnit(N);
252  assert(N->getNodeId() == -1 && "Node already inserted!");
253  N->setNodeId(NewSU->NodeNum);
254 
255  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
256  for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
257  if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
258  NewSU->isTwoAddress = true;
259  break;
260  }
261  }
262  if (MCID.isCommutable())
263  NewSU->isCommutable = true;
264 
265  // LoadNode may already exist. This can happen when there is another
266  // load from the same location and producing the same type of value
267  // but it has different alignment or volatileness.
268  bool isNewLoad = true;
269  SUnit *LoadSU;
270  if (LoadNode->getNodeId() != -1) {
271  LoadSU = &SUnits[LoadNode->getNodeId()];
272  isNewLoad = false;
273  } else {
274  LoadSU = newSUnit(LoadNode);
275  LoadNode->setNodeId(LoadSU->NodeNum);
276  }
277 
278  SDep ChainPred;
279  SmallVector<SDep, 4> ChainSuccs;
280  SmallVector<SDep, 4> LoadPreds;
281  SmallVector<SDep, 4> NodePreds;
282  SmallVector<SDep, 4> NodeSuccs;
283  for (SDep &Pred : SU->Preds) {
284  if (Pred.isCtrl())
285  ChainPred = Pred;
286  else if (Pred.getSUnit()->getNode() &&
287  Pred.getSUnit()->getNode()->isOperandOf(LoadNode))
288  LoadPreds.push_back(Pred);
289  else
290  NodePreds.push_back(Pred);
291  }
292  for (SDep &Succ : SU->Succs) {
293  if (Succ.isCtrl())
294  ChainSuccs.push_back(Succ);
295  else
296  NodeSuccs.push_back(Succ);
297  }
298 
299  if (ChainPred.getSUnit()) {
300  RemovePred(SU, ChainPred);
301  if (isNewLoad)
302  AddPred(LoadSU, ChainPred);
303  }
304  for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
305  const SDep &Pred = LoadPreds[i];
306  RemovePred(SU, Pred);
307  if (isNewLoad) {
308  AddPred(LoadSU, Pred);
309  }
310  }
311  for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
312  const SDep &Pred = NodePreds[i];
313  RemovePred(SU, Pred);
314  AddPred(NewSU, Pred);
315  }
316  for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
317  SDep D = NodeSuccs[i];
318  SUnit *SuccDep = D.getSUnit();
319  D.setSUnit(SU);
320  RemovePred(SuccDep, D);
321  D.setSUnit(NewSU);
322  AddPred(SuccDep, D);
323  }
324  for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
325  SDep D = ChainSuccs[i];
326  SUnit *SuccDep = D.getSUnit();
327  D.setSUnit(SU);
328  RemovePred(SuccDep, D);
329  if (isNewLoad) {
330  D.setSUnit(LoadSU);
331  AddPred(SuccDep, D);
332  }
333  }
334  if (isNewLoad) {
335  SDep D(LoadSU, SDep::Barrier);
336  D.setLatency(LoadSU->Latency);
337  AddPred(NewSU, D);
338  }
339 
340  ++NumUnfolds;
341 
342  if (NewSU->NumSuccsLeft == 0) {
343  NewSU->isAvailable = true;
344  return NewSU;
345  }
346  SU = NewSU;
347  }
348 
349  DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
350  NewSU = Clone(SU);
351 
352  // New SUnit has the exact same predecessors.
353  for (SDep &Pred : SU->Preds)
354  if (!Pred.isArtificial())
355  AddPred(NewSU, Pred);
356 
357  // Only copy scheduled successors. Cut them from old node's successor
358  // list and move them over.
360  for (SDep &Succ : SU->Succs) {
361  if (Succ.isArtificial())
362  continue;
363  SUnit *SuccSU = Succ.getSUnit();
364  if (SuccSU->isScheduled) {
365  SDep D = Succ;
366  D.setSUnit(NewSU);
367  AddPred(SuccSU, D);
368  D.setSUnit(SU);
369  DelDeps.push_back(std::make_pair(SuccSU, D));
370  }
371  }
372  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
373  RemovePred(DelDeps[i].first, DelDeps[i].second);
374 
375  ++NumDups;
376  return NewSU;
377 }
378 
379 /// InsertCopiesAndMoveSuccs - Insert register copies and move all
380 /// scheduled successors of the given SUnit to the last copy.
381 void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
382  const TargetRegisterClass *DestRC,
383  const TargetRegisterClass *SrcRC,
384  SmallVectorImpl<SUnit*> &Copies) {
385  SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
386  CopyFromSU->CopySrcRC = SrcRC;
387  CopyFromSU->CopyDstRC = DestRC;
388 
389  SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
390  CopyToSU->CopySrcRC = DestRC;
391  CopyToSU->CopyDstRC = SrcRC;
392 
393  // Only copy scheduled successors. Cut them from old node's successor
394  // list and move them over.
396  for (SDep &Succ : SU->Succs) {
397  if (Succ.isArtificial())
398  continue;
399  SUnit *SuccSU = Succ.getSUnit();
400  if (SuccSU->isScheduled) {
401  SDep D = Succ;
402  D.setSUnit(CopyToSU);
403  AddPred(SuccSU, D);
404  DelDeps.push_back(std::make_pair(SuccSU, Succ));
405  }
406  }
407  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
408  RemovePred(DelDeps[i].first, DelDeps[i].second);
409  }
410  SDep FromDep(SU, SDep::Data, Reg);
411  FromDep.setLatency(SU->Latency);
412  AddPred(CopyFromSU, FromDep);
413  SDep ToDep(CopyFromSU, SDep::Data, 0);
414  ToDep.setLatency(CopyFromSU->Latency);
415  AddPred(CopyToSU, ToDep);
416 
417  Copies.push_back(CopyFromSU);
418  Copies.push_back(CopyToSU);
419 
420  ++NumPRCopies;
421 }
422 
423 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
424 /// definition of the specified node.
425 /// FIXME: Move to SelectionDAG?
426 static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
427  const TargetInstrInfo *TII) {
428  unsigned NumRes;
429  if (N->getOpcode() == ISD::CopyFromReg) {
430  // CopyFromReg has: "chain, Val, glue" so operand 1 gives the type.
431  NumRes = 1;
432  } else {
433  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
434  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
435  NumRes = MCID.getNumDefs();
436  for (const MCPhysReg *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
437  if (Reg == *ImpDef)
438  break;
439  ++NumRes;
440  }
441  }
442  return N->getSimpleValueType(NumRes);
443 }
444 
445 /// CheckForLiveRegDef - Return true and update live register vector if the
446 /// specified register def of the specified SUnit clobbers any "live" registers.
447 static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
448  std::vector<SUnit*> &LiveRegDefs,
449  SmallSet<unsigned, 4> &RegAdded,
451  const TargetRegisterInfo *TRI) {
452  bool Added = false;
453  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
454  if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
455  if (RegAdded.insert(*AI).second) {
456  LRegs.push_back(*AI);
457  Added = true;
458  }
459  }
460  }
461  return Added;
462 }
463 
464 /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
465 /// scheduling of the given node to satisfy live physical register dependencies.
466 /// If the specific node is the last one that's available to schedule, do
467 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
468 bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
470  if (NumLiveRegs == 0)
471  return false;
472 
473  SmallSet<unsigned, 4> RegAdded;
474  // If this node would clobber any "live" register, then it's not ready.
475  for (SDep &Pred : SU->Preds) {
476  if (Pred.isAssignedRegDep()) {
477  CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs,
478  RegAdded, LRegs, TRI);
479  }
480  }
481 
482  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
483  if (Node->getOpcode() == ISD::INLINEASM) {
484  // Inline asm can clobber physical defs.
485  unsigned NumOps = Node->getNumOperands();
486  if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
487  --NumOps; // Ignore the glue operand.
488 
489  for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
490  unsigned Flags =
491  cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
492  unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
493 
494  ++i; // Skip the ID value.
495  if (InlineAsm::isRegDefKind(Flags) ||
497  InlineAsm::isClobberKind(Flags)) {
498  // Check for def of register or earlyclobber register.
499  for (; NumVals; --NumVals, ++i) {
500  unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
502  CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
503  }
504  } else
505  i += NumVals;
506  }
507  continue;
508  }
509  if (!Node->isMachineOpcode())
510  continue;
511  const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
512  if (!MCID.ImplicitDefs)
513  continue;
514  for (const MCPhysReg *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
515  CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
516  }
517  }
518  return !LRegs.empty();
519 }
520 
521 
522 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
523 /// schedulers.
524 void ScheduleDAGFast::ListScheduleBottomUp() {
525  unsigned CurCycle = 0;
526 
527  // Release any predecessors of the special Exit node.
528  ReleasePredecessors(&ExitSU, CurCycle);
529 
530  // Add root to Available queue.
531  if (!SUnits.empty()) {
532  SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
533  assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
534  RootSU->isAvailable = true;
535  AvailableQueue.push(RootSU);
536  }
537 
538  // While Available queue is not empty, grab the node with the highest
539  // priority. If it is not ready put it back. Schedule the node.
540  SmallVector<SUnit*, 4> NotReady;
542  Sequence.reserve(SUnits.size());
543  while (!AvailableQueue.empty()) {
544  bool Delayed = false;
545  LRegsMap.clear();
546  SUnit *CurSU = AvailableQueue.pop();
547  while (CurSU) {
549  if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
550  break;
551  Delayed = true;
552  LRegsMap.insert(std::make_pair(CurSU, LRegs));
553 
554  CurSU->isPending = true; // This SU is not in AvailableQueue right now.
555  NotReady.push_back(CurSU);
556  CurSU = AvailableQueue.pop();
557  }
558 
559  // All candidates are delayed due to live physical reg dependencies.
560  // Try code duplication or inserting cross class copies
561  // to resolve it.
562  if (Delayed && !CurSU) {
563  if (!CurSU) {
564  // Try duplicating the nodes that produces these
565  // "expensive to copy" values to break the dependency. In case even
566  // that doesn't work, insert cross class copies.
567  SUnit *TrySU = NotReady[0];
568  SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
569  assert(LRegs.size() == 1 && "Can't handle this yet!");
570  unsigned Reg = LRegs[0];
571  SUnit *LRDef = LiveRegDefs[Reg];
572  MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
573  const TargetRegisterClass *RC =
574  TRI->getMinimalPhysRegClass(Reg, VT);
575  const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
576 
577  // If cross copy register class is the same as RC, then it must be
578  // possible copy the value directly. Do not try duplicate the def.
579  // If cross copy register class is not the same as RC, then it's
580  // possible to copy the value but it require cross register class copies
581  // and it is expensive.
582  // If cross copy register class is null, then it's not possible to copy
583  // the value at all.
584  SUnit *NewDef = nullptr;
585  if (DestRC != RC) {
586  NewDef = CopyAndMoveSuccessors(LRDef);
587  if (!DestRC && !NewDef)
588  report_fatal_error("Can't handle live physical "
589  "register dependency!");
590  }
591  if (!NewDef) {
592  // Issue copies, these can be expensive cross register class copies.
593  SmallVector<SUnit*, 2> Copies;
594  InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
595  DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
596  << " to SU #" << Copies.front()->NodeNum << "\n");
597  AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
598  NewDef = Copies.back();
599  }
600 
601  DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
602  << " to SU #" << TrySU->NodeNum << "\n");
603  LiveRegDefs[Reg] = NewDef;
604  AddPred(NewDef, SDep(TrySU, SDep::Artificial));
605  TrySU->isAvailable = false;
606  CurSU = NewDef;
607  }
608 
609  if (!CurSU) {
610  llvm_unreachable("Unable to resolve live physical register dependencies!");
611  }
612  }
613 
614  // Add the nodes that aren't ready back onto the available list.
615  for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
616  NotReady[i]->isPending = false;
617  // May no longer be available due to backtracking.
618  if (NotReady[i]->isAvailable)
619  AvailableQueue.push(NotReady[i]);
620  }
621  NotReady.clear();
622 
623  if (CurSU)
624  ScheduleNodeBottomUp(CurSU, CurCycle);
625  ++CurCycle;
626  }
627 
628  // Reverse the order since it is bottom up.
629  std::reverse(Sequence.begin(), Sequence.end());
630 
631 #ifndef NDEBUG
632  VerifyScheduledSequence(/*isBottomUp=*/true);
633 #endif
634 }
635 
636 
637 namespace {
638 //===----------------------------------------------------------------------===//
639 // ScheduleDAGLinearize - No scheduling scheduler, it simply linearize the
640 // DAG in topological order.
641 // IMPORTANT: this may not work for targets with phyreg dependency.
642 //
643 class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
644 public:
645  ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}
646 
647  void Schedule() override;
648 
650  EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;
651 
652 private:
653  std::vector<SDNode*> Sequence;
654  DenseMap<SDNode*, SDNode*> GluedMap; // Cache glue to its user
655 
656  void ScheduleNode(SDNode *N);
657 };
658 } // end anonymous namespace
659 
660 void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
661  if (N->getNodeId() != 0)
662  llvm_unreachable(nullptr);
663 
664  if (!N->isMachineOpcode() &&
665  (N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
666  // These nodes do not need to be translated into MIs.
667  return;
668 
669  DEBUG(dbgs() << "\n*** Scheduling: ");
670  DEBUG(N->dump(DAG));
671  Sequence.push_back(N);
672 
673  unsigned NumOps = N->getNumOperands();
674  if (unsigned NumLeft = NumOps) {
675  SDNode *GluedOpN = nullptr;
676  do {
677  const SDValue &Op = N->getOperand(NumLeft-1);
678  SDNode *OpN = Op.getNode();
679 
680  if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
681  // Schedule glue operand right above N.
682  GluedOpN = OpN;
683  assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
684  OpN->setNodeId(0);
685  ScheduleNode(OpN);
686  continue;
687  }
688 
689  if (OpN == GluedOpN)
690  // Glue operand is already scheduled.
691  continue;
692 
693  DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN);
694  if (DI != GluedMap.end() && DI->second != N)
695  // Users of glues are counted against the glued users.
696  OpN = DI->second;
697 
698  unsigned Degree = OpN->getNodeId();
699  assert(Degree > 0 && "Predecessor over-released!");
700  OpN->setNodeId(--Degree);
701  if (Degree == 0)
702  ScheduleNode(OpN);
703  } while (--NumLeft);
704  }
705 }
706 
707 /// findGluedUser - Find the representative use of a glue value by walking
708 /// the use chain.
710  while (SDNode *Glued = N->getGluedUser())
711  N = Glued;
712  return N;
713 }
714 
715 void ScheduleDAGLinearize::Schedule() {
716  DEBUG(dbgs() << "********** DAG Linearization **********\n");
717 
719  unsigned DAGSize = 0;
720  for (SDNode &Node : DAG->allnodes()) {
721  SDNode *N = &Node;
722 
723  // Use node id to record degree.
724  unsigned Degree = N->use_size();
725  N->setNodeId(Degree);
726  unsigned NumVals = N->getNumValues();
727  if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
728  N->hasAnyUseOfValue(NumVals-1)) {
729  SDNode *User = findGluedUser(N);
730  if (User) {
731  Glues.push_back(N);
732  GluedMap.insert(std::make_pair(N, User));
733  }
734  }
735 
736  if (N->isMachineOpcode() ||
737  (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
738  ++DAGSize;
739  }
740 
741  for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
742  SDNode *Glue = Glues[i];
743  SDNode *GUser = GluedMap[Glue];
744  unsigned Degree = Glue->getNodeId();
745  unsigned UDegree = GUser->getNodeId();
746 
747  // Glue user must be scheduled together with the glue operand. So other
748  // users of the glue operand must be treated as its users.
749  SDNode *ImmGUser = Glue->getGluedUser();
750  for (const SDNode *U : Glue->uses())
751  if (U == ImmGUser)
752  --Degree;
753  GUser->setNodeId(UDegree + Degree);
754  Glue->setNodeId(1);
755  }
756 
757  Sequence.reserve(DAGSize);
758  ScheduleNode(DAG->getRoot().getNode());
759 }
760 
762 ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
763  InstrEmitter Emitter(BB, InsertPos);
764  DenseMap<SDValue, unsigned> VRBaseMap;
765 
766  DEBUG({
767  dbgs() << "\n*** Final schedule ***\n";
768  });
769 
770  // FIXME: Handle dbg_values.
771  unsigned NumNodes = Sequence.size();
772  for (unsigned i = 0; i != NumNodes; ++i) {
773  SDNode *N = Sequence[NumNodes-i-1];
774  DEBUG(N->dump(DAG));
775  Emitter.EmitNode(N, false, false, VRBaseMap);
776  }
777 
778  DEBUG(dbgs() << '\n');
779 
780  InsertPos = Emitter.getInsertPos();
781  return Emitter.getBlock();
782 }
783 
784 //===----------------------------------------------------------------------===//
785 // Public Constructor Functions
786 //===----------------------------------------------------------------------===//
787 
790  return new ScheduleDAGFast(*IS->MF);
791 }
792 
795  return new ScheduleDAGLinearize(*IS->MF);
796 }
EVT getValueType() const
Return the ValueType of the referenced return value.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
bool isCommutable() const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z, ..."), which produces the same result if Y and Z are exchanged.
Definition: MCInstrDesc.h:425
SDNode * getNode() const
Returns the representative SDNode for this SUnit.
Definition: ScheduleDAG.h:360
void dump(const ScheduleDAG *G) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isTwoAddress
Is a two-address instruction.
Definition: ScheduleDAG.h:282
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:163
static bool isClobberKind(unsigned Flag)
Definition: InlineAsm.h:281
void setSUnit(SUnit *SU)
Definition: ScheduleDAG.h:493
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg, std::vector< SUnit *> &LiveRegDefs, SmallSet< unsigned, 4 > &RegAdded, SmallVectorImpl< unsigned > &LRegs, const TargetRegisterInfo *TRI)
CheckForLiveRegDef - Return true and update live register vector if the specified register def of the...
unsigned second
STATISTIC(NumFunctions, "Total number of functions")
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
void removePred(const SDep &D)
Removes the specified edge as a pred of the current node if it exists.
void setNodeId(int Id)
Set unique node id.
unsigned getReg() const
Returns the register associated with this edge.
Definition: ScheduleDAG.h:219
SDNode * getNode() const
get the SDNode which holds the desired result
bool isAssignedRegDep() const
Tests if this is a Data dependence that is associated with a register.
Definition: ScheduleDAG.h:212
const TargetRegisterClass * CopyDstRC
Is a special copy node if != nullptr.
Definition: ScheduleDAG.h:307
EntryToken - This is the marker used to indicate the start of a region.
Definition: ISDOpcodes.h:45
MachineFunction * MF
SmallVector< SDep, 4 > Preds
All sunit predecessors.
Definition: ScheduleDAG.h:261
MachineBasicBlock * getBlock()
getBlock - Return the current basic block.
Definition: InstrEmitter.h:127
bool isScheduled
True once scheduled.
Definition: ScheduleDAG.h:289
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:191
unsigned NumSuccsLeft
of succs not scheduled.
Definition: ScheduleDAG.h:274
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:210
const HexagonInstrInfo * TII
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
Regular data dependence (aka true-dependence).
Definition: ScheduleDAG.h:54
Reg
All possible values of the reg field in the ModR/M byte.
static bool isRegDefEarlyClobberKind(unsigned Flag)
Definition: InlineAsm.h:278
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:634
auto reverse(ContainerTy &&C, typename std::enable_if< has_rbegin< ContainerTy >::value >::type *=nullptr) -> decltype(make_range(C.rbegin(), C.rend()))
Definition: STLExtras.h:232
ScheduleDAGSDNodes * createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createDAGLinearizer - This creates a "no-scheduling" scheduler which linearize the DAG using topologi...
bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
bool isPending
True once pending.
Definition: ScheduleDAG.h:287
bool isCtrl() const
Shorthand for getKind() != SDep::Data.
Definition: ScheduleDAG.h:162
SUnit * getSUnit() const
Definition: ScheduleDAG.h:490
static bool isRegDefKind(unsigned Flag)
Definition: InlineAsm.h:275
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
TargetInstrInfo - Interface to description of machine instruction set.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:146
Scheduling dependency.
Definition: ScheduleDAG.h:50
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
void setHeightToAtLeast(unsigned NewHeight)
If NewDepth is greater than this node&#39;s depth value, set it to be the new height value.
bool isAvailable()
Definition: Compression.cpp:58
const MCPhysReg * getImplicitDefs() const
Return a list of registers that are potentially written by any instance of this machine instruction...
Definition: MCInstrDesc.h:534
ScheduleDAGSDNodes - A ScheduleDAG for scheduling SDNode-based DAGs.
* if(!EatIfPresent(lltok::kw_thread_local)) return false
ParseOptionalThreadLocal := /*empty.
bool isArtificial() const
Tests if this is an Order dependence that is marked as "artificial", meaning it isn&#39;t necessary for c...
Definition: ScheduleDAG.h:201
Machine Value Type.
unsigned short Latency
Node latency.
Definition: ScheduleDAG.h:278
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
iterator_range< value_op_iterator > op_values() const
SDNode * getGluedUser() const
If this node has a glue value with a user, return the user (there is at most one).
const SDValue & getOperand(unsigned Num) const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:36
const MCPhysReg * ImplicitDefs
Definition: MCInstrDesc.h:173
static unsigned getNumOperandRegisters(unsigned Flag)
getNumOperandRegisters - Extract the number of registers field from the inline asm operand flag...
Definition: InlineAsm.h:336
MCRegAliasIterator enumerates all registers aliasing Reg.
SDNode * getGluedNode() const
If this node has a glue operand, return the node to which the glue operand points.
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn&#39;t already there.
Definition: SmallSet.h:81
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
unsigned getNumOperands() const
Return the number of values used by this operation.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned first
An unknown scheduling barrier.
Definition: ScheduleDAG.h:70
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
Definition: MCInstrDesc.h:187
void dump() const
Dump this node, for debugging.
bool isCommutable
Is a commutable instruction.
Definition: ScheduleDAG.h:283
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg, const TargetInstrInfo *TII)
getPhysicalRegisterVT - Returns the ValueType of the physical register definition of the specified no...
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:225
Represents one node in the SelectionDAG.
SelectionDAGISel - This is the common base class used for SelectionDAG-based pattern-matching instruc...
const TargetRegisterClass * CopySrcRC
Definition: ScheduleDAG.h:309
static SDNode * findGluedUser(SDNode *N)
findGluedUser - Find the representative use of a glue value by walking the use chain.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
size_t use_size() const
Return the number of uses of this node.
iterator_range< use_iterator > uses()
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
void setLatency(unsigned Lat)
Sets the latency for this edge.
Definition: ScheduleDAG.h:148
int getNodeId() const
Return the unique node id.
static RegisterScheduler fastDAGScheduler("fast", "Fast suboptimal list scheduling", createFastDAGScheduler)
bool isAvailable
True once available.
Definition: ScheduleDAG.h:288
unsigned getHeight() const
Returns the height of this node, which is the length of the maximum path down to any node which has n...
Definition: ScheduleDAG.h:411
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
void EmitNode(SDNode *Node, bool IsClone, bool IsCloned, DenseMap< SDValue, unsigned > &VRBaseMap)
EmitNode - Generate machine code for a node and needed dependencies.
Definition: InstrEmitter.h:118
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:61
static RegisterScheduler linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling", createDAGLinearizer)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
#define N
Sequence
A sequence of states that a pointer may go through in which an objc_retain and objc_release are actua...
Definition: PtrState.h:41
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:175
unsigned NodeNum
Entry # of node in the node vector.
Definition: ScheduleDAG.h:269
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool addPred(const SDep &D, bool Required=true)
Adds the specified edge as a pred of the current node if not already.
MachineBasicBlock::iterator getInsertPos()
getInsertPos - Return the current insertion position.
Definition: InstrEmitter.h:130
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SmallVector< SDep, 4 > Succs
All sunit successors.
Definition: ScheduleDAG.h:262
Arbitrary strong DAG edge (no real dependence).
Definition: ScheduleDAG.h:73
ScheduleDAGSDNodes * createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel)
createFastDAGScheduler - This creates a "fast" scheduler.
#define DEBUG(X)
Definition: Debug.h:118
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
Scheduling unit. This is a node in the scheduling DAG.
Definition: ScheduleDAG.h:247