LLVM  7.0.0svn
ScheduleDAGFast.cpp
1 //===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements a fast scheduler.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InstrEmitter.h"
15 #include "ScheduleDAGSDNodes.h"
16 #include "SDNodeDbgValue.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/CodeGen/SchedulerRegistry.h"
21 #include "llvm/CodeGen/SelectionDAGISel.h"
22 #include "llvm/CodeGen/TargetInstrInfo.h"
23 #include "llvm/CodeGen/TargetRegisterInfo.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/InlineAsm.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/ErrorHandling.h"
28 #include "llvm/Support/raw_ostream.h"
29 using namespace llvm;
30 
31 #define DEBUG_TYPE "pre-RA-sched"
32 
33 STATISTIC(NumUnfolds, "Number of nodes unfolded");
34 STATISTIC(NumDups, "Number of duplicated nodes");
35 STATISTIC(NumPRCopies, "Number of physical copies");
36 
37 static RegisterScheduler
38  fastDAGScheduler("fast", "Fast suboptimal list scheduling",
39                   createFastDAGScheduler);
40 static RegisterScheduler
41  linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
42                        createDAGLinearizer);
43 
44 
45 namespace {
46  /// FastPriorityQueue - A degenerate priority queue that considers
47  /// all nodes to have the same priority.
48  ///
49  struct FastPriorityQueue {
50  SmallVector<SUnit *, 16> Queue;
51 
52  bool empty() const { return Queue.empty(); }
53 
54  void push(SUnit *U) {
55  Queue.push_back(U);
56  }
57 
58  SUnit *pop() {
59  if (empty()) return nullptr;
60  SUnit *V = Queue.back();
61  Queue.pop_back();
62  return V;
63  }
64  };
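Because every SUnit gets the same priority, FastPriorityQueue degenerates into a stack: pop() returns whichever unit was released most recently. A minimal standalone sketch of that behavior (illustrative code, not part of this file; it uses int in place of SUnit* and asserts instead of returning nullptr on an empty queue):

    #include <cassert>
    #include <vector>

    // Stand-in for FastPriorityQueue: all elements share one priority,
    // so the container is effectively a LIFO stack.
    struct TrivialPriorityQueue {
      std::vector<int> Queue;
      bool empty() const { return Queue.empty(); }
      void push(int U) { Queue.push_back(U); }
      int pop() {
        assert(!empty() && "pop() on an empty queue");
        int V = Queue.back(); // the most recently pushed element wins
        Queue.pop_back();
        return V;
      }
    };

    int main() {
      TrivialPriorityQueue Q;
      Q.push(1);
      Q.push(2);
      assert(Q.pop() == 2); // LIFO: the last unit released is popped first
      assert(Q.pop() == 1);
      return 0;
    }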
65 
66 //===----------------------------------------------------------------------===//
67 /// ScheduleDAGFast - The actual "fast" list scheduler implementation.
68 ///
69 class ScheduleDAGFast : public ScheduleDAGSDNodes {
70 private:
71  /// AvailableQueue - The priority queue to use for the available SUnits.
72  FastPriorityQueue AvailableQueue;
73 
74  /// LiveRegDefs - A set of physical registers and their definitions
75  /// that are "live". These nodes must be scheduled before any other node that
76  /// modifies the registers can be scheduled.
77  unsigned NumLiveRegs;
78  std::vector<SUnit*> LiveRegDefs;
79  std::vector<unsigned> LiveRegCycles;
80 
81 public:
82  ScheduleDAGFast(MachineFunction &mf)
83  : ScheduleDAGSDNodes(mf) {}
84 
85  void Schedule() override;
86 
87  /// AddPred - Adds a predecessor edge to SUnit SU by forwarding to
88  /// SUnit::addPred.
89  void AddPred(SUnit *SU, const SDep &D) {
90  SU->addPred(D);
91  }
92 
93  /// RemovePred - Removes a predecessor edge from SUnit SU by forwarding to
94  /// SUnit::removePred.
95  void RemovePred(SUnit *SU, const SDep &D) {
96  SU->removePred(D);
97  }
98 
99 private:
100  void ReleasePred(SUnit *SU, SDep *PredEdge);
101  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
102  void ScheduleNodeBottomUp(SUnit*, unsigned);
103  SUnit *CopyAndMoveSuccessors(SUnit*);
104  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
105                                const TargetRegisterClass*,
106                                const TargetRegisterClass*,
107                                SmallVectorImpl<SUnit*>&);
108  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
109  void ListScheduleBottomUp();
110 
111  /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
112  bool forceUnitLatencies() const override { return true; }
113 };
114 } // end anonymous namespace
115 
116 
117 /// Schedule - Schedule the DAG using list scheduling.
118 void ScheduleDAGFast::Schedule() {
119  LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");
120 
121  NumLiveRegs = 0;
122  LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
123  LiveRegCycles.resize(TRI->getNumRegs(), 0);
124 
125  // Build the scheduling graph.
126  BuildSchedGraph(nullptr);
127 
128  LLVM_DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) SUnits[su]
129  .dumpAll(this));
130 
131  // Execute the actual scheduling loop.
132  ListScheduleBottomUp();
133 }
134 
135 //===----------------------------------------------------------------------===//
136 // Bottom-Up Scheduling
137 //===----------------------------------------------------------------------===//
138 
139 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
140 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
141 void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
142  SUnit *PredSU = PredEdge->getSUnit();
143 
144 #ifndef NDEBUG
145  if (PredSU->NumSuccsLeft == 0) {
146  dbgs() << "*** Scheduling failed! ***\n";
147  PredSU->dump(this);
148  dbgs() << " has been released too many times!\n";
149  llvm_unreachable(nullptr);
150  }
151 #endif
152  --PredSU->NumSuccsLeft;
153 
154  // If all the node's successors are scheduled, this node is ready
155  // to be scheduled. Ignore the special EntrySU node.
156  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
157  PredSU->isAvailable = true;
158  AvailableQueue.push(PredSU);
159  }
160 }
161 
162 void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
163  // Bottom up: release predecessors
164  for (SDep &Pred : SU->Preds) {
165  ReleasePred(SU, &Pred);
166  if (Pred.isAssignedRegDep()) {
167  // This is a physical register dependency and it's impossible or
168  // expensive to copy the register. Make sure nothing that can
169  // clobber the register is scheduled between the predecessor and
170  // this node.
171  if (!LiveRegDefs[Pred.getReg()]) {
172  ++NumLiveRegs;
173  LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
174  LiveRegCycles[Pred.getReg()] = CurCycle;
175  }
176  }
177  }
178 }
179 
180 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
181 /// count of its predecessors. If a predecessor pending count is zero, add it to
182 /// the Available queue.
183 void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
184  LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
185  LLVM_DEBUG(SU->dump(this));
186 
187  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
188  SU->setHeightToAtLeast(CurCycle);
189  Sequence.push_back(SU);
190 
191  ReleasePredecessors(SU, CurCycle);
192 
193  // Release all the implicit physical register defs that are live.
194  for (SDep &Succ : SU->Succs) {
195  if (Succ.isAssignedRegDep()) {
196  if (LiveRegCycles[Succ.getReg()] == Succ.getSUnit()->getHeight()) {
197  assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
198  assert(LiveRegDefs[Succ.getReg()] == SU &&
199  "Physical register dependency violated?");
200  --NumLiveRegs;
201  LiveRegDefs[Succ.getReg()] = nullptr;
202  LiveRegCycles[Succ.getReg()] = 0;
203  }
204  }
205  }
206 
207  SU->isScheduled = true;
208 }
209 
210 /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
211 /// successors to the newly created node.
212 SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
213  if (SU->getNode()->getGluedNode())
214  return nullptr;
215 
216  SDNode *N = SU->getNode();
217  if (!N)
218  return nullptr;
219 
220  SUnit *NewSU;
221  bool TryUnfold = false;
222  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
223  MVT VT = N->getSimpleValueType(i);
224  if (VT == MVT::Glue)
225  return nullptr;
226  else if (VT == MVT::Other)
227  TryUnfold = true;
228  }
229  for (const SDValue &Op : N->op_values()) {
230  MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
231  if (VT == MVT::Glue)
232  return nullptr;
233  }
234 
235  if (TryUnfold) {
236  SmallVector<SDNode*, 2> NewNodes;
237  if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
238  return nullptr;
239 
240  LLVM_DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
241  assert(NewNodes.size() == 2 && "Expected a load folding node!");
242 
243  N = NewNodes[1];
244  SDNode *LoadNode = NewNodes[0];
245  unsigned NumVals = N->getNumValues();
246  unsigned OldNumVals = SU->getNode()->getNumValues();
247  for (unsigned i = 0; i != NumVals; ++i)
248  DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
249  DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
250  SDValue(LoadNode, 1));
251 
252  SUnit *NewSU = newSUnit(N);
253  assert(N->getNodeId() == -1 && "Node already inserted!");
254  N->setNodeId(NewSU->NodeNum);
255 
256  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
257  for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
258  if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
259  NewSU->isTwoAddress = true;
260  break;
261  }
262  }
263  if (MCID.isCommutable())
264  NewSU->isCommutable = true;
265 
266  // LoadNode may already exist. This can happen when there is another
267  // load from the same location that produces the same type of value
268  // but with different alignment or volatility.
269  bool isNewLoad = true;
270  SUnit *LoadSU;
271  if (LoadNode->getNodeId() != -1) {
272  LoadSU = &SUnits[LoadNode->getNodeId()];
273  isNewLoad = false;
274  } else {
275  LoadSU = newSUnit(LoadNode);
276  LoadNode->setNodeId(LoadSU->NodeNum);
277  }
278 
279  SDep ChainPred;
280  SmallVector<SDep, 4> ChainSuccs;
281  SmallVector<SDep, 4> LoadPreds;
282  SmallVector<SDep, 4> NodePreds;
283  SmallVector<SDep, 4> NodeSuccs;
284  for (SDep &Pred : SU->Preds) {
285  if (Pred.isCtrl())
286  ChainPred = Pred;
287  else if (Pred.getSUnit()->getNode() &&
288  Pred.getSUnit()->getNode()->isOperandOf(LoadNode))
289  LoadPreds.push_back(Pred);
290  else
291  NodePreds.push_back(Pred);
292  }
293  for (SDep &Succ : SU->Succs) {
294  if (Succ.isCtrl())
295  ChainSuccs.push_back(Succ);
296  else
297  NodeSuccs.push_back(Succ);
298  }
299 
300  if (ChainPred.getSUnit()) {
301  RemovePred(SU, ChainPred);
302  if (isNewLoad)
303  AddPred(LoadSU, ChainPred);
304  }
305  for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
306  const SDep &Pred = LoadPreds[i];
307  RemovePred(SU, Pred);
308  if (isNewLoad) {
309  AddPred(LoadSU, Pred);
310  }
311  }
312  for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
313  const SDep &Pred = NodePreds[i];
314  RemovePred(SU, Pred);
315  AddPred(NewSU, Pred);
316  }
317  for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
318  SDep D = NodeSuccs[i];
319  SUnit *SuccDep = D.getSUnit();
320  D.setSUnit(SU);
321  RemovePred(SuccDep, D);
322  D.setSUnit(NewSU);
323  AddPred(SuccDep, D);
324  }
325  for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
326  SDep D = ChainSuccs[i];
327  SUnit *SuccDep = D.getSUnit();
328  D.setSUnit(SU);
329  RemovePred(SuccDep, D);
330  if (isNewLoad) {
331  D.setSUnit(LoadSU);
332  AddPred(SuccDep, D);
333  }
334  }
335  if (isNewLoad) {
336  SDep D(LoadSU, SDep::Barrier);
337  D.setLatency(LoadSU->Latency);
338  AddPred(NewSU, D);
339  }
340 
341  ++NumUnfolds;
342 
343  if (NewSU->NumSuccsLeft == 0) {
344  NewSU->isAvailable = true;
345  return NewSU;
346  }
347  SU = NewSU;
348  }
349 
350  LLVM_DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
351  NewSU = Clone(SU);
352 
353  // New SUnit has the exact same predecessors.
354  for (SDep &Pred : SU->Preds)
355  if (!Pred.isArtificial())
356  AddPred(NewSU, Pred);
357 
358  // Only copy scheduled successors. Cut them from old node's successor
359  // list and move them over.
360  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
361  for (SDep &Succ : SU->Succs) {
362  if (Succ.isArtificial())
363  continue;
364  SUnit *SuccSU = Succ.getSUnit();
365  if (SuccSU->isScheduled) {
366  SDep D = Succ;
367  D.setSUnit(NewSU);
368  AddPred(SuccSU, D);
369  D.setSUnit(SU);
370  DelDeps.push_back(std::make_pair(SuccSU, D));
371  }
372  }
373  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
374  RemovePred(DelDeps[i].first, DelDeps[i].second);
375 
376  ++NumDups;
377  return NewSU;
378 }
379 
380 /// InsertCopiesAndMoveSuccs - Insert register copies and move all
381 /// scheduled successors of the given SUnit to the last copy.
382 void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
383  const TargetRegisterClass *DestRC,
384  const TargetRegisterClass *SrcRC,
385  SmallVectorImpl<SUnit*> &Copies) {
386  SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
387  CopyFromSU->CopySrcRC = SrcRC;
388  CopyFromSU->CopyDstRC = DestRC;
389 
390  SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
391  CopyToSU->CopySrcRC = DestRC;
392  CopyToSU->CopyDstRC = SrcRC;
393 
394  // Only copy scheduled successors. Cut them from old node's successor
395  // list and move them over.
396  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
397  for (SDep &Succ : SU->Succs) {
398  if (Succ.isArtificial())
399  continue;
400  SUnit *SuccSU = Succ.getSUnit();
401  if (SuccSU->isScheduled) {
402  SDep D = Succ;
403  D.setSUnit(CopyToSU);
404  AddPred(SuccSU, D);
405  DelDeps.push_back(std::make_pair(SuccSU, Succ));
406  }
407  }
408  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
409  RemovePred(DelDeps[i].first, DelDeps[i].second);
410  }
411  SDep FromDep(SU, SDep::Data, Reg);
412  FromDep.setLatency(SU->Latency);
413  AddPred(CopyFromSU, FromDep);
414  SDep ToDep(CopyFromSU, SDep::Data, 0);
415  ToDep.setLatency(CopyFromSU->Latency);
416  AddPred(CopyToSU, ToDep);
417 
418  Copies.push_back(CopyFromSU);
419  Copies.push_back(CopyToSU);
420 
421  ++NumPRCopies;
422 }
423 
424 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
425 /// definition of the specified node.
426 /// FIXME: Move to SelectionDAG?
427 static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
428  const TargetInstrInfo *TII) {
429  unsigned NumRes;
430  if (N->getOpcode() == ISD::CopyFromReg) {
431  // CopyFromReg has: "chain, Val, glue" so operand 1 gives the type.
432  NumRes = 1;
433  } else {
434  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
435  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
436  NumRes = MCID.getNumDefs();
437  for (const MCPhysReg *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
438  if (Reg == *ImpDef)
439  break;
440  ++NumRes;
441  }
442  }
443  return N->getSimpleValueType(NumRes);
444 }
445 
446 /// CheckForLiveRegDef - Return true and update live register vector if the
447 /// specified register def of the specified SUnit clobbers any "live" registers.
448 static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
449  std::vector<SUnit*> &LiveRegDefs,
450  SmallSet<unsigned, 4> &RegAdded,
451  SmallVectorImpl<unsigned> &LRegs,
452  const TargetRegisterInfo *TRI) {
453  bool Added = false;
454  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
455  if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
456  if (RegAdded.insert(*AI).second) {
457  LRegs.push_back(*AI);
458  Added = true;
459  }
460  }
461  }
462  return Added;
463 }
464 
465 /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
466 /// scheduling of the given node to satisfy live physical register dependencies.
467 /// If the specific node is the last one that's available to schedule, do
468 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
469 bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
470  SmallVectorImpl<unsigned> &LRegs) {
471  if (NumLiveRegs == 0)
472  return false;
473 
474  SmallSet<unsigned, 4> RegAdded;
475  // If this node would clobber any "live" register, then it's not ready.
476  for (SDep &Pred : SU->Preds) {
477  if (Pred.isAssignedRegDep()) {
478  CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs,
479  RegAdded, LRegs, TRI);
480  }
481  }
482 
483  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
484  if (Node->getOpcode() == ISD::INLINEASM) {
485  // Inline asm can clobber physical defs.
486  unsigned NumOps = Node->getNumOperands();
487  if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
488  --NumOps; // Ignore the glue operand.
489 
490  for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
491  unsigned Flags =
492  cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
493  unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
494 
495  ++i; // Skip the ID value.
496  if (InlineAsm::isRegDefKind(Flags) ||
497  InlineAsm::isRegDefEarlyClobberKind(Flags) ||
498  InlineAsm::isClobberKind(Flags)) {
499  // Check for def of register or earlyclobber register.
500  for (; NumVals; --NumVals, ++i) {
501  unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
502  if (TargetRegisterInfo::isPhysicalRegister(Reg))
503  CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
504  }
505  } else
506  i += NumVals;
507  }
508  continue;
509  }
510  if (!Node->isMachineOpcode())
511  continue;
512  const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
513  if (!MCID.ImplicitDefs)
514  continue;
515  for (const MCPhysReg *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
516  CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
517  }
518  }
519  return !LRegs.empty();
520 }
521 
522 
523 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
524 /// schedulers.
525 void ScheduleDAGFast::ListScheduleBottomUp() {
526  unsigned CurCycle = 0;
527 
528  // Release any predecessors of the special Exit node.
529  ReleasePredecessors(&ExitSU, CurCycle);
530 
531  // Add root to Available queue.
532  if (!SUnits.empty()) {
533  SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
534  assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
535  RootSU->isAvailable = true;
536  AvailableQueue.push(RootSU);
537  }
538 
539  // While Available queue is not empty, grab the node with the highest
540  // priority. If it is not ready put it back. Schedule the node.
541  SmallVector<SUnit*, 4> NotReady;
542  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
543  Sequence.reserve(SUnits.size());
544  while (!AvailableQueue.empty()) {
545  bool Delayed = false;
546  LRegsMap.clear();
547  SUnit *CurSU = AvailableQueue.pop();
548  while (CurSU) {
549  SmallVector<unsigned, 4> LRegs;
550  if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
551  break;
552  Delayed = true;
553  LRegsMap.insert(std::make_pair(CurSU, LRegs));
554 
555  CurSU->isPending = true; // This SU is not in AvailableQueue right now.
556  NotReady.push_back(CurSU);
557  CurSU = AvailableQueue.pop();
558  }
559 
560  // All candidates are delayed due to live physical reg dependencies.
561  // Try code duplication or inserting cross class copies
562  // to resolve it.
563  if (Delayed && !CurSU) {
564  if (!CurSU) {
565  // Try duplicating the nodes that produce these
566  // "expensive to copy" values to break the dependency. In case even
567  // that doesn't work, insert cross class copies.
568  SUnit *TrySU = NotReady[0];
569  SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
570  assert(LRegs.size() == 1 && "Can't handle this yet!");
571  unsigned Reg = LRegs[0];
572  SUnit *LRDef = LiveRegDefs[Reg];
573  MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
574  const TargetRegisterClass *RC =
575  TRI->getMinimalPhysRegClass(Reg, VT);
576  const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
577 
578  // If the cross copy register class is the same as RC, then it must be
579  // possible to copy the value directly. Do not try to duplicate the def.
580  // If the cross copy register class is not the same as RC, then it's
581  // possible to copy the value but it requires cross register class copies
582  // and it is expensive.
583  // If the cross copy register class is null, then it's not possible to copy
584  // the value at all.
585  SUnit *NewDef = nullptr;
586  if (DestRC != RC) {
587  NewDef = CopyAndMoveSuccessors(LRDef);
588  if (!DestRC && !NewDef)
589  report_fatal_error("Can't handle live physical "
590  "register dependency!");
591  }
592  if (!NewDef) {
593  // Issue copies, these can be expensive cross register class copies.
594  SmallVector<SUnit*, 2> Copies;
595  InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
596  LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
597  << " to SU #" << Copies.front()->NodeNum << "\n");
598  AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
599  NewDef = Copies.back();
600  }
601 
602  LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
603  << " to SU #" << TrySU->NodeNum << "\n");
604  LiveRegDefs[Reg] = NewDef;
605  AddPred(NewDef, SDep(TrySU, SDep::Artificial));
606  TrySU->isAvailable = false;
607  CurSU = NewDef;
608  }
609 
610  if (!CurSU) {
611  llvm_unreachable("Unable to resolve live physical register dependencies!");
612  }
613  }
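  // Illustration of the fallback above (annotation, not part of the original
  // source): if every remaining candidate would clobber some physical register
  // R that is still "live" (an already scheduled node consumes a value pinned
  // to R), the scheduler first tries to clone R's defining node with
  // CopyAndMoveSuccessors so each consumer gets its own def. If cloning is not
  // possible, it falls back to InsertCopiesAndMoveSuccs, which copies the value
  // out of R into the cross-copy register class and back, and the artificial
  // edges added here force those copies into a legal position in the schedule.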
614 
615  // Add the nodes that aren't ready back onto the available list.
616  for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
617  NotReady[i]->isPending = false;
618  // May no longer be available due to backtracking.
619  if (NotReady[i]->isAvailable)
620  AvailableQueue.push(NotReady[i]);
621  }
622  NotReady.clear();
623 
624  if (CurSU)
625  ScheduleNodeBottomUp(CurSU, CurCycle);
626  ++CurCycle;
627  }
628 
629  // Reverse the order since it is bottom up.
630  std::reverse(Sequence.begin(), Sequence.end());
631 
632 #ifndef NDEBUG
633  VerifyScheduledSequence(/*isBottomUp=*/true);
634 #endif
635 }
636 
637 
638 namespace {
639 //===----------------------------------------------------------------------===//
640 // ScheduleDAGLinearize - A "no scheduling" scheduler; it simply linearizes
641 // the DAG in topological order.
642 // IMPORTANT: this may not work for targets with physical register dependencies.
643 //
644 class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
645 public:
646  ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}
647 
648  void Schedule() override;
649 
650  MachineBasicBlock *
651  EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;
652 
653 private:
654  std::vector<SDNode*> Sequence;
655  DenseMap<SDNode*, SDNode*> GluedMap; // Cache glue to its user
656 
657  void ScheduleNode(SDNode *N);
658 };
659 } // end anonymous namespace
660 
661 void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
662  if (N->getNodeId() != 0)
663  llvm_unreachable(nullptr);
664 
665  if (!N->isMachineOpcode() &&
666  (N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
667  // These nodes do not need to be translated into MIs.
668  return;
669 
670  LLVM_DEBUG(dbgs() << "\n*** Scheduling: ");
671  LLVM_DEBUG(N->dump(DAG));
672  Sequence.push_back(N);
673 
674  unsigned NumOps = N->getNumOperands();
675  if (unsigned NumLeft = NumOps) {
676  SDNode *GluedOpN = nullptr;
677  do {
678  const SDValue &Op = N->getOperand(NumLeft-1);
679  SDNode *OpN = Op.getNode();
680 
681  if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
682  // Schedule glue operand right above N.
683  GluedOpN = OpN;
684  assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
685  OpN->setNodeId(0);
686  ScheduleNode(OpN);
687  continue;
688  }
689 
690  if (OpN == GluedOpN)
691  // Glue operand is already scheduled.
692  continue;
693 
694  DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN);
695  if (DI != GluedMap.end() && DI->second != N)
696  // Users of glues are counted against the glued users.
697  OpN = DI->second;
698 
699  unsigned Degree = OpN->getNodeId();
700  assert(Degree > 0 && "Predecessor over-released!");
701  OpN->setNodeId(--Degree);
702  if (Degree == 0)
703  ScheduleNode(OpN);
704  } while (--NumLeft);
705  }
706 }
707 
708 /// findGluedUser - Find the representative use of a glue value by walking
709 /// the use chain.
710 static SDNode *findGluedUser(SDNode *N) {
711  while (SDNode *Glued = N->getGluedUser())
712  N = Glued;
713  return N;
714 }
715 
716 void ScheduleDAGLinearize::Schedule() {
717  LLVM_DEBUG(dbgs() << "********** DAG Linearization **********\n");
718 
719  SmallVector<SDNode*, 8> Glues;
720  unsigned DAGSize = 0;
721  for (SDNode &Node : DAG->allnodes()) {
722  SDNode *N = &Node;
723 
724  // Use node id to record degree.
725  unsigned Degree = N->use_size();
726  N->setNodeId(Degree);
727  unsigned NumVals = N->getNumValues();
728  if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
729  N->hasAnyUseOfValue(NumVals-1)) {
730  SDNode *User = findGluedUser(N);
731  if (User) {
732  Glues.push_back(N);
733  GluedMap.insert(std::make_pair(N, User));
734  }
735  }
736 
737  if (N->isMachineOpcode() ||
738  (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
739  ++DAGSize;
740  }
741 
742  for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
743  SDNode *Glue = Glues[i];
744  SDNode *GUser = GluedMap[Glue];
745  unsigned Degree = Glue->getNodeId();
746  unsigned UDegree = GUser->getNodeId();
747 
748  // Glue user must be scheduled together with the glue operand. So other
749  // users of the glue operand must be treated as its users.
750  SDNode *ImmGUser = Glue->getGluedUser();
751  for (const SDNode *U : Glue->uses())
752  if (U == ImmGUser)
753  --Degree;
754  GUser->setNodeId(UDegree + Degree);
755  Glue->setNodeId(1);
756  }
757 
758  Sequence.reserve(DAGSize);
759  ScheduleNode(DAG->getRoot().getNode());
760 }
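ScheduleDAGLinearize reuses each SDNode's id field as a countdown of unscheduled users: Schedule() seeds the counts from each node's use count (with special handling for glue), and ScheduleNode() emits a node, then decrements each operand's count and recurses once a count reaches zero. A small standalone sketch of that degree-counting walk (illustrative code, not part of this file; glue handling omitted):

    #include <cstdio>
    #include <vector>

    // Each node records how many users have not been emitted yet; this plays
    // the role of the SDNode "node id" in ScheduleDAGLinearize.
    struct Node {
      const char *Name;
      std::vector<Node *> Operands; // the nodes this node uses
      int UsesLeft;                 // number of users not yet scheduled
    };

    // Mirror of ScheduleDAGLinearize::ScheduleNode: emit N, then release its
    // operands; an operand is emitted once its last user has been emitted.
    static void scheduleNode(Node *N, std::vector<Node *> &Sequence) {
      Sequence.push_back(N);
      for (Node *Op : N->Operands)
        if (--Op->UsesLeft == 0)
          scheduleNode(Op, Sequence);
    }

    int main() {
      Node A{"A", {}, 0}, B{"B", {}, 0}, C{"C", {}, 0};
      B.Operands = {&A};          // B uses A
      C.Operands = {&A, &B};      // the root C uses A and B
      A.UsesLeft = 2;             // A's users: B and C
      B.UsesLeft = 1;             // B's user: C

      std::vector<Node *> Sequence;
      scheduleNode(&C, Sequence); // start at the DAG root, as Schedule() does
      // Sequence holds C, B, A; walking it backwards (as EmitSchedule does)
      // yields the topological order A, B, C.
      for (auto I = Sequence.rbegin(); I != Sequence.rend(); ++I)
        std::printf("%s\n", (*I)->Name);
      return 0;
    }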
761 
762 MachineBasicBlock *
763 ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
764  InstrEmitter Emitter(BB, InsertPos);
765  DenseMap<SDValue, unsigned> VRBaseMap;
766 
767  LLVM_DEBUG({ dbgs() << "\n*** Final schedule ***\n"; });
768 
769  unsigned NumNodes = Sequence.size();
770  MachineBasicBlock *BB = Emitter.getBlock();
771  for (unsigned i = 0; i != NumNodes; ++i) {
772  SDNode *N = Sequence[NumNodes-i-1];
773  LLVM_DEBUG(N->dump(DAG));
774  Emitter.EmitNode(N, false, false, VRBaseMap);
775 
776  // Emit any debug values associated with the node.
777  if (N->getHasDebugValue()) {
778  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
779  for (auto DV : DAG->GetDbgValues(N)) {
780  if (DV->isInvalidated())
781  continue;
782  if (auto *DbgMI = Emitter.EmitDbgValue(DV, VRBaseMap))
783  BB->insert(InsertPos, DbgMI);
784  DV->setIsInvalidated();
785  }
786  }
787  }
788 
789  LLVM_DEBUG(dbgs() << '\n');
790 
791  InsertPos = Emitter.getInsertPos();
792  return Emitter.getBlock();
793 }
794 
795 //===----------------------------------------------------------------------===//
796 // Public Constructor Functions
797 //===----------------------------------------------------------------------===//
798 
799 llvm::ScheduleDAGSDNodes *
800 llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
801  return new ScheduleDAGFast(*IS->MF);
802 }
803 
804 llvm::ScheduleDAGSDNodes *
805 llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) {
806  return new ScheduleDAGLinearize(*IS->MF);
807 }
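The two RegisterScheduler objects near the top of this file register these implementations under the names "fast" and "linearize". Assuming a standard llc build, either one can therefore be requested explicitly with the -pre-RA-sched option (for example -pre-RA-sched=fast or -pre-RA-sched=linearize) instead of relying on whichever scheduler the default selection logic would otherwise pick.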