LCOV - code coverage report
Current view: top level - lib/CodeGen/SelectionDAG - ScheduleDAGFast.cpp (source / functions) Hit Total Coverage
Test: llvm-toolchain.info Lines: 277 365 75.9 %
Date: 2017-09-14 15:23:50 Functions: 23 23 100.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : //===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
      10             : // This implements a fast scheduler.
      11             : //
      12             : //===----------------------------------------------------------------------===//
      13             : 
      14             : #include "InstrEmitter.h"
      15             : #include "ScheduleDAGSDNodes.h"
      16             : #include "llvm/ADT/STLExtras.h"
      17             : #include "llvm/ADT/SmallSet.h"
      18             : #include "llvm/ADT/Statistic.h"
      19             : #include "llvm/CodeGen/SchedulerRegistry.h"
      20             : #include "llvm/CodeGen/SelectionDAGISel.h"
      21             : #include "llvm/IR/DataLayout.h"
      22             : #include "llvm/IR/InlineAsm.h"
      23             : #include "llvm/Support/Debug.h"
      24             : #include "llvm/Support/ErrorHandling.h"
      25             : #include "llvm/Support/raw_ostream.h"
      26             : #include "llvm/Target/TargetInstrInfo.h"
      27             : #include "llvm/Target/TargetRegisterInfo.h"
      28             : using namespace llvm;
      29             : 
      30             : #define DEBUG_TYPE "pre-RA-sched"
      31             : 
// Statistics reported under -stats; incremented by the unfolding, duplication,
// and cross-class-copy paths below.
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical copies");

// Register the two schedulers implemented in this file under the names
// "fast" and "linearize" (presumably selectable via the pre-RA scheduling
// option — confirm against SchedulerRegistry usage).
static RegisterScheduler
  fastDAGScheduler("fast", "Fast suboptimal list scheduling",
                   createFastDAGScheduler);
static RegisterScheduler
  linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
                        createDAGLinearizer);
      42             : 
      43             : 
      44             : namespace {
      45             :   /// FastPriorityQueue - A degenerate priority queue that considers
      46             :   /// all nodes to have the same priority.
      47             :   ///
      48         136 :   struct FastPriorityQueue {
      49             :     SmallVector<SUnit *, 16> Queue;
      50             : 
      51         450 :     bool empty() const { return Queue.empty(); }
      52             : 
      53             :     void push(SUnit *U) {
      54         202 :       Queue.push_back(U);
      55             :     }
      56             : 
      57             :     SUnit *pop() {
      58         215 :       if (empty()) return nullptr;
      59         404 :       SUnit *V = Queue.back();
      60         404 :       Queue.pop_back();
      61             :       return V;
      62             :     }
      63             :   };
      64             : 
//===----------------------------------------------------------------------===//
/// ScheduleDAGFast - The actual "fast" list scheduler implementation.
///
class ScheduleDAGFast : public ScheduleDAGSDNodes {
private:
  /// AvailableQueue - The priority queue to use for the available SUnits.
  FastPriorityQueue AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definition
  /// that are "live". These nodes must be scheduled before any other nodes that
  /// modifies the registers can be scheduled.
  /// NumLiveRegs counts the live entries; LiveRegDefs and LiveRegCycles are
  /// indexed by physical register number and sized in Schedule().
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGFast(MachineFunction &mf)
    : ScheduleDAGSDNodes(mf) {}

  void Schedule() override;

  /// AddPred - adds a predecessor edge to SUnit SU.
  void AddPred(SUnit *SU, const SDep &D) {
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  void RemovePred(SUnit *SU, const SDep &D) {
    SU->removePred(D);
  }

private:
  void ReleasePred(SUnit *SU, SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
  void ListScheduleBottomUp();

  /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
  bool forceUnitLatencies() const override { return true; }
};
     113             : }  // end anonymous namespace
     114             : 
     115             : 
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGFast::Schedule() {
  DEBUG(dbgs() << "********** List Scheduling **********\n");

  // Reset per-function live-register state, sized by the target's register
  // count so registers can be used directly as indices.
  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph(nullptr);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();
}
     133             : 
     134             : //===----------------------------------------------------------------------===//
     135             : //  Bottom-Up Scheduling
     136             : //===----------------------------------------------------------------------===//
     137             : 
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  // Releasing a node more times than it has successors indicates a
  // bookkeeping bug in the scheduling graph; abort loudly in debug builds.
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --PredSU->NumSuccsLeft;

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;
    AvailableQueue.push(PredSU);
  }
}
     160             : 
/// ReleasePredecessors - Release every predecessor edge of SU, and record any
/// physical registers those edges assign as "live" so later nodes that would
/// clobber them are delayed.
void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
  // Bottom up: release predecessors
  for (SDep &Pred : SU->Preds) {
    ReleasePred(SU, &Pred);
    if (Pred.isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      // Only the first (outermost) def of a register is recorded; a later
      // edge on the same register leaves the existing entry in place.
      if (!LiveRegDefs[Pred.getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
        LiveRegCycles[Pred.getReg()] = CurCycle;
      }
    }
  }
}
     178             : 
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  // Scheduling SU may make its predecessors available.
  ReleasePredecessors(SU, CurCycle);

  // Release all the implicit physical register defs that are live.
  for (SDep &Succ : SU->Succs) {
    if (Succ.isAssignedRegDep()) {
      // Clear the live-reg entry only when this edge is the one whose cycle
      // was recorded by ReleasePredecessors (i.e. the outermost def).
      if (LiveRegCycles[Succ.getReg()] == Succ.getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[Succ.getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[Succ.getReg()] = nullptr;
        LiveRegCycles[Succ.getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
}
     208             : 
     209             : /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
     210             : /// successors to the newly created node.
     211          13 : SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
     212          13 :   if (SU->getNode()->getGluedNode())
     213             :     return nullptr;
     214             : 
     215           1 :   SDNode *N = SU->getNode();
     216             :   if (!N)
     217             :     return nullptr;
     218             : 
     219             :   SUnit *NewSU;
     220           1 :   bool TryUnfold = false;
     221           4 :   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
     222           2 :     MVT VT = N->getSimpleValueType(i);
     223           2 :     if (VT == MVT::Glue)
     224             :       return nullptr;
     225           2 :     else if (VT == MVT::Other)
     226           0 :       TryUnfold = true;
     227             :   }
     228           6 :   for (const SDValue &Op : N->op_values()) {
     229           4 :     MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
     230           2 :     if (VT == MVT::Glue)
     231             :       return nullptr;
     232             :   }
     233             : 
     234           1 :   if (TryUnfold) {
     235           0 :     SmallVector<SDNode*, 2> NewNodes;
     236           0 :     if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
     237           0 :       return nullptr;
     238             : 
     239             :     DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
     240             :     assert(NewNodes.size() == 2 && "Expected a load folding node!");
     241             : 
     242           0 :     N = NewNodes[1];
     243           0 :     SDNode *LoadNode = NewNodes[0];
     244           0 :     unsigned NumVals = N->getNumValues();
     245           0 :     unsigned OldNumVals = SU->getNode()->getNumValues();
     246           0 :     for (unsigned i = 0; i != NumVals; ++i)
     247           0 :       DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
     248           0 :     DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
     249             :                                    SDValue(LoadNode, 1));
     250             : 
     251           0 :     SUnit *NewSU = newSUnit(N);
     252             :     assert(N->getNodeId() == -1 && "Node already inserted!");
     253           0 :     N->setNodeId(NewSU->NodeNum);
     254             : 
     255           0 :     const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
     256           0 :     for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
     257           0 :       if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
     258           0 :         NewSU->isTwoAddress = true;
     259           0 :         break;
     260             :       }
     261             :     }
     262           0 :     if (MCID.isCommutable())
     263           0 :       NewSU->isCommutable = true;
     264             : 
     265             :     // LoadNode may already exist. This can happen when there is another
     266             :     // load from the same location and producing the same type of value
     267             :     // but it has different alignment or volatileness.
     268           0 :     bool isNewLoad = true;
     269             :     SUnit *LoadSU;
     270           0 :     if (LoadNode->getNodeId() != -1) {
     271           0 :       LoadSU = &SUnits[LoadNode->getNodeId()];
     272           0 :       isNewLoad = false;
     273             :     } else {
     274           0 :       LoadSU = newSUnit(LoadNode);
     275           0 :       LoadNode->setNodeId(LoadSU->NodeNum);
     276             :     }
     277             : 
     278           0 :     SDep ChainPred;
     279           0 :     SmallVector<SDep, 4> ChainSuccs;
     280           0 :     SmallVector<SDep, 4> LoadPreds;
     281           0 :     SmallVector<SDep, 4> NodePreds;
     282           0 :     SmallVector<SDep, 4> NodeSuccs;
     283           0 :     for (SDep &Pred : SU->Preds) {
     284           0 :       if (Pred.isCtrl())
     285           0 :         ChainPred = Pred;
     286           0 :       else if (Pred.getSUnit()->getNode() &&
     287           0 :                Pred.getSUnit()->getNode()->isOperandOf(LoadNode))
     288           0 :         LoadPreds.push_back(Pred);
     289             :       else
     290           0 :         NodePreds.push_back(Pred);
     291             :     }
     292           0 :     for (SDep &Succ : SU->Succs) {
     293           0 :       if (Succ.isCtrl())
     294           0 :         ChainSuccs.push_back(Succ);
     295             :       else
     296           0 :         NodeSuccs.push_back(Succ);
     297             :     }
     298             : 
     299           0 :     if (ChainPred.getSUnit()) {
     300           0 :       RemovePred(SU, ChainPred);
     301           0 :       if (isNewLoad)
     302           0 :         AddPred(LoadSU, ChainPred);
     303             :     }
     304           0 :     for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
     305           0 :       const SDep &Pred = LoadPreds[i];
     306           0 :       RemovePred(SU, Pred);
     307           0 :       if (isNewLoad) {
     308           0 :         AddPred(LoadSU, Pred);
     309             :       }
     310             :     }
     311           0 :     for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
     312           0 :       const SDep &Pred = NodePreds[i];
     313           0 :       RemovePred(SU, Pred);
     314           0 :       AddPred(NewSU, Pred);
     315             :     }
     316           0 :     for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
     317           0 :       SDep D = NodeSuccs[i];
     318           0 :       SUnit *SuccDep = D.getSUnit();
     319           0 :       D.setSUnit(SU);
     320           0 :       RemovePred(SuccDep, D);
     321           0 :       D.setSUnit(NewSU);
     322           0 :       AddPred(SuccDep, D);
     323             :     }
     324           0 :     for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
     325           0 :       SDep D = ChainSuccs[i];
     326           0 :       SUnit *SuccDep = D.getSUnit();
     327           0 :       D.setSUnit(SU);
     328           0 :       RemovePred(SuccDep, D);
     329           0 :       if (isNewLoad) {
     330           0 :         D.setSUnit(LoadSU);
     331           0 :         AddPred(SuccDep, D);
     332             :       }
     333             :     }
     334           0 :     if (isNewLoad) {
     335           0 :       SDep D(LoadSU, SDep::Barrier);
     336           0 :       D.setLatency(LoadSU->Latency);
     337           0 :       AddPred(NewSU, D);
     338             :     }
     339             : 
     340           0 :     ++NumUnfolds;
     341             : 
     342           0 :     if (NewSU->NumSuccsLeft == 0) {
     343           0 :       NewSU->isAvailable = true;
     344           0 :       return NewSU;
     345             :     }
     346           0 :     SU = NewSU;
     347             :   }
     348             : 
     349             :   DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
     350           1 :   NewSU = Clone(SU);
     351             : 
     352             :   // New SUnit has the exact same predecessors.
     353           4 :   for (SDep &Pred : SU->Preds)
     354           1 :     if (!Pred.isArtificial())
     355           1 :       AddPred(NewSU, Pred);
     356             : 
     357             :   // Only copy scheduled successors. Cut them from old node's successor
     358             :   // list and move them over.
     359           1 :   SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
     360           5 :   for (SDep &Succ : SU->Succs) {
     361           2 :     if (Succ.isArtificial())
     362           0 :       continue;
     363           2 :     SUnit *SuccSU = Succ.getSUnit();
     364           2 :     if (SuccSU->isScheduled) {
     365           1 :       SDep D = Succ;
     366           1 :       D.setSUnit(NewSU);
     367           2 :       AddPred(SuccSU, D);
     368           1 :       D.setSUnit(SU);
     369           2 :       DelDeps.push_back(std::make_pair(SuccSU, D));
     370             :     }
     371             :   }
     372           3 :   for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
     373           4 :     RemovePred(DelDeps[i].first, DelDeps[i].second);
     374             : 
     375           1 :   ++NumDups;
     376           1 :   return NewSU;
     377             : }
     378             : 
     379             : /// InsertCopiesAndMoveSuccs - Insert register copies and move all
     380             : /// scheduled successors of the given SUnit to the last copy.
     381          12 : void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
     382             :                                               const TargetRegisterClass *DestRC,
     383             :                                               const TargetRegisterClass *SrcRC,
     384             :                                               SmallVectorImpl<SUnit*> &Copies) {
     385          12 :   SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
     386          12 :   CopyFromSU->CopySrcRC = SrcRC;
     387          12 :   CopyFromSU->CopyDstRC = DestRC;
     388             : 
     389          12 :   SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
     390          12 :   CopyToSU->CopySrcRC = DestRC;
     391          12 :   CopyToSU->CopyDstRC = SrcRC;
     392             : 
     393             :   // Only copy scheduled successors. Cut them from old node's successor
     394             :   // list and move them over.
     395          24 :   SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
     396          74 :   for (SDep &Succ : SU->Succs) {
     397          38 :     if (Succ.isArtificial())
     398           0 :       continue;
     399          38 :     SUnit *SuccSU = Succ.getSUnit();
     400          38 :     if (SuccSU->isScheduled) {
     401          22 :       SDep D = Succ;
     402          44 :       D.setSUnit(CopyToSU);
     403          44 :       AddPred(SuccSU, D);
     404          44 :       DelDeps.push_back(std::make_pair(SuccSU, Succ));
     405             :     }
     406             :   }
     407          46 :   for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
     408          88 :     RemovePred(DelDeps[i].first, DelDeps[i].second);
     409             :   }
     410          12 :   SDep FromDep(SU, SDep::Data, Reg);
     411          24 :   FromDep.setLatency(SU->Latency);
     412          24 :   AddPred(CopyFromSU, FromDep);
     413          24 :   SDep ToDep(CopyFromSU, SDep::Data, 0);
     414          24 :   ToDep.setLatency(CopyFromSU->Latency);
     415          24 :   AddPred(CopyToSU, ToDep);
     416             : 
     417          12 :   Copies.push_back(CopyFromSU);
     418          12 :   Copies.push_back(CopyToSU);
     419             : 
     420          12 :   ++NumPRCopies;
     421          12 : }
     422             : 
     423             : /// getPhysicalRegisterVT - Returns the ValueType of the physical register
     424             : /// definition of the specified node.
     425             : /// FIXME: Move to SelectionDAG?
     426          13 : static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
     427             :                                  const TargetInstrInfo *TII) {
     428             :   unsigned NumRes;
     429          26 :   if (N->getOpcode() == ISD::CopyFromReg) {
     430             :     // CopyFromReg has: "chain, Val, glue" so operand 1 gives the type.
     431             :     NumRes = 1;
     432             :   } else {
     433           3 :     const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
     434             :     assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
     435           1 :     NumRes = MCID.getNumDefs();
     436           1 :     for (const MCPhysReg *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
     437           1 :       if (Reg == *ImpDef)
     438             :         break;
     439           0 :       ++NumRes;
     440             :     }
     441             :   }
     442          13 :   return N->getSimpleValueType(NumRes);
     443             : }
     444             : 
     445             : /// CheckForLiveRegDef - Return true and update live register vector if the
     446             : /// specified register def of the specified SUnit clobbers any "live" registers.
     447          53 : static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
     448             :                                std::vector<SUnit*> &LiveRegDefs,
     449             :                                SmallSet<unsigned, 4> &RegAdded,
     450             :                                SmallVectorImpl<unsigned> &LRegs,
     451             :                                const TargetRegisterInfo *TRI) {
     452          53 :   bool Added = false;
     453         424 :   for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
     454         349 :     if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
     455          14 :       if (RegAdded.insert(*AI).second) {
     456          28 :         LRegs.push_back(*AI);
     457          14 :         Added = true;
     458             :       }
     459             :     }
     460             :   }
     461          53 :   return Added;
     462             : }
     463             : 
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
/// Interfering registers are appended to LRegs.
bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
                                              SmallVectorImpl<unsigned> &LRegs){
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SDep &Pred : SU->Preds) {
    if (Pred.isAssignedRegDep()) {
      CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
    }
  }

  // Check SU's node and every node glued to it; any of them may define
  // physical registers.
  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      // Decode the inline-asm operand list: each group begins with a flags
      // constant encoding its kind and register count.
      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;  // Use/immediate group: skip all of its operands.
      }
      continue;
    }
    if (!Node->isMachineOpcode())
      continue;
    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    // Every implicit def of the machine instruction may clobber a live reg.
    for (const MCPhysReg *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
    }
  }
  return !LRegs.empty();
}
     520             : 
     521             : 
     522             : /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
     523             : /// schedulers.
     524          34 : void ScheduleDAGFast::ListScheduleBottomUp() {
     525          34 :   unsigned CurCycle = 0;
     526             : 
     527             :   // Release any predecessors of the special Exit node.
     528          34 :   ReleasePredecessors(&ExitSU, CurCycle);
     529             : 
     530             :   // Add root to Available queue.
     531          68 :   if (!SUnits.empty()) {
     532          60 :     SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
     533             :     assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
     534          30 :     RootSU->isAvailable = true;
     535          30 :     AvailableQueue.push(RootSU);
     536             :   }
     537             : 
     538             :   // While Available queue is not empty, grab the node with the highest
     539             :   // priority. If it is not ready put it back.  Schedule the node.
     540          68 :   SmallVector<SUnit*, 4> NotReady;
     541          68 :   DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
     542          68 :   Sequence.reserve(SUnits.size());
     543         671 :   while (!AvailableQueue.empty()) {
     544         201 :     bool Delayed = false;
     545         201 :     LRegsMap.clear();
     546         402 :     SUnit *CurSU = AvailableQueue.pop();
     547         215 :     while (CurSU) {
     548         216 :       SmallVector<unsigned, 4> LRegs;
     549         202 :       if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
     550             :         break;
     551          14 :       Delayed = true;
     552          56 :       LRegsMap.insert(std::make_pair(CurSU, LRegs));
     553             : 
     554          14 :       CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
     555          14 :       NotReady.push_back(CurSU);
     556          28 :       CurSU = AvailableQueue.pop();
     557             :     }
     558             : 
     559             :     // All candidates are delayed due to live physical reg dependencies.
     560             :     // Try code duplication or inserting cross class copies
     561             :     // to resolve it.
     562         201 :     if (Delayed && !CurSU) {
     563             :       if (!CurSU) {
     564             :         // Try duplicating the nodes that produces these
     565             :         // "expensive to copy" values to break the dependency. In case even
     566             :         // that doesn't work, insert cross class copies.
     567          13 :         SUnit *TrySU = NotReady[0];
     568          13 :         SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
     569             :         assert(LRegs.size() == 1 && "Can't handle this yet!");
     570          26 :         unsigned Reg = LRegs[0];
     571          26 :         SUnit *LRDef = LiveRegDefs[Reg];
     572          13 :         MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
     573             :         const TargetRegisterClass *RC =
     574          13 :           TRI->getMinimalPhysRegClass(Reg, VT);
     575          13 :         const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
     576             : 
     577             :         // If cross copy register class is the same as RC, then it must be
     578             :         // possible copy the value directly. Do not try duplicate the def.
     579             :         // If cross copy register class is not the same as RC, then it's
     580             :         // possible to copy the value but it require cross register class copies
     581             :         // and it is expensive.
     582             :         // If cross copy register class is null, then it's not possible to copy
     583             :         // the value at all.
     584          13 :         SUnit *NewDef = nullptr;
     585          13 :         if (DestRC != RC) {
     586          13 :           NewDef = CopyAndMoveSuccessors(LRDef);
     587          13 :           if (!DestRC && !NewDef)
     588           0 :             report_fatal_error("Can't handle live physical "
     589             :                                "register dependency!");
     590             :         }
     591          13 :         if (!NewDef) {
     592             :           // Issue copies, these can be expensive cross register class copies.
     593          24 :           SmallVector<SUnit*, 2> Copies;
     594          12 :           InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
     595             :           DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
     596             :                        << " to SU #" << Copies.front()->NodeNum << "\n");
     597          48 :           AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
     598          24 :           NewDef = Copies.back();
     599             :         }
     600             : 
     601             :         DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
     602             :                      << " to SU #" << TrySU->NodeNum << "\n");
     603          26 :         LiveRegDefs[Reg] = NewDef;
     604          39 :         AddPred(NewDef, SDep(TrySU, SDep::Artificial));
     605          13 :         TrySU->isAvailable = false;
     606          13 :         CurSU = NewDef;
     607             :       }
     608             : 
     609          13 :       if (!CurSU) {
     610           0 :         llvm_unreachable("Unable to resolve live physical register dependencies!");
     611             :       }
     612             :     }
     613             : 
     614             :     // Add the nodes that aren't ready back onto the available list.
     615         416 :     for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
     616          28 :       NotReady[i]->isPending = false;
     617             :       // May no longer be available due to backtracking.
     618          28 :       if (NotReady[i]->isAvailable)
     619           2 :         AvailableQueue.push(NotReady[i]);
     620             :     }
     621         201 :     NotReady.clear();
     622             : 
     623         201 :     if (CurSU)
     624         201 :       ScheduleNodeBottomUp(CurSU, CurCycle);
     625         201 :     ++CurCycle;
     626             :   }
     627             : 
     628             :   // Reverse the order since it is bottom up.
     629         136 :   std::reverse(Sequence.begin(), Sequence.end());
     630             : 
     631             : #ifndef NDEBUG
     632             :   VerifyScheduledSequence(/*isBottomUp=*/true);
     633             : #endif
     634          34 : }
     635             : 
     636             : 
namespace {
//===----------------------------------------------------------------------===//
// ScheduleDAGLinearize - No scheduling scheduler, it simply linearize the
// DAG in topological order.
// IMPORTANT: this may not work for targets with phyreg dependency.
//
class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
public:
  ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}

  // Builds Sequence by walking the DAG bottom-up from its root node.
  void Schedule() override;

  // Emits Sequence (in reverse, since it was built bottom-up) as machine
  // instructions via InstrEmitter.
  MachineBasicBlock *
    EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;

private:
  // Emittable nodes in bottom-up order; EmitSchedule walks it backwards.
  std::vector<SDNode*> Sequence;
  DenseMap<SDNode*, SDNode*> GluedMap;  // Cache glue to its user

  // Appends N to Sequence and recursively releases/schedules its operands.
  void ScheduleNode(SDNode *N);
};
} // end anonymous namespace
     659             : 
/// ScheduleNode - Append N to the schedule and release its operands.  Each
/// node's id holds its remaining-use count (seeded by Schedule()); an operand
/// is recursively scheduled once that count drops to zero.  The resulting
/// Sequence is bottom-up; EmitSchedule reverses it.
void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
  // Only nodes whose use count has been fully released may be scheduled.
  if (N->getNodeId() != 0)
    llvm_unreachable(nullptr);

  if (!N->isMachineOpcode() &&
      (N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
    // These nodes do not need to be translated into MIs.
    return;

  DEBUG(dbgs() << "\n*** Scheduling: ");
  DEBUG(N->dump(DAG));
  Sequence.push_back(N);

  // Visit operands last-to-first (glue, if present, is always the last
  // operand and is therefore seen on the first iteration).
  unsigned NumOps = N->getNumOperands();
  if (unsigned NumLeft = NumOps) {
    SDNode *GluedOpN = nullptr;
    do {
      const SDValue &Op = N->getOperand(NumLeft-1);
      SDNode *OpN = Op.getNode();

      if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
        // Schedule glue operand right above N.
        GluedOpN = OpN;
        assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
        // Force the glue producer's count to zero so it is emitted
        // immediately adjacent to N, regardless of other pending uses.
        OpN->setNodeId(0);
        ScheduleNode(OpN);
        continue;
      }

      if (OpN == GluedOpN)
        // Glue operand is already scheduled.
        continue;

      DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN);
      if (DI != GluedMap.end() && DI->second != N)
        // Users of glues are counted against the glued users.
        OpN = DI->second;

      // Release one use of the operand; schedule it when no uses remain.
      unsigned Degree = OpN->getNodeId();
      assert(Degree > 0 && "Predecessor over-released!");
      OpN->setNodeId(--Degree);
      if (Degree == 0)
        ScheduleNode(OpN);
    } while (--NumLeft);
  }
}
     706             : 
     707             : /// findGluedUser - Find the representative use of a glue value by walking
     708             : /// the use chain.
     709             : static SDNode *findGluedUser(SDNode *N) {
     710         120 :   while (SDNode *Glued = N->getGluedUser())
     711             :     N = Glued;
     712             :   return N;
     713             : }
     714             : 
/// Schedule - Seed every node's id with its use count, fold glue chains into
/// their representative users, then linearize the DAG by a depth-first walk
/// from the root (see ScheduleNode).
void ScheduleDAGLinearize::Schedule() {
  DEBUG(dbgs() << "********** DAG Linearization **********\n");

  SmallVector<SDNode*, 8> Glues;
  unsigned DAGSize = 0;  // Count of nodes that will actually be emitted.
  for (SDNode &Node : DAG->allnodes()) {
    SDNode *N = &Node;

    // Use node id to record degree.
    unsigned Degree = N->use_size();
    N->setNodeId(Degree);
    unsigned NumVals = N->getNumValues();
    // A node producing a used glue result is mapped to the final user at
    // the end of its glue chain.
    if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
        N->hasAnyUseOfValue(NumVals-1)) {
      SDNode *User = findGluedUser(N);
      if (User) {
        Glues.push_back(N);
        GluedMap.insert(std::make_pair(N, User));
      }
    }

    // Mirror ScheduleNode's emit filter: machine nodes, plus non-passive
    // nodes other than EntryToken.
    if (N->isMachineOpcode() ||
        (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
      ++DAGSize;
  }

  for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
    SDNode *Glue = Glues[i];
    SDNode *GUser = GluedMap[Glue];
    unsigned Degree = Glue->getNodeId();
    unsigned UDegree = GUser->getNodeId();

    // Glue user must be scheduled together with the glue operand. So other
    // users of the glue operand must be treated as its users.
    SDNode *ImmGUser = Glue->getGluedUser();
    // Discount the immediate glued user's uses; transfer the remainder of
    // the glue producer's degree onto the representative user.
    for (const SDNode *U : Glue->uses())
      if (U == ImmGUser)
        --Degree;
    GUser->setNodeId(UDegree + Degree);
    // Leave exactly one use on the glue producer: the glued user itself.
    Glue->setNodeId(1);
  }

  Sequence.reserve(DAGSize);
  ScheduleNode(DAG->getRoot().getNode());
}
     760             : 
     761             : MachineBasicBlock*
     762          10 : ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
     763          10 :   InstrEmitter Emitter(BB, InsertPos);
     764          20 :   DenseMap<SDValue, unsigned> VRBaseMap;
     765             : 
     766             :   DEBUG({
     767             :       dbgs() << "\n*** Final schedule ***\n";
     768             :     });
     769             : 
     770             :   // FIXME: Handle dbg_values.
     771          20 :   unsigned NumNodes = Sequence.size();
     772         392 :   for (unsigned i = 0; i != NumNodes; ++i) {
     773         764 :     SDNode *N = Sequence[NumNodes-i-1];
     774             :     DEBUG(N->dump(DAG));
     775         382 :     Emitter.EmitNode(N, false, false, VRBaseMap);
     776             :   }
     777             : 
     778             :   DEBUG(dbgs() << '\n');
     779             : 
     780          10 :   InsertPos = Emitter.getInsertPos();
     781          20 :   return Emitter.getBlock();
     782             : }
     783             : 
     784             : //===----------------------------------------------------------------------===//
     785             : //                         Public Constructor Functions
     786             : //===----------------------------------------------------------------------===//
     787             : 
     788             : llvm::ScheduleDAGSDNodes *
     789          34 : llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
     790          68 :   return new ScheduleDAGFast(*IS->MF);
     791             : }
     792             : 
     793             : llvm::ScheduleDAGSDNodes *
     794          10 : llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) {
     795          10 :   return new ScheduleDAGLinearize(*IS->MF);
     796      216918 : }

Generated by: LCOV version 1.13