Bug Summary

File: lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
Warning: line 1421, column 11
Access to field 'isAvailable' results in a dereference of a null pointer (loaded from variable 'BtSU')
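
The warning means the analyzer found an execution path on which the pointer
variable BtSU is still null when the field access BtSU->isAvailable at line
1421 is performed. A minimal, self-contained sketch of that defect pattern
(illustrative only; the struct, helper, and guard below are assumptions, not
code from this file):

    struct SUnit {
      bool isAvailable = false;
    };

    // Hypothetical lookup that can legitimately fail and return null.
    SUnit *findBacktrackTarget(bool Found, SUnit *Candidate) {
      return Found ? Candidate : nullptr;
    }

    void example(bool Found, SUnit *Candidate) {
      SUnit *BtSU = findBacktrackTarget(Found, Candidate);
      // On the path where Found is false, BtSU is null; reading
      // BtSU->isAvailable unconditionally is the kind of access reported.
      if (BtSU != nullptr)        // guarding the access removes the path
        BtSU->isAvailable = true;
    }

Whether the real fix is a guard like the one above or an invariant that rules
the null value out on that path depends on code outside the excerpt shown below.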

Annotated Source Code

1//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements bottom-up and top-down register pressure reduction list
11// schedulers, using standard algorithms. The basic approach uses a priority
12// queue of available nodes to schedule. One at a time, nodes are taken from
13// the priority queue (thus in priority order), checked for legality to
14// schedule, and emitted if legal.
15//
16//===----------------------------------------------------------------------===//
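The "priority queue of available nodes" approach described in the header above
can be sketched in a few lines. This is a conceptual sketch under assumed names
(Node, NumSuccsLeft, Priority), not the scheduler implemented in this file,
which additionally checks legality, hazards, and register pressure:

    #include <queue>
    #include <vector>

    struct Node {
      int Priority = 0;                    // larger value = scheduled earlier
      int NumSuccsLeft = 0;                // successors not yet scheduled
      std::vector<Node *> Preds;
    };

    struct ByPriority {
      bool operator()(const Node *A, const Node *B) const {
        return A->Priority < B->Priority;  // max-heap on Priority
      }
    };

    // Bottom-up list scheduling: start from nodes whose successors are all
    // scheduled, emit the highest-priority one, then release its predecessors.
    std::vector<Node *> listScheduleBottomUp(const std::vector<Node *> &Roots) {
      std::priority_queue<Node *, std::vector<Node *>, ByPriority>
          Available(Roots.begin(), Roots.end());
      std::vector<Node *> Sequence;
      while (!Available.empty()) {
        Node *N = Available.top();
        Available.pop();
        Sequence.push_back(N);             // "emit" the node
        for (Node *P : N->Preds)
          if (--P->NumSuccsLeft == 0)      // predecessor becomes available
            Available.push(P);
      }
      return Sequence;
    }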
17
18#include "llvm/CodeGen/SchedulerRegistry.h"
19#include "ScheduleDAGSDNodes.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/CodeGen/MachineRegisterInfo.h"
24#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
25#include "llvm/CodeGen/SelectionDAGISel.h"
26#include "llvm/IR/DataLayout.h"
27#include "llvm/IR/InlineAsm.h"
28#include "llvm/Support/Debug.h"
29#include "llvm/Support/ErrorHandling.h"
30#include "llvm/Support/raw_ostream.h"
31#include "llvm/Target/TargetInstrInfo.h"
32#include "llvm/Target/TargetLowering.h"
33#include "llvm/Target/TargetRegisterInfo.h"
34#include "llvm/Target/TargetSubtargetInfo.h"
35#include <climits>
36using namespace llvm;
37
38#define DEBUG_TYPE "pre-RA-sched"
39
40STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
41STATISTIC(NumUnfolds, "Number of nodes unfolded");
42STATISTIC(NumDups, "Number of duplicated nodes");
43STATISTIC(NumPRCopies, "Number of physical register copies");
44
45static RegisterScheduler
46 burrListDAGScheduler("list-burr",
47 "Bottom-up register reduction list scheduling",
48 createBURRListDAGScheduler);
49static RegisterScheduler
50 sourceListDAGScheduler("source",
51 "Similar to list-burr but schedules in source "
52 "order when possible",
53 createSourceListDAGScheduler);
54
55static RegisterScheduler
56 hybridListDAGScheduler("list-hybrid",
57 "Bottom-up register pressure aware list scheduling "
58 "which tries to balance latency and register pressure",
59 createHybridListDAGScheduler);
60
61static RegisterScheduler
62 ILPListDAGScheduler("list-ilp",
63 "Bottom-up register pressure aware list scheduling "
64 "which tries to balance ILP and register pressure",
65 createILPListDAGScheduler);
66
67static cl::opt<bool> DisableSchedCycles(
68 "disable-sched-cycles", cl::Hidden, cl::init(false),
69 cl::desc("Disable cycle-level precision during preRA scheduling"));
70
71// Temporary sched=list-ilp flags until the heuristics are robust.
72// Some options are also available under sched=list-hybrid.
73static cl::opt<bool> DisableSchedRegPressure(
74 "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
75 cl::desc("Disable regpressure priority in sched=list-ilp"));
76static cl::opt<bool> DisableSchedLiveUses(
77 "disable-sched-live-uses", cl::Hidden, cl::init(true),
78 cl::desc("Disable live use priority in sched=list-ilp"));
79static cl::opt<bool> DisableSchedVRegCycle(
80 "disable-sched-vrcycle", cl::Hidden, cl::init(false),
81 cl::desc("Disable virtual register cycle interference checks"));
82static cl::opt<bool> DisableSchedPhysRegJoin(
83 "disable-sched-physreg-join", cl::Hidden, cl::init(false),
84 cl::desc("Disable physreg def-use affinity"));
85static cl::opt<bool> DisableSchedStalls(
86 "disable-sched-stalls", cl::Hidden, cl::init(true),
87 cl::desc("Disable no-stall priority in sched=list-ilp"));
88static cl::opt<bool> DisableSchedCriticalPath(
89 "disable-sched-critical-path", cl::Hidden, cl::init(false),
90 cl::desc("Disable critical path priority in sched=list-ilp"));
91static cl::opt<bool> DisableSchedHeight(
92 "disable-sched-height", cl::Hidden, cl::init(false),
93 cl::desc("Disable scheduled-height priority in sched=list-ilp"));
94static cl::opt<bool> Disable2AddrHack(
95 "disable-2addr-hack", cl::Hidden, cl::init(true),
96 cl::desc("Disable scheduler's two-address hack"));
97
98static cl::opt<int> MaxReorderWindow(
99 "max-sched-reorder", cl::Hidden, cl::init(6),
100 cl::desc("Number of instructions to allow ahead of the critical path "
101 "in sched=list-ilp"));
102
103static cl::opt<unsigned> AvgIPC(
104 "sched-avg-ipc", cl::Hidden, cl::init(1),
105 cl::desc("Average inst/cycle whan no target itinerary exists."));
106
107namespace {
108//===----------------------------------------------------------------------===//
109/// ScheduleDAGRRList - The actual register reduction list scheduler
110/// implementation. This supports both top-down and bottom-up scheduling.
111///
112class ScheduleDAGRRList : public ScheduleDAGSDNodes {
113private:
114 /// NeedLatency - True if the scheduler will make use of latency information.
115 ///
116 bool NeedLatency;
117
118 /// AvailableQueue - The priority queue to use for the available SUnits.
119 SchedulingPriorityQueue *AvailableQueue;
120
121 /// PendingQueue - This contains all of the instructions whose operands have
122 /// been issued, but their results are not ready yet (due to the latency of
123 /// the operation). Once the operands become available, the instruction is
124 /// added to the AvailableQueue.
125 std::vector<SUnit*> PendingQueue;
126
127 /// HazardRec - The hazard recognizer to use.
128 ScheduleHazardRecognizer *HazardRec;
129
130 /// CurCycle - The current scheduler state corresponds to this cycle.
131 unsigned CurCycle;
132
133 /// MinAvailableCycle - Cycle of the soonest available instruction.
134 unsigned MinAvailableCycle;
135
136 /// IssueCount - Count instructions issued in this cycle
137 /// Currently valid only for bottom-up scheduling.
138 unsigned IssueCount;
139
140 /// LiveRegDefs - A set of physical registers and their definitions
141 /// that are "live". These nodes must be scheduled before any other nodes that
142 /// modify the registers can be scheduled.
143 unsigned NumLiveRegs;
144 std::unique_ptr<SUnit*[]> LiveRegDefs;
145 std::unique_ptr<SUnit*[]> LiveRegGens;
146
147 // Collect interferences between physical register use/defs.
148 // Each interference is an SUnit and set of physical registers.
149 SmallVector<SUnit*, 4> Interferences;
150 typedef DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMapT;
151 LRegsMapT LRegsMap;
152
153 /// Topo - A topological ordering for SUnits which permits fast IsReachable
154 /// and similar queries.
155 ScheduleDAGTopologicalSort Topo;
156
157 // Hack to keep track of the inverse of FindCallSeqStart without more crazy
158 // DAG crawling.
159 DenseMap<SUnit*, SUnit*> CallSeqEndForStart;
160
161public:
162 ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
163 SchedulingPriorityQueue *availqueue,
164 CodeGenOpt::Level OptLevel)
165 : ScheduleDAGSDNodes(mf),
166 NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
167 Topo(SUnits, nullptr) {
168
169 const TargetSubtargetInfo &STI = mf.getSubtarget();
170 if (DisableSchedCycles || !NeedLatency)
171 HazardRec = new ScheduleHazardRecognizer();
172 else
173 HazardRec = STI.getInstrInfo()->CreateTargetHazardRecognizer(&STI, this);
174 }
175
176 ~ScheduleDAGRRList() override {
177 delete HazardRec;
178 delete AvailableQueue;
179 }
180
181 void Schedule() override;
182
183 ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }
184
185 /// IsReachable - Checks if SU is reachable from TargetSU.
186 bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
187 return Topo.IsReachable(SU, TargetSU);
188 }
189
190 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
191 /// create a cycle.
192 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
193 return Topo.WillCreateCycle(SU, TargetSU);
194 }
195
196 /// AddPred - adds a predecessor edge to SUnit SU.
197 /// This returns true if this is a new predecessor.
198 /// Updates the topological ordering if required.
199 void AddPred(SUnit *SU, const SDep &D) {
200 Topo.AddPred(SU, D.getSUnit());
201 SU->addPred(D);
202 }
203
204 /// RemovePred - removes a predecessor edge from SUnit SU.
205 /// This returns true if an edge was removed.
206 /// Updates the topological ordering if required.
207 void RemovePred(SUnit *SU, const SDep &D) {
208 Topo.RemovePred(SU, D.getSUnit());
209 SU->removePred(D);
210 }
211
212private:
213 bool isReady(SUnit *SU) {
214 return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
215 AvailableQueue->isReady(SU);
216 }
217
218 void ReleasePred(SUnit *SU, const SDep *PredEdge);
219 void ReleasePredecessors(SUnit *SU);
220 void ReleasePending();
221 void AdvanceToCycle(unsigned NextCycle);
222 void AdvancePastStalls(SUnit *SU);
223 void EmitNode(SUnit *SU);
224 void ScheduleNodeBottomUp(SUnit*);
225 void CapturePred(SDep *PredEdge);
226 void UnscheduleNodeBottomUp(SUnit*);
227 void RestoreHazardCheckerBottomUp();
228 void BacktrackBottomUp(SUnit*, SUnit*);
229 SUnit *CopyAndMoveSuccessors(SUnit*);
230 void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
231 const TargetRegisterClass*,
232 const TargetRegisterClass*,
233 SmallVectorImpl<SUnit*>&);
234 bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
235
236 void releaseInterferences(unsigned Reg = 0);
237
238 SUnit *PickNodeToScheduleBottomUp();
239 void ListScheduleBottomUp();
240
241 /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
242 /// Updates the topological ordering if required.
243 SUnit *CreateNewSUnit(SDNode *N) {
244 unsigned NumSUnits = SUnits.size();
245 SUnit *NewNode = newSUnit(N);
246 // Update the topological ordering.
247 if (NewNode->NodeNum >= NumSUnits)
248 Topo.InitDAGTopologicalSorting();
249 return NewNode;
250 }
251
252 /// CreateClone - Creates a new SUnit from an existing one.
253 /// Updates the topological ordering if required.
254 SUnit *CreateClone(SUnit *N) {
255 unsigned NumSUnits = SUnits.size();
256 SUnit *NewNode = Clone(N);
257 // Update the topological ordering.
258 if (NewNode->NodeNum >= NumSUnits)
259 Topo.InitDAGTopologicalSorting();
260 return NewNode;
261 }
262
263 /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
264 /// need actual latency information but the hybrid scheduler does.
265 bool forceUnitLatencies() const override {
266 return !NeedLatency;
267 }
268};
269} // end anonymous namespace
270
271/// GetCostForDef - Looks up the register class and cost for a given definition.
272/// Typically this just means looking up the representative register class,
273/// but for untyped values (MVT::Untyped) it means inspecting the node's
274/// opcode to determine what register class is being generated.
275static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
276 const TargetLowering *TLI,
277 const TargetInstrInfo *TII,
278 const TargetRegisterInfo *TRI,
279 unsigned &RegClass, unsigned &Cost,
280 const MachineFunction &MF) {
281 MVT VT = RegDefPos.GetValue();
282
283 // Special handling for untyped values. These values can only come from
284 // the expansion of custom DAG-to-DAG patterns.
285 if (VT == MVT::Untyped) {
286 const SDNode *Node = RegDefPos.GetNode();
287
288 // Special handling for CopyFromReg of untyped values.
289 if (!Node->isMachineOpcode() && Node->getOpcode() == ISD::CopyFromReg) {
290 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
291 const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
292 RegClass = RC->getID();
293 Cost = 1;
294 return;
295 }
296
297 unsigned Opcode = Node->getMachineOpcode();
298 if (Opcode == TargetOpcode::REG_SEQUENCE) {
299 unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
300 const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
301 RegClass = RC->getID();
302 Cost = 1;
303 return;
304 }
305
306 unsigned Idx = RegDefPos.GetIdx();
307 const MCInstrDesc Desc = TII->get(Opcode);
308 const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
309 RegClass = RC->getID();
310 // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
311 // better way to determine it.
312 Cost = 1;
313 } else {
314 RegClass = TLI->getRepRegClassFor(VT)->getID();
315 Cost = TLI->getRepRegClassCostFor(VT);
316 }
317}
318
319/// Schedule - Schedule the DAG using list scheduling.
320void ScheduleDAGRRList::Schedule() {
321 DEBUG(dbgs()
322 << "********** List Scheduling BB#" << BB->getNumber()
323 << " '" << BB->getName() << "' **********\n");
324
325 CurCycle = 0;
326 IssueCount = 0;
327 MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
328 NumLiveRegs = 0;
329 // Allocate slots for each physical register, plus one for a special register
330 // to track the virtual resource of a calling sequence.
331 LiveRegDefs.reset(new SUnit*[TRI->getNumRegs() + 1]());
332 LiveRegGens.reset(new SUnit*[TRI->getNumRegs() + 1]());
333 CallSeqEndForStart.clear();
334 assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");
335
336 // Build the scheduling graph.
337 BuildSchedGraph(nullptr);
338
339 DEBUG(for (SUnit &SU : SUnits)
340 SU.dumpAll(this));
341 Topo.InitDAGTopologicalSorting();
342
343 AvailableQueue->initNodes(SUnits);
344
345 HazardRec->Reset();
346
347 // Execute the actual scheduling loop.
348 ListScheduleBottomUp();
349
350 AvailableQueue->releaseState();
351
352 DEBUG({
353 dbgs() << "*** Final schedule ***\n";
354 dumpSchedule();
355 dbgs() << '\n';
356 });
357}
358
359//===----------------------------------------------------------------------===//
360// Bottom-Up Scheduling
361//===----------------------------------------------------------------------===//
362
363/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
364/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
365void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
366 SUnit *PredSU = PredEdge->getSUnit();
367
368#ifndef NDEBUG
369 if (PredSU->NumSuccsLeft == 0) {
370 dbgs() << "*** Scheduling failed! ***\n";
371 PredSU->dump(this);
372 dbgs() << " has been released too many times!\n";
373 llvm_unreachable(nullptr);
374 }
375#endif
376 --PredSU->NumSuccsLeft;
377
378 if (!forceUnitLatencies()) {
379 // Updating predecessor's height. This is now the cycle when the
380 // predecessor can be scheduled without causing a pipeline stall.
381 PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
382 }
383
384 // If all the node's successors are scheduled, this node is ready
385 // to be scheduled. Ignore the special EntrySU node.
386 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
387 PredSU->isAvailable = true;
388
389 unsigned Height = PredSU->getHeight();
390 if (Height < MinAvailableCycle)
391 MinAvailableCycle = Height;
392
393 if (isReady(PredSU)) {
394 AvailableQueue->push(PredSU);
395 }
396 // CapturePred and others may have left the node in the pending queue, avoid
397 // adding it twice.
398 else if (!PredSU->isPending) {
399 PredSU->isPending = true;
400 PendingQueue.push_back(PredSU);
401 }
402 }
403}
404
405/// IsChainDependent - Test if Outer is reachable from Inner through
406/// chain dependencies.
407static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
408 unsigned NestLevel,
409 const TargetInstrInfo *TII) {
410 SDNode *N = Outer;
411 for (;;) {
412 if (N == Inner)
413 return true;
414 // For a TokenFactor, examine each operand. There may be multiple ways
415 // to get to the CALLSEQ_BEGIN, but we need to find the path with the
416 // most nesting in order to ensure that we find the corresponding match.
417 if (N->getOpcode() == ISD::TokenFactor) {
418 for (const SDValue &Op : N->op_values())
419 if (IsChainDependent(Op.getNode(), Inner, NestLevel, TII))
420 return true;
421 return false;
422 }
423 // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
424 if (N->isMachineOpcode()) {
425 if (N->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) {
426 ++NestLevel;
427 } else if (N->getMachineOpcode() == TII->getCallFrameSetupOpcode()) {
428 if (NestLevel == 0)
429 return false;
430 --NestLevel;
431 }
432 }
433 // Otherwise, find the chain and continue climbing.
434 for (const SDValue &Op : N->op_values())
435 if (Op.getValueType() == MVT::Other) {
436 N = Op.getNode();
437 goto found_chain_operand;
438 }
439 return false;
440 found_chain_operand:;
441 if (N->getOpcode() == ISD::EntryToken)
442 return false;
443 }
444}
445
446/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
447/// the corresponding (lowered) CALLSEQ_BEGIN node.
448///
449 /// NestLevel and MaxNest are used in recursion to indicate the current level
450/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
451/// level seen so far.
452///
453/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
454/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
455static SDNode *
456FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
457 const TargetInstrInfo *TII) {
458 for (;;) {
459 // For a TokenFactor, examine each operand. There may be multiple ways
460 // to get to the CALLSEQ_BEGIN, but we need to find the path with the
461 // most nesting in order to ensure that we find the corresponding match.
462 if (N->getOpcode() == ISD::TokenFactor) {
463 SDNode *Best = nullptr;
464 unsigned BestMaxNest = MaxNest;
465 for (const SDValue &Op : N->op_values()) {
466 unsigned MyNestLevel = NestLevel;
467 unsigned MyMaxNest = MaxNest;
468 if (SDNode *New = FindCallSeqStart(Op.getNode(),
469 MyNestLevel, MyMaxNest, TII))
470 if (!Best || (MyMaxNest > BestMaxNest)) {
471 Best = New;
472 BestMaxNest = MyMaxNest;
473 }
474 }
475 assert(Best);
476 MaxNest = BestMaxNest;
477 return Best;
478 }
479 // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
480 if (N->isMachineOpcode()) {
481 if (N->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) {
482 ++NestLevel;
483 MaxNest = std::max(MaxNest, NestLevel);
484 } else if (N->getMachineOpcode() == TII->getCallFrameSetupOpcode()) {
485 assert(NestLevel != 0);
486 --NestLevel;
487 if (NestLevel == 0)
488 return N;
489 }
490 }
491 // Otherwise, find the chain and continue climbing.
492 for (const SDValue &Op : N->op_values())
493 if (Op.getValueType() == MVT::Other) {
494 N = Op.getNode();
495 goto found_chain_operand;
496 }
497 return nullptr;
498 found_chain_operand:;
499 if (N->getOpcode() == ISD::EntryToken)
500 return nullptr;
501 }
502}
503
504/// Call ReleasePred for each predecessor, then update register live def/gen.
505/// Always update LiveRegDefs for a register dependence even if the current SU
506/// also defines the register. This effectively create one large live range
507/// across a sequence of two-address node. This is important because the
508/// entire chain must be scheduled together. Example:
509///
510/// flags = (3) add
511/// flags = (2) addc flags
512/// flags = (1) addc flags
513///
514/// results in
515///
516/// LiveRegDefs[flags] = 3
517/// LiveRegGens[flags] = 1
518///
519/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
520/// interference on flags.
521void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
522 // Bottom up: release predecessors
523 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
524 I != E; ++I) {
525 ReleasePred(SU, &*I);
526 if (I->isAssignedRegDep()) {
527 // This is a physical register dependency and it's impossible or
528 // expensive to copy the register. Make sure nothing that can
529 // clobber the register is scheduled between the predecessor and
530 // this node.
531 SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
532 assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
533 "interference on register dependence");
534 LiveRegDefs[I->getReg()] = I->getSUnit();
535 if (!LiveRegGens[I->getReg()]) {
536 ++NumLiveRegs;
537 LiveRegGens[I->getReg()] = SU;
538 }
539 }
540 }
541
542 // If we're scheduling a lowered CALLSEQ_END, find the corresponding
543 // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
544 // these nodes, to prevent other calls from being interscheduled with them.
545 unsigned CallResource = TRI->getNumRegs();
546 if (!LiveRegDefs[CallResource])
547 for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
548 if (Node->isMachineOpcode() &&
549 Node->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) {
550 unsigned NestLevel = 0;
551 unsigned MaxNest = 0;
552 SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);
553
554 SUnit *Def = &SUnits[N->getNodeId()];
555 CallSeqEndForStart[Def] = SU;
556
557 ++NumLiveRegs;
558 LiveRegDefs[CallResource] = Def;
559 LiveRegGens[CallResource] = SU;
560 break;
561 }
562}
563
564/// Check to see if any of the pending instructions are ready to issue. If
565/// so, add them to the available queue.
566void ScheduleDAGRRList::ReleasePending() {
567 if (DisableSchedCycles) {
568 assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
569 return;
570 }
571
572 // If the available queue is empty, it is safe to reset MinAvailableCycle.
573 if (AvailableQueue->empty())
574 MinAvailableCycle = UINT_MAX;
575
576 // Check to see if any of the pending instructions are ready to issue. If
577 // so, add them to the available queue.
578 for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
579 unsigned ReadyCycle = PendingQueue[i]->getHeight();
580 if (ReadyCycle < MinAvailableCycle)
581 MinAvailableCycle = ReadyCycle;
582
583 if (PendingQueue[i]->isAvailable) {
584 if (!isReady(PendingQueue[i]))
585 continue;
586 AvailableQueue->push(PendingQueue[i]);
587 }
588 PendingQueue[i]->isPending = false;
589 PendingQueue[i] = PendingQueue.back();
590 PendingQueue.pop_back();
591 --i; --e;
592 }
593}
594
595/// Move the scheduler state forward by the specified number of Cycles.
596void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
597 if (NextCycle <= CurCycle)
598 return;
599
600 IssueCount = 0;
601 AvailableQueue->setCurCycle(NextCycle);
602 if (!HazardRec->isEnabled()) {
603 // Bypass lots of virtual calls in case of long latency.
604 CurCycle = NextCycle;
605 }
606 else {
607 for (; CurCycle != NextCycle; ++CurCycle) {
608 HazardRec->RecedeCycle();
609 }
610 }
611 // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
612 // available Q to release pending nodes at least once before popping.
613 ReleasePending();
614}
615
616/// Move the scheduler state forward until the specified node's dependents are
617/// ready and can be scheduled with no resource conflicts.
618void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
619 if (DisableSchedCycles)
620 return;
621
622 // FIXME: Nodes such as CopyFromReg probably should not advance the current
623 // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
624 // has predecessors the cycle will be advanced when they are scheduled.
625 // But given the crude nature of modeling latency though such nodes, we
626 // currently need to treat these nodes like real instructions.
627 // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;
628
629 unsigned ReadyCycle = SU->getHeight();
630
631 // Bump CurCycle to account for latency. We assume the latency of other
632 // available instructions may be hidden by the stall (not a full pipe stall).
633 // This updates the hazard recognizer's cycle before reserving resources for
634 // this instruction.
635 AdvanceToCycle(ReadyCycle);
636
637 // Calls are scheduled in their preceding cycle, so don't conflict with
638 // hazards from instructions after the call. EmitNode will reset the
639 // scoreboard state before emitting the call.
640 if (SU->isCall)
641 return;
642
643 // FIXME: For resource conflicts in very long non-pipelined stages, we
644 // should probably skip ahead here to avoid useless scoreboard checks.
645 int Stalls = 0;
646 while (true) {
647 ScheduleHazardRecognizer::HazardType HT =
648 HazardRec->getHazardType(SU, -Stalls);
649
650 if (HT == ScheduleHazardRecognizer::NoHazard)
651 break;
652
653 ++Stalls;
654 }
655 AdvanceToCycle(CurCycle + Stalls);
656}
657
658/// Record this SUnit in the HazardRecognizer.
659/// Does not update CurCycle.
660void ScheduleDAGRRList::EmitNode(SUnit *SU) {
661 if (!HazardRec->isEnabled())
662 return;
663
664 // Check for phys reg copy.
665 if (!SU->getNode())
666 return;
667
668 switch (SU->getNode()->getOpcode()) {
669 default:
670 assert(SU->getNode()->isMachineOpcode() &&
671 "This target-independent node should not be scheduled.");
672 break;
673 case ISD::MERGE_VALUES:
674 case ISD::TokenFactor:
675 case ISD::LIFETIME_START:
676 case ISD::LIFETIME_END:
677 case ISD::CopyToReg:
678 case ISD::CopyFromReg:
679 case ISD::EH_LABEL:
680 // Noops don't affect the scoreboard state. Copies are likely to be
681 // removed.
682 return;
683 case ISD::INLINEASM:
684 // For inline asm, clear the pipeline state.
685 HazardRec->Reset();
686 return;
687 }
688 if (SU->isCall) {
689 // Calls are scheduled with their preceding instructions. For bottom-up
690 // scheduling, clear the pipeline state before emitting.
691 HazardRec->Reset();
692 }
693
694 HazardRec->EmitInstruction(SU);
695}
696
697static void resetVRegCycle(SUnit *SU);
698
699/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
700/// count of its predecessors. If a predecessor pending count is zero, add it to
701/// the Available queue.
702void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
703 DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
704 DEBUG(SU->dump(this));
705
706#ifndef NDEBUG
707 if (CurCycle < SU->getHeight())
708 DEBUG(dbgs() << " Height [" << SU->getHeight()
709 << "] pipeline stall!\n");
710#endif
711
712 // FIXME: Do not modify node height. It may interfere with
713 // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
714 // node its ready cycle can aid heuristics, and after scheduling it can
715 // indicate the scheduled cycle.
716 SU->setHeightToAtLeast(CurCycle);
717
718 // Reserve resources for the scheduled instruction.
719 EmitNode(SU);
720
721 Sequence.push_back(SU);
722
723 AvailableQueue->scheduledNode(SU);
724
725 // If HazardRec is disabled, and each inst counts as one cycle, then
726 // advance CurCycle before ReleasePredecessors to avoid useless pushes to
727 // PendingQueue for schedulers that implement HasReadyFilter.
728 if (!HazardRec->isEnabled() && AvgIPC < 2)
729 AdvanceToCycle(CurCycle + 1);
730
731 // Update liveness of predecessors before successors to avoid treating a
732 // two-address node as a live range def.
733 ReleasePredecessors(SU);
734
735 // Release all the implicit physical register defs that are live.
736 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
737 I != E; ++I) {
738 // LiveRegDegs[I->getReg()] != SU when SU is a two-address node.
739 if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
740 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
741 --NumLiveRegs;
742 LiveRegDefs[I->getReg()] = nullptr;
743 LiveRegGens[I->getReg()] = nullptr;
744 releaseInterferences(I->getReg());
745 }
746 }
747 // Release the special call resource dependence, if this is the beginning
748 // of a call.
749 unsigned CallResource = TRI->getNumRegs();
750 if (LiveRegDefs[CallResource] == SU)
751 for (const SDNode *SUNode = SU->getNode(); SUNode;
752 SUNode = SUNode->getGluedNode()) {
753 if (SUNode->isMachineOpcode() &&
754 SUNode->getMachineOpcode() == TII->getCallFrameSetupOpcode()) {
755 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
756 --NumLiveRegs;
757 LiveRegDefs[CallResource] = nullptr;
758 LiveRegGens[CallResource] = nullptr;
759 releaseInterferences(CallResource);
760 }
761 }
762
763 resetVRegCycle(SU);
764
765 SU->isScheduled = true;
766
767 // Conditions under which the scheduler should eagerly advance the cycle:
768 // (1) No available instructions
769 // (2) All pipelines full, so available instructions must have hazards.
770 //
771 // If HazardRec is disabled, the cycle was pre-advanced before calling
772 // ReleasePredecessors. In that case, IssueCount should remain 0.
773 //
774 // Check AvailableQueue after ReleasePredecessors in case of zero latency.
775 if (HazardRec->isEnabled() || AvgIPC > 1) {
776 if (SU->getNode() && SU->getNode()->isMachineOpcode())
777 ++IssueCount;
778 if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
779 || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
780 AdvanceToCycle(CurCycle + 1);
781 }
782}
783
784/// CapturePred - This does the opposite of ReleasePred. Since SU is being
785 /// unscheduled, increase the succ left count of its predecessors. Remove
786/// them from AvailableQueue if necessary.
787void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
788 SUnit *PredSU = PredEdge->getSUnit();
789 if (PredSU->isAvailable) {
790 PredSU->isAvailable = false;
791 if (!PredSU->isPending)
792 AvailableQueue->remove(PredSU);
793 }
794
795 assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
796 ++PredSU->NumSuccsLeft;
797}
798
799/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
800/// its predecessor states to reflect the change.
801void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
802 DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
803 DEBUG(SU->dump(this));
804
805 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
806 I != E; ++I) {
807 CapturePred(&*I);
808 if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
809 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
810 assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
811 "Physical register dependency violated?");
812 --NumLiveRegs;
813 LiveRegDefs[I->getReg()] = nullptr;
814 LiveRegGens[I->getReg()] = nullptr;
815 releaseInterferences(I->getReg());
816 }
817 }
818
819 // Reclaim the special call resource dependence, if this is the beginning
820 // of a call.
821 unsigned CallResource = TRI->getNumRegs();
822 for (const SDNode *SUNode = SU->getNode(); SUNode;
823 SUNode = SUNode->getGluedNode()) {
824 if (SUNode->isMachineOpcode() &&
825 SUNode->getMachineOpcode() == TII->getCallFrameSetupOpcode()) {
826 ++NumLiveRegs;
827 LiveRegDefs[CallResource] = SU;
828 LiveRegGens[CallResource] = CallSeqEndForStart[SU];
829 }
830 }
831
832 // Release the special call resource dependence, if this is the end
833 // of a call.
834 if (LiveRegGens[CallResource] == SU)
835 for (const SDNode *SUNode = SU->getNode(); SUNode;
836 SUNode = SUNode->getGluedNode()) {
837 if (SUNode->isMachineOpcode() &&
838 SUNode->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) {
839 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
840 --NumLiveRegs;
841 LiveRegDefs[CallResource] = nullptr;
842 LiveRegGens[CallResource] = nullptr;
843 releaseInterferences(CallResource);
844 }
845 }
846
847 for (auto &Succ : SU->Succs) {
848 if (Succ.isAssignedRegDep()) {
849 auto Reg = Succ.getReg();
850 if (!LiveRegDefs[Reg])
851 ++NumLiveRegs;
852 // This becomes the nearest def. Note that an earlier def may still be
853 // pending if this is a two-address node.
854 LiveRegDefs[Reg] = SU;
855
856 // Update LiveRegGens[Reg] only if it was empty before this unscheduling.
857 // This avoids incorrectly overwriting an entry that was set in a previous run.
858 if (!LiveRegGens[Reg]) {
859 // Find the successor with the lowest height.
860 LiveRegGens[Reg] = Succ.getSUnit();
861 for (auto &Succ2 : SU->Succs) {
862 if (Succ2.isAssignedRegDep() && Succ2.getReg() == Reg &&
863 Succ2.getSUnit()->getHeight() < LiveRegGens[Reg]->getHeight())
864 LiveRegGens[Reg] = Succ2.getSUnit();
865 }
866 }
867 }
868 }
869 if (SU->getHeight() < MinAvailableCycle)
870 MinAvailableCycle = SU->getHeight();
871
872 SU->setHeightDirty();
873 SU->isScheduled = false;
874 SU->isAvailable = true;
875 if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
876 // Don't make available until backtracking is complete.
877 SU->isPending = true;
878 PendingQueue.push_back(SU);
879 }
880 else {
881 AvailableQueue->push(SU);
882 }
883 AvailableQueue->unscheduledNode(SU);
884}
885
886/// After backtracking, the hazard checker needs to be restored to a state
887 /// corresponding to the current cycle.
888void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
889 HazardRec->Reset();
890
891 unsigned LookAhead = std::min((unsigned)Sequence.size(),
892 HazardRec->getMaxLookAhead());
893 if (LookAhead == 0)
894 return;
895
896 std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
897 unsigned HazardCycle = (*I)->getHeight();
898 for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
899 SUnit *SU = *I;
900 for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
901 HazardRec->RecedeCycle();
902 }
903 EmitNode(SU);
904 }
905}
906
907/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
908/// BTCycle in order to schedule a specific node.
909void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
910 SUnit *OldSU = Sequence.back();
911 while (true) {
912 Sequence.pop_back();
913 // FIXME: use ready cycle instead of height
914 CurCycle = OldSU->getHeight();
915 UnscheduleNodeBottomUp(OldSU);
916 AvailableQueue->setCurCycle(CurCycle);
917 if (OldSU == BtSU)
918 break;
919 OldSU = Sequence.back();
920 }
921
922 assert(!SU->isSucc(OldSU) && "Something is wrong!");
923
924 RestoreHazardCheckerBottomUp();
925
926 ReleasePending();
927
928 ++NumBacktracks;
929}
930
931static bool isOperandOf(const SUnit *SU, SDNode *N) {
932 for (const SDNode *SUNode = SU->getNode(); SUNode;
933 SUNode = SUNode->getGluedNode()) {
934 if (SUNode->isOperandOf(N))
935 return true;
936 }
937 return false;
938}
939
940/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
941/// successors to the newly created node.
942SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
943 SDNode *N = SU->getNode();
944 if (!N)
945 return nullptr;
946
947 if (SU->getNode()->getGluedNode())
948 return nullptr;
949
950 SUnit *NewSU;
951 bool TryUnfold = false;
952 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
953 MVT VT = N->getSimpleValueType(i);
954 if (VT == MVT::Glue)
955 return nullptr;
956 else if (VT == MVT::Other)
957 TryUnfold = true;
958 }
959 for (const SDValue &Op : N->op_values()) {
960 MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
961 if (VT == MVT::Glue)
962 return nullptr;
963 }
964
965 if (TryUnfold) {
966 SmallVector<SDNode*, 2> NewNodes;
967 if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
968 return nullptr;
969
970 // Unfolding an x86 DEC64m operation results in store, dec, load, which
971 // can't be handled here, so quit.
972 if (NewNodes.size() == 3)
973 return nullptr;
974
975 DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
976 assert(NewNodes.size() == 2 && "Expected a load folding node!");
977
978 N = NewNodes[1];
979 SDNode *LoadNode = NewNodes[0];
980 unsigned NumVals = N->getNumValues();
981 unsigned OldNumVals = SU->getNode()->getNumValues();
982 for (unsigned i = 0; i != NumVals; ++i)
983 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
984 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
985 SDValue(LoadNode, 1));
986
987 // LoadNode may already exist. This can happen when there is another
988 // load from the same location and producing the same type of value
989 // but it has different alignment or volatileness.
990 bool isNewLoad = true;
991 SUnit *LoadSU;
992 if (LoadNode->getNodeId() != -1) {
993 LoadSU = &SUnits[LoadNode->getNodeId()];
994 isNewLoad = false;
995 } else {
996 LoadSU = CreateNewSUnit(LoadNode);
997 LoadNode->setNodeId(LoadSU->NodeNum);
998
999 InitNumRegDefsLeft(LoadSU);
1000 computeLatency(LoadSU);
1001 }
1002
1003 SUnit *NewSU = CreateNewSUnit(N);
1004 assert(N->getNodeId() == -1 && "Node already inserted!");
1005 N->setNodeId(NewSU->NodeNum);
1006
1007 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1008 for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
1009 if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
1010 NewSU->isTwoAddress = true;
1011 break;
1012 }
1013 }
1014 if (MCID.isCommutable())
1015 NewSU->isCommutable = true;
1016
1017 InitNumRegDefsLeft(NewSU);
1018 computeLatency(NewSU);
1019
1020 // Record all the edges to and from the old SU, by category.
1021 SmallVector<SDep, 4> ChainPreds;
1022 SmallVector<SDep, 4> ChainSuccs;
1023 SmallVector<SDep, 4> LoadPreds;
1024 SmallVector<SDep, 4> NodePreds;
1025 SmallVector<SDep, 4> NodeSuccs;
1026 for (SDep &Pred : SU->Preds) {
1027 if (Pred.isCtrl())
1028 ChainPreds.push_back(Pred);
1029 else if (isOperandOf(Pred.getSUnit(), LoadNode))
1030 LoadPreds.push_back(Pred);
1031 else
1032 NodePreds.push_back(Pred);
1033 }
1034 for (SDep &Succ : SU->Succs) {
1035 if (Succ.isCtrl())
1036 ChainSuccs.push_back(Succ);
1037 else
1038 NodeSuccs.push_back(Succ);
1039 }
1040
1041 // Now assign edges to the newly-created nodes.
1042 for (const SDep &Pred : ChainPreds) {
1043 RemovePred(SU, Pred);
1044 if (isNewLoad)
1045 AddPred(LoadSU, Pred);
1046 }
1047 for (const SDep &Pred : LoadPreds) {
1048 RemovePred(SU, Pred);
1049 if (isNewLoad)
1050 AddPred(LoadSU, Pred);
1051 }
1052 for (const SDep &Pred : NodePreds) {
1053 RemovePred(SU, Pred);
1054 AddPred(NewSU, Pred);
1055 }
1056 for (SDep D : NodeSuccs) {
1057 SUnit *SuccDep = D.getSUnit();
1058 D.setSUnit(SU);
1059 RemovePred(SuccDep, D);
1060 D.setSUnit(NewSU);
1061 AddPred(SuccDep, D);
1062 // Balance register pressure.
1063 if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
1064 && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
1065 --NewSU->NumRegDefsLeft;
1066 }
1067 for (SDep D : ChainSuccs) {
1068 SUnit *SuccDep = D.getSUnit();
1069 D.setSUnit(SU);
1070 RemovePred(SuccDep, D);
1071 if (isNewLoad) {
1072 D.setSUnit(LoadSU);
1073 AddPred(SuccDep, D);
1074 }
1075 }
1076
1077 // Add a data dependency to reflect that NewSU reads the value defined
1078 // by LoadSU.
1079 SDep D(LoadSU, SDep::Data, 0);
1080 D.setLatency(LoadSU->Latency);
1081 AddPred(NewSU, D);
1082
1083 if (isNewLoad)
1084 AvailableQueue->addNode(LoadSU);
1085 AvailableQueue->addNode(NewSU);
1086
1087 ++NumUnfolds;
1088
1089 if (NewSU->NumSuccsLeft == 0) {
1090 NewSU->isAvailable = true;
1091 return NewSU;
1092 }
1093 SU = NewSU;
1094 }
1095
1096 DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
1097 NewSU = CreateClone(SU);
1098
1099 // New SUnit has the exact same predecessors.
1100 for (SDep &Pred : SU->Preds)
1101 if (!Pred.isArtificial())
1102 AddPred(NewSU, Pred);
1103
1104 // Only copy scheduled successors. Cut them from old node's successor
1105 // list and move them over.
1106 SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
1107 for (SDep &Succ : SU->Succs) {
1108 if (Succ.isArtificial())
1109 continue;
1110 SUnit *SuccSU = Succ.getSUnit();
1111 if (SuccSU->isScheduled) {
1112 SDep D = Succ;
1113 D.setSUnit(NewSU);
1114 AddPred(SuccSU, D);
1115 D.setSUnit(SU);
1116 DelDeps.push_back(std::make_pair(SuccSU, D));
1117 }
1118 }
1119 for (auto &DelDep : DelDeps)
1120 RemovePred(DelDep.first, DelDep.second);
1121
1122 AvailableQueue->updateNode(SU);
1123 AvailableQueue->addNode(NewSU);
1124
1125 ++NumDups;
1126 return NewSU;
1127}
1128
1129/// InsertCopiesAndMoveSuccs - Insert register copies and move all
1130/// scheduled successors of the given SUnit to the last copy.
1131void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
1132 const TargetRegisterClass *DestRC,
1133 const TargetRegisterClass *SrcRC,
1134 SmallVectorImpl<SUnit*> &Copies) {
1135 SUnit *CopyFromSU = CreateNewSUnit(nullptr);
1136 CopyFromSU->CopySrcRC = SrcRC;
1137 CopyFromSU->CopyDstRC = DestRC;
1138
1139 SUnit *CopyToSU = CreateNewSUnit(nullptr);
1140 CopyToSU->CopySrcRC = DestRC;
1141 CopyToSU->CopyDstRC = SrcRC;
1142
1143 // Only copy scheduled successors. Cut them from old node's successor
1144 // list and move them over.
1145 SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
1146 for (SDep &Succ : SU->Succs) {
1147 if (Succ.isArtificial())
1148 continue;
1149 SUnit *SuccSU = Succ.getSUnit();
1150 if (SuccSU->isScheduled) {
1151 SDep D = Succ;
1152 D.setSUnit(CopyToSU);
1153 AddPred(SuccSU, D);
1154 DelDeps.push_back(std::make_pair(SuccSU, Succ));
1155 }
1156 else {
1157 // Avoid scheduling the def-side copy before other successors. Otherwise
1158 // we could introduce another physreg interference on the copy and
1159 // continue inserting copies indefinitely.
1160 AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
1161 }
1162 }
1163 for (auto &DelDep : DelDeps)
1164 RemovePred(DelDep.first, DelDep.second);
1165
1166 SDep FromDep(SU, SDep::Data, Reg);
1167 FromDep.setLatency(SU->Latency);
1168 AddPred(CopyFromSU, FromDep);
1169 SDep ToDep(CopyFromSU, SDep::Data, 0);
1170 ToDep.setLatency(CopyFromSU->Latency);
1171 AddPred(CopyToSU, ToDep);
1172
1173 AvailableQueue->updateNode(SU);
1174 AvailableQueue->addNode(CopyFromSU);
1175 AvailableQueue->addNode(CopyToSU);
1176 Copies.push_back(CopyFromSU);
1177 Copies.push_back(CopyToSU);
1178
1179 ++NumPRCopies;
1180}
1181
1182/// getPhysicalRegisterVT - Returns the ValueType of the physical register
1183/// definition of the specified node.
1184/// FIXME: Move to SelectionDAG?
1185static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
1186 const TargetInstrInfo *TII) {
1187 unsigned NumRes;
1188 if (N->getOpcode() == ISD::CopyFromReg) {
1189 // CopyFromReg has: "chain, Val, glue" so operand 1 gives the type.
1190 NumRes = 1;
1191 } else {
1192 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1193 assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
1194 NumRes = MCID.getNumDefs();
1195 for (const MCPhysReg *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
1196 if (Reg == *ImpDef)
1197 break;
1198 ++NumRes;
1199 }
1200 }
1201 return N->getSimpleValueType(NumRes);
1202}
1203
1204/// CheckForLiveRegDef - Return true and update live register vector if the
1205/// specified register def of the specified SUnit clobbers any "live" registers.
1206static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
1207 SUnit **LiveRegDefs,
1208 SmallSet<unsigned, 4> &RegAdded,
1209 SmallVectorImpl<unsigned> &LRegs,
1210 const TargetRegisterInfo *TRI) {
1211 for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {
1212
1213 // Check if Ref is live.
1214 if (!LiveRegDefs[*AliasI]) continue;
1215
1216 // Allow multiple uses of the same def.
1217 if (LiveRegDefs[*AliasI] == SU) continue;
1218
1219 // Add Reg to the set of interfering live regs.
1220 if (RegAdded.insert(*AliasI).second) {
1221 LRegs.push_back(*AliasI);
1222 }
1223 }
1224}
1225
1226/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
1227/// by RegMask, and add them to LRegs.
1228static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
1229 ArrayRef<SUnit*> LiveRegDefs,
1230 SmallSet<unsigned, 4> &RegAdded,
1231 SmallVectorImpl<unsigned> &LRegs) {
1232 // Look at all live registers. Skip Reg0 and the special CallResource.
1233 for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
1234 if (!LiveRegDefs[i]) continue;
1235 if (LiveRegDefs[i] == SU) continue;
1236 if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
1237 if (RegAdded.insert(i).second)
1238 LRegs.push_back(i);
1239 }
1240}
1241
1242/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
1243static const uint32_t *getNodeRegMask(const SDNode *N) {
1244 for (const SDValue &Op : N->op_values())
1245 if (const auto *RegOp = dyn_cast<RegisterMaskSDNode>(Op.getNode()))
1246 return RegOp->getRegMask();
1247 return nullptr;
1248}
1249
1250/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
1251/// scheduling of the given node to satisfy live physical register dependencies.
1252/// If the specific node is the last one that's available to schedule, do
1253/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
1254bool ScheduleDAGRRList::
1255DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
1256 if (NumLiveRegs == 0)
1257 return false;
1258
1259 SmallSet<unsigned, 4> RegAdded;
1260 // If this node would clobber any "live" register, then it's not ready.
1261 //
1262 // If SU is the currently live definition of the same register that it uses,
1263 // then we are free to schedule it.
1264 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1265 I != E; ++I) {
1266 if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
1267 CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs.get(),
1268 RegAdded, LRegs, TRI);
1269 }
1270
1271 for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
1272 if (Node->getOpcode() == ISD::INLINEASM) {
1273 // Inline asm can clobber physical defs.
1274 unsigned NumOps = Node->getNumOperands();
1275 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
1276 --NumOps; // Ignore the glue operand.
1277
1278 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
1279 unsigned Flags =
1280 cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
1281 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
1282
1283 ++i; // Skip the ID value.
1284 if (InlineAsm::isRegDefKind(Flags) ||
1285 InlineAsm::isRegDefEarlyClobberKind(Flags) ||
1286 InlineAsm::isClobberKind(Flags)) {
1287 // Check for def of register or earlyclobber register.
1288 for (; NumVals; --NumVals, ++i) {
1289 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1290 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1291 CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
1292 }
1293 } else
1294 i += NumVals;
1295 }
1296 continue;
1297 }
1298
1299 if (!Node->isMachineOpcode())
1300 continue;
1301 // If we're in the middle of scheduling a call, don't begin scheduling
1302 // another call. Also, don't allow any physical registers to be live across
1303 // the call.
1304 if ((Node->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) ||
1305 (Node->getMachineOpcode() == TII->getCallFrameSetupOpcode())) {
1306 // Check the special calling-sequence resource.
1307 unsigned CallResource = TRI->getNumRegs();
1308 if (LiveRegDefs[CallResource]) {
1309 SDNode *Gen = LiveRegGens[CallResource]->getNode();
1310 while (SDNode *Glued = Gen->getGluedNode())
1311 Gen = Glued;
1312 if (!IsChainDependent(Gen, Node, 0, TII) &&
1313 RegAdded.insert(CallResource).second)
1314 LRegs.push_back(CallResource);
1315 }
1316 }
1317 if (const uint32_t *RegMask = getNodeRegMask(Node))
1318 CheckForLiveRegDefMasked(SU, RegMask,
1319 makeArrayRef(LiveRegDefs.get(), TRI->getNumRegs()),
1320 RegAdded, LRegs);
1321
1322 const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
1323 if (MCID.hasOptionalDef()) {
1324 // Most ARM instructions have an OptionalDef for CPSR, to model the S-bit.
1325 // This operand can be either a def of CPSR, if the S bit is set; or a use
1326 // of %noreg. When the OptionalDef is set to a valid register, we need to
1327 // handle it in the same way as an ImplicitDef.
1328 for (unsigned i = 0; i < MCID.getNumDefs(); ++i)
1329 if (MCID.OpInfo[i].isOptionalDef()) {
1330 const SDValue &OptionalDef = Node->getOperand(i - Node->getNumValues());
1331 unsigned Reg = cast<RegisterSDNode>(OptionalDef)->getReg();
1332 CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
1333 }
1334 }
1335 if (!MCID.ImplicitDefs)
1336 continue;
1337 for (const MCPhysReg *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
1338 CheckForLiveRegDef(SU, *Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
1339 }
1340
1341 return !LRegs.empty();
1342}
1343
1344void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
1345 // Add the nodes that aren't ready back onto the available list.
1346 for (unsigned i = Interferences.size(); i > 0; --i) {
1347 SUnit *SU = Interferences[i-1];
1348 LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
1349 if (Reg) {
1350 SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
1351 if (!is_contained(LRegs, Reg))
1352 continue;
1353 }
1354 SU->isPending = false;
1355 // The interfering node may no longer be available due to backtracking.
1356 // Furthermore, it may have been made available again, in which case it is
1357 // now already in the AvailableQueue.
1358 if (SU->isAvailable && !SU->NodeQueueId) {
1359 DEBUG(dbgs() << " Repushing SU #" << SU->NodeNum << '\n');
1360 AvailableQueue->push(SU);
1361 }
1362 if (i < Interferences.size())
1363 Interferences[i-1] = Interferences.back();
1364 Interferences.pop_back();
1365 LRegsMap.erase(LRegsPos);
1366 }
1367}
1368
1369/// Return a node that can be scheduled in this cycle. Requirements:
1370/// (1) Ready: latency has been satisfied
1371/// (2) No Hazards: resources are available
1372/// (3) No Interferences: may unschedule to break register interferences.
1373SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
1374 SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop();
1
Assuming the condition is false
2
'?' condition is false
1375 while (CurSU) {
3
Loop condition is false. Execution continues on line 1396
1376 SmallVector<unsigned, 4> LRegs;
1377 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
1378 break;
1379 DEBUG(dbgs() << " Interfering reg " <<
1380 (LRegs[0] == TRI->getNumRegs() ? "CallResource"
1381 : TRI->getName(LRegs[0]))
1382 << " SU #" << CurSU->NodeNum << '\n');
1383 std::pair<LRegsMapT::iterator, bool> LRegsPair =
1384 LRegsMap.insert(std::make_pair(CurSU, LRegs));
1385 if (LRegsPair.second) {
1386 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
1387 Interferences.push_back(CurSU);
1388 }
1389 else {
1390 assert(CurSU->isPending && "Interferences are pending");
1391 // Update the interference with current live regs.
1392 LRegsPair.first->second = LRegs;
1393 }
1394 CurSU = AvailableQueue->pop();
1395 }
1396 if (CurSU)
4
Taking false branch
1397 return CurSU;
1398
1399 // All candidates are delayed due to live physical reg dependencies.
1400 // Try backtracking, code duplication, or inserting cross class copies
1401 // to resolve it.
1402 for (SUnit *TrySU : Interferences) {
5
Assuming '__begin' is not equal to '__end'
1403 SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
1404
1405 // Try unscheduling up to the point where it's safe to schedule
1406 // this node.
1407 SUnit *BtSU = nullptr;
9
'BtSU' initialized to a null pointer value
1408 unsigned LiveCycle = UINT_MAX;
1409 for (unsigned Reg : LRegs) {
6
Assuming '__begin' is equal to '__end'
10
Assuming '__begin' is equal to '__end'
1410 if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
1411 BtSU = LiveRegGens[Reg];
1412 LiveCycle = BtSU->getHeight();
1413 }
1414 }
1415 if (!WillCreateCycle(TrySU, BtSU)) {
7
Assuming the condition is false
8
Taking false branch
11
Assuming the condition is true
12
Taking true branch
1416 // BacktrackBottomUp mutates Interferences!
1417 BacktrackBottomUp(TrySU, BtSU);
1418
1419 // Force the current node to be scheduled before the node that
1420 // requires the physical reg dep.
1421 if (BtSU->isAvailable) {
13
Access to field 'isAvailable' results in a dereference of a null pointer (loaded from variable 'BtSU')
1422 BtSU->isAvailable = false;
1423 if (!BtSU->isPending)
1424 AvailableQueue->remove(BtSU);
1425 }
1426 DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum << ") to SU("
1427 << TrySU->NodeNum << ")\n");
1428 AddPred(TrySU, SDep(BtSU, SDep::Artificial));
1429
1430 // If one or more successors have been unscheduled, then the current
1431 // node is no longer available.
1432 if (!TrySU->isAvailable || !TrySU->NodeQueueId)
1433 CurSU = AvailableQueue->pop();
1434 else {
1435 // Available and in AvailableQueue
1436 AvailableQueue->remove(TrySU);
1437 CurSU = TrySU;
1438 }
1439 // Interferences has been mutated. We must break.
1440 break;
1441 }
1442 }
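
On the path traced above, LRegs comes back empty for this interference, so the loop over LRegs never assigns BtSU, WillCreateCycle(TrySU, nullptr) can still return false, and the dereference flagged at line 1421 follows. A minimal defensive sketch, shown here only as an illustration of where a guard could live (not the actual upstream fix), would skip an interference for which no backtracking target was found:

      // ... BtSU / LiveCycle computed from LRegs as on lines 1407-1414 ...

      // Hypothetical guard: an empty LRegs leaves BtSU null, so there is
      // nothing to unschedule for this interference; skip it rather than
      // pass a null SUnit to WillCreateCycle and dereference it afterwards.
      if (!BtSU)
        continue;

      if (!WillCreateCycle(TrySU, BtSU)) {
        // ... backtrack and break, as in the code above ...
      }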
1443
1444 if (!CurSU) {
1445 // Can't backtrack. If it's too expensive to copy the value, then try
1446 // duplicating the nodes that produce these "too expensive to copy"
1447 // values to break the dependency. If even that doesn't work,
1448 // insert cross class copies.
1449 // If it's not too expensive, i.e. cost != -1, issue copies.
1450 SUnit *TrySU = Interferences[0];
1451 SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
1452 assert(LRegs.size() == 1 && "Can't handle this yet!");
1453 unsigned Reg = LRegs[0];
1454 SUnit *LRDef = LiveRegDefs[Reg];
1455 MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
1456 const TargetRegisterClass *RC =
1457 TRI->getMinimalPhysRegClass(Reg, VT);
1458 const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
1459
1460 // If the cross copy register class is the same as RC, then it must be
1461 // possible to copy the value directly. Do not try to duplicate the def.
1462 // If the cross copy register class is not the same as RC, then it's possible
1463 // to copy the value, but it requires cross register class copies and is
1464 // expensive.
1465 // If the cross copy register class is null, then it's not possible to copy
1466 // the value at all.
1467 SUnit *NewDef = nullptr;
1468 if (DestRC != RC) {
1469 NewDef = CopyAndMoveSuccessors(LRDef);
1470 if (!DestRC && !NewDef)
1471 report_fatal_error("Can't handle live physical register dependency!");
1472 }
1473 if (!NewDef) {
1474 // Issue copies; these can be expensive cross register class copies.
1475 SmallVector<SUnit*, 2> Copies;
1476 InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1477 DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
1478 << " to SU #" << Copies.front()->NodeNum << "\n");
1479 AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
1480 NewDef = Copies.back();
1481 }
1482
1483 DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
1484 << " to SU #" << TrySU->NodeNum << "\n");
1485 LiveRegDefs[Reg] = NewDef;
1486 AddPred(NewDef, SDep(TrySU, SDep::Artificial));
1487 TrySU->isAvailable = false;
1488 CurSU = NewDef;
1489 }
1490 assert(CurSU && "Unable to resolve live physical register dependencies!");
1491 return CurSU;
1492}
1493
1494/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
1495/// schedulers.
1496void ScheduleDAGRRList::ListScheduleBottomUp() {
1497 // Release any predecessors of the special Exit node.
1498 ReleasePredecessors(&ExitSU);
1499
1500 // Add root to Available queue.
1501 if (!SUnits.empty()) {
1502 SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
1503 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
1504 RootSU->isAvailable = true;
1505 AvailableQueue->push(RootSU);
1506 }
1507
1508 // While Available queue is not empty, grab the node with the highest
1509 // priority. If it is not ready put it back. Schedule the node.
1510 Sequence.reserve(SUnits.size());
1511 while (!AvailableQueue->empty() || !Interferences.empty()) {
1512 DEBUG(dbgs() << "\nExamining Available:\n";
1513 AvailableQueue->dump(this));
1514
1515 // Pick the best node to schedule taking all constraints into
1516 // consideration.
1517 SUnit *SU = PickNodeToScheduleBottomUp();
1518
1519 AdvancePastStalls(SU);
1520
1521 ScheduleNodeBottomUp(SU);
1522
1523 while (AvailableQueue->empty() && !PendingQueue.empty()) {
1524 // Advance the cycle to free resources. Skip ahead to the next ready SU.
1525 assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
1526 AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
1527 }
1528 }
1529
1530 // Reverse the order if it is bottom up.
1531 std::reverse(Sequence.begin(), Sequence.end());
1532
1533#ifndef NDEBUG
1534 VerifyScheduledSequence(/*isBottomUp=*/true);
1535#endif
1536}
1537
1538//===----------------------------------------------------------------------===//
1539// RegReductionPriorityQueue Definition
1540//===----------------------------------------------------------------------===//
1541//
1542// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1543// to reduce register pressure.
1544//
1545namespace {
1546class RegReductionPQBase;
1547
1548struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1549 bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
1550};
1551
1552#ifndef NDEBUG
1553template<class SF>
1554struct reverse_sort : public queue_sort {
1555 SF &SortFunc;
1556 reverse_sort(SF &sf) : SortFunc(sf) {}
1557
1558 bool operator()(SUnit* left, SUnit* right) const {
1559 // reverse left/right rather than simply !SortFunc(left, right)
1560 // to expose different paths in the comparison logic.
1561 return SortFunc(right, left);
1562 }
1563};
1564#endif // NDEBUG
1565
1566/// bu_ls_rr_sort - Priority function for bottom up register pressure
1567// reduction scheduler.
1568struct bu_ls_rr_sort : public queue_sort {
1569 enum {
1570 IsBottomUp = true,
1571 HasReadyFilter = false
1572 };
1573
1574 RegReductionPQBase *SPQ;
1575 bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1576
1577 bool operator()(SUnit* left, SUnit* right) const;
1578};
1579
1580// src_ls_rr_sort - Priority function for source order scheduler.
1581struct src_ls_rr_sort : public queue_sort {
1582 enum {
1583 IsBottomUp = true,
1584 HasReadyFilter = false
1585 };
1586
1587 RegReductionPQBase *SPQ;
1588 src_ls_rr_sort(RegReductionPQBase *spq)
1589 : SPQ(spq) {}
1590
1591 bool operator()(SUnit* left, SUnit* right) const;
1592};
1593
1594// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
1595struct hybrid_ls_rr_sort : public queue_sort {
1596 enum {
1597 IsBottomUp = true,
1598 HasReadyFilter = false
1599 };
1600
1601 RegReductionPQBase *SPQ;
1602 hybrid_ls_rr_sort(RegReductionPQBase *spq)
1603 : SPQ(spq) {}
1604
1605 bool isReady(SUnit *SU, unsigned CurCycle) const;
1606
1607 bool operator()(SUnit* left, SUnit* right) const;
1608};
1609
1610// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
1611// scheduler.
1612struct ilp_ls_rr_sort : public queue_sort {
1613 enum {
1614 IsBottomUp = true,
1615 HasReadyFilter = false
1616 };
1617
1618 RegReductionPQBase *SPQ;
1619 ilp_ls_rr_sort(RegReductionPQBase *spq)
1620 : SPQ(spq) {}
1621
1622 bool isReady(SUnit *SU, unsigned CurCycle) const;
1623
1624 bool operator()(SUnit* left, SUnit* right) const;
1625};
1626
1627class RegReductionPQBase : public SchedulingPriorityQueue {
1628protected:
1629 std::vector<SUnit*> Queue;
1630 unsigned CurQueueId;
1631 bool TracksRegPressure;
1632 bool SrcOrder;
1633
1634 // SUnits - The SUnits for the current graph.
1635 std::vector<SUnit> *SUnits;
1636
1637 MachineFunction &MF;
1638 const TargetInstrInfo *TII;
1639 const TargetRegisterInfo *TRI;
1640 const TargetLowering *TLI;
1641 ScheduleDAGRRList *scheduleDAG;
1642
1643 // SethiUllmanNumbers - The SethiUllman number for each node.
1644 std::vector<unsigned> SethiUllmanNumbers;
1645
1646 /// RegPressure - Tracking current reg pressure per register class.
1647 ///
1648 std::vector<unsigned> RegPressure;
1649
1650 /// RegLimit - Tracking the number of allocatable registers per register
1651 /// class.
1652 std::vector<unsigned> RegLimit;
1653
1654public:
1655 RegReductionPQBase(MachineFunction &mf,
1656 bool hasReadyFilter,
1657 bool tracksrp,
1658 bool srcorder,
1659 const TargetInstrInfo *tii,
1660 const TargetRegisterInfo *tri,
1661 const TargetLowering *tli)
1662 : SchedulingPriorityQueue(hasReadyFilter),
1663 CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
1664 MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(nullptr) {
1665 if (TracksRegPressure) {
1666 unsigned NumRC = TRI->getNumRegClasses();
1667 RegLimit.resize(NumRC);
1668 RegPressure.resize(NumRC);
1669 std::fill(RegLimit.begin(), RegLimit.end(), 0);
1670 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1671 for (const TargetRegisterClass *RC : TRI->regclasses())
1672 RegLimit[RC->getID()] = tri->getRegPressureLimit(RC, MF);
1673 }
1674 }
1675
1676 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1677 scheduleDAG = scheduleDag;
1678 }
1679
1680 ScheduleHazardRecognizer* getHazardRec() {
1681 return scheduleDAG->getHazardRec();
1682 }
1683
1684 void initNodes(std::vector<SUnit> &sunits) override;
1685
1686 void addNode(const SUnit *SU) override;
1687
1688 void updateNode(const SUnit *SU) override;
1689
1690 void releaseState() override {
1691 SUnits = nullptr;
1692 SethiUllmanNumbers.clear();
1693 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1694 }
1695
1696 unsigned getNodePriority(const SUnit *SU) const;
1697
1698 unsigned getNodeOrdering(const SUnit *SU) const {
1699 if (!SU->getNode()) return 0;
1700
1701 return SU->getNode()->getIROrder();
1702 }
1703
1704 bool empty() const override { return Queue.empty(); }
1705
1706 void push(SUnit *U) override {
1707 assert(!U->NodeQueueId && "Node in the queue already");
1708 U->NodeQueueId = ++CurQueueId;
1709 Queue.push_back(U);
1710 }
1711
1712 void remove(SUnit *SU) override {
1713 assert(!Queue.empty() && "Queue is empty!");
1714 assert(SU->NodeQueueId != 0 && "Not in queue!");
1715 std::vector<SUnit *>::iterator I = find(Queue, SU);
1716 if (I != std::prev(Queue.end()))
1717 std::swap(*I, Queue.back());
1718 Queue.pop_back();
1719 SU->NodeQueueId = 0;
1720 }
1721
1722 bool tracksRegPressure() const override { return TracksRegPressure; }
1723
1724 void dumpRegPressure() const;
1725
1726 bool HighRegPressure(const SUnit *SU) const;
1727
1728 bool MayReduceRegPressure(SUnit *SU) const;
1729
1730 int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
1731
1732 void scheduledNode(SUnit *SU) override;
1733
1734 void unscheduledNode(SUnit *SU) override;
1735
1736protected:
1737 bool canClobber(const SUnit *SU, const SUnit *Op);
1738 void AddPseudoTwoAddrDeps();
1739 void PrescheduleNodesWithMultipleUses();
1740 void CalculateSethiUllmanNumbers();
1741};
1742
1743template<class SF>
1744static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
1745 std::vector<SUnit *>::iterator Best = Q.begin();
1746 for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
1747 E = Q.end(); I != E; ++I)
1748 if (Picker(*Best, *I))
1749 Best = I;
1750 SUnit *V = *Best;
1751 if (Best != std::prev(Q.end()))
1752 std::swap(*Best, Q.back());
1753 Q.pop_back();
1754 return V;
1755}
1756
1757template<class SF>
1758SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
1759#ifndef NDEBUG
1760 if (DAG->StressSched) {
1761 reverse_sort<SF> RPicker(Picker);
1762 return popFromQueueImpl(Q, RPicker);
1763 }
1764#endif
1765 (void)DAG;
1766 return popFromQueueImpl(Q, Picker);
1767}
1768
1769template<class SF>
1770class RegReductionPriorityQueue : public RegReductionPQBase {
1771 SF Picker;
1772
1773public:
1774 RegReductionPriorityQueue(MachineFunction &mf,
1775 bool tracksrp,
1776 bool srcorder,
1777 const TargetInstrInfo *tii,
1778 const TargetRegisterInfo *tri,
1779 const TargetLowering *tli)
1780 : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
1781 tii, tri, tli),
1782 Picker(this) {}
1783
1784 bool isBottomUp() const override { return SF::IsBottomUp; }
1785
1786 bool isReady(SUnit *U) const override {
1787 return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
1788 }
1789
1790 SUnit *pop() override {
1791 if (Queue.empty()) return nullptr;
1792
1793 SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
1794 V->NodeQueueId = 0;
1795 return V;
1796 }
1797
1798#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1799 LLVM_DUMP_METHOD void dump(ScheduleDAG *DAG) const override {
1800 // Emulate pop() without clobbering NodeQueueIds.
1801 std::vector<SUnit*> DumpQueue = Queue;
1802 SF DumpPicker = Picker;
1803 while (!DumpQueue.empty()) {
1804 SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
1805 dbgs() << "Height " << SU->getHeight() << ": ";
1806 SU->dump(DAG);
1807 }
1808 }
1809#endif
1810};
1811
1812typedef RegReductionPriorityQueue<bu_ls_rr_sort>
1813BURegReductionPriorityQueue;
1814
1815typedef RegReductionPriorityQueue<src_ls_rr_sort>
1816SrcRegReductionPriorityQueue;
1817
1818typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
1819HybridBURRPriorityQueue;
1820
1821typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
1822ILPBURRPriorityQueue;
1823} // end anonymous namespace
1824
1825//===----------------------------------------------------------------------===//
1826// Static Node Priority for Register Pressure Reduction
1827//===----------------------------------------------------------------------===//
1828
1829// Check for special nodes that bypass scheduling heuristics.
1830// Currently this pushes TokenFactor nodes down, but may be used for other
1831// pseudo-ops as well.
1832//
1833// Return -1 to schedule right above left, 1 for left above right.
1834// Return 0 if no bias exists.
1835static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
1836 bool LSchedLow = left->isScheduleLow;
1837 bool RSchedLow = right->isScheduleLow;
1838 if (LSchedLow != RSchedLow)
1839 return LSchedLow < RSchedLow ? 1 : -1;
1840 return 0;
1841}
1842
1843/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
1844/// Smaller number is the higher priority.
1845static unsigned
1846CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1847 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1848 if (SethiUllmanNumber != 0)
1849 return SethiUllmanNumber;
1850
1851 unsigned Extra = 0;
1852 for (const SDep &Pred : SU->Preds) {
1853 if (Pred.isCtrl()) continue; // ignore chain preds
1854 SUnit *PredSU = Pred.getSUnit();
1855 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
1856 if (PredSethiUllman > SethiUllmanNumber) {
1857 SethiUllmanNumber = PredSethiUllman;
1858 Extra = 0;
1859 } else if (PredSethiUllman == SethiUllmanNumber)
1860 ++Extra;
1861 }
1862
1863 SethiUllmanNumber += Extra;
1864
1865 if (SethiUllmanNumber == 0)
1866 SethiUllmanNumber = 1;
1867
1868 return SethiUllmanNumber;
1869}
1870
1871/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1872/// scheduling units.
1873void RegReductionPQBase::CalculateSethiUllmanNumbers() {
1874 SethiUllmanNumbers.assign(SUnits->size(), 0);
1875
1876 for (const SUnit &SU : *SUnits)
1877 CalcNodeSethiUllmanNumber(&SU, SethiUllmanNumbers);
1878}
1879
1880void RegReductionPQBase::addNode(const SUnit *SU) {
1881 unsigned SUSize = SethiUllmanNumbers.size();
1882 if (SUnits->size() > SUSize)
1883 SethiUllmanNumbers.resize(SUSize*2, 0);
1884 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1885}
1886
1887void RegReductionPQBase::updateNode(const SUnit *SU) {
1888 SethiUllmanNumbers[SU->NodeNum] = 0;
1889 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1890}
1891
1892// Lower priority means schedule further down. For bottom-up scheduling, lower
1893// priority SUs are scheduled before higher priority SUs.
1894unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
1895 assert(SU->NodeNum < SethiUllmanNumbers.size());
1896 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1897 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1898 // CopyToReg should be close to its uses to facilitate coalescing and
1899 // avoid spilling.
1900 return 0;
1901 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1902 Opc == TargetOpcode::SUBREG_TO_REG ||
1903 Opc == TargetOpcode::INSERT_SUBREG)
1904 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
1905 // close to their uses to facilitate coalescing.
1906 return 0;
1907 if (SU->NumSuccs == 0 && SU->NumPreds != 0)
1908 // If SU does not have a register use, i.e. it doesn't produce a value
1909 // that would be consumed (e.g. store), then it terminates a chain of
1910 // computation. Give it a large SethiUllman number so it will be
1911 // scheduled right before its predecessors, so that it doesn't lengthen
1912 // their live ranges.
1913 return 0xffff;
1914 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
1915 // If SU does not have a register def, schedule it close to its uses
1916 // because it does not lengthen any live ranges.
1917 return 0;
1918#if 1
1919 return SethiUllmanNumbers[SU->NodeNum];
1920#else
1921 unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
1922 if (SU->isCallOp) {
1923 // FIXME: This assumes all of the defs are used as call operands.
1924 int NP = (int)Priority - SU->getNode()->getNumValues();
1925 return (NP > 0) ? NP : 0;
1926 }
1927 return Priority;
1928#endif
1929}
1930
1931//===----------------------------------------------------------------------===//
1932// Register Pressure Tracking
1933//===----------------------------------------------------------------------===//
1934
1935#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1936 LLVM_DUMP_METHOD void RegReductionPQBase::dumpRegPressure() const {
1937 for (const TargetRegisterClass *RC : TRI->regclasses()) {
1938 unsigned Id = RC->getID();
1939 unsigned RP = RegPressure[Id];
1940 if (!RP) continue;
1941 DEBUG(dbgs() << TRI->getRegClassName(RC) << ": " << RP << " / "
1942 << RegLimit[Id] << '\n');
1943 }
1944}
1945#endif
1946
1947bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1948 if (!TLI)
1949 return false;
1950
1951 for (const SDep &Pred : SU->Preds) {
1952 if (Pred.isCtrl())
1953 continue;
1954 SUnit *PredSU = Pred.getSUnit();
1955 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1956 // to cover the number of registers defined (they are all live).
1957 if (PredSU->NumRegDefsLeft == 0) {
1958 continue;
1959 }
1960 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1961 RegDefPos.IsValid(); RegDefPos.Advance()) {
1962 unsigned RCId, Cost;
1963 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
1964
1965 if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1966 return true;
1967 }
1968 }
1969 return false;
1970}
1971
1972bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1973 const SDNode *N = SU->getNode();
1974
1975 if (!N->isMachineOpcode() || !SU->NumSuccs)
1976 return false;
1977
1978 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1979 for (unsigned i = 0; i != NumDefs; ++i) {
1980 MVT VT = N->getSimpleValueType(i);
1981 if (!N->hasAnyUseOfValue(i))
1982 continue;
1983 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1984 if (RegPressure[RCId] >= RegLimit[RCId])
1985 return true;
1986 }
1987 return false;
1988}
1989
1990 // Compute the register pressure contribution of this instruction by counting
1991 // up for uses that are not live and down for defs. Only count register classes
1992// that are already under high pressure. As a side effect, compute the number of
1993// uses of registers that are already live.
1994//
1995// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1996// so could probably be factored.
1997int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1998 LiveUses = 0;
1999 int PDiff = 0;
2000 for (const SDep &Pred : SU->Preds) {
2001 if (Pred.isCtrl())
2002 continue;
2003 SUnit *PredSU = Pred.getSUnit();
2004 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
2005 // to cover the number of registers defined (they are all live).
2006 if (PredSU->NumRegDefsLeft == 0) {
2007 if (PredSU->getNode()->isMachineOpcode())
2008 ++LiveUses;
2009 continue;
2010 }
2011 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2012 RegDefPos.IsValid(); RegDefPos.Advance()) {
2013 MVT VT = RegDefPos.GetValue();
2014 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2015 if (RegPressure[RCId] >= RegLimit[RCId])
2016 ++PDiff;
2017 }
2018 }
2019 const SDNode *N = SU->getNode();
2020
2021 if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
2022 return PDiff;
2023
2024 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2025 for (unsigned i = 0; i != NumDefs; ++i) {
2026 MVT VT = N->getSimpleValueType(i);
2027 if (!N->hasAnyUseOfValue(i))
2028 continue;
2029 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2030 if (RegPressure[RCId] >= RegLimit[RCId])
2031 --PDiff;
2032 }
2033 return PDiff;
2034}
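
As a concrete instance of the computation above: a node whose two operands are each produced by a node with one not-yet-live register def in a class that is at its pressure limit, and which itself defines one used value in that same class, gets PDiff = 2 - 1 = +1, i.e. scheduling it is expected to push that class further over its limit; LiveUses additionally counts operands whose defs are already fully covered by scheduled uses.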
2035
2036void RegReductionPQBase::scheduledNode(SUnit *SU) {
2037 if (!TracksRegPressure)
2038 return;
2039
2040 if (!SU->getNode())
2041 return;
2042
2043 for (const SDep &Pred : SU->Preds) {
2044 if (Pred.isCtrl())
2045 continue;
2046 SUnit *PredSU = Pred.getSUnit();
2047 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
2048 // to cover the number of registers defined (they are all live).
2049 if (PredSU->NumRegDefsLeft == 0) {
2050 continue;
2051 }
2052 // FIXME: The ScheduleDAG currently loses information about which of a
2053 // node's values is consumed by each dependence. Consequently, if the node
2054 // defines multiple register classes, we don't know which to pressurize
2055 // here. Instead the following loop consumes the register defs in an
2056 // arbitrary order. At least it handles the common case of clustered loads
2057 // to the same class. For precise liveness, each SDep needs to indicate the
2058 // result number. But that tightly couples the ScheduleDAG with the
2059 // SelectionDAG making updates tricky. A simpler hack would be to attach a
2060 // value type or register class to SDep.
2061 //
2062 // The most important aspect of register tracking is balancing the increase
2063 // here with the reduction further below. Note that this SU may use multiple
2064 // defs in PredSU. They can't be determined here, but we've already
2065 // compensated by reducing NumRegDefsLeft in PredSU during
2066 // ScheduleDAGSDNodes::AddSchedEdges.
2067 --PredSU->NumRegDefsLeft;
2068 unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
2069 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2070 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2071 if (SkipRegDefs)
2072 continue;
2073
2074 unsigned RCId, Cost;
2075 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2076 RegPressure[RCId] += Cost;
2077 break;
2078 }
2079 }
2080
2081 // We should have this assert, but there may be dead SDNodes that never
2082 // materialize as SUnits, so they don't appear to generate liveness.
2083 //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
2084 int SkipRegDefs = (int)SU->NumRegDefsLeft;
2085 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
2086 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2087 if (SkipRegDefs > 0)
2088 continue;
2089 unsigned RCId, Cost;
2090 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2091 if (RegPressure[RCId] < Cost) {
2092 // Register pressure tracking is imprecise. This can happen. But we try
2093 // hard not to let it happen because it likely results in poor scheduling.
2094 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
2095 RegPressure[RCId] = 0;
2096 }
2097 else {
2098 RegPressure[RCId] -= Cost;
2099 }
2100 }
2101 DEBUG(dumpRegPressure());
2102}
2103
2104void RegReductionPQBase::unscheduledNode(SUnit *SU) {
2105 if (!TracksRegPressure)
2106 return;
2107
2108 const SDNode *N = SU->getNode();
2109 if (!N) return;
2110
2111 if (!N->isMachineOpcode()) {
2112 if (N->getOpcode() != ISD::CopyToReg)
2113 return;
2114 } else {
2115 unsigned Opc = N->getMachineOpcode();
2116 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2117 Opc == TargetOpcode::INSERT_SUBREG ||
2118 Opc == TargetOpcode::SUBREG_TO_REG ||
2119 Opc == TargetOpcode::REG_SEQUENCE ||
2120 Opc == TargetOpcode::IMPLICIT_DEF)
2121 return;
2122 }
2123
2124 for (const SDep &Pred : SU->Preds) {
2125 if (Pred.isCtrl())
2126 continue;
2127 SUnit *PredSU = Pred.getSUnit();
2128 // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
2129 // counts data deps.
2130 if (PredSU->NumSuccsLeft != PredSU->Succs.size())
2131 continue;
2132 const SDNode *PN = PredSU->getNode();
2133 if (!PN->isMachineOpcode()) {
2134 if (PN->getOpcode() == ISD::CopyFromReg) {
2135 MVT VT = PN->getSimpleValueType(0);
2136 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2137 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2138 }
2139 continue;
2140 }
2141 unsigned POpc = PN->getMachineOpcode();
2142 if (POpc == TargetOpcode::IMPLICIT_DEF)
2143 continue;
2144 if (POpc == TargetOpcode::EXTRACT_SUBREG ||
2145 POpc == TargetOpcode::INSERT_SUBREG ||
2146 POpc == TargetOpcode::SUBREG_TO_REG) {
2147 MVT VT = PN->getSimpleValueType(0);
2148 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2149 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2150 continue;
2151 }
2152 unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
2153 for (unsigned i = 0; i != NumDefs; ++i) {
2154 MVT VT = PN->getSimpleValueType(i);
2155 if (!PN->hasAnyUseOfValue(i))
2156 continue;
2157 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2158 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
2159 // Register pressure tracking is imprecise. This can happen.
2160 RegPressure[RCId] = 0;
2161 else
2162 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
2163 }
2164 }
2165
2166 // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
2167 // may transfer data dependencies to CopyToReg.
2168 if (SU->NumSuccs && N->isMachineOpcode()) {
2169 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2170 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2171 MVT VT = N->getSimpleValueType(i);
2172 if (VT == MVT::Glue || VT == MVT::Other)
2173 continue;
2174 if (!N->hasAnyUseOfValue(i))
2175 continue;
2176 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2177 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2178 }
2179 }
2180
2181 DEBUG(dumpRegPressure());
2182}
2183
2184//===----------------------------------------------------------------------===//
2185// Dynamic Node Priority for Register Pressure Reduction
2186//===----------------------------------------------------------------------===//
2187
2188/// closestSucc - Returns the scheduled cycle of the successor which is
2189/// closest to the current cycle.
2190static unsigned closestSucc(const SUnit *SU) {
2191 unsigned MaxHeight = 0;
2192 for (const SDep &Succ : SU->Succs) {
2193 if (Succ.isCtrl()) continue; // ignore chain succs
2194 unsigned Height = Succ.getSUnit()->getHeight();
2195 // If there are a bunch of CopyToRegs stacked up, they should be considered
2196 // to be at the same position.
2197 if (Succ.getSUnit()->getNode() &&
2198 Succ.getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
2199 Height = closestSucc(Succ.getSUnit())+1;
2200 if (Height > MaxHeight)
2201 MaxHeight = Height;
2202 }
2203 return MaxHeight;
2204}
2205
2206 /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
2207/// for scratch registers, i.e. number of data dependencies.
2208static unsigned calcMaxScratches(const SUnit *SU) {
2209 unsigned Scratches = 0;
2210 for (const SDep &Pred : SU->Preds) {
2211 if (Pred.isCtrl()) continue; // ignore chain preds
2212 Scratches++;
2213 }
2214 return Scratches;
2215}
2216
2217/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
2218/// CopyFromReg from a virtual register.
2219static bool hasOnlyLiveInOpers(const SUnit *SU) {
2220 bool RetVal = false;
2221 for (const SDep &Pred : SU->Preds) {
2222 if (Pred.isCtrl()) continue;
2223 const SUnit *PredSU = Pred.getSUnit();
2224 if (PredSU->getNode() &&
2225 PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
2226 unsigned Reg =
2227 cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
2228 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2229 RetVal = true;
2230 continue;
2231 }
2232 }
2233 return false;
2234 }
2235 return RetVal;
2236}
2237
2238/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
2239/// CopyToReg to a virtual register. This SU def is probably a liveout and
2240/// it has no other use. It should be scheduled closer to the terminator.
2241static bool hasOnlyLiveOutUses(const SUnit *SU) {
2242 bool RetVal = false;
2243 for (const SDep &Succ : SU->Succs) {
2244 if (Succ.isCtrl()) continue;
2245 const SUnit *SuccSU = Succ.getSUnit();
2246 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
2247 unsigned Reg =
2248 cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
2249 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2250 RetVal = true;
2251 continue;
2252 }
2253 }
2254 return false;
2255 }
2256 return RetVal;
2257}
2258
2259// Set isVRegCycle for a node with only live in opers and live out uses. Also
2260// set isVRegCycle for its CopyFromReg operands.
2261//
2262// This is only relevant for single-block loops, in which case the VRegCycle
2263// node is likely an induction variable in which the operand and target virtual
2264// registers should be coalesced (e.g. pre/post increment values). Setting the
2265// isVRegCycle flag helps the scheduler prioritize other uses of the same
2266// CopyFromReg so that this node becomes the virtual register "kill". This
2267// avoids interference between the values live in and out of the block and
2268// eliminates a copy inside the loop.
2269static void initVRegCycle(SUnit *SU) {
2270 if (DisableSchedVRegCycle)
2271 return;
2272
2273 if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
2274 return;
2275
2276 DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2277
2278 SU->isVRegCycle = true;
2279
2280 for (const SDep &Pred : SU->Preds) {
2281 if (Pred.isCtrl()) continue;
2282 Pred.getSUnit()->isVRegCycle = true;
2283 }
2284}
2285
2286// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
2287// CopyFromReg operands. We should no longer penalize other uses of this VReg.
2288static void resetVRegCycle(SUnit *SU) {
2289 if (!SU->isVRegCycle)
2290 return;
2291
2292 for (const SDep &Pred : SU->Preds) {
2293 if (Pred.isCtrl()) continue; // ignore chain preds
2294 SUnit *PredSU = Pred.getSUnit();
2295 if (PredSU->isVRegCycle) {
2296 assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2297 "VRegCycle def must be CopyFromReg");
2298 Pred.getSUnit()->isVRegCycle = false;
2299 }
2300 }
2301}
2302
2303// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
2304// means a node that defines the VRegCycle has not been scheduled yet.
2305static bool hasVRegCycleUse(const SUnit *SU) {
2306 // If this SU also defines the VReg, don't hoist it as a "use".
2307 if (SU->isVRegCycle)
2308 return false;
2309
2310 for (const SDep &Pred : SU->Preds) {
2311 if (Pred.isCtrl()) continue; // ignore chain preds
2312 if (Pred.getSUnit()->isVRegCycle &&
2313 Pred.getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
2314 DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
2315 return true;
2316 }
2317 }
2318 return false;
2319}
2320
2321// Check for either a dependence (latency) or resource (hazard) stall.
2322//
2323// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2324static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2325 if ((int)SPQ->getCurCycle() < Height) return true;
2326 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2327 != ScheduleHazardRecognizer::NoHazard)
2328 return true;
2329 return false;
2330}
2331
2332// Return -1 if left has higher priority, 1 if right has higher priority.
2333// Return 0 if latency-based priority is equivalent.
2334static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2335 RegReductionPQBase *SPQ) {
2336 // Scheduling an instruction that uses a VReg whose postincrement has not yet
2337 // been scheduled will induce a copy. Model this as an extra cycle of latency.
2338 int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
2339 int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
2340 int LHeight = (int)left->getHeight() + LPenalty;
2341 int RHeight = (int)right->getHeight() + RPenalty;
2342
2343 bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
2344 BUHasStall(left, LHeight, SPQ);
2345 bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
2346 BUHasStall(right, RHeight, SPQ);
2347
2348 // If scheduling one of the nodes will cause a pipeline stall, delay it.
2349 // If scheduling both of them would cause a pipeline stall, sort them
2350 // according to their height.
2351 if (LStall) {
2352 if (!RStall)
2353 return 1;
2354 if (LHeight != RHeight)
2355 return LHeight > RHeight ? 1 : -1;
2356 } else if (RStall)
2357 return -1;
2358
2359 // If either node is scheduling for latency, sort them by height/depth
2360 // and latency.
2361 if (!checkPref || (left->SchedulingPref == Sched::ILP ||
2362 right->SchedulingPref == Sched::ILP)) {
2363 // If the HazardRecognizer is enabled (grouping instructions by cycle) and
2364 // neither instruction stalls (!LStall && !RStall), then height is already
2365 // covered, so only depth matters. We also reach this point if both stall
2366 // but have the same height.
2367 if (!SPQ->getHazardRec()->isEnabled()) {
2368 if (LHeight != RHeight)
2369 return LHeight > RHeight ? 1 : -1;
2370 }
2371 int LDepth = left->getDepth() - LPenalty;
2372 int RDepth = right->getDepth() - RPenalty;
2373 if (LDepth != RDepth) {
2374 DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
2375 << ") depth " << LDepth << " vs SU (" << right->NodeNum
2376 << ") depth " << RDepth << "\n");
2377 return LDepth < RDepth ? 1 : -1;
2378 }
2379 if (left->Latency != right->Latency)
2380 return left->Latency > right->Latency ? 1 : -1;
2381 }
2382 return 0;
2383}
2384
2385static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
2386 // Schedule physical register definitions close to their use. This is
2387 // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
2388 // long as shortening physreg live ranges is generally good, we can defer
2389 // creating a subtarget hook.
2390 if (!DisableSchedPhysRegJoin) {
2391 bool LHasPhysReg = left->hasPhysRegDefs;
2392 bool RHasPhysReg = right->hasPhysRegDefs;
2393 if (LHasPhysReg != RHasPhysReg) {
2394 #ifndef NDEBUG
2395 static const char *const PhysRegMsg[] = { " has no physreg",
2396 " defines a physreg" };
2397 #endif
2398 DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
2399 << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
2400 << PhysRegMsg[RHasPhysReg] << "\n");
2401 return LHasPhysReg < RHasPhysReg;
2402 }
2403 }
2404
2405 // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
2406 unsigned LPriority = SPQ->getNodePriority(left);
2407 unsigned RPriority = SPQ->getNodePriority(right);
2408
2409 // Be really careful about hoisting call operands above previous calls.
2410 // Only allow it if it would reduce register pressure.
2411 if (left->isCall && right->isCallOp) {
2412 unsigned RNumVals = right->getNode()->getNumValues();
2413 RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2414 }
2415 if (right->isCall && left->isCallOp) {
2416 unsigned LNumVals = left->getNode()->getNumValues();
2417 LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2418 }
2419
2420 if (LPriority != RPriority)
2421 return LPriority > RPriority;
2422
2423 // If one or both of the nodes are calls and their Sethi-Ullman numbers are
2424 // the same, then keep source order.
2425 if (left->isCall || right->isCall) {
2426 unsigned LOrder = SPQ->getNodeOrdering(left);
2427 unsigned ROrder = SPQ->getNodeOrdering(right);
2428
2429 // Prefer an ordering where the lower the non-zero order number, the higher
2430 // the preference.
2431 if ((LOrder || ROrder) && LOrder != ROrder)
2432 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2433 }
2434
2435 // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
2436 // e.g.
2437 // t1 = op t2, c1
2438 // t3 = op t4, c2
2439 //
2440 // and the following instructions are both ready.
2441 // t2 = op c3
2442 // t4 = op c4
2443 //
2444 // Then schedule t2 = op first.
2445 // i.e.
2446 // t4 = op c4
2447 // t2 = op c3
2448 // t1 = op t2, c1
2449 // t3 = op t4, c2
2450 //
2451 // This creates more short live intervals.
2452 unsigned LDist = closestSucc(left);
2453 unsigned RDist = closestSucc(right);
2454 if (LDist != RDist)
2455 return LDist < RDist;
2456
2457 // How many registers become live when the node is scheduled.
2458 unsigned LScratch = calcMaxScratches(left);
2459 unsigned RScratch = calcMaxScratches(right);
2460 if (LScratch != RScratch)
2461 return LScratch > RScratch;
2462
2463 // Comparing latency against a call makes little sense unless the node
2464 // is register pressure-neutral.
2465 if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
2466 return (left->NodeQueueId > right->NodeQueueId);
2467
2468 // Do not compare latencies when one or both of the nodes are calls.
2469 if (!DisableSchedCycles &&
2470 !(left->isCall || right->isCall)) {
2471 int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2472 if (result != 0)
2473 return result > 0;
2474 }
2475 else {
2476 if (left->getHeight() != right->getHeight())
2477 return left->getHeight() > right->getHeight();
2478
2479 if (left->getDepth() != right->getDepth())
2480 return left->getDepth() < right->getDepth();
2481 }
2482
2483 assert(left->NodeQueueId && right->NodeQueueId &&
2484 "NodeQueueId cannot be zero");
2485 return (left->NodeQueueId > right->NodeQueueId);
2486}
2487
2488// Bottom up
2489bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2490 if (int res = checkSpecialNodes(left, right))
2491 return res > 0;
2492
2493 return BURRSort(left, right, SPQ);
2494}
2495
2496// Source order, otherwise bottom up.
2497bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2498 if (int res = checkSpecialNodes(left, right))
2499 return res > 0;
2500
2501 unsigned LOrder = SPQ->getNodeOrdering(left);
2502 unsigned ROrder = SPQ->getNodeOrdering(right);
2503
2504 // Prefer an ordering where the lower the non-zero order number, the higher
2505 // the preference.
2506 if ((LOrder || ROrder) && LOrder != ROrder)
2507 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2508
2509 return BURRSort(left, right, SPQ);
2510}
2511
2512// If the time between now and when the instruction will be ready can cover
2513// the spill code, then avoid adding it to the ready queue. This gives long
2514// stalls highest priority and allows hoisting across calls. It should also
2515// speed up processing the available queue.
2516bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2517 static const unsigned ReadyDelay = 3;
2518
2519 if (SPQ->MayReduceRegPressure(SU)) return true;
2520
2521 if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2522
2523 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2524 != ScheduleHazardRecognizer::NoHazard)
2525 return false;
2526
2527 return true;
2528}
2529
2530// Return true if right should be scheduled with higher priority than left.
2531bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2532 if (int res = checkSpecialNodes(left, right))
2533 return res > 0;
2534
2535 if (left->isCall || right->isCall)
2536 // No way to compute latency of calls.
2537 return BURRSort(left, right, SPQ);
2538
2539 bool LHigh = SPQ->HighRegPressure(left);
2540 bool RHigh = SPQ->HighRegPressure(right);
2541 // Avoid causing spills. If register pressure is high, schedule for
2542 // register pressure reduction.
2543 if (LHigh && !RHigh) {
2544 DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
2545 << right->NodeNum << ")\n");
2546 return true;
2547 }
2548 else if (!LHigh && RHigh) {
2549 DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
2550 << left->NodeNum << ")\n");
2551 return false;
2552 }
2553 if (!LHigh && !RHigh) {
2554 int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2555 if (result != 0)
2556 return result > 0;
2557 }
2558 return BURRSort(left, right, SPQ);
2559}
2560
2561// Schedule as many instructions in each cycle as possible. So don't make an
2562// instruction available unless it is ready in the current cycle.
2563bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2564 if (SU->getHeight() > CurCycle) return false;
2565
2566 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2567 != ScheduleHazardRecognizer::NoHazard)
2568 return false;
2569
2570 return true;
2571}
2572
2573static bool canEnableCoalescing(SUnit *SU) {
2574 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
2575 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
2576 // CopyToReg should be close to its uses to facilitate coalescing and
2577 // avoid spilling.
2578 return true;
2579
2580 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2581 Opc == TargetOpcode::SUBREG_TO_REG ||
2582 Opc == TargetOpcode::INSERT_SUBREG)
2583 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
2584 // close to their uses to facilitate coalescing.
2585 return true;
2586
2587 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
2588 // If SU does not have a register def, schedule it close to its uses
2589 // because it does not lengthen any live ranges.
2590 return true;
2591
2592 return false;
2593}
2594
2595// list-ilp is currently an experimental scheduler that allows various
2596// heuristics to be enabled prior to the normal register reduction logic.
2597bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2598 if (int res = checkSpecialNodes(left, right))
2599 return res > 0;
2600
2601 if (left->isCall || right->isCall)
2602 // No way to compute latency of calls.
2603 return BURRSort(left, right, SPQ);
2604
2605 unsigned LLiveUses = 0, RLiveUses = 0;
2606 int LPDiff = 0, RPDiff = 0;
2607 if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
2608 LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2609 RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2610 }
2611 if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2612 DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
2613 << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
2614 return LPDiff > RPDiff;
2615 }
2616
2617 if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
2618 bool LReduce = canEnableCoalescing(left);
2619 bool RReduce = canEnableCoalescing(right);
2620 if (LReduce && !RReduce) return false;
2621 if (RReduce && !LReduce) return true;
2622 }
2623
2624 if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
2625    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2626          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
2627 return LLiveUses < RLiveUses;
2628 }
2629
2630 if (!DisableSchedStalls) {
2631 bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2632 bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2633 if (LStall != RStall)
2634 return left->getHeight() > right->getHeight();
2635 }
2636
2637 if (!DisableSchedCriticalPath) {
2638 int spread = (int)left->getDepth() - (int)right->getDepth();
2639 if (std::abs(spread) > MaxReorderWindow) {
2640      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2641            << left->getDepth() << " != SU(" << right->NodeNum << "): "
2642            << right->getDepth() << "\n");
2643 return left->getDepth() < right->getDepth();
2644 }
2645 }
2646
2647 if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2648 int spread = (int)left->getHeight() - (int)right->getHeight();
2649 if (std::abs(spread) > MaxReorderWindow)
2650 return left->getHeight() > right->getHeight();
2651 }
2652
2653 return BURRSort(left, right, SPQ);
2654}
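
The comparator defined above is consumed by the scheduler's priority queue, so its chain of heuristics forms a strict weak ordering: returning true means "left" should be picked after "right". Below is a minimal, self-contained C++ sketch of that fall-through tie-break pattern. It is not LLVM code; the Unit struct, its fields, and the two toy tie-breakers are hypothetical stand-ins, and it simply assumes the std::priority_queue convention that the comparator's "smaller" element is popped last.

// Illustrative toy only -- not part of ScheduleDAGRRList.cpp. "Unit", its
// fields, and the two tie-breakers are hypothetical stand-ins for SUnit
// metrics; the point is the fall-through structure of the comparator.
#include <iostream>
#include <queue>
#include <vector>

struct Unit {
  int Id;
  int PressureDiff; // stand-in for a register-pressure metric
  int Height;       // stand-in for SUnit height
};

// Strict weak ordering: returning true means 'a' is popped *after* 'b',
// i.e. 'b' has the higher scheduling priority. Each heuristic decides only
// when it can tell the two units apart; otherwise it falls through.
struct ToyPriority {
  bool operator()(const Unit &a, const Unit &b) const {
    if (a.PressureDiff != b.PressureDiff)
      return a.PressureDiff > b.PressureDiff; // smaller pressure impact wins
    return a.Height < b.Height;               // then greater height wins
  }
};

int main() {
  std::priority_queue<Unit, std::vector<Unit>, ToyPriority> Q;
  Q.push({0, 2, 5});
  Q.push({1, 0, 3});
  Q.push({2, 0, 7});
  while (!Q.empty()) { // pops unit 2, then 1, then 0
    std::cout << "schedule unit " << Q.top().Id << "\n";
    Q.pop();
  }
}
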
2655
2656void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2657 SUnits = &sunits;
2658 // Add pseudo dependency edges for two-address nodes.
2659 if (!Disable2AddrHack)
2660 AddPseudoTwoAddrDeps();
2661 // Reroute edges to nodes with multiple uses.
2662 if (!TracksRegPressure && !SrcOrder)
2663 PrescheduleNodesWithMultipleUses();
2664 // Calculate node priorities.
2665 CalculateSethiUllmanNumbers();
2666
2667 // For single block loops, mark nodes that look like canonical IV increments.
2668 if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB))
2669 for (SUnit &SU : sunits)
2670 initVRegCycle(&SU);
2671}
2672
2673//===----------------------------------------------------------------------===//
2674// Preschedule for Register Pressure
2675//===----------------------------------------------------------------------===//
2676
2677bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
2678 if (SU->isTwoAddress) {
2679 unsigned Opc = SU->getNode()->getMachineOpcode();
2680 const MCInstrDesc &MCID = TII->get(Opc);
2681 unsigned NumRes = MCID.getNumDefs();
2682 unsigned NumOps = MCID.getNumOperands() - NumRes;
2683 for (unsigned i = 0; i != NumOps; ++i) {
2684 if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
2685 SDNode *DU = SU->getNode()->getOperand(i).getNode();
2686 if (DU->getNodeId() != -1 &&
2687 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
2688 return true;
2689 }
2690 }
2691 }
2692 return false;
2693}
2694
2695/// canClobberReachingPhysRegUse - True if SU would clobber one of its
2696/// successors' explicit physregs whose definition can reach DepSU,
2697/// i.e. DepSU should not be scheduled above SU.
2698static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
2699 ScheduleDAGRRList *scheduleDAG,
2700 const TargetInstrInfo *TII,
2701 const TargetRegisterInfo *TRI) {
2702 const MCPhysReg *ImpDefs
2703 = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
2704 const uint32_t *RegMask = getNodeRegMask(SU->getNode());
2705  if (!ImpDefs && !RegMask)
2706 return false;
2707
2708 for (const SDep &Succ : SU->Succs) {
2709 SUnit *SuccSU = Succ.getSUnit();
2710 for (const SDep &SuccPred : SuccSU->Preds) {
2711 if (!SuccPred.isAssignedRegDep())
2712 continue;
2713
2714 if (RegMask &&
2715 MachineOperand::clobbersPhysReg(RegMask, SuccPred.getReg()) &&
2716 scheduleDAG->IsReachable(DepSU, SuccPred.getSUnit()))
2717 return true;
2718
2719 if (ImpDefs)
2720 for (const MCPhysReg *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
2721 // Return true if SU clobbers this physical register use and the
2722            // definition of the register reaches DepSU. IsReachable queries
2723 // a topological forward sort of the DAG (following the successors).
2724 if (TRI->regsOverlap(*ImpDef, SuccPred.getReg()) &&
2725 scheduleDAG->IsReachable(DepSU, SuccPred.getSUnit()))
2726 return true;
2727 }
2728 }
2729 return false;
2730}
2731
2732/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
2733/// physical register defs.
2734static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
2735 const TargetInstrInfo *TII,
2736 const TargetRegisterInfo *TRI) {
2737 SDNode *N = SuccSU->getNode();
2738 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2739 const MCPhysReg *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
2740  assert(ImpDefs && "Caller should check hasPhysRegDefs");
2741 for (const SDNode *SUNode = SU->getNode(); SUNode;
2742 SUNode = SUNode->getGluedNode()) {
2743 if (!SUNode->isMachineOpcode())
2744 continue;
2745 const MCPhysReg *SUImpDefs =
2746 TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2747 const uint32_t *SURegMask = getNodeRegMask(SUNode);
2748 if (!SUImpDefs && !SURegMask)
2749 continue;
2750 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2751 MVT VT = N->getSimpleValueType(i);
2752 if (VT == MVT::Glue || VT == MVT::Other)
2753 continue;
2754 if (!N->hasAnyUseOfValue(i))
2755 continue;
2756 unsigned Reg = ImpDefs[i - NumDefs];
2757 if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
2758 return true;
2759 if (!SUImpDefs)
2760 continue;
2761 for (;*SUImpDefs; ++SUImpDefs) {
2762 unsigned SUReg = *SUImpDefs;
2763 if (TRI->regsOverlap(Reg, SUReg))
2764 return true;
2765 }
2766 }
2767 }
2768 return false;
2769}
2770
2771/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2772/// are not handled well by the general register pressure reduction
2773/// heuristics. When presented with code like this:
2774///
2775/// N
2776/// / |
2777/// / |
2778/// U store
2779/// |
2780/// ...
2781///
2782/// the heuristics tend to push the store up, but since the
2783/// operand of the store has another use (U), this would increase
2784/// the length of that other use (the U->N edge).
2785///
2786/// This function transforms code like the above to route U's
2787/// dependence through the store when possible, like this:
2788///
2789/// N
2790/// ||
2791/// ||
2792/// store
2793/// |
2794/// U
2795/// |
2796/// ...
2797///
2798/// This results in the store being scheduled immediately
2799/// after N, which shortens the U->N live range, reducing
2800/// register pressure.
2801///
2802void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2803 // Visit all the nodes in topological order, working top-down.
2804 for (SUnit &SU : *SUnits) {
2805 // For now, only look at nodes with no data successors, such as stores.
2806 // These are especially important, due to the heuristics in
2807 // getNodePriority for nodes with no data successors.
2808 if (SU.NumSuccs != 0)
2809 continue;
2810 // For now, only look at nodes with exactly one data predecessor.
2811 if (SU.NumPreds != 1)
2812 continue;
2813 // Avoid prescheduling copies to virtual registers, which don't behave
2814 // like other nodes from the perspective of scheduling heuristics.
2815 if (SDNode *N = SU.getNode())
2816 if (N->getOpcode() == ISD::CopyToReg &&
2817 TargetRegisterInfo::isVirtualRegister
2818 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2819 continue;
2820
2821 // Locate the single data predecessor.
2822 SUnit *PredSU = nullptr;
2823 for (const SDep &Pred : SU.Preds)
2824 if (!Pred.isCtrl()) {
2825 PredSU = Pred.getSUnit();
2826 break;
2827 }
2828    assert(PredSU);
2829
2830 // Don't rewrite edges that carry physregs, because that requires additional
2831 // support infrastructure.
2832 if (PredSU->hasPhysRegDefs)
2833 continue;
2834 // Short-circuit the case where SU is PredSU's only data successor.
2835 if (PredSU->NumSuccs == 1)
2836 continue;
2837 // Avoid prescheduling to copies from virtual registers, which don't behave
2838 // like other nodes from the perspective of scheduling heuristics.
2839 if (SDNode *N = SU.getNode())
2840 if (N->getOpcode() == ISD::CopyFromReg &&
2841 TargetRegisterInfo::isVirtualRegister
2842 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2843 continue;
2844
2845 // Perform checks on the successors of PredSU.
2846 for (const SDep &PredSucc : PredSU->Succs) {
2847 SUnit *PredSuccSU = PredSucc.getSUnit();
2848 if (PredSuccSU == &SU) continue;
2849 // If PredSU has another successor with no data successors, for
2850 // now don't attempt to choose either over the other.
2851 if (PredSuccSU->NumSuccs == 0)
2852 goto outer_loop_continue;
2853 // Don't break physical register dependencies.
2854 if (SU.hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2855 if (canClobberPhysRegDefs(PredSuccSU, &SU, TII, TRI))
2856 goto outer_loop_continue;
2857 // Don't introduce graph cycles.
2858 if (scheduleDAG->IsReachable(&SU, PredSuccSU))
2859 goto outer_loop_continue;
2860 }
2861
2862 // Ok, the transformation is safe and the heuristics suggest it is
2863 // profitable. Update the graph.
2864    DEBUG(dbgs() << "    Prescheduling SU #" << SU.NodeNum
2865          << " next to PredSU #" << PredSU->NodeNum
2866          << " to guide scheduling in the presence of multiple uses\n");
2867 for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2868 SDep Edge = PredSU->Succs[i];
2869      assert(!Edge.isAssignedRegDep());
2870 SUnit *SuccSU = Edge.getSUnit();
2871 if (SuccSU != &SU) {
2872 Edge.setSUnit(PredSU);
2873 scheduleDAG->RemovePred(SuccSU, Edge);
2874 scheduleDAG->AddPred(&SU, Edge);
2875 Edge.setSUnit(&SU);
2876 scheduleDAG->AddPred(SuccSU, Edge);
2877 --i;
2878 }
2879 }
2880 outer_loop_continue:;
2881 }
2882}
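
As a complement to the diagram in the comment before PrescheduleNodesWithMultipleUses(), here is a minimal, self-contained C++ sketch of the edge rerouting itself on a toy graph. It is not LLVM code: the node names and the Graph type are invented, and all of the legality checks performed above (physreg defs, cycle detection, virtual-register copies) are omitted.

// Illustrative toy only -- not part of ScheduleDAGRRList.cpp. Node names and
// the Graph type are invented; the legality checks done above are omitted.
#include <iostream>
#include <map>
#include <set>
#include <string>

using Graph = std::map<std::string, std::set<std::string>>; // node -> successors

// Reroute every successor edge of Pred (other than the one to Store) so it
// originates from Store instead, mirroring the loop over PredSU->Succs above.
static void rerouteThroughStore(Graph &G, const std::string &Pred,
                                const std::string &Store) {
  std::set<std::string> Moved;
  for (const std::string &Succ : G[Pred])
    if (Succ != Store)
      Moved.insert(Succ);
  for (const std::string &Succ : Moved) {
    G[Pred].erase(Succ);   // analogous to RemovePred(SuccSU, Edge)
    G[Store].insert(Succ); // analogous to AddPred(SuccSU, Edge from the store)
  }
}

int main() {
  // Before: N -> U and N -> store.  After: N -> store -> U.
  Graph G = {{"N", {"U", "store"}}, {"store", {}}, {"U", {}}};
  rerouteThroughStore(G, "N", "store");
  for (const auto &Entry : G) {
    std::cout << Entry.first << " ->";
    for (const std::string &S : Entry.second)
      std::cout << ' ' << S;
    std::cout << '\n';
  }
  // Prints:  N -> store    U ->    store -> U
}
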
2883
2884/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2885/// it as a def&use operand, add a pseudo control edge from it to the other
2886/// node (if it won't create a cycle) so the two-address one will be scheduled
2887/// first (lower in the schedule). If both nodes are two-address, favor the
2888/// one that has a CopyToReg use (more likely to be a loop induction update).
2889/// If both are two-address, but one is commutable while the other is not
2890/// commutable, favor the one that's not commutable.
2891void RegReductionPQBase::AddPseudoTwoAddrDeps() {
2892 for (SUnit &SU : *SUnits) {
2893 if (!SU.isTwoAddress)
2894 continue;
2895
2896 SDNode *Node = SU.getNode();
2897 if (!Node || !Node->isMachineOpcode() || SU.getNode()->getGluedNode())
2898 continue;
2899
2900 bool isLiveOut = hasOnlyLiveOutUses(&SU);
2901 unsigned Opc = Node->getMachineOpcode();
2902 const MCInstrDesc &MCID = TII->get(Opc);
2903 unsigned NumRes = MCID.getNumDefs();
2904 unsigned NumOps = MCID.getNumOperands() - NumRes;
2905 for (unsigned j = 0; j != NumOps; ++j) {
2906 if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
2907 continue;
2908 SDNode *DU = SU.getNode()->getOperand(j).getNode();
2909 if (DU->getNodeId() == -1)
2910 continue;
2911 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
2912 if (!DUSU)
2913 continue;
2914 for (const SDep &Succ : DUSU->Succs) {
2915 if (Succ.isCtrl())
2916 continue;
2917 SUnit *SuccSU = Succ.getSUnit();
2918 if (SuccSU == &SU)
2919 continue;
2920 // Be conservative. Ignore if nodes aren't at roughly the same
2921 // depth and height.
2922 if (SuccSU->getHeight() < SU.getHeight() &&
2923 (SU.getHeight() - SuccSU->getHeight()) > 1)
2924 continue;
2925 // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
2926 // constrains whatever is using the copy, instead of the copy
2927 // itself. In the case that the copy is coalesced, this
2928        // preserves the intent of the pseudo two-address heuristics.
2929 while (SuccSU->Succs.size() == 1 &&
2930 SuccSU->getNode()->isMachineOpcode() &&
2931 SuccSU->getNode()->getMachineOpcode() ==
2932 TargetOpcode::COPY_TO_REGCLASS)
2933 SuccSU = SuccSU->Succs.front().getSUnit();
2934 // Don't constrain non-instruction nodes.
2935 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
2936 continue;
2937 // Don't constrain nodes with physical register defs if the
2938 // predecessor can clobber them.
2939 if (SuccSU->hasPhysRegDefs && SU.hasPhysRegClobbers) {
2940 if (canClobberPhysRegDefs(SuccSU, &SU, TII, TRI))
2941 continue;
2942 }
2943 // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
2944 // these may be coalesced away. We want them close to their uses.
2945 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
2946 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
2947 SuccOpc == TargetOpcode::INSERT_SUBREG ||
2948 SuccOpc == TargetOpcode::SUBREG_TO_REG)
2949 continue;
2950 if (!canClobberReachingPhysRegUse(SuccSU, &SU, scheduleDAG, TII, TRI) &&
2951 (!canClobber(SuccSU, DUSU) ||
2952 (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
2953 (!SU.isCommutable && SuccSU->isCommutable)) &&
2954 !scheduleDAG->IsReachable(SuccSU, &SU)) {
2955        DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
2956              << SU.NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
2957 scheduleDAG->AddPred(&SU, SDep(SuccSU, SDep::Artificial));
2958 }
2959 }
2960 }
2961 }
2962}
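
The pseudo edges added above hinge on the TIED_TO operand constraint that marks a node as two-address. Below is a minimal, self-contained C++ sketch of that query over an invented descriptor type; it is not LLVM code, and the ToyInstrDesc/ToyOperandInfo names and the sample operand layout are hypothetical stand-ins for what MCInstrDesc::getOperandConstraint(OpIdx, MCOI::TIED_TO) reports.

// Illustrative toy only -- not part of ScheduleDAGRRList.cpp. The descriptor
// types and the sample layout are invented for illustration.
#include <iostream>
#include <vector>

struct ToyOperandInfo {
  int TiedTo; // index of the def this use is tied to, or -1 if untied
};

struct ToyInstrDesc {
  unsigned NumDefs;
  std::vector<ToyOperandInfo> Operands; // defs first, then uses
};

int main() {
  // A two-address style instruction: one def, two uses, and use #0 must end
  // up in the same register as def #0 (TiedTo == 0).
  ToyInstrDesc AddLike{1, {{-1}, {0}, {-1}}};

  unsigned NumOps = AddLike.Operands.size() - AddLike.NumDefs;
  for (unsigned i = 0; i != NumOps; ++i) {
    int Tied = AddLike.Operands[AddLike.NumDefs + i].TiedTo;
    if (Tied != -1)
      std::cout << "use operand " << i << " is tied to def " << Tied
                << "; other readers of that value want to be scheduled "
                   "before this node\n";
  }
}
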
2963
2964//===----------------------------------------------------------------------===//
2965// Public Constructor Functions
2966//===----------------------------------------------------------------------===//
2967
2968llvm::ScheduleDAGSDNodes *
2969llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
2970 CodeGenOpt::Level OptLevel) {
2971 const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
2972 const TargetInstrInfo *TII = STI.getInstrInfo();
2973 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
2974
2975 BURegReductionPriorityQueue *PQ =
2976 new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
2977 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2978 PQ->setScheduleDAG(SD);
2979 return SD;
2980}
2981
2982llvm::ScheduleDAGSDNodes *
2983llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
2984 CodeGenOpt::Level OptLevel) {
2985 const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
2986 const TargetInstrInfo *TII = STI.getInstrInfo();
2987 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
2988
2989 SrcRegReductionPriorityQueue *PQ =
2990 new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
2991 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2992 PQ->setScheduleDAG(SD);
2993 return SD;
2994}
2995
2996llvm::ScheduleDAGSDNodes *
2997llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
2998 CodeGenOpt::Level OptLevel) {
2999 const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
3000 const TargetInstrInfo *TII = STI.getInstrInfo();
3001 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
3002 const TargetLowering *TLI = IS->TLI;
3003
3004 HybridBURRPriorityQueue *PQ =
3005 new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
3006
3007 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
3008 PQ->setScheduleDAG(SD);
3009 return SD;
3010}
3011
3012llvm::ScheduleDAGSDNodes *
3013llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
3014 CodeGenOpt::Level OptLevel) {
3015 const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
3016 const TargetInstrInfo *TII = STI.getInstrInfo();
3017 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
3018 const TargetLowering *TLI = IS->TLI;
3019
3020 ILPBURRPriorityQueue *PQ =
3021 new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
3022 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
3023 PQ->setScheduleDAG(SD);
3024 return SD;
3025}