File: llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
Warning: line 1177, column 5: Value stored to 'N' is never read
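
Note on the report: the dead store flagged above is the `N = SU->getNode();`
assignment inside CopyAndMoveSuccessors (annotated inline at source line 1177
below); the analyzer observes that `N` is never read again after that point.
A minimal sketch of one possible remedy (an assumption for illustration, not
necessarily the fix adopted upstream) would be to drop the assignment or make
the intent explicit:

    SU = UnfoldSU;
    // N is refreshed only so it stays in sync with SU for readers;
    // the cast silences the dead-store diagnostic.
    N = SU->getNode();
    (void)N;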
1 | //===- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler ------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This implements bottom-up and top-down register pressure reduction list |
10 | // schedulers, using standard algorithms. The basic approach uses a priority |
11 | // queue of available nodes to schedule. One at a time, nodes are taken from |
12 | // the priority queue (thus in priority order), checked for legality to |
13 | // schedule, and emitted if legal. |
14 | // |
15 | //===----------------------------------------------------------------------===// |
16 | |
17 | #include "ScheduleDAGSDNodes.h" |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/DenseMap.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/SmallSet.h" |
22 | #include "llvm/ADT/SmallVector.h" |
23 | #include "llvm/ADT/Statistic.h" |
24 | #include "llvm/CodeGen/ISDOpcodes.h" |
25 | #include "llvm/CodeGen/MachineFunction.h" |
26 | #include "llvm/CodeGen/MachineOperand.h" |
27 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
28 | #include "llvm/CodeGen/ScheduleDAG.h" |
29 | #include "llvm/CodeGen/ScheduleHazardRecognizer.h" |
30 | #include "llvm/CodeGen/SchedulerRegistry.h" |
31 | #include "llvm/CodeGen/SelectionDAGISel.h" |
32 | #include "llvm/CodeGen/SelectionDAGNodes.h" |
33 | #include "llvm/CodeGen/TargetInstrInfo.h" |
34 | #include "llvm/CodeGen/TargetLowering.h" |
35 | #include "llvm/CodeGen/TargetOpcodes.h" |
36 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
37 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
38 | #include "llvm/Config/llvm-config.h" |
39 | #include "llvm/IR/InlineAsm.h" |
40 | #include "llvm/MC/MCInstrDesc.h" |
41 | #include "llvm/MC/MCRegisterInfo.h" |
42 | #include "llvm/Support/Casting.h" |
43 | #include "llvm/Support/CodeGen.h" |
44 | #include "llvm/Support/CommandLine.h" |
45 | #include "llvm/Support/Compiler.h" |
46 | #include "llvm/Support/Debug.h" |
47 | #include "llvm/Support/ErrorHandling.h" |
48 | #include "llvm/Support/MachineValueType.h" |
49 | #include "llvm/Support/raw_ostream.h" |
50 | #include <algorithm> |
51 | #include <cassert> |
52 | #include <cstdint> |
53 | #include <cstdlib> |
54 | #include <iterator> |
55 | #include <limits> |
56 | #include <memory> |
57 | #include <utility> |
58 | #include <vector> |
59 | |
60 | using namespace llvm; |
61 | |
62 | #define DEBUG_TYPE "pre-RA-sched" |
63 | |
64 | STATISTIC(NumBacktracks, "Number of times scheduler backtracked"); |
65 | STATISTIC(NumUnfolds, "Number of nodes unfolded"); |
66 | STATISTIC(NumDups, "Number of duplicated nodes"); |
67 | STATISTIC(NumPRCopies, "Number of physical register copies"); |
68 | |
69 | static RegisterScheduler |
70 | burrListDAGScheduler("list-burr", |
71 | "Bottom-up register reduction list scheduling", |
72 | createBURRListDAGScheduler); |
73 | |
74 | static RegisterScheduler |
75 | sourceListDAGScheduler("source", |
76 | "Similar to list-burr but schedules in source " |
77 | "order when possible", |
78 | createSourceListDAGScheduler); |
79 | |
80 | static RegisterScheduler |
81 | hybridListDAGScheduler("list-hybrid", |
82 | "Bottom-up register pressure aware list scheduling " |
83 | "which tries to balance latency and register pressure", |
84 | createHybridListDAGScheduler); |
85 | |
86 | static RegisterScheduler |
87 | ILPListDAGScheduler("list-ilp", |
88 | "Bottom-up register pressure aware list scheduling " |
89 | "which tries to balance ILP and register pressure", |
90 | createILPListDAGScheduler); |
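
The scheduler names registered above are selectable through the pre-RA
scheduling flag. For example (illustrative invocation; the input file name is
hypothetical):

    llc -pre-RA-sched=list-burr input.ll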
91 | |
92 | static cl::opt<bool> DisableSchedCycles( |
93 | "disable-sched-cycles", cl::Hidden, cl::init(false), |
94 | cl::desc("Disable cycle-level precision during preRA scheduling")); |
95 | |
96 | // Temporary sched=list-ilp flags until the heuristics are robust. |
97 | // Some options are also available under sched=list-hybrid. |
98 | static cl::opt<bool> DisableSchedRegPressure( |
99 | "disable-sched-reg-pressure", cl::Hidden, cl::init(false), |
100 | cl::desc("Disable regpressure priority in sched=list-ilp")); |
101 | static cl::opt<bool> DisableSchedLiveUses( |
102 | "disable-sched-live-uses", cl::Hidden, cl::init(true), |
103 | cl::desc("Disable live use priority in sched=list-ilp")); |
104 | static cl::opt<bool> DisableSchedVRegCycle( |
105 | "disable-sched-vrcycle", cl::Hidden, cl::init(false), |
106 | cl::desc("Disable virtual register cycle interference checks")); |
107 | static cl::opt<bool> DisableSchedPhysRegJoin( |
108 | "disable-sched-physreg-join", cl::Hidden, cl::init(false), |
109 | cl::desc("Disable physreg def-use affinity")); |
110 | static cl::opt<bool> DisableSchedStalls( |
111 | "disable-sched-stalls", cl::Hidden, cl::init(true), |
112 | cl::desc("Disable no-stall priority in sched=list-ilp")); |
113 | static cl::opt<bool> DisableSchedCriticalPath( |
114 | "disable-sched-critical-path", cl::Hidden, cl::init(false), |
115 | cl::desc("Disable critical path priority in sched=list-ilp")); |
116 | static cl::opt<bool> DisableSchedHeight( |
117 | "disable-sched-height", cl::Hidden, cl::init(false), |
118 | cl::desc("Disable scheduled-height priority in sched=list-ilp")); |
119 | static cl::opt<bool> Disable2AddrHack( |
120 | "disable-2addr-hack", cl::Hidden, cl::init(true), |
121 | cl::desc("Disable scheduler's two-address hack")); |
122 | |
123 | static cl::opt<int> MaxReorderWindow( |
124 | "max-sched-reorder", cl::Hidden, cl::init(6), |
125 | cl::desc("Number of instructions to allow ahead of the critical path " |
126 | "in sched=list-ilp")); |
127 | |
128 | static cl::opt<unsigned> AvgIPC( |
129 | "sched-avg-ipc", cl::Hidden, cl::init(1), |
130 | cl::desc("Average inst/cycle whan no target itinerary exists.")); |
131 | |
132 | namespace { |
133 | |
134 | //===----------------------------------------------------------------------===// |
135 | /// ScheduleDAGRRList - The actual register reduction list scheduler |
136 | /// implementation. This supports both top-down and bottom-up scheduling. |
137 | /// |
138 | class ScheduleDAGRRList : public ScheduleDAGSDNodes { |
139 | private: |
140 | /// NeedLatency - True if the scheduler will make use of latency information. |
141 | bool NeedLatency; |
142 | |
143 | /// AvailableQueue - The priority queue to use for the available SUnits. |
144 | SchedulingPriorityQueue *AvailableQueue; |
145 | |
146 | /// PendingQueue - This contains all of the instructions whose operands have |
147 | /// been issued, but their results are not ready yet (due to the latency of |
148 | /// the operation). Once the operands become available, the instruction is |
149 | /// added to the AvailableQueue. |
150 | std::vector<SUnit *> PendingQueue; |
151 | |
152 | /// HazardRec - The hazard recognizer to use. |
153 | ScheduleHazardRecognizer *HazardRec; |
154 | |
155 | /// CurCycle - The current scheduler state corresponds to this cycle. |
156 | unsigned CurCycle = 0; |
157 | |
158 | /// MinAvailableCycle - Cycle of the soonest available instruction. |
159 | unsigned MinAvailableCycle; |
160 | |
161 | /// IssueCount - Count of instructions issued in this cycle. |
162 | /// Currently valid only for bottom-up scheduling. |
163 | unsigned IssueCount; |
164 | |
165 | /// LiveRegDefs - A set of physical registers and their definitions |
166 | /// that are "live". These nodes must be scheduled before any other nodes |
167 | /// that modify the registers can be scheduled. |
168 | unsigned NumLiveRegs; |
169 | std::unique_ptr<SUnit*[]> LiveRegDefs; |
170 | std::unique_ptr<SUnit*[]> LiveRegGens; |
171 | |
172 | // Collect interferences between physical register use/defs. |
173 | // Each interference is an SUnit and a set of physical registers. |
174 | SmallVector<SUnit*, 4> Interferences; |
175 | |
176 | using LRegsMapT = DenseMap<SUnit *, SmallVector<unsigned, 4>>; |
177 | |
178 | LRegsMapT LRegsMap; |
179 | |
180 | /// Topo - A topological ordering for SUnits which permits fast IsReachable |
181 | /// and similar queries. |
182 | ScheduleDAGTopologicalSort Topo; |
183 | |
184 | // Hack to keep track of the inverse of FindCallSeqStart without more crazy |
185 | // DAG crawling. |
186 | DenseMap<SUnit*, SUnit*> CallSeqEndForStart; |
187 | |
188 | public: |
189 | ScheduleDAGRRList(MachineFunction &mf, bool needlatency, |
190 | SchedulingPriorityQueue *availqueue, |
191 | CodeGenOpt::Level OptLevel) |
192 | : ScheduleDAGSDNodes(mf), |
193 | NeedLatency(needlatency), AvailableQueue(availqueue), |
194 | Topo(SUnits, nullptr) { |
195 | const TargetSubtargetInfo &STI = mf.getSubtarget(); |
196 | if (DisableSchedCycles || !NeedLatency) |
197 | HazardRec = new ScheduleHazardRecognizer(); |
198 | else |
199 | HazardRec = STI.getInstrInfo()->CreateTargetHazardRecognizer(&STI, this); |
200 | } |
201 | |
202 | ~ScheduleDAGRRList() override { |
203 | delete HazardRec; |
204 | delete AvailableQueue; |
205 | } |
206 | |
207 | void Schedule() override; |
208 | |
209 | ScheduleHazardRecognizer *getHazardRec() { return HazardRec; } |
210 | |
211 | /// IsReachable - Checks if SU is reachable from TargetSU. |
212 | bool IsReachable(const SUnit *SU, const SUnit *TargetSU) { |
213 | return Topo.IsReachable(SU, TargetSU); |
214 | } |
215 | |
216 | /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will |
217 | /// create a cycle. |
218 | bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) { |
219 | return Topo.WillCreateCycle(SU, TargetSU); |
220 | } |
221 | |
222 | /// AddPredQueued - Queues an update to add a predecessor edge to SUnit |
223 | /// SU. Does *NOT* update the topological ordering! It just queues an |
224 | /// update. |
225 | void AddPredQueued(SUnit *SU, const SDep &D) { |
226 | Topo.AddPredQueued(SU, D.getSUnit()); |
227 | SU->addPred(D); |
228 | } |
229 | |
230 | /// AddPred - Adds a predecessor edge to SUnit SU. |
231 | /// Updates the topological ordering immediately if required, unlike |
232 | /// AddPredQueued, which defers the topology update. |
233 | void AddPred(SUnit *SU, const SDep &D) { |
234 | Topo.AddPred(SU, D.getSUnit()); |
235 | SU->addPred(D); |
236 | } |
237 | |
238 | /// RemovePred - Removes a predecessor edge from SUnit SU. |
239 | /// Updates the topological ordering if required. The edge to remove is |
240 | /// identified by the dependence descriptor D. |
241 | void RemovePred(SUnit *SU, const SDep &D) { |
242 | Topo.RemovePred(SU, D.getSUnit()); |
243 | SU->removePred(D); |
244 | } |
245 | |
246 | private: |
247 | bool isReady(SUnit *SU) { |
248 | return DisableSchedCycles || !AvailableQueue->hasReadyFilter() || |
249 | AvailableQueue->isReady(SU); |
250 | } |
251 | |
252 | void ReleasePred(SUnit *SU, const SDep *PredEdge); |
253 | void ReleasePredecessors(SUnit *SU); |
254 | void ReleasePending(); |
255 | void AdvanceToCycle(unsigned NextCycle); |
256 | void AdvancePastStalls(SUnit *SU); |
257 | void EmitNode(SUnit *SU); |
258 | void ScheduleNodeBottomUp(SUnit*); |
259 | void CapturePred(SDep *PredEdge); |
260 | void UnscheduleNodeBottomUp(SUnit*); |
261 | void RestoreHazardCheckerBottomUp(); |
262 | void BacktrackBottomUp(SUnit*, SUnit*); |
263 | SUnit *TryUnfoldSU(SUnit *); |
264 | SUnit *CopyAndMoveSuccessors(SUnit*); |
265 | void InsertCopiesAndMoveSuccs(SUnit*, unsigned, |
266 | const TargetRegisterClass*, |
267 | const TargetRegisterClass*, |
268 | SmallVectorImpl<SUnit*>&); |
269 | bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&); |
270 | |
271 | void releaseInterferences(unsigned Reg = 0); |
272 | |
273 | SUnit *PickNodeToScheduleBottomUp(); |
274 | void ListScheduleBottomUp(); |
275 | |
276 | /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it. |
277 | SUnit *CreateNewSUnit(SDNode *N) { |
278 | unsigned NumSUnits = SUnits.size(); |
279 | SUnit *NewNode = newSUnit(N); |
280 | // Update the topological ordering. |
281 | if (NewNode->NodeNum >= NumSUnits) |
282 | Topo.AddSUnitWithoutPredecessors(NewNode); |
283 | return NewNode; |
284 | } |
285 | |
286 | /// CreateClone - Creates a new SUnit from an existing one. |
287 | SUnit *CreateClone(SUnit *N) { |
288 | unsigned NumSUnits = SUnits.size(); |
289 | SUnit *NewNode = Clone(N); |
290 | // Update the topological ordering. |
291 | if (NewNode->NodeNum >= NumSUnits) |
292 | Topo.AddSUnitWithoutPredecessors(NewNode); |
293 | return NewNode; |
294 | } |
295 | |
296 | /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't |
297 | /// need actual latency information but the hybrid scheduler does. |
298 | bool forceUnitLatencies() const override { |
299 | return !NeedLatency; |
300 | } |
301 | }; |
302 | |
303 | } // end anonymous namespace |
304 | |
305 | /// GetCostForDef - Looks up the register class and cost for a given definition. |
306 | /// Typically this just means looking up the representative register class, |
307 | /// but for untyped values (MVT::Untyped) it means inspecting the node's |
308 | /// opcode to determine what register class is being generated. |
309 | static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos, |
310 | const TargetLowering *TLI, |
311 | const TargetInstrInfo *TII, |
312 | const TargetRegisterInfo *TRI, |
313 | unsigned &RegClass, unsigned &Cost, |
314 | const MachineFunction &MF) { |
315 | MVT VT = RegDefPos.GetValue(); |
316 | |
317 | // Special handling for untyped values. These values can only come from |
318 | // the expansion of custom DAG-to-DAG patterns. |
319 | if (VT == MVT::Untyped) { |
320 | const SDNode *Node = RegDefPos.GetNode(); |
321 | |
322 | // Special handling for CopyFromReg of untyped values. |
323 | if (!Node->isMachineOpcode() && Node->getOpcode() == ISD::CopyFromReg) { |
324 | unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg(); |
325 | const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg); |
326 | RegClass = RC->getID(); |
327 | Cost = 1; |
328 | return; |
329 | } |
330 | |
331 | unsigned Opcode = Node->getMachineOpcode(); |
332 | if (Opcode == TargetOpcode::REG_SEQUENCE) { |
333 | unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue(); |
334 | const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx); |
335 | RegClass = RC->getID(); |
336 | Cost = 1; |
337 | return; |
338 | } |
339 | |
340 | unsigned Idx = RegDefPos.GetIdx(); |
341 | const MCInstrDesc Desc = TII->get(Opcode); |
342 | const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF); |
343 | RegClass = RC->getID(); |
344 | // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a |
345 | // better way to determine it. |
346 | Cost = 1; |
347 | } else { |
348 | RegClass = TLI->getRepRegClassFor(VT)->getID(); |
349 | Cost = TLI->getRepRegClassCostFor(VT); |
350 | } |
351 | } |
352 | |
353 | /// Schedule - Schedule the DAG using list scheduling. |
354 | void ScheduleDAGRRList::Schedule() { |
355 | LLVM_DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB) |
356 | << " '" << BB->getName() << "' **********\n"); |
357 | |
358 | CurCycle = 0; |
359 | IssueCount = 0; |
360 | MinAvailableCycle = |
361 | DisableSchedCycles ? 0 : std::numeric_limits<unsigned>::max(); |
362 | NumLiveRegs = 0; |
363 | // Allocate slots for each physical register, plus one for a special register |
364 | // to track the virtual resource of a calling sequence. |
365 | LiveRegDefs.reset(new SUnit*[TRI->getNumRegs() + 1]()); |
366 | LiveRegGens.reset(new SUnit*[TRI->getNumRegs() + 1]()); |
367 | CallSeqEndForStart.clear(); |
368 | assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences"); |
369 | |
370 | // Build the scheduling graph. |
371 | BuildSchedGraph(nullptr); |
372 | |
373 | LLVM_DEBUG(dump()); |
374 | Topo.MarkDirty(); |
375 | |
376 | AvailableQueue->initNodes(SUnits); |
377 | |
378 | HazardRec->Reset(); |
379 | |
380 | // Execute the actual scheduling loop. |
381 | ListScheduleBottomUp(); |
382 | |
383 | AvailableQueue->releaseState(); |
384 | |
385 | LLVM_DEBUG({ |
386 | dbgs() << "*** Final schedule ***\n"; |
387 | dumpSchedule(); |
388 | dbgs() << '\n'; |
389 | }); |
390 | } |
391 | |
392 | //===----------------------------------------------------------------------===// |
393 | // Bottom-Up Scheduling |
394 | //===----------------------------------------------------------------------===// |
395 | |
396 | /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to |
397 | /// the AvailableQueue if the count reaches zero. Also update its cycle bound. |
398 | void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) { |
399 | SUnit *PredSU = PredEdge->getSUnit(); |
400 | |
401 | #ifndef NDEBUG |
402 | if (PredSU->NumSuccsLeft == 0) { |
403 | dbgs() << "*** Scheduling failed! ***\n"; |
404 | dumpNode(*PredSU); |
405 | dbgs() << " has been released too many times!\n"; |
406 | llvm_unreachable(nullptr); |
407 | } |
408 | #endif |
409 | --PredSU->NumSuccsLeft; |
410 | |
411 | if (!forceUnitLatencies()) { |
412 | // Updating predecessor's height. This is now the cycle when the |
413 | // predecessor can be scheduled without causing a pipeline stall. |
414 | PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency()); |
415 | } |
416 | |
417 | // If all the node's successors are scheduled, this node is ready |
418 | // to be scheduled. Ignore the special EntrySU node. |
419 | if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) { |
420 | PredSU->isAvailable = true; |
421 | |
422 | unsigned Height = PredSU->getHeight(); |
423 | if (Height < MinAvailableCycle) |
424 | MinAvailableCycle = Height; |
425 | |
426 | if (isReady(PredSU)) { |
427 | AvailableQueue->push(PredSU); |
428 | } |
429 | // CapturePred and others may have left the node in the pending queue; avoid |
430 | // adding it twice. |
431 | else if (!PredSU->isPending) { |
432 | PredSU->isPending = true; |
433 | PendingQueue.push_back(PredSU); |
434 | } |
435 | } |
436 | } |
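
For intuition (an illustration, not from the source): bottom-up scheduling
emits the block in reverse, so scheduling a node "releases" the nodes that
feed it. Given

    a = ...
    b = use(a)

scheduling b decrements NumSuccsLeft on a's SUnit; once the count reaches
zero, a has no unscheduled users left and becomes available (or pending, if
its latency has not yet elapsed).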
437 | |
438 | /// IsChainDependent - Test if Outer is reachable from Inner through |
439 | /// chain dependencies. |
440 | static bool IsChainDependent(SDNode *Outer, SDNode *Inner, |
441 | unsigned NestLevel, |
442 | const TargetInstrInfo *TII) { |
443 | SDNode *N = Outer; |
444 | while (true) { |
445 | if (N == Inner) |
446 | return true; |
447 | // For a TokenFactor, examine each operand. There may be multiple ways |
448 | // to get to the CALLSEQ_BEGIN, but we need to find the path with the |
449 | // most nesting in order to ensure that we find the corresponding match. |
450 | if (N->getOpcode() == ISD::TokenFactor) { |
451 | for (const SDValue &Op : N->op_values()) |
452 | if (IsChainDependent(Op.getNode(), Inner, NestLevel, TII)) |
453 | return true; |
454 | return false; |
455 | } |
456 | // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END. |
457 | if (N->isMachineOpcode()) { |
458 | if (N->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) { |
459 | ++NestLevel; |
460 | } else if (N->getMachineOpcode() == TII->getCallFrameSetupOpcode()) { |
461 | if (NestLevel == 0) |
462 | return false; |
463 | --NestLevel; |
464 | } |
465 | } |
466 | // Otherwise, find the chain and continue climbing. |
467 | for (const SDValue &Op : N->op_values()) |
468 | if (Op.getValueType() == MVT::Other) { |
469 | N = Op.getNode(); |
470 | goto found_chain_operand; |
471 | } |
472 | return false; |
473 | found_chain_operand:; |
474 | if (N->getOpcode() == ISD::EntryToken) |
475 | return false; |
476 | } |
477 | } |
478 | |
479 | /// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate |
480 | /// the corresponding (lowered) CALLSEQ_BEGIN node. |
481 | /// |
482 | /// NestLevel and MaxNest are used in recursion to indicate the current level |
483 | /// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum |
484 | /// level seen so far. |
485 | /// |
486 | /// TODO: It would be better to give CALLSEQ_END an explicit operand to point |
487 | /// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it. |
488 | static SDNode * |
489 | FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest, |
490 | const TargetInstrInfo *TII) { |
491 | while (true) { |
492 | // For a TokenFactor, examine each operand. There may be multiple ways |
493 | // to get to the CALLSEQ_BEGIN, but we need to find the path with the |
494 | // most nesting in order to ensure that we find the corresponding match. |
495 | if (N->getOpcode() == ISD::TokenFactor) { |
496 | SDNode *Best = nullptr; |
497 | unsigned BestMaxNest = MaxNest; |
498 | for (const SDValue &Op : N->op_values()) { |
499 | unsigned MyNestLevel = NestLevel; |
500 | unsigned MyMaxNest = MaxNest; |
501 | if (SDNode *New = FindCallSeqStart(Op.getNode(), |
502 | MyNestLevel, MyMaxNest, TII)) |
503 | if (!Best || (MyMaxNest > BestMaxNest)) { |
504 | Best = New; |
505 | BestMaxNest = MyMaxNest; |
506 | } |
507 | } |
508 | assert(Best); |
509 | MaxNest = BestMaxNest; |
510 | return Best; |
511 | } |
512 | // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END. |
513 | if (N->isMachineOpcode()) { |
514 | if (N->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) { |
515 | ++NestLevel; |
516 | MaxNest = std::max(MaxNest, NestLevel); |
517 | } else if (N->getMachineOpcode() == TII->getCallFrameSetupOpcode()) { |
518 | assert(NestLevel != 0); |
519 | --NestLevel; |
520 | if (NestLevel == 0) |
521 | return N; |
522 | } |
523 | } |
524 | // Otherwise, find the chain and continue climbing. |
525 | for (const SDValue &Op : N->op_values()) |
526 | if (Op.getValueType() == MVT::Other) { |
527 | N = Op.getNode(); |
528 | goto found_chain_operand; |
529 | } |
530 | return nullptr; |
531 | found_chain_operand:; |
532 | if (N->getOpcode() == ISD::EntryToken) |
533 | return nullptr; |
534 | } |
535 | } |
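
A worked illustration of the nesting bookkeeping (the shapes are assumed for
illustration): climbing backwards from the outer CALLSEQ_END through

    CALLSEQ_BEGIN (outer)
      CALLSEQ_BEGIN (inner)
      CALLSEQ_END   (inner)
    CALLSEQ_END   (outer)   <- search starts here

NestLevel goes 0 -> 1 at the outer END, 1 -> 2 at the inner END (so MaxNest
becomes 2), back to 1 at the inner BEGIN, and to 0 at the outer BEGIN, which
is returned as the matching CALLSEQ_BEGIN.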
536 | |
537 | /// Call ReleasePred for each predecessor, then update register live def/gen. |
538 | /// Always update LiveRegDefs for a register dependence even if the current SU |
539 | /// also defines the register. This effectively creates one large live range |
540 | /// across a sequence of two-address nodes. This is important because the |
541 | /// entire chain must be scheduled together. Example: |
542 | /// |
543 | /// flags = (3) add |
544 | /// flags = (2) addc flags |
545 | /// flags = (1) addc flags |
546 | /// |
547 | /// results in |
548 | /// |
549 | /// LiveRegDefs[flags] = 3 |
550 | /// LiveRegGens[flags] = 1 |
551 | /// |
552 | /// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid |
553 | /// interference on flags. |
554 | void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) { |
555 | // Bottom up: release predecessors |
556 | for (SDep &Pred : SU->Preds) { |
557 | ReleasePred(SU, &Pred); |
558 | if (Pred.isAssignedRegDep()) { |
559 | // This is a physical register dependency and it's impossible or |
560 | // expensive to copy the register. Make sure nothing that can |
561 | // clobber the register is scheduled between the predecessor and |
562 | // this node. |
563 | SUnit *RegDef = LiveRegDefs[Pred.getReg()]; (void)RegDef; |
564 | assert((!RegDef || RegDef == SU || RegDef == Pred.getSUnit()) && |
565 | "interference on register dependence"); |
566 | LiveRegDefs[Pred.getReg()] = Pred.getSUnit(); |
567 | if (!LiveRegGens[Pred.getReg()]) { |
568 | ++NumLiveRegs; |
569 | LiveRegGens[Pred.getReg()] = SU; |
570 | } |
571 | } |
572 | } |
573 | |
574 | // If we're scheduling a lowered CALLSEQ_END, find the corresponding |
575 | // CALLSEQ_BEGIN. Inject an artificial physical register dependence between |
576 | // these nodes, to prevent other calls from being interscheduled with them. |
577 | unsigned CallResource = TRI->getNumRegs(); |
578 | if (!LiveRegDefs[CallResource]) |
579 | for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) |
580 | if (Node->isMachineOpcode() && |
581 | Node->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) { |
582 | unsigned NestLevel = 0; |
583 | unsigned MaxNest = 0; |
584 | SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII); |
585 | assert(N && "Must find call sequence start")(static_cast <bool> (N && "Must find call sequence start" ) ? void (0) : __assert_fail ("N && \"Must find call sequence start\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp" , 585, __extension__ __PRETTY_FUNCTION__)); |
586 | |
587 | SUnit *Def = &SUnits[N->getNodeId()]; |
588 | CallSeqEndForStart[Def] = SU; |
589 | |
590 | ++NumLiveRegs; |
591 | LiveRegDefs[CallResource] = Def; |
592 | LiveRegGens[CallResource] = SU; |
593 | break; |
594 | } |
595 | } |
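
In effect, the extra slot allocated in Schedule() (index TRI->getNumRegs())
models the in-flight call sequence as one more "physical register":

    LiveRegDefs[TRI->getNumRegs()] = SUnit of the CALLSEQ_BEGIN
    LiveRegGens[TRI->getNumRegs()] = SUnit of the CALLSEQ_END

Any other call sequence would have to redefine the same resource, so it
cannot be scheduled between the two.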
596 | |
597 | /// Check to see if any of the pending instructions are ready to issue. If |
598 | /// so, add them to the available queue. |
599 | void ScheduleDAGRRList::ReleasePending() { |
600 | if (DisableSchedCycles) { |
601 | assert(PendingQueue.empty() && "pending instrs not allowed in this mode"); |
602 | return; |
603 | } |
604 | |
605 | // If the available queue is empty, it is safe to reset MinAvailableCycle. |
606 | if (AvailableQueue->empty()) |
607 | MinAvailableCycle = std::numeric_limits<unsigned>::max(); |
608 | |
609 | // Check to see if any of the pending instructions are ready to issue. If |
610 | // so, add them to the available queue. |
611 | for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) { |
612 | unsigned ReadyCycle = PendingQueue[i]->getHeight(); |
613 | if (ReadyCycle < MinAvailableCycle) |
614 | MinAvailableCycle = ReadyCycle; |
615 | |
616 | if (PendingQueue[i]->isAvailable) { |
617 | if (!isReady(PendingQueue[i])) |
618 | continue; |
619 | AvailableQueue->push(PendingQueue[i]); |
620 | } |
621 | PendingQueue[i]->isPending = false; |
622 | PendingQueue[i] = PendingQueue.back(); |
623 | PendingQueue.pop_back(); |
624 | --i; --e; |
625 | } |
626 | } |
627 | |
628 | /// Move the scheduler state forward to the specified cycle. |
629 | void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) { |
630 | if (NextCycle <= CurCycle) |
631 | return; |
632 | |
633 | IssueCount = 0; |
634 | AvailableQueue->setCurCycle(NextCycle); |
635 | if (!HazardRec->isEnabled()) { |
636 | // Bypass lots of virtual calls in case of long latency. |
637 | CurCycle = NextCycle; |
638 | } |
639 | else { |
640 | for (; CurCycle != NextCycle; ++CurCycle) { |
641 | HazardRec->RecedeCycle(); |
642 | } |
643 | } |
644 | // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the |
645 | // available Q to release pending nodes at least once before popping. |
646 | ReleasePending(); |
647 | } |
648 | |
649 | /// Move the scheduler state forward until the specified node's dependents are |
650 | /// ready and can be scheduled with no resource conflicts. |
651 | void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) { |
652 | if (DisableSchedCycles) |
653 | return; |
654 | |
655 | // FIXME: Nodes such as CopyFromReg probably should not advance the current |
656 | // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node |
657 | // has predecessors the cycle will be advanced when they are scheduled. |
658 | // But given the crude nature of modeling latency though such nodes, we |
659 | // currently need to treat these nodes like real instructions. |
660 | // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return; |
661 | |
662 | unsigned ReadyCycle = SU->getHeight(); |
663 | |
664 | // Bump CurCycle to account for latency. We assume the latency of other |
665 | // available instructions may be hidden by the stall (not a full pipe stall). |
666 | // This updates the hazard recognizer's cycle before reserving resources for |
667 | // this instruction. |
668 | AdvanceToCycle(ReadyCycle); |
669 | |
670 | // Calls are scheduled in their preceding cycle, so don't conflict with |
671 | // hazards from instructions after the call. EmitNode will reset the |
672 | // scoreboard state before emitting the call. |
673 | if (SU->isCall) |
674 | return; |
675 | |
676 | // FIXME: For resource conflicts in very long non-pipelined stages, we |
677 | // should probably skip ahead here to avoid useless scoreboard checks. |
678 | int Stalls = 0; |
679 | while (true) { |
680 | ScheduleHazardRecognizer::HazardType HT = |
681 | HazardRec->getHazardType(SU, -Stalls); |
682 | |
683 | if (HT == ScheduleHazardRecognizer::NoHazard) |
684 | break; |
685 | |
686 | ++Stalls; |
687 | } |
688 | AdvanceToCycle(CurCycle + Stalls); |
689 | } |
690 | |
691 | /// Record this SUnit in the HazardRecognizer. |
692 | /// Does not update CurCycle. |
693 | void ScheduleDAGRRList::EmitNode(SUnit *SU) { |
694 | if (!HazardRec->isEnabled()) |
695 | return; |
696 | |
697 | // Check for phys reg copy. |
698 | if (!SU->getNode()) |
699 | return; |
700 | |
701 | switch (SU->getNode()->getOpcode()) { |
702 | default: |
703 | assert(SU->getNode()->isMachineOpcode() &&(static_cast <bool> (SU->getNode()->isMachineOpcode () && "This target-independent node should not be scheduled." ) ? void (0) : __assert_fail ("SU->getNode()->isMachineOpcode() && \"This target-independent node should not be scheduled.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp" , 704, __extension__ __PRETTY_FUNCTION__)) |
704 | "This target-independent node should not be scheduled.")(static_cast <bool> (SU->getNode()->isMachineOpcode () && "This target-independent node should not be scheduled." ) ? void (0) : __assert_fail ("SU->getNode()->isMachineOpcode() && \"This target-independent node should not be scheduled.\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp" , 704, __extension__ __PRETTY_FUNCTION__)); |
705 | break; |
706 | case ISD::MERGE_VALUES: |
707 | case ISD::TokenFactor: |
708 | case ISD::LIFETIME_START: |
709 | case ISD::LIFETIME_END: |
710 | case ISD::CopyToReg: |
711 | case ISD::CopyFromReg: |
712 | case ISD::EH_LABEL: |
713 | // Noops don't affect the scoreboard state. Copies are likely to be |
714 | // removed. |
715 | return; |
716 | case ISD::INLINEASM: |
717 | case ISD::INLINEASM_BR: |
718 | // For inline asm, clear the pipeline state. |
719 | HazardRec->Reset(); |
720 | return; |
721 | } |
722 | if (SU->isCall) { |
723 | // Calls are scheduled with their preceding instructions. For bottom-up |
724 | // scheduling, clear the pipeline state before emitting. |
725 | HazardRec->Reset(); |
726 | } |
727 | |
728 | HazardRec->EmitInstruction(SU); |
729 | } |
730 | |
731 | static void resetVRegCycle(SUnit *SU); |
732 | |
733 | /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending |
734 | /// count of its predecessors. If a predecessor pending count is zero, add it to |
735 | /// the Available queue. |
736 | void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) { |
737 | LLVM_DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: "); |
738 | LLVM_DEBUG(dumpNode(*SU)); |
739 | |
740 | #ifndef NDEBUG |
741 | if (CurCycle < SU->getHeight()) |
742 | LLVM_DEBUG(dbgs() << " Height [" << SU->getHeight() |
743 | << "] pipeline stall!\n"); |
744 | #endif |
745 | |
746 | // FIXME: Do not modify node height. It may interfere with |
747 | // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the |
748 | // node its ready cycle can aid heuristics, and after scheduling it can |
749 | // indicate the scheduled cycle. |
750 | SU->setHeightToAtLeast(CurCycle); |
751 | |
752 | // Reserve resources for the scheduled instruction. |
753 | EmitNode(SU); |
754 | |
755 | Sequence.push_back(SU); |
756 | |
757 | AvailableQueue->scheduledNode(SU); |
758 | |
759 | // If HazardRec is disabled, and each inst counts as one cycle, then |
760 | // advance CurCycle before ReleasePredecessors to avoid useless pushes to |
761 | // PendingQueue for schedulers that implement HasReadyFilter. |
762 | if (!HazardRec->isEnabled() && AvgIPC < 2) |
763 | AdvanceToCycle(CurCycle + 1); |
764 | |
765 | // Update liveness of predecessors before successors to avoid treating a |
766 | // two-address node as a live range def. |
767 | ReleasePredecessors(SU); |
768 | |
769 | // Release all the implicit physical register defs that are live. |
770 | for (SDep &Succ : SU->Succs) { |
771 | // LiveRegDefs[Succ.getReg()] != SU when SU is a two-address node. |
772 | if (Succ.isAssignedRegDep() && LiveRegDefs[Succ.getReg()] == SU) { |
773 | assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!"); |
774 | --NumLiveRegs; |
775 | LiveRegDefs[Succ.getReg()] = nullptr; |
776 | LiveRegGens[Succ.getReg()] = nullptr; |
777 | releaseInterferences(Succ.getReg()); |
778 | } |
779 | } |
780 | // Release the special call resource dependence, if this is the beginning |
781 | // of a call. |
782 | unsigned CallResource = TRI->getNumRegs(); |
783 | if (LiveRegDefs[CallResource] == SU) |
784 | for (const SDNode *SUNode = SU->getNode(); SUNode; |
785 | SUNode = SUNode->getGluedNode()) { |
786 | if (SUNode->isMachineOpcode() && |
787 | SUNode->getMachineOpcode() == TII->getCallFrameSetupOpcode()) { |
788 | assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!"); |
789 | --NumLiveRegs; |
790 | LiveRegDefs[CallResource] = nullptr; |
791 | LiveRegGens[CallResource] = nullptr; |
792 | releaseInterferences(CallResource); |
793 | } |
794 | } |
795 | |
796 | resetVRegCycle(SU); |
797 | |
798 | SU->isScheduled = true; |
799 | |
800 | // Conditions under which the scheduler should eagerly advance the cycle: |
801 | // (1) No available instructions |
802 | // (2) All pipelines full, so available instructions must have hazards. |
803 | // |
804 | // If HazardRec is disabled, the cycle was pre-advanced before calling |
805 | // ReleasePredecessors. In that case, IssueCount should remain 0. |
806 | // |
807 | // Check AvailableQueue after ReleasePredecessors in case of zero latency. |
808 | if (HazardRec->isEnabled() || AvgIPC > 1) { |
809 | if (SU->getNode() && SU->getNode()->isMachineOpcode()) |
810 | ++IssueCount; |
811 | if ((HazardRec->isEnabled() && HazardRec->atIssueLimit()) |
812 | || (!HazardRec->isEnabled() && IssueCount == AvgIPC)) |
813 | AdvanceToCycle(CurCycle + 1); |
814 | } |
815 | } |
816 | |
817 | /// CapturePred - This does the opposite of ReleasePred. Since SU is being |
818 | /// unscheduled, increase the succ left count of its predecessors. Remove |
819 | /// them from AvailableQueue if necessary. |
820 | void ScheduleDAGRRList::CapturePred(SDep *PredEdge) { |
821 | SUnit *PredSU = PredEdge->getSUnit(); |
822 | if (PredSU->isAvailable) { |
823 | PredSU->isAvailable = false; |
824 | if (!PredSU->isPending) |
825 | AvailableQueue->remove(PredSU); |
826 | } |
827 | |
828 | assert(PredSU->NumSuccsLeft < std::numeric_limits<unsigned>::max() &&(static_cast <bool> (PredSU->NumSuccsLeft < std:: numeric_limits<unsigned>::max() && "NumSuccsLeft will overflow!" ) ? void (0) : __assert_fail ("PredSU->NumSuccsLeft < std::numeric_limits<unsigned>::max() && \"NumSuccsLeft will overflow!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp" , 829, __extension__ __PRETTY_FUNCTION__)) |
829 | "NumSuccsLeft will overflow!")(static_cast <bool> (PredSU->NumSuccsLeft < std:: numeric_limits<unsigned>::max() && "NumSuccsLeft will overflow!" ) ? void (0) : __assert_fail ("PredSU->NumSuccsLeft < std::numeric_limits<unsigned>::max() && \"NumSuccsLeft will overflow!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp" , 829, __extension__ __PRETTY_FUNCTION__)); |
830 | ++PredSU->NumSuccsLeft; |
831 | } |
832 | |
833 | /// UnscheduleNodeBottomUp - Remove the node from the schedule, and update |
834 | /// its predecessors' states to reflect the change. |
835 | void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) { |
836 | LLVM_DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: "); |
837 | LLVM_DEBUG(dumpNode(*SU)); |
838 | |
839 | for (SDep &Pred : SU->Preds) { |
840 | CapturePred(&Pred); |
841 | if (Pred.isAssignedRegDep() && SU == LiveRegGens[Pred.getReg()]) { |
842 | assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!"); |
843 | assert(LiveRegDefs[Pred.getReg()] == Pred.getSUnit() && |
844 | "Physical register dependency violated?"); |
845 | --NumLiveRegs; |
846 | LiveRegDefs[Pred.getReg()] = nullptr; |
847 | LiveRegGens[Pred.getReg()] = nullptr; |
848 | releaseInterferences(Pred.getReg()); |
849 | } |
850 | } |
851 | |
852 | // Reclaim the special call resource dependence, if this is the beginning |
853 | // of a call. |
854 | unsigned CallResource = TRI->getNumRegs(); |
855 | for (const SDNode *SUNode = SU->getNode(); SUNode; |
856 | SUNode = SUNode->getGluedNode()) { |
857 | if (SUNode->isMachineOpcode() && |
858 | SUNode->getMachineOpcode() == TII->getCallFrameSetupOpcode()) { |
859 | SUnit *SeqEnd = CallSeqEndForStart[SU]; |
860 | assert(SeqEnd && "Call sequence start/end must be known"); |
861 | assert(!LiveRegDefs[CallResource]); |
862 | assert(!LiveRegGens[CallResource]); |
863 | ++NumLiveRegs; |
864 | LiveRegDefs[CallResource] = SU; |
865 | LiveRegGens[CallResource] = SeqEnd; |
866 | } |
867 | } |
868 | |
869 | // Release the special call resource dependence, if this is the end |
870 | // of a call. |
871 | if (LiveRegGens[CallResource] == SU) |
872 | for (const SDNode *SUNode = SU->getNode(); SUNode; |
873 | SUNode = SUNode->getGluedNode()) { |
874 | if (SUNode->isMachineOpcode() && |
875 | SUNode->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) { |
876 | assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!"); |
877 | assert(LiveRegDefs[CallResource]); |
878 | assert(LiveRegGens[CallResource]); |
879 | --NumLiveRegs; |
880 | LiveRegDefs[CallResource] = nullptr; |
881 | LiveRegGens[CallResource] = nullptr; |
882 | releaseInterferences(CallResource); |
883 | } |
884 | } |
885 | |
886 | for (auto &Succ : SU->Succs) { |
887 | if (Succ.isAssignedRegDep()) { |
888 | auto Reg = Succ.getReg(); |
889 | if (!LiveRegDefs[Reg]) |
890 | ++NumLiveRegs; |
891 | // This becomes the nearest def. Note that an earlier def may still be |
892 | // pending if this is a two-address node. |
893 | LiveRegDefs[Reg] = SU; |
894 | |
895 | // Update LiveRegGens only if it was empty before this unscheduling. |
896 | // This avoids incorrectly updating an entry set in a previous run. |
897 | if (!LiveRegGens[Reg]) { |
898 | // Find the successor with the lowest height. |
899 | LiveRegGens[Reg] = Succ.getSUnit(); |
900 | for (auto &Succ2 : SU->Succs) { |
901 | if (Succ2.isAssignedRegDep() && Succ2.getReg() == Reg && |
902 | Succ2.getSUnit()->getHeight() < LiveRegGens[Reg]->getHeight()) |
903 | LiveRegGens[Reg] = Succ2.getSUnit(); |
904 | } |
905 | } |
906 | } |
907 | } |
908 | if (SU->getHeight() < MinAvailableCycle) |
909 | MinAvailableCycle = SU->getHeight(); |
910 | |
911 | SU->setHeightDirty(); |
912 | SU->isScheduled = false; |
913 | SU->isAvailable = true; |
914 | if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) { |
915 | // Don't make available until backtracking is complete. |
916 | SU->isPending = true; |
917 | PendingQueue.push_back(SU); |
918 | } |
919 | else { |
920 | AvailableQueue->push(SU); |
921 | } |
922 | AvailableQueue->unscheduledNode(SU); |
923 | } |
924 | |
925 | /// After backtracking, the hazard checker needs to be restored to a state |
926 | /// corresponding to the current cycle. |
927 | void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() { |
928 | HazardRec->Reset(); |
929 | |
930 | unsigned LookAhead = std::min((unsigned)Sequence.size(), |
931 | HazardRec->getMaxLookAhead()); |
932 | if (LookAhead == 0) |
933 | return; |
934 | |
935 | std::vector<SUnit *>::const_iterator I = (Sequence.end() - LookAhead); |
936 | unsigned HazardCycle = (*I)->getHeight(); |
937 | for (auto E = Sequence.end(); I != E; ++I) { |
938 | SUnit *SU = *I; |
939 | for (; SU->getHeight() > HazardCycle; ++HazardCycle) { |
940 | HazardRec->RecedeCycle(); |
941 | } |
942 | EmitNode(SU); |
943 | } |
944 | } |
945 | |
946 | /// BacktrackBottomUp - Backtrack scheduling, unscheduling nodes up to and |
947 | /// including BtSU, so that the given node SU can be scheduled. |
948 | void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) { |
949 | SUnit *OldSU = Sequence.back(); |
950 | while (true) { |
951 | Sequence.pop_back(); |
952 | // FIXME: use ready cycle instead of height |
953 | CurCycle = OldSU->getHeight(); |
954 | UnscheduleNodeBottomUp(OldSU); |
955 | AvailableQueue->setCurCycle(CurCycle); |
956 | if (OldSU == BtSU) |
957 | break; |
958 | OldSU = Sequence.back(); |
959 | } |
960 | |
961 | assert(!SU->isSucc(OldSU) && "Something is wrong!")(static_cast <bool> (!SU->isSucc(OldSU) && "Something is wrong!" ) ? void (0) : __assert_fail ("!SU->isSucc(OldSU) && \"Something is wrong!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp" , 961, __extension__ __PRETTY_FUNCTION__)); |
962 | |
963 | RestoreHazardCheckerBottomUp(); |
964 | |
965 | ReleasePending(); |
966 | |
967 | ++NumBacktracks; |
968 | } |
969 | |
970 | static bool isOperandOf(const SUnit *SU, SDNode *N) { |
971 | for (const SDNode *SUNode = SU->getNode(); SUNode; |
972 | SUNode = SUNode->getGluedNode()) { |
973 | if (SUNode->isOperandOf(N)) |
974 | return true; |
975 | } |
976 | return false; |
977 | } |
978 | |
979 | /// TryUnfoldSU - Attempt to unfold SU into a separate load and operation. |
980 | SUnit *ScheduleDAGRRList::TryUnfoldSU(SUnit *SU) { |
981 | SDNode *N = SU->getNode(); |
982 | // Use while over if to ease fall through. |
983 | SmallVector<SDNode *, 2> NewNodes; |
984 | if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes)) |
985 | return nullptr; |
986 | |
987 | // Unfolding an x86 DEC64m operation results in a store, dec, and load, |
988 | // which can't be handled here, so quit. |
989 | if (NewNodes.size() == 3) |
990 | return nullptr; |
991 | |
992 | assert(NewNodes.size() == 2 && "Expected a load folding node!"); |
993 | |
994 | N = NewNodes[1]; |
995 | SDNode *LoadNode = NewNodes[0]; |
996 | unsigned NumVals = N->getNumValues(); |
997 | unsigned OldNumVals = SU->getNode()->getNumValues(); |
998 | |
999 | // LoadNode may already exist. This can happen when there is another |
1000 | // load from the same location that produces the same type of value |
1001 | // but with different alignment or volatility. |
1002 | bool isNewLoad = true; |
1003 | SUnit *LoadSU; |
1004 | if (LoadNode->getNodeId() != -1) { |
1005 | LoadSU = &SUnits[LoadNode->getNodeId()]; |
1006 | // If LoadSU has already been scheduled, we should clone it but |
1007 | // this would negate the benefit of unfolding, so just return SU. |
1008 | if (LoadSU->isScheduled) |
1009 | return SU; |
1010 | isNewLoad = false; |
1011 | } else { |
1012 | LoadSU = CreateNewSUnit(LoadNode); |
1013 | LoadNode->setNodeId(LoadSU->NodeNum); |
1014 | |
1015 | InitNumRegDefsLeft(LoadSU); |
1016 | computeLatency(LoadSU); |
1017 | } |
1018 | |
1019 | bool isNewN = true; |
1020 | SUnit *NewSU; |
1021 | // This can only happen when isNewLoad is false. |
1022 | if (N->getNodeId() != -1) { |
1023 | NewSU = &SUnits[N->getNodeId()]; |
1024 | // If NewSU has already been scheduled, we need to clone it, but this |
1025 | // negates the benefit of unfolding, so just return SU. |
1026 | if (NewSU->isScheduled) { |
1027 | return SU; |
1028 | } |
1029 | isNewN = false; |
1030 | } else { |
1031 | NewSU = CreateNewSUnit(N); |
1032 | N->setNodeId(NewSU->NodeNum); |
1033 | |
1034 | const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); |
1035 | for (unsigned i = 0; i != MCID.getNumOperands(); ++i) { |
1036 | if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) { |
1037 | NewSU->isTwoAddress = true; |
1038 | break; |
1039 | } |
1040 | } |
1041 | if (MCID.isCommutable()) |
1042 | NewSU->isCommutable = true; |
1043 | |
1044 | InitNumRegDefsLeft(NewSU); |
1045 | computeLatency(NewSU); |
1046 | } |
1047 | |
1048 | LLVM_DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n"); |
1049 | |
1050 | // Now that we are committed to unfolding, replace DAG uses. |
1051 | for (unsigned i = 0; i != NumVals; ++i) |
1052 | DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i)); |
1053 | DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals - 1), |
1054 | SDValue(LoadNode, 1)); |
1055 | |
1056 | // Record all the edges to and from the old SU, by category. |
1057 | SmallVector<SDep, 4> ChainPreds; |
1058 | SmallVector<SDep, 4> ChainSuccs; |
1059 | SmallVector<SDep, 4> LoadPreds; |
1060 | SmallVector<SDep, 4> NodePreds; |
1061 | SmallVector<SDep, 4> NodeSuccs; |
1062 | for (SDep &Pred : SU->Preds) { |
1063 | if (Pred.isCtrl()) |
1064 | ChainPreds.push_back(Pred); |
1065 | else if (isOperandOf(Pred.getSUnit(), LoadNode)) |
1066 | LoadPreds.push_back(Pred); |
1067 | else |
1068 | NodePreds.push_back(Pred); |
1069 | } |
1070 | for (SDep &Succ : SU->Succs) { |
1071 | if (Succ.isCtrl()) |
1072 | ChainSuccs.push_back(Succ); |
1073 | else |
1074 | NodeSuccs.push_back(Succ); |
1075 | } |
1076 | |
1077 | // Now assign edges to the newly-created nodes. |
1078 | for (const SDep &Pred : ChainPreds) { |
1079 | RemovePred(SU, Pred); |
1080 | if (isNewLoad) |
1081 | AddPredQueued(LoadSU, Pred); |
1082 | } |
1083 | for (const SDep &Pred : LoadPreds) { |
1084 | RemovePred(SU, Pred); |
1085 | if (isNewLoad) |
1086 | AddPredQueued(LoadSU, Pred); |
1087 | } |
1088 | for (const SDep &Pred : NodePreds) { |
1089 | RemovePred(SU, Pred); |
1090 | AddPredQueued(NewSU, Pred); |
1091 | } |
1092 | for (SDep D : NodeSuccs) { |
1093 | SUnit *SuccDep = D.getSUnit(); |
1094 | D.setSUnit(SU); |
1095 | RemovePred(SuccDep, D); |
1096 | D.setSUnit(NewSU); |
1097 | AddPredQueued(SuccDep, D); |
1098 | // Balance register pressure. |
1099 | if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled && |
1100 | !D.isCtrl() && NewSU->NumRegDefsLeft > 0) |
1101 | --NewSU->NumRegDefsLeft; |
1102 | } |
1103 | for (SDep D : ChainSuccs) { |
1104 | SUnit *SuccDep = D.getSUnit(); |
1105 | D.setSUnit(SU); |
1106 | RemovePred(SuccDep, D); |
1107 | if (isNewLoad) { |
1108 | D.setSUnit(LoadSU); |
1109 | AddPredQueued(SuccDep, D); |
1110 | } |
1111 | } |
1112 | |
1113 | // Add a data dependency to reflect that NewSU reads the value defined |
1114 | // by LoadSU. |
1115 | SDep D(LoadSU, SDep::Data, 0); |
1116 | D.setLatency(LoadSU->Latency); |
1117 | AddPredQueued(NewSU, D); |
1118 | |
1119 | if (isNewLoad) |
1120 | AvailableQueue->addNode(LoadSU); |
1121 | if (isNewN) |
1122 | AvailableQueue->addNode(NewSU); |
1123 | |
1124 | ++NumUnfolds; |
1125 | |
1126 | if (NewSU->NumSuccsLeft == 0) |
1127 | NewSU->isAvailable = true; |
1128 | |
1129 | return NewSU; |
1130 | } |
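
For context (an x86-flavored illustration, not taken from this file): a
load-folded instruction such as ADD64rm (reg += [mem]) unfolds into a
separate load plus ADD64rr. unfoldMemoryOperand then returns NewNodes[0] =
the load and NewNodes[1] = the operation, which is exactly the two-node
shape this function rewires; a load-op-store instruction like DEC64m would
yield three nodes and is rejected above.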
1131 | |
1132 | /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled |
1133 | /// successors to the newly created node. |
1134 | SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) { |
1135 | SDNode *N = SU->getNode(); |
1136 | if (!N) |
1137 | return nullptr; |
1138 | |
1139 | LLVM_DEBUG(dbgs() << "Considering duplicating the SU\n"); |
1140 | LLVM_DEBUG(dumpNode(*SU)); |
1141 | |
1142 | if (N->getGluedNode() && |
1143 | !TII->canCopyGluedNodeDuringSchedule(N)) { |
1144 | LLVM_DEBUG( |
1145 | dbgs() |
1146 | << "Giving up because it has incoming glue and the target does not " |
1147 | "want to copy it\n"); |
1148 | return nullptr; |
1149 | } |
1150 | |
1151 | SUnit *NewSU; |
1152 | bool TryUnfold = false; |
1153 | for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { |
1154 | MVT VT = N->getSimpleValueType(i); |
1155 | if (VT == MVT::Glue) { |
1156 | LLVM_DEBUG(dbgs() << "Giving up because it has outgoing glue\n");
1157 | return nullptr; |
1158 | } else if (VT == MVT::Other) |
1159 | TryUnfold = true; |
1160 | } |
1161 | for (const SDValue &Op : N->op_values()) { |
1162 | MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo()); |
1163 | if (VT == MVT::Glue && !TII->canCopyGluedNodeDuringSchedule(N)) { |
1164 | LLVM_DEBUG(
1165 | dbgs() << "Giving up because one of the operands is glue and "
1166 | "the target does not want to copy it\n");
1167 | return nullptr; |
1168 | } |
1169 | } |
1170 | |
1171 | // If possible, unfold the instruction.
1172 | if (TryUnfold) { |
1173 | SUnit *UnfoldSU = TryUnfoldSU(SU); |
1174 | if (!UnfoldSU) |
1175 | return nullptr; |
1176 | SU = UnfoldSU; |
1177 | N = SU->getNode(); |
Value stored to 'N' is never read | |
1178 | // If this can be scheduled, don't bother duplicating; just return it.
1179 | if (SU->NumSuccsLeft == 0) |
1180 | return SU; |
1181 | } |
1182 | |
1183 | LLVM_DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
1184 | NewSU = CreateClone(SU); |
1185 | |
1186 | // New SUnit has the exact same predecessors. |
1187 | for (SDep &Pred : SU->Preds) |
1188 | if (!Pred.isArtificial()) |
1189 | AddPredQueued(NewSU, Pred); |
1190 | |
1191 | // Make sure the clone comes after the original. (InstrEmitter assumes |
1192 | // this ordering.) |
1193 | AddPredQueued(NewSU, SDep(SU, SDep::Artificial)); |
1194 | |
1195 | // Only copy scheduled successors. Cut them from old node's successor |
1196 | // list and move them over. |
1197 | SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps; |
1198 | for (SDep &Succ : SU->Succs) { |
1199 | if (Succ.isArtificial()) |
1200 | continue; |
1201 | SUnit *SuccSU = Succ.getSUnit(); |
1202 | if (SuccSU->isScheduled) { |
1203 | SDep D = Succ; |
1204 | D.setSUnit(NewSU); |
1205 | AddPredQueued(SuccSU, D); |
1206 | D.setSUnit(SU); |
1207 | DelDeps.push_back(std::make_pair(SuccSU, D)); |
1208 | } |
1209 | } |
1210 | for (auto &DelDep : DelDeps) |
1211 | RemovePred(DelDep.first, DelDep.second); |
1212 | |
1213 | AvailableQueue->updateNode(SU); |
1214 | AvailableQueue->addNode(NewSU); |
1215 | |
1216 | ++NumDups; |
1217 | return NewSU; |
1218 | } |
1219 | |
1220 | /// InsertCopiesAndMoveSuccs - Insert register copies and move all |
1221 | /// scheduled successors of the given SUnit to the last copy. |
1222 | void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg, |
1223 | const TargetRegisterClass *DestRC, |
1224 | const TargetRegisterClass *SrcRC, |
1225 | SmallVectorImpl<SUnit*> &Copies) { |
1226 | SUnit *CopyFromSU = CreateNewSUnit(nullptr); |
1227 | CopyFromSU->CopySrcRC = SrcRC; |
1228 | CopyFromSU->CopyDstRC = DestRC; |
1229 | |
1230 | SUnit *CopyToSU = CreateNewSUnit(nullptr); |
1231 | CopyToSU->CopySrcRC = DestRC; |
1232 | CopyToSU->CopyDstRC = SrcRC; |
1233 | |
1234 | // Only copy scheduled successors. Cut them from old node's successor |
1235 | // list and move them over. |
1236 | SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps; |
1237 | for (SDep &Succ : SU->Succs) { |
1238 | if (Succ.isArtificial()) |
1239 | continue; |
1240 | SUnit *SuccSU = Succ.getSUnit(); |
1241 | if (SuccSU->isScheduled) { |
1242 | SDep D = Succ; |
1243 | D.setSUnit(CopyToSU); |
1244 | AddPredQueued(SuccSU, D); |
1245 | DelDeps.push_back(std::make_pair(SuccSU, Succ)); |
1246 | } |
1247 | else { |
1248 | // Avoid scheduling the def-side copy before other successors. Otherwise |
1249 | // we could introduce another physreg interference on the copy and |
1250 | // continue inserting copies indefinitely. |
1251 | AddPredQueued(SuccSU, SDep(CopyFromSU, SDep::Artificial)); |
1252 | } |
1253 | } |
1254 | for (auto &DelDep : DelDeps) |
1255 | RemovePred(DelDep.first, DelDep.second); |
1256 | |
1257 | SDep FromDep(SU, SDep::Data, Reg); |
1258 | FromDep.setLatency(SU->Latency); |
1259 | AddPredQueued(CopyFromSU, FromDep); |
1260 | SDep ToDep(CopyFromSU, SDep::Data, 0); |
1261 | ToDep.setLatency(CopyFromSU->Latency); |
1262 | AddPredQueued(CopyToSU, ToDep); |
1263 | |
1264 | AvailableQueue->updateNode(SU); |
1265 | AvailableQueue->addNode(CopyFromSU); |
1266 | AvailableQueue->addNode(CopyToSU); |
1267 | Copies.push_back(CopyFromSU); |
1268 | Copies.push_back(CopyToSU); |
1269 | |
1270 | ++NumPRCopies; |
1271 | } |
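// Illustration of the structure InsertCopiesAndMoveSuccs builds (with the
// SrcRC/DestRC as passed in): SU --Reg--> CopyFromSU --> CopyToSU, where
// CopyFromSU copies the physical register value into DestRC and CopyToSU
// copies it back into SrcRC; SU's already-scheduled successors now read
// from CopyToSU instead of SU.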
1272 | |
1273 | /// getPhysicalRegisterVT - Returns the ValueType of the physical register |
1274 | /// definition of the specified node. |
1275 | /// FIXME: Move to SelectionDAG? |
1276 | static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg, |
1277 | const TargetInstrInfo *TII) { |
1278 | unsigned NumRes; |
1279 | if (N->getOpcode() == ISD::CopyFromReg) { |
1280 | // CopyFromReg has: "chain, Val, glue" so operand 1 gives the type. |
1281 | NumRes = 1; |
1282 | } else { |
1283 | const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); |
1284 | assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
1285 | NumRes = MCID.getNumDefs(); |
1286 | for (const MCPhysReg *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) { |
1287 | if (Reg == *ImpDef) |
1288 | break; |
1289 | ++NumRes; |
1290 | } |
1291 | } |
1292 | return N->getSimpleValueType(NumRes); |
1293 | } |
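// Worked example for getPhysicalRegisterVT (hypothetical operands): for a
// machine node with two explicit defs and implicit defs {EFLAGS, EAX},
// querying Reg == EAX skips the two explicit results plus EFLAGS, so the
// function returns N->getSimpleValueType(3).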
1294 | |
1295 | /// CheckForLiveRegDef - Update the RegAdded set and LRegs vector if the
1296 | /// specified register def of the specified SUnit clobbers any "live" registers.
1297 | static void CheckForLiveRegDef(SUnit *SU, unsigned Reg, |
1298 | SUnit **LiveRegDefs, |
1299 | SmallSet<unsigned, 4> &RegAdded, |
1300 | SmallVectorImpl<unsigned> &LRegs, |
1301 | const TargetRegisterInfo *TRI) { |
1302 | for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) { |
1303 | |
1304 | // Check if Reg is live.
1305 | if (!LiveRegDefs[*AliasI]) continue; |
1306 | |
1307 | // Allow multiple uses of the same def. |
1308 | if (LiveRegDefs[*AliasI] == SU) continue; |
1309 | |
1310 | // Add Reg to the set of interfering live regs. |
1311 | if (RegAdded.insert(*AliasI).second) { |
1312 | LRegs.push_back(*AliasI); |
1313 | } |
1314 | } |
1315 | } |
1316 | |
1317 | /// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered |
1318 | /// by RegMask, and add them to LRegs. |
1319 | static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask, |
1320 | ArrayRef<SUnit*> LiveRegDefs, |
1321 | SmallSet<unsigned, 4> &RegAdded, |
1322 | SmallVectorImpl<unsigned> &LRegs) { |
1323 | // Look at all live registers. Skip Reg0 and the special CallResource. |
1324 | for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) { |
1325 | if (!LiveRegDefs[i]) continue; |
1326 | if (LiveRegDefs[i] == SU) continue; |
1327 | if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue; |
1328 | if (RegAdded.insert(i).second) |
1329 | LRegs.push_back(i); |
1330 | } |
1331 | } |
1332 | |
1333 | /// getNodeRegMask - Returns the register mask attached to an SDNode, if any. |
1334 | static const uint32_t *getNodeRegMask(const SDNode *N) { |
1335 | for (const SDValue &Op : N->op_values()) |
1336 | if (const auto *RegOp = dyn_cast<RegisterMaskSDNode>(Op.getNode())) |
1337 | return RegOp->getRegMask(); |
1338 | return nullptr; |
1339 | } |
1340 | |
1341 | /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay |
1342 | /// scheduling of the given node to satisfy live physical register dependencies. |
1343 | /// If the specified node is the last one that's available to schedule, do
1344 | /// whatever is necessary (i.e. backtracking or cloning) to make it possible. |
1345 | bool ScheduleDAGRRList:: |
1346 | DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) { |
1347 | if (NumLiveRegs == 0) |
1348 | return false; |
1349 | |
1350 | SmallSet<unsigned, 4> RegAdded; |
1351 | // If this node would clobber any "live" register, then it's not ready. |
1352 | // |
1353 | // If SU is the currently live definition of the same register that it uses, |
1354 | // then we are free to schedule it. |
1355 | for (SDep &Pred : SU->Preds) { |
1356 | if (Pred.isAssignedRegDep() && LiveRegDefs[Pred.getReg()] != SU) |
1357 | CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs.get(), |
1358 | RegAdded, LRegs, TRI); |
1359 | } |
1360 | |
1361 | for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) { |
1362 | if (Node->getOpcode() == ISD::INLINEASM || |
1363 | Node->getOpcode() == ISD::INLINEASM_BR) { |
1364 | // Inline asm can clobber physical defs. |
1365 | unsigned NumOps = Node->getNumOperands(); |
1366 | if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue) |
1367 | --NumOps; // Ignore the glue operand. |
1368 | |
1369 | for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { |
1370 | unsigned Flags = |
1371 | cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue(); |
1372 | unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); |
1373 | |
1374 | ++i; // Skip the ID value. |
1375 | if (InlineAsm::isRegDefKind(Flags) || |
1376 | InlineAsm::isRegDefEarlyClobberKind(Flags) || |
1377 | InlineAsm::isClobberKind(Flags)) { |
1378 | // Check for def of register or earlyclobber register. |
1379 | for (; NumVals; --NumVals, ++i) { |
1380 | unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg(); |
1381 | if (Register::isPhysicalRegister(Reg)) |
1382 | CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI); |
1383 | } |
1384 | } else |
1385 | i += NumVals; |
1386 | } |
1387 | continue; |
1388 | } |
1389 | |
1390 | if (!Node->isMachineOpcode()) |
1391 | continue; |
1392 | // If we're in the middle of scheduling a call, don't begin scheduling |
1393 | // another call. Also, don't allow any physical registers to be live across |
1394 | // the call. |
1395 | if (Node->getMachineOpcode() == TII->getCallFrameDestroyOpcode()) { |
1396 | // Check the special calling-sequence resource. |
1397 | unsigned CallResource = TRI->getNumRegs(); |
1398 | if (LiveRegDefs[CallResource]) { |
1399 | SDNode *Gen = LiveRegGens[CallResource]->getNode(); |
1400 | while (SDNode *Glued = Gen->getGluedNode()) |
1401 | Gen = Glued; |
1402 | if (!IsChainDependent(Gen, Node, 0, TII) && |
1403 | RegAdded.insert(CallResource).second) |
1404 | LRegs.push_back(CallResource); |
1405 | } |
1406 | } |
1407 | if (const uint32_t *RegMask = getNodeRegMask(Node)) |
1408 | CheckForLiveRegDefMasked(SU, RegMask, |
1409 | makeArrayRef(LiveRegDefs.get(), TRI->getNumRegs()), |
1410 | RegAdded, LRegs); |
1411 | |
1412 | const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode()); |
1413 | if (MCID.hasOptionalDef()) { |
1414 | // Most ARM instructions have an OptionalDef for CPSR, to model the S-bit. |
1415 | // This operand can be either a def of CPSR, if the S bit is set; or a use |
1416 | // of %noreg. When the OptionalDef is set to a valid register, we need to |
1417 | // handle it in the same way as an ImplicitDef. |
1418 | for (unsigned i = 0; i < MCID.getNumDefs(); ++i) |
1419 | if (MCID.OpInfo[i].isOptionalDef()) { |
1420 | const SDValue &OptionalDef = Node->getOperand(i - Node->getNumValues()); |
1421 | unsigned Reg = cast<RegisterSDNode>(OptionalDef)->getReg(); |
1422 | CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI); |
1423 | } |
1424 | } |
1425 | if (!MCID.ImplicitDefs) |
1426 | continue; |
1427 | for (const MCPhysReg *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) |
1428 | CheckForLiveRegDef(SU, *Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI); |
1429 | } |
1430 | |
1431 | return !LRegs.empty(); |
1432 | } |
1433 | |
1434 | void ScheduleDAGRRList::releaseInterferences(unsigned Reg) { |
1435 | // Add the nodes that aren't ready back onto the available list. |
1436 | for (unsigned i = Interferences.size(); i > 0; --i) { |
1437 | SUnit *SU = Interferences[i-1]; |
1438 | LRegsMapT::iterator LRegsPos = LRegsMap.find(SU); |
1439 | if (Reg) { |
1440 | SmallVectorImpl<unsigned> &LRegs = LRegsPos->second; |
1441 | if (!is_contained(LRegs, Reg)) |
1442 | continue; |
1443 | } |
1444 | SU->isPending = false; |
1445 | // The interfering node may no longer be available due to backtracking. |
1446 | // Furthermore, it may have been made available again, in which case it is |
1447 | // now already in the AvailableQueue. |
1448 | if (SU->isAvailable && !SU->NodeQueueId) { |
1449 | LLVM_DEBUG(dbgs() << " Repushing SU #" << SU->NodeNum << '\n');
1450 | AvailableQueue->push(SU); |
1451 | } |
1452 | if (i < Interferences.size()) |
1453 | Interferences[i-1] = Interferences.back(); |
1454 | Interferences.pop_back(); |
1455 | LRegsMap.erase(LRegsPos); |
1456 | } |
1457 | } |
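// Note on releaseInterferences: iterating from the back and swapping the
// last element into the vacated slot gives O(1) removal from Interferences
// without disturbing the indices that remain to be visited.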
1458 | |
1459 | /// Return a node that can be scheduled in this cycle. Requirements: |
1460 | /// (1) Ready: latency has been satisfied |
1461 | /// (2) No Hazards: resources are available |
1462 | /// (3) No Interferences: may unschedule to break register interferences. |
1463 | SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() { |
1464 | SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop(); |
1465 | auto FindAvailableNode = [&]() { |
1466 | while (CurSU) { |
1467 | SmallVector<unsigned, 4> LRegs; |
1468 | if (!DelayForLiveRegsBottomUp(CurSU, LRegs)) |
1469 | break; |
1470 | LLVM_DEBUG(dbgs() << " Interfering reg ";
1471 | if (LRegs[0] == TRI->getNumRegs()) dbgs() << "CallResource";
1472 | else dbgs() << printReg(LRegs[0], TRI);
1473 | dbgs() << " SU #" << CurSU->NodeNum << '\n');
1474 | std::pair<LRegsMapT::iterator, bool> LRegsPair = |
1475 | LRegsMap.insert(std::make_pair(CurSU, LRegs)); |
1476 | if (LRegsPair.second) { |
1477 | CurSU->isPending = true; // This SU is not in AvailableQueue right now. |
1478 | Interferences.push_back(CurSU); |
1479 | } |
1480 | else { |
1481 | assert(CurSU->isPending && "Interferences are pending");
1482 | // Update the interference with current live regs. |
1483 | LRegsPair.first->second = LRegs; |
1484 | } |
1485 | CurSU = AvailableQueue->pop(); |
1486 | } |
1487 | }; |
1488 | FindAvailableNode(); |
1489 | if (CurSU) |
1490 | return CurSU; |
1491 | |
1492 | // We query the topological order in the loop body, so make sure outstanding |
1493 | // updates are applied before entering it (we only enter the loop if there |
1494 | // are some interferences). If we make changes to the ordering, we exit |
1495 | // the loop. |
1496 | |
1497 | // All candidates are delayed due to live physical reg dependencies. |
1498 | // Try backtracking, code duplication, or inserting cross class copies |
1499 | // to resolve it. |
1500 | for (SUnit *TrySU : Interferences) { |
1501 | SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU]; |
1502 | |
1503 | // Try unscheduling up to the point where it's safe to schedule |
1504 | // this node. |
1505 | SUnit *BtSU = nullptr; |
1506 | unsigned LiveCycle = std::numeric_limits<unsigned>::max(); |
1507 | for (unsigned Reg : LRegs) { |
1508 | if (LiveRegGens[Reg]->getHeight() < LiveCycle) { |
1509 | BtSU = LiveRegGens[Reg]; |
1510 | LiveCycle = BtSU->getHeight(); |
1511 | } |
1512 | } |
1513 | if (!WillCreateCycle(TrySU, BtSU)) { |
1514 | // BacktrackBottomUp mutates Interferences! |
1515 | BacktrackBottomUp(TrySU, BtSU); |
1516 | |
1517 | // Force the current node to be scheduled before the node that |
1518 | // requires the physical reg dep. |
1519 | if (BtSU->isAvailable) { |
1520 | BtSU->isAvailable = false; |
1521 | if (!BtSU->isPending) |
1522 | AvailableQueue->remove(BtSU); |
1523 | } |
1524 | LLVM_DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum
1525 | << ") to SU(" << TrySU->NodeNum << ")\n");
1526 | AddPredQueued(TrySU, SDep(BtSU, SDep::Artificial)); |
1527 | |
1528 | // If one or more successors has been unscheduled, then the current |
1529 | // node is no longer available. |
1530 | if (!TrySU->isAvailable || !TrySU->NodeQueueId) { |
1531 | LLVM_DEBUG(dbgs() << "TrySU not available; choosing node from queue\n");
1532 | CurSU = AvailableQueue->pop(); |
1533 | } else { |
1534 | LLVM_DEBUG(dbgs() << "TrySU available\n");
1535 | // Available and in AvailableQueue |
1536 | AvailableQueue->remove(TrySU); |
1537 | CurSU = TrySU; |
1538 | } |
1539 | FindAvailableNode(); |
1540 | // Interferences has been mutated. We must break. |
1541 | break; |
1542 | } |
1543 | } |
1544 | |
1545 | if (!CurSU) { |
1546 | // Can't backtrack. If it's too expensive to copy the value, then try |
1547 | // duplicating the nodes that produce these "too expensive to copy"
1548 | // values to break the dependency. In case even that doesn't work, |
1549 | // insert cross class copies. |
1550 | // If it's not too expensive, i.e. cost != -1, issue copies. |
1551 | SUnit *TrySU = Interferences[0]; |
1552 | SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU]; |
1553 | assert(LRegs.size() == 1 && "Can't handle this yet!");
1554 | unsigned Reg = LRegs[0]; |
1555 | SUnit *LRDef = LiveRegDefs[Reg]; |
1556 | MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII); |
1557 | const TargetRegisterClass *RC = |
1558 | TRI->getMinimalPhysRegClass(Reg, VT); |
1559 | const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC); |
1560 | |
1561 | // If cross copy register class is the same as RC, then it must be possible
1562 | // to copy the value directly. Do not try to duplicate the def.
1563 | // If cross copy register class is not the same as RC, then it's possible to
1564 | // copy the value but it requires cross register class copies and it is
1565 | // expensive.
1566 | // If cross copy register class is null, then it's not possible to copy |
1567 | // the value at all. |
1568 | SUnit *NewDef = nullptr; |
1569 | if (DestRC != RC) { |
1570 | NewDef = CopyAndMoveSuccessors(LRDef); |
1571 | if (!DestRC && !NewDef) |
1572 | report_fatal_error("Can't handle live physical register dependency!"); |
1573 | } |
1574 | if (!NewDef) { |
1575 | // Issue copies, these can be expensive cross register class copies. |
1576 | SmallVector<SUnit*, 2> Copies; |
1577 | InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies); |
1578 | LLVM_DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
1579 | << " to SU #" << Copies.front()->NodeNum << "\n");
1580 | AddPredQueued(TrySU, SDep(Copies.front(), SDep::Artificial)); |
1581 | NewDef = Copies.back(); |
1582 | } |
1583 | |
1584 | LLVM_DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
1585 | << " to SU #" << TrySU->NodeNum << "\n");
1586 | LiveRegDefs[Reg] = NewDef; |
1587 | AddPredQueued(NewDef, SDep(TrySU, SDep::Artificial)); |
1588 | TrySU->isAvailable = false; |
1589 | CurSU = NewDef; |
1590 | } |
1591 | assert(CurSU && "Unable to resolve live physical register dependencies!");
1592 | return CurSU; |
1593 | } |
1594 | |
1595 | /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up |
1596 | /// schedulers. |
1597 | void ScheduleDAGRRList::ListScheduleBottomUp() { |
1598 | // Release any predecessors of the special Exit node. |
1599 | ReleasePredecessors(&ExitSU); |
1600 | |
1601 | // Add root to Available queue. |
1602 | if (!SUnits.empty()) { |
1603 | SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()]; |
1604 | assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
1605 | RootSU->isAvailable = true; |
1606 | AvailableQueue->push(RootSU); |
1607 | } |
1608 | |
1609 | // While Available queue is not empty, grab the node with the highest |
1610 | // priority. If it is not ready put it back. Schedule the node. |
1611 | Sequence.reserve(SUnits.size()); |
1612 | while (!AvailableQueue->empty() || !Interferences.empty()) { |
1613 | LLVM_DEBUG(dbgs() << "\nExamining Available:\n";
1614 | AvailableQueue->dump(this));
1615 | |
1616 | // Pick the best node to schedule taking all constraints into |
1617 | // consideration. |
1618 | SUnit *SU = PickNodeToScheduleBottomUp(); |
1619 | |
1620 | AdvancePastStalls(SU); |
1621 | |
1622 | ScheduleNodeBottomUp(SU); |
1623 | |
1624 | while (AvailableQueue->empty() && !PendingQueue.empty()) { |
1625 | // Advance the cycle to free resources. Skip ahead to the next ready SU. |
1626 | assert(MinAvailableCycle < std::numeric_limits<unsigned>::max() &&
1627 | "MinAvailableCycle uninitialized");
1628 | AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle)); |
1629 | } |
1630 | } |
1631 | |
1632 | // Reverse the order since the schedule was built bottom-up.
1633 | std::reverse(Sequence.begin(), Sequence.end()); |
1634 | |
1635 | #ifndef NDEBUG |
1636 | VerifyScheduledSequence(/*isBottomUp=*/true); |
1637 | #endif |
1638 | } |
1639 | |
1640 | namespace { |
1641 | |
1642 | class RegReductionPQBase; |
1643 | |
1644 | struct queue_sort { |
1645 | bool isReady(SUnit* SU, unsigned CurCycle) const { return true; } |
1646 | }; |
1647 | |
1648 | #ifndef NDEBUG |
1649 | template<class SF> |
1650 | struct reverse_sort : public queue_sort { |
1651 | SF &SortFunc; |
1652 | |
1653 | reverse_sort(SF &sf) : SortFunc(sf) {} |
1654 | |
1655 | bool operator()(SUnit* left, SUnit* right) const { |
1656 | // reverse left/right rather than simply !SortFunc(left, right) |
1657 | // to expose different paths in the comparison logic. |
1658 | return SortFunc(right, left); |
1659 | } |
1660 | }; |
1661 | #endif // NDEBUG |
1662 | |
1663 | /// bu_ls_rr_sort - Priority function for bottom up register pressure |
1664 | /// reduction scheduler.
1665 | struct bu_ls_rr_sort : public queue_sort { |
1666 | enum { |
1667 | IsBottomUp = true, |
1668 | HasReadyFilter = false |
1669 | }; |
1670 | |
1671 | RegReductionPQBase *SPQ; |
1672 | |
1673 | bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {} |
1674 | |
1675 | bool operator()(SUnit* left, SUnit* right) const; |
1676 | }; |
1677 | |
1678 | // src_ls_rr_sort - Priority function for source order scheduler. |
1679 | struct src_ls_rr_sort : public queue_sort { |
1680 | enum { |
1681 | IsBottomUp = true, |
1682 | HasReadyFilter = false |
1683 | }; |
1684 | |
1685 | RegReductionPQBase *SPQ; |
1686 | |
1687 | src_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {} |
1688 | |
1689 | bool operator()(SUnit* left, SUnit* right) const; |
1690 | }; |
1691 | |
1692 | // hybrid_ls_rr_sort - Priority function for hybrid scheduler. |
1693 | struct hybrid_ls_rr_sort : public queue_sort { |
1694 | enum { |
1695 | IsBottomUp = true, |
1696 | HasReadyFilter = false |
1697 | }; |
1698 | |
1699 | RegReductionPQBase *SPQ; |
1700 | |
1701 | hybrid_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {} |
1702 | |
1703 | bool isReady(SUnit *SU, unsigned CurCycle) const; |
1704 | |
1705 | bool operator()(SUnit* left, SUnit* right) const; |
1706 | }; |
1707 | |
1708 | // ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism) |
1709 | // scheduler. |
1710 | struct ilp_ls_rr_sort : public queue_sort { |
1711 | enum { |
1712 | IsBottomUp = true, |
1713 | HasReadyFilter = false |
1714 | }; |
1715 | |
1716 | RegReductionPQBase *SPQ; |
1717 | |
1718 | ilp_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {} |
1719 | |
1720 | bool isReady(SUnit *SU, unsigned CurCycle) const; |
1721 | |
1722 | bool operator()(SUnit* left, SUnit* right) const; |
1723 | }; |
1724 | |
1725 | class RegReductionPQBase : public SchedulingPriorityQueue { |
1726 | protected: |
1727 | std::vector<SUnit *> Queue; |
1728 | unsigned CurQueueId = 0; |
1729 | bool TracksRegPressure; |
1730 | bool SrcOrder; |
1731 | |
1732 | // SUnits - The SUnits for the current graph. |
1733 | std::vector<SUnit> *SUnits; |
1734 | |
1735 | MachineFunction &MF; |
1736 | const TargetInstrInfo *TII; |
1737 | const TargetRegisterInfo *TRI; |
1738 | const TargetLowering *TLI; |
1739 | ScheduleDAGRRList *scheduleDAG = nullptr; |
1740 | |
1741 | // SethiUllmanNumbers - The SethiUllman number for each node. |
1742 | std::vector<unsigned> SethiUllmanNumbers; |
1743 | |
1744 | /// RegPressure - Tracking current reg pressure per register class. |
1745 | std::vector<unsigned> RegPressure; |
1746 | |
1747 | /// RegLimit - Tracking the number of allocatable registers per register |
1748 | /// class. |
1749 | std::vector<unsigned> RegLimit; |
1750 | |
1751 | public: |
1752 | RegReductionPQBase(MachineFunction &mf, |
1753 | bool hasReadyFilter, |
1754 | bool tracksrp, |
1755 | bool srcorder, |
1756 | const TargetInstrInfo *tii, |
1757 | const TargetRegisterInfo *tri, |
1758 | const TargetLowering *tli) |
1759 | : SchedulingPriorityQueue(hasReadyFilter), TracksRegPressure(tracksrp), |
1760 | SrcOrder(srcorder), MF(mf), TII(tii), TRI(tri), TLI(tli) { |
1761 | if (TracksRegPressure) { |
1762 | unsigned NumRC = TRI->getNumRegClasses(); |
1763 | RegLimit.resize(NumRC); |
1764 | RegPressure.resize(NumRC); |
1765 | std::fill(RegLimit.begin(), RegLimit.end(), 0); |
1766 | std::fill(RegPressure.begin(), RegPressure.end(), 0); |
1767 | for (const TargetRegisterClass *RC : TRI->regclasses()) |
1768 | RegLimit[RC->getID()] = tri->getRegPressureLimit(RC, MF); |
1769 | } |
1770 | } |
1771 | |
1772 | void setScheduleDAG(ScheduleDAGRRList *scheduleDag) { |
1773 | scheduleDAG = scheduleDag; |
1774 | } |
1775 | |
1776 | ScheduleHazardRecognizer* getHazardRec() { |
1777 | return scheduleDAG->getHazardRec(); |
1778 | } |
1779 | |
1780 | void initNodes(std::vector<SUnit> &sunits) override; |
1781 | |
1782 | void addNode(const SUnit *SU) override; |
1783 | |
1784 | void updateNode(const SUnit *SU) override; |
1785 | |
1786 | void releaseState() override { |
1787 | SUnits = nullptr; |
1788 | SethiUllmanNumbers.clear(); |
1789 | std::fill(RegPressure.begin(), RegPressure.end(), 0); |
1790 | } |
1791 | |
1792 | unsigned getNodePriority(const SUnit *SU) const; |
1793 | |
1794 | unsigned getNodeOrdering(const SUnit *SU) const { |
1795 | if (!SU->getNode()) return 0; |
1796 | |
1797 | return SU->getNode()->getIROrder(); |
1798 | } |
1799 | |
1800 | bool empty() const override { return Queue.empty(); } |
1801 | |
1802 | void push(SUnit *U) override { |
1803 | assert(!U->NodeQueueId && "Node in the queue already");
1804 | U->NodeQueueId = ++CurQueueId; |
1805 | Queue.push_back(U); |
1806 | } |
1807 | |
1808 | void remove(SUnit *SU) override { |
1809 | assert(!Queue.empty() && "Queue is empty!");
1810 | assert(SU->NodeQueueId != 0 && "Not in queue!");
1811 | std::vector<SUnit *>::iterator I = llvm::find(Queue, SU); |
1812 | if (I != std::prev(Queue.end())) |
1813 | std::swap(*I, Queue.back()); |
1814 | Queue.pop_back(); |
1815 | SU->NodeQueueId = 0; |
1816 | } |
1817 | |
1818 | bool tracksRegPressure() const override { return TracksRegPressure; } |
1819 | |
1820 | void dumpRegPressure() const; |
1821 | |
1822 | bool HighRegPressure(const SUnit *SU) const; |
1823 | |
1824 | bool MayReduceRegPressure(SUnit *SU) const; |
1825 | |
1826 | int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const; |
1827 | |
1828 | void scheduledNode(SUnit *SU) override; |
1829 | |
1830 | void unscheduledNode(SUnit *SU) override; |
1831 | |
1832 | protected: |
1833 | bool canClobber(const SUnit *SU, const SUnit *Op); |
1834 | void AddPseudoTwoAddrDeps(); |
1835 | void PrescheduleNodesWithMultipleUses(); |
1836 | void CalculateSethiUllmanNumbers(); |
1837 | }; |
1838 | |
1839 | template<class SF> |
1840 | static SUnit *popFromQueueImpl(std::vector<SUnit *> &Q, SF &Picker) { |
1841 | unsigned BestIdx = 0; |
1842 | // Only compute the cost for the first 1000 items in the queue, to avoid |
1843 | // excessive compile-times for very large queues. |
1844 | for (unsigned I = 1, E = std::min(Q.size(), (decltype(Q.size()))1000); I != E; |
1845 | I++) |
1846 | if (Picker(Q[BestIdx], Q[I])) |
1847 | BestIdx = I; |
1848 | SUnit *V = Q[BestIdx]; |
1849 | if (BestIdx + 1 != Q.size()) |
1850 | std::swap(Q[BestIdx], Q.back()); |
1851 | Q.pop_back(); |
1852 | return V; |
1853 | } |
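// Note: the sort functors used as Picker are "lower priority" predicates,
// so Picker(Q[BestIdx], Q[I]) == true means Q[I] is the better candidate;
// the scan in popFromQueueImpl therefore selects the highest-priority SUnit
// among the first 1000 queue entries.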
1854 | |
1855 | template<class SF> |
1856 | SUnit *popFromQueue(std::vector<SUnit *> &Q, SF &Picker, ScheduleDAG *DAG) { |
1857 | #ifndef NDEBUG |
1858 | if (DAG->StressSched) { |
1859 | reverse_sort<SF> RPicker(Picker); |
1860 | return popFromQueueImpl(Q, RPicker); |
1861 | } |
1862 | #endif |
1863 | (void)DAG; |
1864 | return popFromQueueImpl(Q, Picker); |
1865 | } |
1866 | |
1867 | //===----------------------------------------------------------------------===// |
1868 | // RegReductionPriorityQueue Definition |
1869 | //===----------------------------------------------------------------------===// |
1870 | // |
1871 | // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers |
1872 | // to reduce register pressure. |
1873 | // |
1874 | template<class SF> |
1875 | class RegReductionPriorityQueue : public RegReductionPQBase { |
1876 | SF Picker; |
1877 | |
1878 | public: |
1879 | RegReductionPriorityQueue(MachineFunction &mf, |
1880 | bool tracksrp, |
1881 | bool srcorder, |
1882 | const TargetInstrInfo *tii, |
1883 | const TargetRegisterInfo *tri, |
1884 | const TargetLowering *tli) |
1885 | : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder, |
1886 | tii, tri, tli), |
1887 | Picker(this) {} |
1888 | |
1889 | bool isBottomUp() const override { return SF::IsBottomUp; } |
1890 | |
1891 | bool isReady(SUnit *U) const override { |
1892 | return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle()); |
1893 | } |
1894 | |
1895 | SUnit *pop() override { |
1896 | if (Queue.empty()) return nullptr; |
1897 | |
1898 | SUnit *V = popFromQueue(Queue, Picker, scheduleDAG); |
1899 | V->NodeQueueId = 0; |
1900 | return V; |
1901 | } |
1902 | |
1903 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
1904 | LLVM_DUMP_METHOD void dump(ScheduleDAG *DAG) const override {
1905 | // Emulate pop() without clobbering NodeQueueIds. |
1906 | std::vector<SUnit *> DumpQueue = Queue; |
1907 | SF DumpPicker = Picker; |
1908 | while (!DumpQueue.empty()) { |
1909 | SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG); |
1910 | dbgs() << "Height " << SU->getHeight() << ": "; |
1911 | DAG->dumpNode(*SU); |
1912 | } |
1913 | } |
1914 | #endif |
1915 | }; |
1916 | |
1917 | using BURegReductionPriorityQueue = RegReductionPriorityQueue<bu_ls_rr_sort>; |
1918 | using SrcRegReductionPriorityQueue = RegReductionPriorityQueue<src_ls_rr_sort>; |
1919 | using HybridBURRPriorityQueue = RegReductionPriorityQueue<hybrid_ls_rr_sort>; |
1920 | using ILPBURRPriorityQueue = RegReductionPriorityQueue<ilp_ls_rr_sort>; |
1921 | |
1922 | } // end anonymous namespace |
1923 | |
1924 | //===----------------------------------------------------------------------===// |
1925 | // Static Node Priority for Register Pressure Reduction |
1926 | //===----------------------------------------------------------------------===// |
1927 | |
1928 | // Check for special nodes that bypass scheduling heuristics. |
1929 | // Currently this pushes TokenFactor nodes down, but may be used for other |
1930 | // pseudo-ops as well. |
1931 | // |
1932 | // Return -1 to schedule right above left, 1 for left above right. |
1933 | // Return 0 if no bias exists. |
1934 | static int checkSpecialNodes(const SUnit *left, const SUnit *right) { |
1935 | bool LSchedLow = left->isScheduleLow; |
1936 | bool RSchedLow = right->isScheduleLow; |
1937 | if (LSchedLow != RSchedLow) |
1938 | return LSchedLow < RSchedLow ? 1 : -1; |
1939 | return 0; |
1940 | } |
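// Worked example for checkSpecialNodes: if left->isScheduleLow is true and
// right->isScheduleLow is false, the ternary yields -1, scheduling right
// above left so the schedule-low node stays near the bottom; equal flags
// return 0 and defer to the normal heuristics.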
1941 | |
1942 | /// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number. |
1943 | /// Smaller number is the higher priority. |
1944 | static unsigned |
1945 | CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) { |
1946 | if (SUNumbers[SU->NodeNum] != 0) |
1947 | return SUNumbers[SU->NodeNum]; |
1948 | |
1949 | // Use WorkList to avoid stack overflow on excessively large IRs. |
1950 | struct WorkState { |
1951 | WorkState(const SUnit *SU) : SU(SU) {} |
1952 | const SUnit *SU; |
1953 | unsigned PredsProcessed = 0; |
1954 | }; |
1955 | |
1956 | SmallVector<WorkState, 16> WorkList; |
1957 | WorkList.push_back(SU); |
1958 | while (!WorkList.empty()) { |
1959 | auto &Temp = WorkList.back(); |
1960 | auto *TempSU = Temp.SU; |
1961 | bool AllPredsKnown = true; |
1962 | // Try to find a non-evaluated pred and push it into the processing stack. |
1963 | for (unsigned P = Temp.PredsProcessed; P < TempSU->Preds.size(); ++P) { |
1964 | auto &Pred = TempSU->Preds[P]; |
1965 | if (Pred.isCtrl()) continue; // ignore chain preds |
1966 | SUnit *PredSU = Pred.getSUnit(); |
1967 | if (SUNumbers[PredSU->NodeNum] == 0) { |
1968 | #ifndef NDEBUG |
1969 | // In debug mode, check that we don't have such element in the stack. |
1970 | for (auto It : WorkList) |
1971 | assert(It.SU != PredSU && "Trying to push an element twice?");
1972 | #endif |
1973 | // Next time start processing this one starting from the next pred. |
1974 | Temp.PredsProcessed = P + 1; |
1975 | WorkList.push_back(PredSU); |
1976 | AllPredsKnown = false; |
1977 | break; |
1978 | } |
1979 | } |
1980 | |
1981 | if (!AllPredsKnown) |
1982 | continue; |
1983 | |
1984 | // Once all preds are known, we can calculate the answer for this one. |
1985 | unsigned SethiUllmanNumber = 0; |
1986 | unsigned Extra = 0; |
1987 | for (const SDep &Pred : TempSU->Preds) { |
1988 | if (Pred.isCtrl()) continue; // ignore chain preds |
1989 | SUnit *PredSU = Pred.getSUnit(); |
1990 | unsigned PredSethiUllman = SUNumbers[PredSU->NodeNum]; |
1991 | assert(PredSethiUllman > 0 && "We should have evaluated this pred!");
1992 | if (PredSethiUllman > SethiUllmanNumber) { |
1993 | SethiUllmanNumber = PredSethiUllman; |
1994 | Extra = 0; |
1995 | } else if (PredSethiUllman == SethiUllmanNumber) |
1996 | ++Extra; |
1997 | } |
1998 | |
1999 | SethiUllmanNumber += Extra; |
2000 | if (SethiUllmanNumber == 0) |
2001 | SethiUllmanNumber = 1; |
2002 | SUNumbers[TempSU->NodeNum] = SethiUllmanNumber; |
2003 | WorkList.pop_back(); |
2004 | } |
2005 | |
2006 | assert(SUNumbers[SU->NodeNum] > 0 && "SethiUllman should never be zero!");
2007 | return SUNumbers[SU->NodeNum]; |
2008 | } |
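// Worked example for CalcNodeSethiUllmanNumber: a node whose data
// predecessors carry Sethi-Ullman numbers {3, 3, 1} takes the maximum (3)
// plus one Extra for the second predecessor tied at that maximum, giving 4;
// a node with no data predecessors receives the minimum value 1.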
2009 | |
2010 | /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all |
2011 | /// scheduling units. |
2012 | void RegReductionPQBase::CalculateSethiUllmanNumbers() { |
2013 | SethiUllmanNumbers.assign(SUnits->size(), 0); |
2014 | |
2015 | for (const SUnit &SU : *SUnits) |
2016 | CalcNodeSethiUllmanNumber(&SU, SethiUllmanNumbers); |
2017 | } |
2018 | |
2019 | void RegReductionPQBase::addNode(const SUnit *SU) { |
2020 | unsigned SUSize = SethiUllmanNumbers.size(); |
2021 | if (SUnits->size() > SUSize) |
2022 | SethiUllmanNumbers.resize(SUSize*2, 0); |
2023 | CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers); |
2024 | } |
2025 | |
2026 | void RegReductionPQBase::updateNode(const SUnit *SU) { |
2027 | SethiUllmanNumbers[SU->NodeNum] = 0; |
2028 | CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers); |
2029 | } |
2030 | |
2031 | // Lower priority means schedule further down. For bottom-up scheduling, lower |
2032 | // priority SUs are scheduled before higher priority SUs. |
2033 | unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const { |
2034 | assert(SU->NodeNum < SethiUllmanNumbers.size());
2035 | unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0; |
2036 | if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) |
2037 | // CopyToReg should be close to its uses to facilitate coalescing and |
2038 | // avoid spilling. |
2039 | return 0; |
2040 | if (Opc == TargetOpcode::EXTRACT_SUBREG || |
2041 | Opc == TargetOpcode::SUBREG_TO_REG || |
2042 | Opc == TargetOpcode::INSERT_SUBREG) |
2043 | // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be |
2044 | // close to their uses to facilitate coalescing. |
2045 | return 0; |
2046 | if (SU->NumSuccs == 0 && SU->NumPreds != 0) |
2047 | // If SU does not have a register use, i.e. it doesn't produce a value |
2048 | // that would be consumed (e.g. store), then it terminates a chain of |
2049 | // computation. Give it a large SethiUllman number so it will be |
2050 | // scheduled right before its predecessors so that it doesn't lengthen
2051 | // their live ranges. |
2052 | return 0xffff; |
2053 | if (SU->NumPreds == 0 && SU->NumSuccs != 0) |
2054 | // If SU does not have a register def, schedule it close to its uses |
2055 | // because it does not lengthen any live ranges. |
2056 | return 0; |
2057 | #if 1 |
2058 | return SethiUllmanNumbers[SU->NodeNum]; |
2059 | #else |
2060 | unsigned Priority = SethiUllmanNumbers[SU->NodeNum]; |
2061 | if (SU->isCallOp) { |
2062 | // FIXME: This assumes all of the defs are used as call operands. |
2063 | int NP = (int)Priority - SU->getNode()->getNumValues(); |
2064 | return (NP > 0) ? NP : 0; |
2065 | } |
2066 | return Priority; |
2067 | #endif |
2068 | } |
2069 | |
2070 | //===----------------------------------------------------------------------===// |
2071 | // Register Pressure Tracking |
2072 | //===----------------------------------------------------------------------===// |
2073 | |
2074 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
2075 | LLVM_DUMP_METHOD void RegReductionPQBase::dumpRegPressure() const {
2076 | for (const TargetRegisterClass *RC : TRI->regclasses()) { |
2077 | unsigned Id = RC->getID(); |
2078 | unsigned RP = RegPressure[Id]; |
2079 | if (!RP) continue; |
2080 | LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << ": " << RP << " / "
2081 | << RegLimit[Id] << '\n');
2082 | } |
2083 | } |
2084 | #endif |
2085 | |
2086 | bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const { |
2087 | if (!TLI) |
2088 | return false; |
2089 | |
2090 | for (const SDep &Pred : SU->Preds) { |
2091 | if (Pred.isCtrl()) |
2092 | continue; |
2093 | SUnit *PredSU = Pred.getSUnit(); |
2094 | // NumRegDefsLeft is zero when enough uses of this node have been scheduled |
2095 | // to cover the number of registers defined (they are all live). |
2096 | if (PredSU->NumRegDefsLeft == 0) { |
2097 | continue; |
2098 | } |
2099 | for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG); |
2100 | RegDefPos.IsValid(); RegDefPos.Advance()) { |
2101 | unsigned RCId, Cost; |
2102 | GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF); |
2103 | |
2104 | if ((RegPressure[RCId] + Cost) >= RegLimit[RCId]) |
2105 | return true; |
2106 | } |
2107 | } |
2108 | return false; |
2109 | } |
2110 | |
2111 | bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const { |
2112 | const SDNode *N = SU->getNode(); |
2113 | |
2114 | if (!N->isMachineOpcode() || !SU->NumSuccs) |
2115 | return false; |
2116 | |
2117 | unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs(); |
2118 | for (unsigned i = 0; i != NumDefs; ++i) { |
2119 | MVT VT = N->getSimpleValueType(i); |
2120 | if (!N->hasAnyUseOfValue(i)) |
2121 | continue; |
2122 | unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); |
2123 | if (RegPressure[RCId] >= RegLimit[RCId]) |
2124 | return true; |
2125 | } |
2126 | return false; |
2127 | } |
2128 | |
2129 | // Compute the register pressure contribution of this instruction by counting
2130 | // up for uses that are not live and down for defs. Only count register classes
2131 | // that are already under high pressure. As a side effect, compute the number of |
2132 | // uses of registers that are already live. |
2133 | // |
2134 | // FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure |
2135 | // so could probably be factored. |
2136 | int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const { |
2137 | LiveUses = 0; |
2138 | int PDiff = 0; |
2139 | for (const SDep &Pred : SU->Preds) { |
2140 | if (Pred.isCtrl()) |
2141 | continue; |
2142 | SUnit *PredSU = Pred.getSUnit(); |
2143 | // NumRegDefsLeft is zero when enough uses of this node have been scheduled |
2144 | // to cover the number of registers defined (they are all live). |
2145 | if (PredSU->NumRegDefsLeft == 0) { |
2146 | if (PredSU->getNode()->isMachineOpcode()) |
2147 | ++LiveUses; |
2148 | continue; |
2149 | } |
2150 | for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG); |
2151 | RegDefPos.IsValid(); RegDefPos.Advance()) { |
2152 | MVT VT = RegDefPos.GetValue(); |
2153 | unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); |
2154 | if (RegPressure[RCId] >= RegLimit[RCId]) |
2155 | ++PDiff; |
2156 | } |
2157 | } |
2158 | const SDNode *N = SU->getNode(); |
2159 | |
2160 | if (!N || !N->isMachineOpcode() || !SU->NumSuccs) |
2161 | return PDiff; |
2162 | |
2163 | unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs(); |
2164 | for (unsigned i = 0; i != NumDefs; ++i) { |
2165 | MVT VT = N->getSimpleValueType(i); |
2166 | if (!N->hasAnyUseOfValue(i)) |
2167 | continue; |
2168 | unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); |
2169 | if (RegPressure[RCId] >= RegLimit[RCId]) |
2170 | --PDiff; |
2171 | } |
2172 | return PDiff; |
2173 | } |
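// Worked example for RegPressureDiff (assuming one register class already
// at its limit): a node using two not-yet-live values and defining one used
// value in that class gets PDiff = 2 - 1 = +1, meaning scheduling it
// bottom-up would leave one more register live.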
2174 | |
2175 | void RegReductionPQBase::scheduledNode(SUnit *SU) { |
2176 | if (!TracksRegPressure) |
2177 | return; |
2178 | |
2179 | if (!SU->getNode()) |
2180 | return; |
2181 | |
2182 | for (const SDep &Pred : SU->Preds) { |
2183 | if (Pred.isCtrl()) |
2184 | continue; |
2185 | SUnit *PredSU = Pred.getSUnit(); |
2186 | // NumRegDefsLeft is zero when enough uses of this node have been scheduled |
2187 | // to cover the number of registers defined (they are all live). |
2188 | if (PredSU->NumRegDefsLeft == 0) { |
2189 | continue; |
2190 | } |
2191 | // FIXME: The ScheduleDAG currently loses information about which of a |
2192 | // node's values is consumed by each dependence. Consequently, if the node |
2193 | // defines multiple register classes, we don't know which to pressurize |
2194 | // here. Instead the following loop consumes the register defs in an |
2195 | // arbitrary order. At least it handles the common case of clustered loads |
2196 | // to the same class. For precise liveness, each SDep needs to indicate the |
2197 | // result number. But that tightly couples the ScheduleDAG with the |
2198 | // SelectionDAG making updates tricky. A simpler hack would be to attach a |
2199 | // value type or register class to SDep. |
2200 | // |
2201 | // The most important aspect of register tracking is balancing the increase |
2202 | // here with the reduction further below. Note that this SU may use multiple |
2203 | // defs in PredSU. They can't be determined here, but we've already
2204 | // compensated by reducing NumRegDefsLeft in PredSU during |
2205 | // ScheduleDAGSDNodes::AddSchedEdges. |
2206 | --PredSU->NumRegDefsLeft; |
2207 | unsigned SkipRegDefs = PredSU->NumRegDefsLeft; |
2208 | for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG); |
2209 | RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) { |
2210 | if (SkipRegDefs) |
2211 | continue; |
2212 | |
2213 | unsigned RCId, Cost; |
2214 | GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF); |
2215 | RegPressure[RCId] += Cost; |
2216 | break; |
2217 | } |
2218 | } |
2219 | |
2220 | // We should have this assert, but there may be dead SDNodes that never |
2221 | // materialize as SUnits, so they don't appear to generate liveness. |
2222 | //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses"); |
2223 | int SkipRegDefs = (int)SU->NumRegDefsLeft; |
2224 | for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG); |
2225 | RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) { |
2226 | if (SkipRegDefs > 0) |
2227 | continue; |
2228 | unsigned RCId, Cost; |
2229 | GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF); |
2230 | if (RegPressure[RCId] < Cost) { |
2231 | // Register pressure tracking is imprecise. This can happen. But we try |
2232 | // hard not to let it happen because it likely results in poor scheduling. |
2233 | LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum
2234 | << ") has too many regdefs\n");
2235 | RegPressure[RCId] = 0; |
2236 | } |
2237 | else { |
2238 | RegPressure[RCId] -= Cost; |
2239 | } |
2240 | } |
2241 | LLVM_DEBUG(dumpRegPressure());
2242 | } |
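// Summary of scheduledNode's bottom-up update: scheduling SU starts the
// live ranges of the values it reads, so pressure rises at each
// predecessor's reg def, and ends the live ranges of the values it defines,
// so pressure falls for SU's own remaining reg defs.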
2243 | |
2244 | void RegReductionPQBase::unscheduledNode(SUnit *SU) { |
2245 | if (!TracksRegPressure) |
2246 | return; |
2247 | |
2248 | const SDNode *N = SU->getNode(); |
2249 | if (!N) return; |
2250 | |
2251 | if (!N->isMachineOpcode()) { |
2252 | if (N->getOpcode() != ISD::CopyToReg) |
2253 | return; |
2254 | } else { |
2255 | unsigned Opc = N->getMachineOpcode(); |
2256 | if (Opc == TargetOpcode::EXTRACT_SUBREG || |
2257 | Opc == TargetOpcode::INSERT_SUBREG || |
2258 | Opc == TargetOpcode::SUBREG_TO_REG || |
2259 | Opc == TargetOpcode::REG_SEQUENCE || |
2260 | Opc == TargetOpcode::IMPLICIT_DEF) |
2261 | return; |
2262 | } |
2263 | |
2264 | for (const SDep &Pred : SU->Preds) { |
2265 | if (Pred.isCtrl()) |
2266 | continue; |
2267 | SUnit *PredSU = Pred.getSUnit(); |
2268 | // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only |
2269 | // counts data deps. |
2270 | if (PredSU->NumSuccsLeft != PredSU->Succs.size()) |
2271 | continue; |
2272 | const SDNode *PN = PredSU->getNode(); |
2273 | if (!PN->isMachineOpcode()) { |
2274 | if (PN->getOpcode() == ISD::CopyFromReg) { |
2275 | MVT VT = PN->getSimpleValueType(0); |
2276 | unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); |
2277 | RegPressure[RCId] += TLI->getRepRegClassCostFor(VT); |
2278 | } |
2279 | continue; |
2280 | } |
2281 | unsigned POpc = PN->getMachineOpcode(); |
2282 | if (POpc == TargetOpcode::IMPLICIT_DEF) |
2283 | continue; |
2284 | if (POpc == TargetOpcode::EXTRACT_SUBREG || |
2285 | POpc == TargetOpcode::INSERT_SUBREG || |
2286 | POpc == TargetOpcode::SUBREG_TO_REG) { |
2287 | MVT VT = PN->getSimpleValueType(0); |
2288 | unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); |
2289 | RegPressure[RCId] += TLI->getRepRegClassCostFor(VT); |
2290 | continue; |
2291 | } |
2292 | unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs(); |
2293 | for (unsigned i = 0; i != NumDefs; ++i) { |
2294 | MVT VT = PN->getSimpleValueType(i); |
2295 | if (!PN->hasAnyUseOfValue(i)) |
2296 | continue; |
2297 | unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); |
2298 | if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT)) |
2299 | // Register pressure tracking is imprecise. This can happen. |
2300 | RegPressure[RCId] = 0; |
2301 | else |
2302 | RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT); |
2303 | } |
2304 | } |
2305 | |
2306 | // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses() |
2307 | // may transfer data dependencies to CopyToReg. |
2308 | if (SU->NumSuccs && N->isMachineOpcode()) { |
2309 | unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs(); |
2310 | for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) { |
2311 | MVT VT = N->getSimpleValueType(i); |
2312 | if (VT == MVT::Glue || VT == MVT::Other) |
2313 | continue; |
2314 | if (!N->hasAnyUseOfValue(i)) |
2315 | continue; |
2316 | unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); |
2317 | RegPressure[RCId] += TLI->getRepRegClassCostFor(VT); |
2318 | } |
2319 | } |
2320 | |
2321 | LLVM_DEBUG(dumpRegPressure());
2322 | } |
2323 | |
2324 | //===----------------------------------------------------------------------===// |
2325 | // Dynamic Node Priority for Register Pressure Reduction |
2326 | //===----------------------------------------------------------------------===// |
2327 | |
2328 | /// closestSucc - Returns the scheduled cycle of the successor which is |
2329 | /// closest to the current cycle. |
2330 | static unsigned closestSucc(const SUnit *SU) { |
2331 | unsigned MaxHeight = 0; |
2332 | for (const SDep &Succ : SU->Succs) { |
2333 | if (Succ.isCtrl()) continue; // ignore chain succs |
2334 | unsigned Height = Succ.getSUnit()->getHeight(); |
2335 | // If there are a bunch of CopyToRegs stacked up, they should be considered
2336 | // to be at the same position. |
2337 | if (Succ.getSUnit()->getNode() && |
2338 | Succ.getSUnit()->getNode()->getOpcode() == ISD::CopyToReg) |
2339 | Height = closestSucc(Succ.getSUnit())+1; |
2340 | if (Height > MaxHeight) |
2341 | MaxHeight = Height; |
2342 | } |
2343 | return MaxHeight; |
2344 | } |
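// Worked example (hypothetical heights): with data successors at heights 3
// and 5, the loop above returns 5. If the height-5 successor is instead a
// CopyToReg whose own closest successor sits at height 6, it contributes
// closestSucc(CopyToReg) + 1 == 7, so the result becomes 7.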
2345 | |
2346 | /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
2347 | /// for scratch registers, i.e. the number of data dependencies.
2348 | static unsigned calcMaxScratches(const SUnit *SU) { |
2349 | unsigned Scratches = 0; |
2350 | for (const SDep &Pred : SU->Preds) { |
2351 | if (Pred.isCtrl()) continue; // ignore chain preds |
2352 | Scratches++; |
2353 | } |
2354 | return Scratches; |
2355 | } |
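// Worked example: a node with three data predecessors and two chain
// predecessors yields calcMaxScratches == 3; chain preds are skipped.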
2356 | |
2357 | /// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are |
2358 | /// CopyFromReg from a virtual register. |
2359 | static bool hasOnlyLiveInOpers(const SUnit *SU) { |
2360 | bool RetVal = false; |
2361 | for (const SDep &Pred : SU->Preds) { |
2362 | if (Pred.isCtrl()) continue; |
2363 | const SUnit *PredSU = Pred.getSUnit(); |
2364 | if (PredSU->getNode() && |
2365 | PredSU->getNode()->getOpcode() == ISD::CopyFromReg) { |
2366 | unsigned Reg = |
2367 | cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg(); |
2368 | if (Register::isVirtualRegister(Reg)) { |
2369 | RetVal = true; |
2370 | continue; |
2371 | } |
2372 | } |
2373 | return false; |
2374 | } |
2375 | return RetVal; |
2376 | } |
2377 | |
2378 | /// hasOnlyLiveOutUses - Return true if SU has only value successors that are |
2379 | /// CopyToReg to a virtual register. This SU def is probably a liveout and |
2380 | /// it has no other use. It should be scheduled closer to the terminator. |
2381 | static bool hasOnlyLiveOutUses(const SUnit *SU) { |
2382 | bool RetVal = false; |
2383 | for (const SDep &Succ : SU->Succs) { |
2384 | if (Succ.isCtrl()) continue; |
2385 | const SUnit *SuccSU = Succ.getSUnit(); |
2386 | if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) { |
2387 | unsigned Reg = |
2388 | cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg(); |
2389 | if (Register::isVirtualRegister(Reg)) { |
2390 | RetVal = true; |
2391 | continue; |
2392 | } |
2393 | } |
2394 | return false; |
2395 | } |
2396 | return RetVal; |
2397 | } |
2398 | |
2399 | // Set isVRegCycle for a node with only live-in operands and live-out uses. Also
2400 | // set isVRegCycle for its CopyFromReg operands. |
2401 | // |
2402 | // This is only relevant for single-block loops, in which case the VRegCycle |
2403 | // node is likely an induction variable whose operand and target virtual
2404 | // registers should be coalesced (e.g. pre/post increment values). Setting the |
2405 | // isVRegCycle flag helps the scheduler prioritize other uses of the same |
2406 | // CopyFromReg so that this node becomes the virtual register "kill". This |
2407 | // avoids interference between the values live in and out of the block and |
2408 | // eliminates a copy inside the loop. |
2409 | static void initVRegCycle(SUnit *SU) { |
2410 | if (DisableSchedVRegCycle) |
2411 | return; |
2412 | |
2413 | if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU)) |
2414 | return; |
2415 | |
2416 | LLVM_DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2417 | |
2418 | SU->isVRegCycle = true; |
2419 | |
2420 | for (const SDep &Pred : SU->Preds) { |
2421 | if (Pred.isCtrl()) continue; |
2422 | Pred.getSUnit()->isVRegCycle = true; |
2423 | } |
2424 | } |
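// Illustrative example (hypothetical IR for a single-block loop):
//
//   loop:
//     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nsw i32 %iv, 1
//     ...
//     br i1 %done, label %exit, label %loop
//
// After lowering, the add reads only a CopyFromReg of %iv's vreg and feeds
// only a CopyToReg of %iv.next's vreg, so hasOnlyLiveInOpers and
// hasOnlyLiveOutUses both hold and the add is marked isVRegCycle together
// with its CopyFromReg operand.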
2425 | |
2426 | // After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of |
2427 | // CopyFromReg operands. We should no longer penalize other uses of this VReg. |
2428 | static void resetVRegCycle(SUnit *SU) { |
2429 | if (!SU->isVRegCycle) |
2430 | return; |
2431 | |
2432 | for (const SDep &Pred : SU->Preds) { |
2433 | if (Pred.isCtrl()) continue; // ignore chain preds |
2434 | SUnit *PredSU = Pred.getSUnit(); |
2435 | if (PredSU->isVRegCycle) { |
2436 | assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2437 | "VRegCycle def must be CopyFromReg");
2438 | Pred.getSUnit()->isVRegCycle = false; |
2439 | } |
2440 | } |
2441 | } |
2442 | |
2443 | // Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This |
2444 | // means a node that defines the VRegCycle has not been scheduled yet. |
2445 | static bool hasVRegCycleUse(const SUnit *SU) { |
2446 | // If this SU also defines the VReg, don't hoist it as a "use". |
2447 | if (SU->isVRegCycle) |
2448 | return false; |
2449 | |
2450 | for (const SDep &Pred : SU->Preds) { |
2451 | if (Pred.isCtrl()) continue; // ignore chain preds |
2452 | if (Pred.getSUnit()->isVRegCycle && |
2453 | Pred.getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) { |
2454 | LLVM_DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
2455 | return true; |
2456 | } |
2457 | } |
2458 | return false; |
2459 | } |
2460 | |
2461 | // Check for either a dependence (latency) or resource (hazard) stall. |
2462 | // |
2463 | // Note: The ScheduleHazardRecognizer interface requires a non-const SU. |
2464 | static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) { |
2465 | if ((int)SPQ->getCurCycle() < Height) return true; |
2466 | if (SPQ->getHazardRec()->getHazardType(SU, 0) |
2467 | != ScheduleHazardRecognizer::NoHazard) |
2468 | return true; |
2469 | return false; |
2470 | } |
2471 | |
2472 | // Return -1 if left has higher priority, 1 if right has higher priority. |
2473 | // Return 0 if latency-based priority is equivalent. |
2474 | static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref, |
2475 | RegReductionPQBase *SPQ) { |
2476 | // Scheduling an instruction that uses a VReg whose postincrement has not yet |
2477 | // been scheduled will induce a copy. Model this as an extra cycle of latency. |
2478 | int LPenalty = hasVRegCycleUse(left) ? 1 : 0; |
2479 | int RPenalty = hasVRegCycleUse(right) ? 1 : 0; |
2480 | int LHeight = (int)left->getHeight() + LPenalty; |
2481 | int RHeight = (int)right->getHeight() + RPenalty; |
2482 | |
2483 | bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) && |
2484 | BUHasStall(left, LHeight, SPQ); |
2485 | bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) && |
2486 | BUHasStall(right, RHeight, SPQ); |
2487 | |
2488 | // If scheduling one of the nodes would cause a pipeline stall, delay it.
2489 | // If scheduling both would cause stalls, sort them according to their
2490 | // height.
2491 | if (LStall) { |
2492 | if (!RStall) |
2493 | return 1; |
2494 | if (LHeight != RHeight) |
2495 | return LHeight > RHeight ? 1 : -1; |
2496 | } else if (RStall) |
2497 | return -1; |
2498 | |
2499 | // If either node is scheduling for latency, sort them by height/depth |
2500 | // and latency. |
2501 | if (!checkPref || (left->SchedulingPref == Sched::ILP || |
2502 | right->SchedulingPref == Sched::ILP)) { |
2503 | // If neither instruction stalls (!LStall && !RStall) and the HazardRecognizer
2504 | // is enabled (grouping instructions by cycle), then height is already
2505 | // accounted for, so only depth matters. We also reach this point if both
2506 | // stall but have the same height.
2507 | if (!SPQ->getHazardRec()->isEnabled()) { |
2508 | if (LHeight != RHeight) |
2509 | return LHeight > RHeight ? 1 : -1; |
2510 | } |
2511 | int LDepth = left->getDepth() - LPenalty; |
2512 | int RDepth = right->getDepth() - RPenalty; |
2513 | if (LDepth != RDepth) { |
2514 | LLVM_DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
2515 | << ") depth " << LDepth << " vs SU (" << right->NodeNum
2516 | << ") depth " << RDepth << "\n");
2517 | return LDepth < RDepth ? 1 : -1; |
2518 | } |
2519 | if (left->Latency != right->Latency) |
2520 | return left->Latency > right->Latency ? 1 : -1; |
2521 | } |
2522 | return 0; |
2523 | } |
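// Worked example (hypothetical values): left has height 4 plus a VRegCycle
// use (LPenalty == 1), so LHeight == 5; right has height 5 with no penalty,
// so RHeight == 5. If only left stalls, the function returns 1 (prefer
// right). If neither stalls and the hazard recognizer is disabled, the
// heights tie and the comparison falls through to depth, then raw latency,
// before returning 0.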
2524 | |
2525 | static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) { |
2526 | // Schedule physical register definitions close to their use. This is |
2527 | // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as |
2528 | // long as shortening physreg live ranges is generally good, we can defer |
2529 | // creating a subtarget hook. |
2530 | if (!DisableSchedPhysRegJoin) { |
2531 | bool LHasPhysReg = left->hasPhysRegDefs; |
2532 | bool RHasPhysReg = right->hasPhysRegDefs; |
2533 | if (LHasPhysReg != RHasPhysReg) { |
2534 | #ifndef NDEBUG |
2535 | static const char *const PhysRegMsg[] = { " has no physreg", |
2536 | " defines a physreg" }; |
2537 | #endif |
2538 | LLVM_DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
2539 | << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum
2540 | << ") " << PhysRegMsg[RHasPhysReg] << "\n");
2541 | return LHasPhysReg < RHasPhysReg; |
2542 | } |
2543 | } |
2544 | |
2545 | // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
2546 | unsigned LPriority = SPQ->getNodePriority(left); |
2547 | unsigned RPriority = SPQ->getNodePriority(right); |
2548 | |
2549 | // Be really careful about hoisting call operands above previous calls. |
2550 | // Only allow it if it would reduce register pressure.
2551 | if (left->isCall && right->isCallOp) { |
2552 | unsigned RNumVals = right->getNode()->getNumValues(); |
2553 | RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0; |
2554 | } |
2555 | if (right->isCall && left->isCallOp) { |
2556 | unsigned LNumVals = left->getNode()->getNumValues(); |
2557 | LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0; |
2558 | } |
2559 | |
2560 | if (LPriority != RPriority) |
2561 | return LPriority > RPriority; |
2562 | |
2563 | // If one or both of the nodes are calls and their Sethi-Ullman numbers are
2564 | // the same, keep source order.
2565 | if (left->isCall || right->isCall) { |
2566 | unsigned LOrder = SPQ->getNodeOrdering(left); |
2567 | unsigned ROrder = SPQ->getNodeOrdering(right); |
2568 | |
2569 | // Prefer the node whose non-zero order number is lower; an order number of
2570 | // zero carries no preference.
2571 | if ((LOrder || ROrder) && LOrder != ROrder) |
2572 | return LOrder != 0 && (LOrder < ROrder || ROrder == 0); |
2573 | } |
2574 | |
2575 | // Try to schedule def and use closer together when Sethi-Ullman numbers are the same.
2576 | // e.g. |
2577 | // t1 = op t2, c1 |
2578 | // t3 = op t4, c2 |
2579 | // |
2580 | // and the following instructions are both ready. |
2581 | // t2 = op c3 |
2582 | // t4 = op c4 |
2583 | // |
2584 | // Then schedule t2 = op first. |
2585 | // i.e. |
2586 | // t4 = op c4 |
2587 | // t2 = op c3 |
2588 | // t1 = op t2, c1 |
2589 | // t3 = op t4, c2 |
2590 | // |
2591 | // This creates more short live intervals. |
2592 | unsigned LDist = closestSucc(left); |
2593 | unsigned RDist = closestSucc(right); |
2594 | if (LDist != RDist) |
2595 | return LDist < RDist; |
2596 | |
2597 | // How many registers become live when the node is scheduled.
2598 | unsigned LScratch = calcMaxScratches(left); |
2599 | unsigned RScratch = calcMaxScratches(right); |
2600 | if (LScratch != RScratch) |
2601 | return LScratch > RScratch; |
2602 | |
2603 | // Comparing latency against a call makes little sense unless the node |
2604 | // is register pressure-neutral. |
2605 | if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0)) |
2606 | return (left->NodeQueueId > right->NodeQueueId); |
2607 | |
2608 | // Do not compare latencies when one or both of the nodes are calls. |
2609 | if (!DisableSchedCycles && |
2610 | !(left->isCall || right->isCall)) { |
2611 | int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ); |
2612 | if (result != 0) |
2613 | return result > 0; |
2614 | } |
2615 | else { |
2616 | if (left->getHeight() != right->getHeight()) |
2617 | return left->getHeight() > right->getHeight(); |
2618 | |
2619 | if (left->getDepth() != right->getDepth()) |
2620 | return left->getDepth() < right->getDepth(); |
2621 | } |
2622 | |
2623 | assert(left->NodeQueueId && right->NodeQueueId &&
2624 | "NodeQueueId cannot be zero");
2625 | return (left->NodeQueueId > right->NodeQueueId); |
2626 | } |
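// A minimal standalone sketch of the tie-break cascade above, using a toy
// SUnit-like struct (ToySU and toyBURRSort are hypothetical illustrations,
// not scheduler types); each criterion is consulted only when all earlier
// ones tie, exactly as BURRSort falls through:

struct ToySU {
  bool HasPhysRegDefs;
  unsigned Priority, Dist, Scratch, QueueId;
};

static bool toyBURRSort(const ToySU &L, const ToySU &R) {
  if (L.HasPhysRegDefs != R.HasPhysRegDefs)  // physreg defs close to uses
    return L.HasPhysRegDefs < R.HasPhysRegDefs;
  if (L.Priority != R.Priority)              // Sethi-Ullman number
    return L.Priority > R.Priority;
  if (L.Dist != R.Dist)                      // distance to closest successor
    return L.Dist < R.Dist;
  if (L.Scratch != R.Scratch)                // scratch-register estimate
    return L.Scratch > R.Scratch;
  return L.QueueId > R.QueueId;              // deterministic last resort
}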
2627 | |
2628 | // Bottom up |
2629 | bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const { |
2630 | if (int res = checkSpecialNodes(left, right)) |
2631 | return res > 0; |
2632 | |
2633 | return BURRSort(left, right, SPQ); |
2634 | } |
2635 | |
2636 | // Source order, otherwise bottom up. |
2637 | bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const { |
2638 | if (int res = checkSpecialNodes(left, right)) |
2639 | return res > 0; |
2640 | |
2641 | unsigned LOrder = SPQ->getNodeOrdering(left); |
2642 | unsigned ROrder = SPQ->getNodeOrdering(right); |
2643 | |
2644 | // Prefer the node whose non-zero order number is lower; an order number of
2645 | // zero carries no preference.
2646 | if ((LOrder || ROrder) && LOrder != ROrder) |
2647 | return LOrder != 0 && (LOrder < ROrder || ROrder == 0); |
2648 | |
2649 | return BURRSort(left, right, SPQ); |
2650 | } |
2651 | |
2652 | // If the time between now and when the instruction will be ready can cover |
2653 | // the spill code, then avoid adding it to the ready queue. This gives long |
2654 | // stalls highest priority and allows hoisting across calls. It should also |
2655 | // speed up processing the available queue. |
2656 | bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const { |
2657 | static const unsigned ReadyDelay = 3; |
2658 | |
2659 | if (SPQ->MayReduceRegPressure(SU)) return true; |
2660 | |
2661 | if (SU->getHeight() > (CurCycle + ReadyDelay)) return false; |
2662 | |
2663 | if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay) |
2664 | != ScheduleHazardRecognizer::NoHazard) |
2665 | return false; |
2666 | |
2667 | return true; |
2668 | } |
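// Worked example (hypothetical values): with CurCycle == 5 and ReadyDelay
// == 3, an SU of height 9 is held back (9 > 5 + 3) unless it may reduce
// register pressure, while an SU of height 8 becomes ready provided the
// hazard recognizer reports NoHazard when probed ReadyDelay cycles early.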
2669 | |
2670 | // Return true if right should be scheduled with higher priority than left. |
2671 | bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const { |
2672 | if (int res = checkSpecialNodes(left, right)) |
2673 | return res > 0; |
2674 | |
2675 | if (left->isCall || right->isCall) |
2676 | // No way to compute latency of calls. |
2677 | return BURRSort(left, right, SPQ); |
2678 | |
2679 | bool LHigh = SPQ->HighRegPressure(left); |
2680 | bool RHigh = SPQ->HighRegPressure(right); |
2681 | // Avoid causing spills. If register pressure is high, schedule for |
2682 | // register pressure reduction. |
2683 | if (LHigh && !RHigh) { |
2684 | LLVM_DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
2685 | << right->NodeNum << ")\n");
2686 | return true; |
2687 | } |
2688 | else if (!LHigh && RHigh) { |
2689 | LLVM_DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
2690 | << left->NodeNum << ")\n");
2691 | return false; |
2692 | } |
2693 | if (!LHigh && !RHigh) { |
2694 | int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ); |
2695 | if (result != 0) |
2696 | return result > 0; |
2697 | } |
2698 | return BURRSort(left, right, SPQ); |
2699 | } |
2700 | |
2701 | // Schedule as many instructions in each cycle as possible, so don't make an
2702 | // instruction available unless it is ready in the current cycle. |
2703 | bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const { |
2704 | if (SU->getHeight() > CurCycle) return false; |
2705 | |
2706 | if (SPQ->getHazardRec()->getHazardType(SU, 0) |
2707 | != ScheduleHazardRecognizer::NoHazard) |
2708 | return false; |
2709 | |
2710 | return true; |
2711 | } |
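// Worked example: with CurCycle == 7, an SU of height 8 is not yet ready
// (8 > 7); an SU of height 7 or less is ready as long as the hazard
// recognizer reports NoHazard for the current cycle.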
2712 | |
2713 | static bool canEnableCoalescing(SUnit *SU) { |
2714 | unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0; |
2715 | if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) |
2716 | // CopyToReg should be close to its uses to facilitate coalescing and |
2717 | // avoid spilling. |
2718 | return true; |
2719 | |
2720 | if (Opc == TargetOpcode::EXTRACT_SUBREG || |
2721 | Opc == TargetOpcode::SUBREG_TO_REG || |
2722 | Opc == TargetOpcode::INSERT_SUBREG) |
2723 | // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be |
2724 | // close to their uses to facilitate coalescing. |
2725 | return true; |
2726 | |
2727 | if (SU->NumPreds == 0 && SU->NumSuccs != 0) |
2728 | // If SU does not have a register def, schedule it close to its uses |
2729 | // because it does not lengthen any live ranges. |
2730 | return true; |
2731 | |
2732 | return false; |
2733 | } |
2734 | |
2735 | // list-ilp is currently an experimental scheduler that allows various |
2736 | // heuristics to be enabled prior to the normal register reduction logic. |
2737 | bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const { |
2738 | if (int res = checkSpecialNodes(left, right)) |
2739 | return res > 0; |
2740 | |
2741 | if (left->isCall || right->isCall) |
2742 | // No way to compute latency of calls. |
2743 | return BURRSort(left, right, SPQ); |
2744 | |
2745 | unsigned LLiveUses = 0, RLiveUses = 0; |
2746 | int LPDiff = 0, RPDiff = 0; |
2747 | if (!DisableSchedRegPressure || !DisableSchedLiveUses) { |
2748 | LPDiff = SPQ->RegPressureDiff(left, LLiveUses); |
2749 | RPDiff = SPQ->RegPressureDiff(right, RLiveUses); |
2750 | } |
2751 | if (!DisableSchedRegPressure && LPDiff != RPDiff) { |
2752 | LLVM_DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum
2753 | << "): " << LPDiff << " != SU(" << right->NodeNum
2754 | << "): " << RPDiff << "\n");
2755 | return LPDiff > RPDiff; |
2756 | } |
2757 | |
2758 | if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) { |
2759 | bool LReduce = canEnableCoalescing(left); |
2760 | bool RReduce = canEnableCoalescing(right); |
2761 | if (LReduce && !RReduce) return false; |
2762 | if (RReduce && !LReduce) return true; |
2763 | } |
2764 | |
2765 | if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) { |
2766 | LLVM_DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2767 | << " != SU(" << right->NodeNum << "): " << RLiveUses
2768 | << "\n");
2769 | return LLiveUses < RLiveUses; |
2770 | } |
2771 | |
2772 | if (!DisableSchedStalls) { |
2773 | bool LStall = BUHasStall(left, left->getHeight(), SPQ); |
2774 | bool RStall = BUHasStall(right, right->getHeight(), SPQ); |
2775 | if (LStall != RStall) |
2776 | return left->getHeight() > right->getHeight(); |
2777 | } |
2778 | |
2779 | if (!DisableSchedCriticalPath) { |
2780 | int spread = (int)left->getDepth() - (int)right->getDepth(); |
2781 | if (std::abs(spread) > MaxReorderWindow) { |
2782 | LLVM_DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2783 | << left->getDepth() << " != SU(" << right->NodeNum
2784 | << "): " << right->getDepth() << "\n");
2785 | return left->getDepth() < right->getDepth(); |
2786 | } |
2787 | } |
2788 | |
2789 | if (!DisableSchedHeight && left->getHeight() != right->getHeight()) { |
2790 | int spread = (int)left->getHeight() - (int)right->getHeight(); |
2791 | if (std::abs(spread) > MaxReorderWindow) |
2792 | return left->getHeight() > right->getHeight(); |
2793 | } |
2794 | |
2795 | return BURRSort(left, right, SPQ); |
2796 | } |
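// In summary, list-ilp consults, in order: register pressure difference,
// coalescing opportunity (when either side would raise pressure), live-use
// counts, stalls, depth (only outside MaxReorderWindow), and height (only
// outside MaxReorderWindow), each gated by its Disable* flag, before
// deferring to BURRSort.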
2797 | |
2798 | void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) { |
2799 | SUnits = &sunits; |
2800 | // Add pseudo dependency edges for two-address nodes. |
2801 | if (!Disable2AddrHack) |
2802 | AddPseudoTwoAddrDeps(); |
2803 | // Reroute edges to nodes with multiple uses. |
2804 | if (!TracksRegPressure && !SrcOrder) |
2805 | PrescheduleNodesWithMultipleUses(); |
2806 | // Calculate node priorities. |
2807 | CalculateSethiUllmanNumbers(); |
2808 | |
2809 | // For single block loops, mark nodes that look like canonical IV increments. |
2810 | if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) |
2811 | for (SUnit &SU : sunits) |
2812 | initVRegCycle(&SU); |
2813 | } |
2814 | |
2815 | //===----------------------------------------------------------------------===// |
2816 | // Preschedule for Register Pressure |
2817 | //===----------------------------------------------------------------------===// |
2818 | |
2819 | bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) { |
2820 | if (SU->isTwoAddress) { |
2821 | unsigned Opc = SU->getNode()->getMachineOpcode(); |
2822 | const MCInstrDesc &MCID = TII->get(Opc); |
2823 | unsigned NumRes = MCID.getNumDefs(); |
2824 | unsigned NumOps = MCID.getNumOperands() - NumRes; |
2825 | for (unsigned i = 0; i != NumOps; ++i) { |
2826 | if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) { |
2827 | SDNode *DU = SU->getNode()->getOperand(i).getNode(); |
2828 | if (DU->getNodeId() != -1 && |
2829 | Op->OrigNode == &(*SUnits)[DU->getNodeId()]) |
2830 | return true; |
2831 | } |
2832 | } |
2833 | } |
2834 | return false; |
2835 | } |
2836 | |
2837 | /// canClobberReachingPhysRegUse - True if SU would clobber one of its
2838 | /// successors' explicit physregs whose definition can reach DepSU.
2839 | /// i.e. DepSU should not be scheduled above SU. |
2840 | static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU, |
2841 | ScheduleDAGRRList *scheduleDAG, |
2842 | const TargetInstrInfo *TII, |
2843 | const TargetRegisterInfo *TRI) { |
2844 | const MCPhysReg *ImpDefs |
2845 | = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs(); |
2846 | const uint32_t *RegMask = getNodeRegMask(SU->getNode()); |
2847 | if(!ImpDefs && !RegMask) |
2848 | return false; |
2849 | |
2850 | for (const SDep &Succ : SU->Succs) { |
2851 | SUnit *SuccSU = Succ.getSUnit(); |
2852 | for (const SDep &SuccPred : SuccSU->Preds) { |
2853 | if (!SuccPred.isAssignedRegDep()) |
2854 | continue; |
2855 | |
2856 | if (RegMask && |
2857 | MachineOperand::clobbersPhysReg(RegMask, SuccPred.getReg()) && |
2858 | scheduleDAG->IsReachable(DepSU, SuccPred.getSUnit())) |
2859 | return true; |
2860 | |
2861 | if (ImpDefs) |
2862 | for (const MCPhysReg *ImpDef = ImpDefs; *ImpDef; ++ImpDef) |
2863 | // Return true if SU clobbers this physical register use and the |
2864 | // definition of the register reaches DepSU. IsReachable queries
2865 | // a topological forward sort of the DAG (following the successors). |
2866 | if (TRI->regsOverlap(*ImpDef, SuccPred.getReg()) && |
2867 | scheduleDAG->IsReachable(DepSU, SuccPred.getSUnit())) |
2868 | return true; |
2869 | } |
2870 | } |
2871 | return false; |
2872 | } |
2873 | |
2874 | /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's |
2875 | /// physical register defs. |
2876 | static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU, |
2877 | const TargetInstrInfo *TII, |
2878 | const TargetRegisterInfo *TRI) { |
2879 | SDNode *N = SuccSU->getNode(); |
2880 | unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs(); |
2881 | const MCPhysReg *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs(); |
2882 | assert(ImpDefs && "Caller should check hasPhysRegDefs");
2883 | for (const SDNode *SUNode = SU->getNode(); SUNode; |
2884 | SUNode = SUNode->getGluedNode()) { |
2885 | if (!SUNode->isMachineOpcode()) |
2886 | continue; |
2887 | const MCPhysReg *SUImpDefs = |
2888 | TII->get(SUNode->getMachineOpcode()).getImplicitDefs(); |
2889 | const uint32_t *SURegMask = getNodeRegMask(SUNode); |
2890 | if (!SUImpDefs && !SURegMask) |
2891 | continue; |
2892 | for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) { |
2893 | MVT VT = N->getSimpleValueType(i); |
2894 | if (VT == MVT::Glue || VT == MVT::Other) |
2895 | continue; |
2896 | if (!N->hasAnyUseOfValue(i)) |
2897 | continue; |
2898 | unsigned Reg = ImpDefs[i - NumDefs]; |
2899 | if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg)) |
2900 | return true; |
2901 | if (!SUImpDefs) |
2902 | continue; |
2903 | for (;*SUImpDefs; ++SUImpDefs) { |
2904 | unsigned SUReg = *SUImpDefs; |
2905 | if (TRI->regsOverlap(Reg, SUReg)) |
2906 | return true; |
2907 | } |
2908 | } |
2909 | } |
2910 | return false; |
2911 | } |
2912 | |
2913 | /// PrescheduleNodesWithMultipleUses - Nodes with multiple uses |
2914 | /// are not handled well by the general register pressure reduction |
2915 | /// heuristics. When presented with code like this: |
2916 | /// |
2917 | ///    N
2918 | ///   / |
2919 | ///  /  |
2920 | /// U   store
2921 | /// |
2922 | /// ...
2923 | /// |
2924 | /// the heuristics tend to push the store up, but since the |
2925 | /// operand of the store has another use (U), this would lengthen
2926 | /// the live range feeding that other use (the U->N edge).
2927 | /// |
2928 | /// This function transforms code like the above to route U's |
2929 | /// dependence through the store when possible, like this: |
2930 | /// |
2931 | ///   N
2932 | ///   ||
2933 | ///   ||
2934 | ///  store
2935 | ///    |
2936 | ///    U
2937 | ///    |
2938 | ///   ...
2939 | /// |
2940 | /// This results in the store being scheduled immediately |
2941 | /// after N, which shortens the U->N live range, reducing |
2942 | /// register pressure. |
2943 | void RegReductionPQBase::PrescheduleNodesWithMultipleUses() { |
2944 | // Visit all the nodes in topological order, working top-down. |
2945 | for (SUnit &SU : *SUnits) { |
2946 | // For now, only look at nodes with no data successors, such as stores. |
2947 | // These are especially important, due to the heuristics in |
2948 | // getNodePriority for nodes with no data successors. |
2949 | if (SU.NumSuccs != 0) |
2950 | continue; |
2951 | // For now, only look at nodes with exactly one data predecessor. |
2952 | if (SU.NumPreds != 1) |
2953 | continue; |
2954 | // Avoid prescheduling copies to virtual registers, which don't behave |
2955 | // like other nodes from the perspective of scheduling heuristics. |
2956 | if (SDNode *N = SU.getNode()) |
2957 | if (N->getOpcode() == ISD::CopyToReg && |
2958 | Register::isVirtualRegister( |
2959 | cast<RegisterSDNode>(N->getOperand(1))->getReg())) |
2960 | continue; |
2961 | |
2962 | SDNode *PredFrameSetup = nullptr; |
2963 | for (const SDep &Pred : SU.Preds) |
2964 | if (Pred.isCtrl() && Pred.getSUnit()) { |
2965 | // Find a predecessor that is not a data dependence.
2966 | SDNode *PredND = Pred.getSUnit()->getNode(); |
2967 | |
2968 | // If PredND is a FrameSetup node, we should not preschedule this node;
2969 | // otherwise, when scheduling bottom-up, ADJCALLSTACKDOWN and
2970 | // ADJCALLSTACKUP may hold the CallResource too long and prevent other
2971 | // calls from being scheduled. If no other node is available to
2972 | // schedule, the scheduler will try to rename the register by creating
2973 | // a copy to avoid the conflict, which fails because CallResource is
2974 | // not a real physical register.
2975 | if (PredND && PredND->isMachineOpcode() && |
2976 | (PredND->getMachineOpcode() == TII->getCallFrameSetupOpcode())) { |
2977 | PredFrameSetup = PredND; |
2978 | break; |
2979 | } |
2980 | } |
2981 | // Skip nodes that have a FrameSetup parent.
2982 | if (PredFrameSetup != nullptr) |
2983 | continue; |
2984 | |
2985 | // Locate the single data predecessor. |
2986 | SUnit *PredSU = nullptr; |
2987 | for (const SDep &Pred : SU.Preds) |
2988 | if (!Pred.isCtrl()) { |
2989 | PredSU = Pred.getSUnit(); |
2990 | break; |
2991 | } |
2992 | assert(PredSU);
2993 | |
2994 | // Don't rewrite edges that carry physregs, because that requires additional |
2995 | // support infrastructure. |
2996 | if (PredSU->hasPhysRegDefs) |
2997 | continue; |
2998 | // Short-circuit the case where SU is PredSU's only data successor. |
2999 | if (PredSU->NumSuccs == 1) |
3000 | continue; |
3001 | // Avoid prescheduling to copies from virtual registers, which don't behave |
3002 | // like other nodes from the perspective of scheduling heuristics. |
3003 | if (SDNode *N = SU.getNode()) |
3004 | if (N->getOpcode() == ISD::CopyFromReg && |
3005 | Register::isVirtualRegister( |
3006 | cast<RegisterSDNode>(N->getOperand(1))->getReg())) |
3007 | continue; |
3008 | |
3009 | // Perform checks on the successors of PredSU. |
3010 | for (const SDep &PredSucc : PredSU->Succs) { |
3011 | SUnit *PredSuccSU = PredSucc.getSUnit(); |
3012 | if (PredSuccSU == &SU) continue; |
3013 | // If PredSU has another successor with no data successors, for |
3014 | // now don't attempt to choose either over the other. |
3015 | if (PredSuccSU->NumSuccs == 0) |
3016 | goto outer_loop_continue; |
3017 | // Don't break physical register dependencies. |
3018 | if (SU.hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs) |
3019 | if (canClobberPhysRegDefs(PredSuccSU, &SU, TII, TRI)) |
3020 | goto outer_loop_continue; |
3021 | // Don't introduce graph cycles. |
3022 | if (scheduleDAG->IsReachable(&SU, PredSuccSU)) |
3023 | goto outer_loop_continue; |
3024 | } |
3025 | |
3026 | // Ok, the transformation is safe and the heuristics suggest it is |
3027 | // profitable. Update the graph. |
3028 | LLVM_DEBUG(
3029 | dbgs() << " Prescheduling SU #" << SU.NodeNum << " next to PredSU #"
3030 | << PredSU->NodeNum
3031 | << " to guide scheduling in the presence of multiple uses\n");
3032 | for (unsigned i = 0; i != PredSU->Succs.size(); ++i) { |
3033 | SDep Edge = PredSU->Succs[i]; |
3034 | assert(!Edge.isAssignedRegDep());
3035 | SUnit *SuccSU = Edge.getSUnit(); |
3036 | if (SuccSU != &SU) { |
3037 | Edge.setSUnit(PredSU); |
3038 | scheduleDAG->RemovePred(SuccSU, Edge); |
3039 | scheduleDAG->AddPredQueued(&SU, Edge); |
3040 | Edge.setSUnit(&SU); |
3041 | scheduleDAG->AddPredQueued(SuccSU, Edge); |
3042 | --i; |
3043 | } |
3044 | } |
3045 | outer_loop_continue:; |
3046 | } |
3047 | } |
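// Worked trace of the rewrite loop above: if PredSU's successors are
// { SU, U1, U2 }, each edge PredSU->U1 and PredSU->U2 is removed and
// replaced by the pair PredSU->SU and SU->U1 (likewise for U2), routing the
// other uses through SU; the --i compensates for PredSU->Succs shrinking in
// place during iteration.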
3048 | |
3049 | /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses |
3050 | /// it as a def&use operand, add a pseudo control edge from it to the other
3051 | /// node (if it won't create a cycle) so the two-address one will be scheduled |
3052 | /// first (lower in the schedule). If both nodes are two-address, favor the |
3053 | /// one that has a CopyToReg use (more likely to be a loop induction update). |
3054 | /// If both are two-address, but one is commutable while the other is not |
3055 | /// commutable, favor the one that's not commutable. |
3056 | void RegReductionPQBase::AddPseudoTwoAddrDeps() { |
3057 | for (SUnit &SU : *SUnits) { |
3058 | if (!SU.isTwoAddress) |
3059 | continue; |
3060 | |
3061 | SDNode *Node = SU.getNode(); |
3062 | if (!Node || !Node->isMachineOpcode() || SU.getNode()->getGluedNode()) |
3063 | continue; |
3064 | |
3065 | bool isLiveOut = hasOnlyLiveOutUses(&SU); |
3066 | unsigned Opc = Node->getMachineOpcode(); |
3067 | const MCInstrDesc &MCID = TII->get(Opc); |
3068 | unsigned NumRes = MCID.getNumDefs(); |
3069 | unsigned NumOps = MCID.getNumOperands() - NumRes; |
3070 | for (unsigned j = 0; j != NumOps; ++j) { |
3071 | if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1) |
3072 | continue; |
3073 | SDNode *DU = SU.getNode()->getOperand(j).getNode(); |
3074 | if (DU->getNodeId() == -1) |
3075 | continue; |
3076 | const SUnit *DUSU = &(*SUnits)[DU->getNodeId()]; |
3077 | if (!DUSU) |
3078 | continue; |
3079 | for (const SDep &Succ : DUSU->Succs) { |
3080 | if (Succ.isCtrl()) |
3081 | continue; |
3082 | SUnit *SuccSU = Succ.getSUnit(); |
3083 | if (SuccSU == &SU) |
3084 | continue; |
3085 | // Be conservative. Ignore if nodes aren't at roughly the same |
3086 | // depth and height. |
3087 | if (SuccSU->getHeight() < SU.getHeight() && |
3088 | (SU.getHeight() - SuccSU->getHeight()) > 1) |
3089 | continue; |
3090 | // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge |
3091 | // constrains whatever is using the copy, instead of the copy |
3092 | // itself. In the case that the copy is coalesced, this |
3093 | // preserves the intent of the pseudo two-address heuristics.
3094 | while (SuccSU->Succs.size() == 1 && |
3095 | SuccSU->getNode()->isMachineOpcode() && |
3096 | SuccSU->getNode()->getMachineOpcode() == |
3097 | TargetOpcode::COPY_TO_REGCLASS) |
3098 | SuccSU = SuccSU->Succs.front().getSUnit(); |
3099 | // Don't constrain non-instruction nodes. |
3100 | if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode()) |
3101 | continue; |
3102 | // Don't constrain nodes with physical register defs if the |
3103 | // predecessor can clobber them. |
3104 | if (SuccSU->hasPhysRegDefs && SU.hasPhysRegClobbers) { |
3105 | if (canClobberPhysRegDefs(SuccSU, &SU, TII, TRI)) |
3106 | continue; |
3107 | } |
3108 | // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG; |
3109 | // these may be coalesced away. We want them close to their uses. |
3110 | unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode(); |
3111 | if (SuccOpc == TargetOpcode::EXTRACT_SUBREG || |
3112 | SuccOpc == TargetOpcode::INSERT_SUBREG || |
3113 | SuccOpc == TargetOpcode::SUBREG_TO_REG) |
3114 | continue; |
3115 | if (!canClobberReachingPhysRegUse(SuccSU, &SU, scheduleDAG, TII, TRI) && |
3116 | (!canClobber(SuccSU, DUSU) || |
3117 | (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) || |
3118 | (!SU.isCommutable && SuccSU->isCommutable)) && |
3119 | !scheduleDAG->IsReachable(SuccSU, &SU)) { |
3120 | LLVM_DEBUG(dbgs()
3121 | << " Adding a pseudo-two-addr edge from SU #"
3122 | << SU.NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
3123 | scheduleDAG->AddPredQueued(&SU, SDep(SuccSU, SDep::Artificial)); |
3124 | } |
3125 | } |
3126 | } |
3127 | } |
3128 | } |
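// Illustrative example (hypothetical two-address machine nodes):
//
//   t1 = ADD32ri t0, 1    ; tied def&use of t0
//   t2 = SUB32ri t0, 2    ; another use of t0
//
// The artificial edge added above makes the SUB a predecessor of the ADD,
// so the ADD ends up as the last use of t0 in the final schedule; its tied
// operand can then be coalesced with t1 instead of forcing a copy.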
3129 | |
3130 | //===----------------------------------------------------------------------===// |
3131 | // Public Constructor Functions |
3132 | //===----------------------------------------------------------------------===// |
3133 | |
3134 | ScheduleDAGSDNodes * |
3135 | llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, |
3136 | CodeGenOpt::Level OptLevel) { |
3137 | const TargetSubtargetInfo &STI = IS->MF->getSubtarget(); |
3138 | const TargetInstrInfo *TII = STI.getInstrInfo(); |
3139 | const TargetRegisterInfo *TRI = STI.getRegisterInfo(); |
3140 | |
3141 | BURegReductionPriorityQueue *PQ = |
3142 | new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr); |
3143 | ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel); |
3144 | PQ->setScheduleDAG(SD); |
3145 | return SD; |
3146 | } |
3147 | |
3148 | ScheduleDAGSDNodes * |
3149 | llvm::createSourceListDAGScheduler(SelectionDAGISel *IS, |
3150 | CodeGenOpt::Level OptLevel) { |
3151 | const TargetSubtargetInfo &STI = IS->MF->getSubtarget(); |
3152 | const TargetInstrInfo *TII = STI.getInstrInfo(); |
3153 | const TargetRegisterInfo *TRI = STI.getRegisterInfo(); |
3154 | |
3155 | SrcRegReductionPriorityQueue *PQ = |
3156 | new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr); |
3157 | ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel); |
3158 | PQ->setScheduleDAG(SD); |
3159 | return SD; |
3160 | } |
3161 | |
3162 | ScheduleDAGSDNodes * |
3163 | llvm::createHybridListDAGScheduler(SelectionDAGISel *IS, |
3164 | CodeGenOpt::Level OptLevel) { |
3165 | const TargetSubtargetInfo &STI = IS->MF->getSubtarget(); |
3166 | const TargetInstrInfo *TII = STI.getInstrInfo(); |
3167 | const TargetRegisterInfo *TRI = STI.getRegisterInfo(); |
3168 | const TargetLowering *TLI = IS->TLI; |
3169 | |
3170 | HybridBURRPriorityQueue *PQ = |
3171 | new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI); |
3172 | |
3173 | ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel); |
3174 | PQ->setScheduleDAG(SD); |
3175 | return SD; |
3176 | } |
3177 | |
3178 | ScheduleDAGSDNodes * |
3179 | llvm::createILPListDAGScheduler(SelectionDAGISel *IS, |
3180 | CodeGenOpt::Level OptLevel) { |
3181 | const TargetSubtargetInfo &STI = IS->MF->getSubtarget(); |
3182 | const TargetInstrInfo *TII = STI.getInstrInfo(); |
3183 | const TargetRegisterInfo *TRI = STI.getRegisterInfo(); |
3184 | const TargetLowering *TLI = IS->TLI; |
3185 | |
3186 | ILPBURRPriorityQueue *PQ = |
3187 | new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI); |
3188 | ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel); |
3189 | PQ->setScheduleDAG(SD); |
3190 | return SD; |
3191 | } |
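// Usage sketch (assuming the standard RegisterScheduler registrations
// earlier in this file): these factories are normally selected through
// llc's -pre-RA-sched option, e.g.
//
//   llc -pre-RA-sched=list-burr   test.ll   // createBURRListDAGScheduler
//   llc -pre-RA-sched=source      test.ll   // createSourceListDAGScheduler
//   llc -pre-RA-sched=list-hybrid test.ll   // createHybridListDAGScheduler
//   llc -pre-RA-sched=list-ilp    test.ll   // createILPListDAGScheduler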