//===-- Source: llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp --------------===//
// Static analyzer report for this file: line 593, column 7 —
// "Forming reference to null pointer".
1 | //===-- GCNSchedStrategy.cpp - GCN Scheduler Strategy ---------------------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | /// \file | |||
10 | /// This contains a MachineSchedStrategy implementation for maximizing wave | |||
11 | /// occupancy on GCN hardware. | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #include "GCNSchedStrategy.h" | |||
15 | #include "SIMachineFunctionInfo.h" | |||
16 | ||||
#define DEBUG_TYPE "machine-scheduler"
18 | ||||
19 | using namespace llvm; | |||
20 | ||||
21 | GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy( | |||
22 | const MachineSchedContext *C) : | |||
23 | GenericScheduler(C), TargetOccupancy(0), MF(nullptr) { } | |||
24 | ||||
25 | void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) { | |||
26 | GenericScheduler::initialize(DAG); | |||
27 | ||||
28 | const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI); | |||
29 | ||||
30 | MF = &DAG->MF; | |||
31 | ||||
32 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | |||
33 | ||||
34 | // FIXME: This is also necessary, because some passes that run after | |||
35 | // scheduling and before regalloc increase register pressure. | |||
36 | const int ErrorMargin = 3; | |||
37 | ||||
38 | SGPRExcessLimit = Context->RegClassInfo | |||
39 | ->getNumAllocatableRegs(&AMDGPU::SGPR_32RegClass) - ErrorMargin; | |||
40 | VGPRExcessLimit = Context->RegClassInfo | |||
41 | ->getNumAllocatableRegs(&AMDGPU::VGPR_32RegClass) - ErrorMargin; | |||
42 | if (TargetOccupancy) { | |||
43 | SGPRCriticalLimit = ST.getMaxNumSGPRs(TargetOccupancy, true); | |||
44 | VGPRCriticalLimit = ST.getMaxNumVGPRs(TargetOccupancy); | |||
45 | } else { | |||
46 | SGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF, | |||
47 | AMDGPU::RegisterPressureSets::SReg_32); | |||
48 | VGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF, | |||
49 | AMDGPU::RegisterPressureSets::VGPR_32); | |||
50 | } | |||
51 | ||||
52 | SGPRCriticalLimit -= ErrorMargin; | |||
53 | VGPRCriticalLimit -= ErrorMargin; | |||
54 | } | |||
55 | ||||
56 | void GCNMaxOccupancySchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU, | |||
57 | bool AtTop, const RegPressureTracker &RPTracker, | |||
58 | const SIRegisterInfo *SRI, | |||
59 | unsigned SGPRPressure, | |||
60 | unsigned VGPRPressure) { | |||
61 | ||||
62 | Cand.SU = SU; | |||
63 | Cand.AtTop = AtTop; | |||
64 | ||||
65 | // getDownwardPressure() and getUpwardPressure() make temporary changes to | |||
66 | // the tracker, so we need to pass those function a non-const copy. | |||
67 | RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker); | |||
68 | ||||
69 | Pressure.clear(); | |||
70 | MaxPressure.clear(); | |||
71 | ||||
72 | if (AtTop) | |||
73 | TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure); | |||
74 | else { | |||
75 | // FIXME: I think for bottom up scheduling, the register pressure is cached | |||
76 | // and can be retrieved by DAG->getPressureDif(SU). | |||
77 | TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure); | |||
78 | } | |||
79 | ||||
80 | unsigned NewSGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32]; | |||
81 | unsigned NewVGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32]; | |||
82 | ||||
83 | // If two instructions increase the pressure of different register sets | |||
84 | // by the same amount, the generic scheduler will prefer to schedule the | |||
85 | // instruction that increases the set with the least amount of registers, | |||
86 | // which in our case would be SGPRs. This is rarely what we want, so | |||
87 | // when we report excess/critical register pressure, we do it either | |||
88 | // only for VGPRs or only for SGPRs. | |||
89 | ||||
90 | // FIXME: Better heuristics to determine whether to prefer SGPRs or VGPRs. | |||
91 | const unsigned MaxVGPRPressureInc = 16; | |||
92 | bool ShouldTrackVGPRs = VGPRPressure + MaxVGPRPressureInc >= VGPRExcessLimit; | |||
93 | bool ShouldTrackSGPRs = !ShouldTrackVGPRs && SGPRPressure >= SGPRExcessLimit; | |||
94 | ||||
95 | ||||
96 | // FIXME: We have to enter REG-EXCESS before we reach the actual threshold | |||
97 | // to increase the likelihood we don't go over the limits. We should improve | |||
98 | // the analysis to look through dependencies to find the path with the least | |||
99 | // register pressure. | |||
100 | ||||
101 | // We only need to update the RPDelta for instructions that increase register | |||
102 | // pressure. Instructions that decrease or keep reg pressure the same will be | |||
103 | // marked as RegExcess in tryCandidate() when they are compared with | |||
104 | // instructions that increase the register pressure. | |||
105 | if (ShouldTrackVGPRs && NewVGPRPressure >= VGPRExcessLimit) { | |||
106 | Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::VGPR_32); | |||
107 | Cand.RPDelta.Excess.setUnitInc(NewVGPRPressure - VGPRExcessLimit); | |||
108 | } | |||
109 | ||||
110 | if (ShouldTrackSGPRs && NewSGPRPressure >= SGPRExcessLimit) { | |||
111 | Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::SReg_32); | |||
112 | Cand.RPDelta.Excess.setUnitInc(NewSGPRPressure - SGPRExcessLimit); | |||
113 | } | |||
114 | ||||
115 | // Register pressure is considered 'CRITICAL' if it is approaching a value | |||
116 | // that would reduce the wave occupancy for the execution unit. When | |||
117 | // register pressure is 'CRITICAL', increading SGPR and VGPR pressure both | |||
118 | // has the same cost, so we don't need to prefer one over the other. | |||
119 | ||||
120 | int SGPRDelta = NewSGPRPressure - SGPRCriticalLimit; | |||
121 | int VGPRDelta = NewVGPRPressure - VGPRCriticalLimit; | |||
122 | ||||
123 | if (SGPRDelta >= 0 || VGPRDelta >= 0) { | |||
124 | if (SGPRDelta > VGPRDelta) { | |||
125 | Cand.RPDelta.CriticalMax = | |||
126 | PressureChange(AMDGPU::RegisterPressureSets::SReg_32); | |||
127 | Cand.RPDelta.CriticalMax.setUnitInc(SGPRDelta); | |||
128 | } else { | |||
129 | Cand.RPDelta.CriticalMax = | |||
130 | PressureChange(AMDGPU::RegisterPressureSets::VGPR_32); | |||
131 | Cand.RPDelta.CriticalMax.setUnitInc(VGPRDelta); | |||
132 | } | |||
133 | } | |||
134 | } | |||
135 | ||||
136 | // This function is mostly cut and pasted from | |||
137 | // GenericScheduler::pickNodeFromQueue() | |||
138 | void GCNMaxOccupancySchedStrategy::pickNodeFromQueue(SchedBoundary &Zone, | |||
139 | const CandPolicy &ZonePolicy, | |||
140 | const RegPressureTracker &RPTracker, | |||
141 | SchedCandidate &Cand) { | |||
142 | const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI); | |||
143 | ArrayRef<unsigned> Pressure = RPTracker.getRegSetPressureAtPos(); | |||
144 | unsigned SGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32]; | |||
145 | unsigned VGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32]; | |||
146 | ReadyQueue &Q = Zone.Available; | |||
147 | for (SUnit *SU : Q) { | |||
148 | ||||
149 | SchedCandidate TryCand(ZonePolicy); | |||
150 | initCandidate(TryCand, SU, Zone.isTop(), RPTracker, SRI, | |||
151 | SGPRPressure, VGPRPressure); | |||
152 | // Pass SchedBoundary only when comparing nodes from the same boundary. | |||
153 | SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr; | |||
154 | GenericScheduler::tryCandidate(Cand, TryCand, ZoneArg); | |||
155 | if (TryCand.Reason != NoCand) { | |||
156 | // Initialize resource delta if needed in case future heuristics query it. | |||
157 | if (TryCand.ResDelta == SchedResourceDelta()) | |||
158 | TryCand.initResourceDelta(Zone.DAG, SchedModel); | |||
159 | Cand.setBest(TryCand); | |||
160 | LLVM_DEBUG(traceCandidate(Cand))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { traceCandidate(Cand); } } while (false ); | |||
161 | } | |||
162 | } | |||
163 | } | |||
164 | ||||
165 | // This function is mostly cut and pasted from | |||
166 | // GenericScheduler::pickNodeBidirectional() | |||
167 | SUnit *GCNMaxOccupancySchedStrategy::pickNodeBidirectional(bool &IsTopNode) { | |||
168 | // Schedule as far as possible in the direction of no choice. This is most | |||
169 | // efficient, but also provides the best heuristics for CriticalPSets. | |||
170 | if (SUnit *SU = Bot.pickOnlyChoice()) { | |||
171 | IsTopNode = false; | |||
172 | return SU; | |||
173 | } | |||
174 | if (SUnit *SU = Top.pickOnlyChoice()) { | |||
175 | IsTopNode = true; | |||
176 | return SU; | |||
177 | } | |||
178 | // Set the bottom-up policy based on the state of the current bottom zone and | |||
179 | // the instructions outside the zone, including the top zone. | |||
180 | CandPolicy BotPolicy; | |||
181 | setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top); | |||
182 | // Set the top-down policy based on the state of the current top zone and | |||
183 | // the instructions outside the zone, including the bottom zone. | |||
184 | CandPolicy TopPolicy; | |||
185 | setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot); | |||
186 | ||||
187 | // See if BotCand is still valid (because we previously scheduled from Top). | |||
188 | LLVM_DEBUG(dbgs() << "Picking from Bot:\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Picking from Bot:\n" ; } } while (false); | |||
189 | if (!BotCand.isValid() || BotCand.SU->isScheduled || | |||
190 | BotCand.Policy != BotPolicy) { | |||
191 | BotCand.reset(CandPolicy()); | |||
192 | pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand); | |||
193 | assert(BotCand.Reason != NoCand && "failed to find the first candidate")((BotCand.Reason != NoCand && "failed to find the first candidate" ) ? static_cast<void> (0) : __assert_fail ("BotCand.Reason != NoCand && \"failed to find the first candidate\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 193, __PRETTY_FUNCTION__)); | |||
194 | } else { | |||
195 | LLVM_DEBUG(traceCandidate(BotCand))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { traceCandidate(BotCand); } } while ( false); | |||
196 | #ifndef NDEBUG | |||
197 | if (VerifyScheduling) { | |||
198 | SchedCandidate TCand; | |||
199 | TCand.reset(CandPolicy()); | |||
200 | pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand); | |||
201 | assert(TCand.SU == BotCand.SU &&((TCand.SU == BotCand.SU && "Last pick result should correspond to re-picking right now" ) ? static_cast<void> (0) : __assert_fail ("TCand.SU == BotCand.SU && \"Last pick result should correspond to re-picking right now\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 202, __PRETTY_FUNCTION__)) | |||
202 | "Last pick result should correspond to re-picking right now")((TCand.SU == BotCand.SU && "Last pick result should correspond to re-picking right now" ) ? static_cast<void> (0) : __assert_fail ("TCand.SU == BotCand.SU && \"Last pick result should correspond to re-picking right now\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 202, __PRETTY_FUNCTION__)); | |||
203 | } | |||
204 | #endif | |||
205 | } | |||
206 | ||||
207 | // Check if the top Q has a better candidate. | |||
208 | LLVM_DEBUG(dbgs() << "Picking from Top:\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Picking from Top:\n" ; } } while (false); | |||
209 | if (!TopCand.isValid() || TopCand.SU->isScheduled || | |||
210 | TopCand.Policy != TopPolicy) { | |||
211 | TopCand.reset(CandPolicy()); | |||
212 | pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand); | |||
213 | assert(TopCand.Reason != NoCand && "failed to find the first candidate")((TopCand.Reason != NoCand && "failed to find the first candidate" ) ? static_cast<void> (0) : __assert_fail ("TopCand.Reason != NoCand && \"failed to find the first candidate\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 213, __PRETTY_FUNCTION__)); | |||
214 | } else { | |||
215 | LLVM_DEBUG(traceCandidate(TopCand))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { traceCandidate(TopCand); } } while ( false); | |||
216 | #ifndef NDEBUG | |||
217 | if (VerifyScheduling) { | |||
218 | SchedCandidate TCand; | |||
219 | TCand.reset(CandPolicy()); | |||
220 | pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand); | |||
221 | assert(TCand.SU == TopCand.SU &&((TCand.SU == TopCand.SU && "Last pick result should correspond to re-picking right now" ) ? static_cast<void> (0) : __assert_fail ("TCand.SU == TopCand.SU && \"Last pick result should correspond to re-picking right now\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 222, __PRETTY_FUNCTION__)) | |||
222 | "Last pick result should correspond to re-picking right now")((TCand.SU == TopCand.SU && "Last pick result should correspond to re-picking right now" ) ? static_cast<void> (0) : __assert_fail ("TCand.SU == TopCand.SU && \"Last pick result should correspond to re-picking right now\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 222, __PRETTY_FUNCTION__)); | |||
223 | } | |||
224 | #endif | |||
225 | } | |||
226 | ||||
227 | // Pick best from BotCand and TopCand. | |||
228 | LLVM_DEBUG(dbgs() << "Top Cand: "; traceCandidate(TopCand);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Top Cand: "; traceCandidate (TopCand); dbgs() << "Bot Cand: "; traceCandidate(BotCand );; } } while (false) | |||
229 | dbgs() << "Bot Cand: "; traceCandidate(BotCand);)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Top Cand: "; traceCandidate (TopCand); dbgs() << "Bot Cand: "; traceCandidate(BotCand );; } } while (false); | |||
230 | SchedCandidate Cand = BotCand; | |||
231 | TopCand.Reason = NoCand; | |||
232 | GenericScheduler::tryCandidate(Cand, TopCand, nullptr); | |||
233 | if (TopCand.Reason != NoCand) { | |||
234 | Cand.setBest(TopCand); | |||
235 | } | |||
236 | LLVM_DEBUG(dbgs() << "Picking: "; traceCandidate(Cand);)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Picking: "; traceCandidate (Cand);; } } while (false); | |||
237 | ||||
238 | IsTopNode = Cand.AtTop; | |||
239 | return Cand.SU; | |||
240 | } | |||
241 | ||||
242 | // This function is mostly cut and pasted from | |||
243 | // GenericScheduler::pickNode() | |||
244 | SUnit *GCNMaxOccupancySchedStrategy::pickNode(bool &IsTopNode) { | |||
245 | if (DAG->top() == DAG->bottom()) { | |||
246 | assert(Top.Available.empty() && Top.Pending.empty() &&((Top.Available.empty() && Top.Pending.empty() && Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage") ? static_cast<void> (0) : __assert_fail ("Top.Available.empty() && Top.Pending.empty() && Bot.Available.empty() && Bot.Pending.empty() && \"ReadyQ garbage\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 247, __PRETTY_FUNCTION__)) | |||
247 | Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage")((Top.Available.empty() && Top.Pending.empty() && Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage") ? static_cast<void> (0) : __assert_fail ("Top.Available.empty() && Top.Pending.empty() && Bot.Available.empty() && Bot.Pending.empty() && \"ReadyQ garbage\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 247, __PRETTY_FUNCTION__)); | |||
248 | return nullptr; | |||
249 | } | |||
250 | SUnit *SU; | |||
251 | do { | |||
252 | if (RegionPolicy.OnlyTopDown) { | |||
253 | SU = Top.pickOnlyChoice(); | |||
254 | if (!SU) { | |||
255 | CandPolicy NoPolicy; | |||
256 | TopCand.reset(NoPolicy); | |||
257 | pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand); | |||
258 | assert(TopCand.Reason != NoCand && "failed to find a candidate")((TopCand.Reason != NoCand && "failed to find a candidate" ) ? static_cast<void> (0) : __assert_fail ("TopCand.Reason != NoCand && \"failed to find a candidate\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 258, __PRETTY_FUNCTION__)); | |||
259 | SU = TopCand.SU; | |||
260 | } | |||
261 | IsTopNode = true; | |||
262 | } else if (RegionPolicy.OnlyBottomUp) { | |||
263 | SU = Bot.pickOnlyChoice(); | |||
264 | if (!SU) { | |||
265 | CandPolicy NoPolicy; | |||
266 | BotCand.reset(NoPolicy); | |||
267 | pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand); | |||
268 | assert(BotCand.Reason != NoCand && "failed to find a candidate")((BotCand.Reason != NoCand && "failed to find a candidate" ) ? static_cast<void> (0) : __assert_fail ("BotCand.Reason != NoCand && \"failed to find a candidate\"" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 268, __PRETTY_FUNCTION__)); | |||
269 | SU = BotCand.SU; | |||
270 | } | |||
271 | IsTopNode = false; | |||
272 | } else { | |||
273 | SU = pickNodeBidirectional(IsTopNode); | |||
274 | } | |||
275 | } while (SU->isScheduled); | |||
276 | ||||
277 | if (SU->isTopReady()) | |||
278 | Top.removeReady(SU); | |||
279 | if (SU->isBottomReady()) | |||
280 | Bot.removeReady(SU); | |||
281 | ||||
282 | LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr(); } } while (false) | |||
283 | << *SU->getInstr())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr(); } } while (false); | |||
284 | return SU; | |||
285 | } | |||
286 | ||||
287 | GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C, | |||
288 | std::unique_ptr<MachineSchedStrategy> S) : | |||
289 | ScheduleDAGMILive(C, std::move(S)), | |||
290 | ST(MF.getSubtarget<GCNSubtarget>()), | |||
291 | MFI(*MF.getInfo<SIMachineFunctionInfo>()), | |||
292 | StartingOccupancy(MFI.getOccupancy()), | |||
293 | MinOccupancy(StartingOccupancy), Stage(Collect), RegionIdx(0) { | |||
294 | ||||
295 | LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n"; } } while (false); | |||
296 | } | |||
297 | ||||
298 | void GCNScheduleDAGMILive::schedule() { | |||
299 | if (Stage == Collect) { | |||
300 | // Just record regions at the first pass. | |||
301 | Regions.push_back(std::make_pair(RegionBegin, RegionEnd)); | |||
302 | return; | |||
303 | } | |||
304 | ||||
305 | std::vector<MachineInstr*> Unsched; | |||
306 | Unsched.reserve(NumRegionInstrs); | |||
307 | for (auto &I : *this) { | |||
308 | Unsched.push_back(&I); | |||
309 | } | |||
310 | ||||
311 | GCNRegPressure PressureBefore; | |||
312 | if (LIS) { | |||
313 | PressureBefore = Pressure[RegionIdx]; | |||
314 | ||||
315 | LLVM_DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Pressure before scheduling:\nRegion live-ins:" ; GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI ); dbgs() << "Region live-in pressure: "; llvm::getRegPressure (MRI, LiveIns[RegionIdx]).print(dbgs()); dbgs() << "Region register pressure: " ; PressureBefore.print(dbgs()); } } while (false) | |||
316 | GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Pressure before scheduling:\nRegion live-ins:" ; GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI ); dbgs() << "Region live-in pressure: "; llvm::getRegPressure (MRI, LiveIns[RegionIdx]).print(dbgs()); dbgs() << "Region register pressure: " ; PressureBefore.print(dbgs()); } } while (false) | |||
317 | dbgs() << "Region live-in pressure: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Pressure before scheduling:\nRegion live-ins:" ; GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI ); dbgs() << "Region live-in pressure: "; llvm::getRegPressure (MRI, LiveIns[RegionIdx]).print(dbgs()); dbgs() << "Region register pressure: " ; PressureBefore.print(dbgs()); } } while (false) | |||
318 | llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Pressure before scheduling:\nRegion live-ins:" ; GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI ); dbgs() << "Region live-in pressure: "; llvm::getRegPressure (MRI, LiveIns[RegionIdx]).print(dbgs()); dbgs() << "Region register pressure: " ; PressureBefore.print(dbgs()); } } while (false) | |||
319 | dbgs() << "Region register pressure: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Pressure before scheduling:\nRegion live-ins:" ; GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI ); dbgs() << "Region live-in pressure: "; llvm::getRegPressure (MRI, LiveIns[RegionIdx]).print(dbgs()); dbgs() << "Region register pressure: " ; PressureBefore.print(dbgs()); } } while (false) | |||
320 | PressureBefore.print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Pressure before scheduling:\nRegion live-ins:" ; GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI ); dbgs() << "Region live-in pressure: "; llvm::getRegPressure (MRI, LiveIns[RegionIdx]).print(dbgs()); dbgs() << "Region register pressure: " ; PressureBefore.print(dbgs()); } } while (false); | |||
321 | } | |||
322 | ||||
323 | ScheduleDAGMILive::schedule(); | |||
324 | Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd); | |||
325 | RescheduleRegions[RegionIdx] = false; | |||
326 | ||||
327 | if (!LIS) | |||
328 | return; | |||
329 | ||||
330 | // Check the results of scheduling. | |||
331 | GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl; | |||
332 | auto PressureAfter = getRealRegPressure(); | |||
333 | ||||
334 | LLVM_DEBUG(dbgs() << "Pressure after scheduling: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Pressure after scheduling: " ; PressureAfter.print(dbgs()); } } while (false) | |||
335 | PressureAfter.print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Pressure after scheduling: " ; PressureAfter.print(dbgs()); } } while (false); | |||
336 | ||||
337 | if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit && | |||
338 | PressureAfter.getVGPRNum(ST.hasGFX90AInsts()) <= S.VGPRCriticalLimit) { | |||
339 | Pressure[RegionIdx] = PressureAfter; | |||
340 | LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Pressure in desired limits, done.\n" ; } } while (false); | |||
341 | return; | |||
342 | } | |||
343 | unsigned Occ = MFI.getOccupancy(); | |||
344 | unsigned WavesAfter = std::min(Occ, PressureAfter.getOccupancy(ST)); | |||
345 | unsigned WavesBefore = std::min(Occ, PressureBefore.getOccupancy(ST)); | |||
346 | LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBeforedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Occupancy before scheduling: " << WavesBefore << ", after " << WavesAfter << ".\n"; } } while (false) | |||
347 | << ", after " << WavesAfter << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Occupancy before scheduling: " << WavesBefore << ", after " << WavesAfter << ".\n"; } } while (false); | |||
348 | ||||
349 | // We could not keep current target occupancy because of the just scheduled | |||
350 | // region. Record new occupancy for next scheduling cycle. | |||
351 | unsigned NewOccupancy = std::max(WavesAfter, WavesBefore); | |||
352 | // Allow memory bound functions to drop to 4 waves if not limited by an | |||
353 | // attribute. | |||
354 | if (WavesAfter < WavesBefore && WavesAfter < MinOccupancy && | |||
355 | WavesAfter >= MFI.getMinAllowedOccupancy()) { | |||
356 | LLVM_DEBUG(dbgs() << "Function is memory bound, allow occupancy drop up to "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Function is memory bound, allow occupancy drop up to " << MFI.getMinAllowedOccupancy() << " waves\n"; } } while (false) | |||
357 | << MFI.getMinAllowedOccupancy() << " waves\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Function is memory bound, allow occupancy drop up to " << MFI.getMinAllowedOccupancy() << " waves\n"; } } while (false); | |||
358 | NewOccupancy = WavesAfter; | |||
359 | } | |||
360 | if (NewOccupancy < MinOccupancy) { | |||
361 | MinOccupancy = NewOccupancy; | |||
362 | MFI.limitOccupancy(MinOccupancy); | |||
363 | LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Occupancy lowered for the function to " << MinOccupancy << ".\n"; } } while (false) | |||
364 | << MinOccupancy << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Occupancy lowered for the function to " << MinOccupancy << ".\n"; } } while (false); | |||
365 | } | |||
366 | ||||
367 | unsigned MaxVGPRs = ST.getMaxNumVGPRs(MF); | |||
368 | unsigned MaxSGPRs = ST.getMaxNumSGPRs(MF); | |||
369 | if (PressureAfter.getVGPRNum(false) > MaxVGPRs || | |||
370 | PressureAfter.getAGPRNum() > MaxVGPRs || | |||
371 | PressureAfter.getSGPRNum() > MaxSGPRs) | |||
372 | RescheduleRegions[RegionIdx] = true; | |||
373 | ||||
374 | if (WavesAfter >= MinOccupancy) { | |||
375 | if (Stage == UnclusteredReschedule && | |||
376 | !PressureAfter.less(ST, PressureBefore)) { | |||
377 | LLVM_DEBUG(dbgs() << "Unclustered reschedule did not help.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Unclustered reschedule did not help.\n" ; } } while (false); | |||
378 | } else if (WavesAfter > MFI.getMinWavesPerEU() || | |||
379 | PressureAfter.less(ST, PressureBefore) || | |||
380 | !RescheduleRegions[RegionIdx]) { | |||
381 | Pressure[RegionIdx] = PressureAfter; | |||
382 | return; | |||
383 | } else { | |||
384 | LLVM_DEBUG(dbgs() << "New pressure will result in more spilling.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "New pressure will result in more spilling.\n" ; } } while (false); | |||
385 | } | |||
386 | } | |||
387 | ||||
388 | LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Attempting to revert scheduling.\n" ; } } while (false); | |||
389 | RescheduleRegions[RegionIdx] = true; | |||
390 | RegionEnd = RegionBegin; | |||
391 | for (MachineInstr *MI : Unsched) { | |||
392 | if (MI->isDebugInstr()) | |||
393 | continue; | |||
394 | ||||
395 | if (MI->getIterator() != RegionEnd) { | |||
396 | BB->remove(MI); | |||
397 | BB->insert(RegionEnd, MI); | |||
398 | if (!MI->isDebugInstr()) | |||
399 | LIS->handleMove(*MI, true); | |||
400 | } | |||
401 | // Reset read-undef flags and update them later. | |||
402 | for (auto &Op : MI->operands()) | |||
403 | if (Op.isReg() && Op.isDef()) | |||
404 | Op.setIsUndef(false); | |||
405 | RegisterOperands RegOpers; | |||
406 | RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false); | |||
407 | if (!MI->isDebugInstr()) { | |||
408 | if (ShouldTrackLaneMasks) { | |||
409 | // Adjust liveness and add missing dead+read-undef flags. | |||
410 | SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot(); | |||
411 | RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI); | |||
412 | } else { | |||
413 | // Adjust for missing dead-def flags. | |||
414 | RegOpers.detectDeadDefs(*MI, *LIS); | |||
415 | } | |||
416 | } | |||
417 | RegionEnd = MI->getIterator(); | |||
418 | ++RegionEnd; | |||
419 | LLVM_DEBUG(dbgs() << "Scheduling " << *MI)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Scheduling " << *MI; } } while (false); | |||
420 | } | |||
421 | RegionBegin = Unsched.front()->getIterator(); | |||
422 | Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd); | |||
423 | ||||
424 | placeDebugValues(); | |||
425 | } | |||
426 | ||||
427 | GCNRegPressure GCNScheduleDAGMILive::getRealRegPressure() const { | |||
428 | GCNDownwardRPTracker RPTracker(*LIS); | |||
429 | RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]); | |||
430 | return RPTracker.moveMaxPressure(); | |||
431 | } | |||
432 | ||||
433 | void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) { | |||
434 | GCNDownwardRPTracker RPTracker(*LIS); | |||
435 | ||||
436 | // If the block has the only successor then live-ins of that successor are | |||
437 | // live-outs of the current block. We can reuse calculated live set if the | |||
438 | // successor will be sent to scheduling past current block. | |||
439 | const MachineBasicBlock *OnlySucc = nullptr; | |||
440 | if (MBB->succ_size() == 1 && !(*MBB->succ_begin())->empty()) { | |||
441 | SlotIndexes *Ind = LIS->getSlotIndexes(); | |||
442 | if (Ind->getMBBStartIdx(MBB) < Ind->getMBBStartIdx(*MBB->succ_begin())) | |||
443 | OnlySucc = *MBB->succ_begin(); | |||
444 | } | |||
445 | ||||
446 | // Scheduler sends regions from the end of the block upwards. | |||
447 | size_t CurRegion = RegionIdx; | |||
448 | for (size_t E = Regions.size(); CurRegion != E; ++CurRegion) | |||
449 | if (Regions[CurRegion].first->getParent() != MBB) | |||
450 | break; | |||
451 | --CurRegion; | |||
452 | ||||
453 | auto I = MBB->begin(); | |||
454 | auto LiveInIt = MBBLiveIns.find(MBB); | |||
455 | if (LiveInIt != MBBLiveIns.end()) { | |||
456 | auto LiveIn = std::move(LiveInIt->second); | |||
457 | RPTracker.reset(*MBB->begin(), &LiveIn); | |||
458 | MBBLiveIns.erase(LiveInIt); | |||
459 | } else { | |||
460 | auto &Rgn = Regions[CurRegion]; | |||
461 | I = Rgn.first; | |||
462 | auto *NonDbgMI = &*skipDebugInstructionsForward(Rgn.first, Rgn.second); | |||
463 | auto LRS = BBLiveInMap.lookup(NonDbgMI); | |||
464 | #ifdef EXPENSIVE_CHECKS | |||
465 | assert(isEqual(getLiveRegsBefore(*NonDbgMI, *LIS), LRS))((isEqual(getLiveRegsBefore(*NonDbgMI, *LIS), LRS)) ? static_cast <void> (0) : __assert_fail ("isEqual(getLiveRegsBefore(*NonDbgMI, *LIS), LRS)" , "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 465, __PRETTY_FUNCTION__)); | |||
466 | #endif | |||
467 | RPTracker.reset(*I, &LRS); | |||
468 | } | |||
469 | ||||
470 | for ( ; ; ) { | |||
471 | I = RPTracker.getNext(); | |||
472 | ||||
473 | if (Regions[CurRegion].first == I) { | |||
474 | LiveIns[CurRegion] = RPTracker.getLiveRegs(); | |||
475 | RPTracker.clearMaxPressure(); | |||
476 | } | |||
477 | ||||
478 | if (Regions[CurRegion].second == I) { | |||
479 | Pressure[CurRegion] = RPTracker.moveMaxPressure(); | |||
480 | if (CurRegion-- == RegionIdx) | |||
481 | break; | |||
482 | } | |||
483 | RPTracker.advanceToNext(); | |||
484 | RPTracker.advanceBeforeNext(); | |||
485 | } | |||
486 | ||||
487 | if (OnlySucc) { | |||
488 | if (I != MBB->end()) { | |||
489 | RPTracker.advanceToNext(); | |||
490 | RPTracker.advance(MBB->end()); | |||
491 | } | |||
492 | RPTracker.reset(*OnlySucc->begin(), &RPTracker.getLiveRegs()); | |||
493 | RPTracker.advanceBeforeNext(); | |||
494 | MBBLiveIns[OnlySucc] = RPTracker.moveLiveRegs(); | |||
495 | } | |||
496 | } | |||
497 | ||||
498 | DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> | |||
499 | GCNScheduleDAGMILive::getBBLiveInMap() const { | |||
500 | assert(!Regions.empty())((!Regions.empty()) ? static_cast<void> (0) : __assert_fail ("!Regions.empty()", "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp" , 500, __PRETTY_FUNCTION__)); | |||
501 | std::vector<MachineInstr *> BBStarters; | |||
502 | BBStarters.reserve(Regions.size()); | |||
503 | auto I = Regions.rbegin(), E = Regions.rend(); | |||
504 | auto *BB = I->first->getParent(); | |||
505 | do { | |||
506 | auto *MI = &*skipDebugInstructionsForward(I->first, I->second); | |||
507 | BBStarters.push_back(MI); | |||
508 | do { | |||
509 | ++I; | |||
510 | } while (I != E && I->first->getParent() == BB); | |||
511 | } while (I != E); | |||
512 | return getLiveRegMap(BBStarters, false /*After*/, *LIS); | |||
513 | } | |||
514 | ||||
515 | void GCNScheduleDAGMILive::finalizeSchedule() { | |||
516 | GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl; | |||
517 | LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "All regions recorded, starting actual scheduling.\n" ; } } while (false); | |||
| ||||
518 | ||||
519 | LiveIns.resize(Regions.size()); | |||
520 | Pressure.resize(Regions.size()); | |||
521 | RescheduleRegions.resize(Regions.size()); | |||
522 | RescheduleRegions.set(); | |||
523 | ||||
524 | if (!Regions.empty()) | |||
525 | BBLiveInMap = getBBLiveInMap(); | |||
526 | ||||
527 | std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations; | |||
528 | ||||
529 | do { | |||
530 | Stage++; | |||
531 | RegionIdx = 0; | |||
532 | MachineBasicBlock *MBB = nullptr; | |||
533 | ||||
534 | if (Stage > InitialSchedule) { | |||
535 | if (!LIS) | |||
536 | break; | |||
537 | ||||
538 | // Retry function scheduling if we found resulting occupancy and it is | |||
539 | // lower than used for first pass scheduling. This will give more freedom | |||
540 | // to schedule low register pressure blocks. | |||
541 | // Code is partially copied from MachineSchedulerBase::scheduleRegions(). | |||
542 | ||||
543 | if (Stage == UnclusteredReschedule) { | |||
544 | if (RescheduleRegions.none()) | |||
545 | continue; | |||
546 | LLVM_DEBUG(dbgs() <<do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Retrying function scheduling without clustering.\n" ; } } while (false) | |||
547 | "Retrying function scheduling without clustering.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Retrying function scheduling without clustering.\n" ; } } while (false); | |||
548 | } | |||
549 | ||||
550 | if (Stage == ClusteredLowOccupancyReschedule) { | |||
551 | if (StartingOccupancy <= MinOccupancy) | |||
552 | break; | |||
553 | ||||
554 | LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Retrying function scheduling with lowest recorded occupancy " << MinOccupancy << ".\n"; } } while (false) | |||
555 | dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Retrying function scheduling with lowest recorded occupancy " << MinOccupancy << ".\n"; } } while (false) | |||
556 | << "Retrying function scheduling with lowest recorded occupancy "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Retrying function scheduling with lowest recorded occupancy " << MinOccupancy << ".\n"; } } while (false) | |||
557 | << MinOccupancy << ".\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "Retrying function scheduling with lowest recorded occupancy " << MinOccupancy << ".\n"; } } while (false); | |||
558 | ||||
559 | S.setTargetOccupancy(MinOccupancy); | |||
560 | } | |||
561 | } | |||
562 | ||||
563 | if (Stage
| |||
564 | SavedMutations.swap(Mutations); | |||
565 | ||||
566 | for (auto Region : Regions) { | |||
567 | if (Stage == UnclusteredReschedule && !RescheduleRegions[RegionIdx]) { | |||
568 | ++RegionIdx; | |||
569 | continue; | |||
570 | } | |||
571 | ||||
572 | RegionBegin = Region.first; | |||
573 | RegionEnd = Region.second; | |||
574 | ||||
575 | if (RegionBegin->getParent() != MBB) { | |||
576 | if (MBB) finishBlock(); | |||
577 | MBB = RegionBegin->getParent(); | |||
578 | startBlock(MBB); | |||
579 | if (Stage == InitialSchedule) | |||
580 | computeBlockPressure(MBB); | |||
581 | } | |||
582 | ||||
583 | unsigned NumRegionInstrs = std::distance(begin(), end()); | |||
584 | enterRegion(MBB, begin(), end(), NumRegionInstrs); | |||
585 | ||||
586 | // Skip empty scheduling regions (0 or 1 schedulable instructions). | |||
587 | if (begin() == end() || begin() == std::prev(end())) { | |||
588 | exitRegion(); | |||
589 | continue; | |||
590 | } | |||
591 | ||||
592 | LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << "********** MI Scheduling **********\n" ; } } while (false); | |||
593 | LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " " << MBB ->getName() << "\n From: " << *begin() << " To: "; if (RegionEnd != MBB->end()) dbgs() << * RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'; } } while (false) | |||
| ||||
594 | << MBB->getName() << "\n From: " << *begin()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " " << MBB ->getName() << "\n From: " << *begin() << " To: "; if (RegionEnd != MBB->end()) dbgs() << * RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'; } } while (false) | |||
595 | << " To: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " " << MBB ->getName() << "\n From: " << *begin() << " To: "; if (RegionEnd != MBB->end()) dbgs() << * RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'; } } while (false) | |||
596 | if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " " << MBB ->getName() << "\n From: " << *begin() << " To: "; if (RegionEnd != MBB->end()) dbgs() << * RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'; } } while (false) | |||
597 | else dbgs() << "End";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " " << MBB ->getName() << "\n From: " << *begin() << " To: "; if (RegionEnd != MBB->end()) dbgs() << * RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'; } } while (false) | |||
598 | dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("machine-scheduler")) { dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " " << MBB ->getName() << "\n From: " << *begin() << " To: "; if (RegionEnd != MBB->end()) dbgs() << * RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'; } } while (false); | |||
599 | ||||
600 | schedule(); | |||
601 | ||||
602 | exitRegion(); | |||
603 | ++RegionIdx; | |||
604 | } | |||
605 | finishBlock(); | |||
606 | ||||
607 | if (Stage == UnclusteredReschedule) | |||
608 | SavedMutations.swap(Mutations); | |||
609 | } while (Stage != LastStage); | |||
610 | } |