//===-- SILowerControlFlow.cpp - Use predicates for control flow ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %vcc = V_CMP_GT_F32 %vgpr1, %vgpr2
/// %sgpr0 = SI_IF %vcc
/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0
/// %sgpr0 = SI_ELSE %sgpr0
/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0
/// SI_END_CF %sgpr0
///
/// becomes:
///
/// %sgpr0 = S_AND_SAVEEXEC_B64 %vcc  // Save and update the exec mask
/// %sgpr0 = S_XOR_B64 %sgpr0, %exec  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0 // Do the IF block of the branch
///
/// label0:
/// %sgpr0 = S_OR_SAVEEXEC_B64 %sgpr0 // Restore the exec mask for the Then
///                                   // block
/// %exec = S_XOR_B64 %sgpr0, %exec   // Update the exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0 // Do the ELSE block
/// label1:
/// %exec = S_OR_B64 %exec, %sgpr0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

static cl::opt<bool>
RemoveRedundantEndcf("amdgpu-remove-redundant-endcf",
    cl::init(true), cl::ReallyHidden);

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  LiveIntervals *LIS = nullptr;
  LiveVariables *LV = nullptr;
  MachineDominatorTree *MDT = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  SetVector<MachineInstr*> LoweredEndCf;
  DenseSet<Register> LoweredIf;
  SmallSet<MachineBasicBlock *, 4> KillBlocks;

  const TargetRegisterClass *BoolRC = nullptr;
  unsigned AndOpc;
  unsigned OrOpc;
  unsigned XorOpc;
  unsigned MovTermOpc;
  unsigned Andn2TermOpc;
  unsigned XorTermrOpc;
  unsigned OrTermrOpc;
  unsigned OrSaveExecOpc;
  unsigned Exec;

  bool EnableOptimizeEndCf = false;

  bool hasKill(const MachineBasicBlock *Begin, const MachineBasicBlock *End);

  void emitIf(MachineInstr &MI);
  void emitElse(MachineInstr &MI);
  void emitIfBreak(MachineInstr &MI);
  void emitLoop(MachineInstr &MI);

  MachineBasicBlock *emitEndCf(MachineInstr &MI);

  void lowerInitExec(MachineBasicBlock *MBB, MachineInstr &MI);

  void findMaskOperands(MachineInstr &MI, unsigned OpNo,
                        SmallVectorImpl<MachineOperand> &Src) const;

  void combineMasks(MachineInstr &MI);

  bool removeMBBifRedundant(MachineBasicBlock &MBB);

  MachineBasicBlock *process(MachineInstr &MI);

  // Skip to the next instruction, ignoring debug instructions, and trivial
  // block boundaries (blocks that have one (typically fallthrough) successor,
  // and the successor has one predecessor).
  MachineBasicBlock::iterator
  skipIgnoreExecInstsTrivialSucc(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator It) const;

  /// Find the insertion point for a new conditional branch.
  MachineBasicBlock::iterator
  skipToUncondBrOrEnd(MachineBasicBlock &MBB,
                      MachineBasicBlock::iterator I) const {
    assert(I->isTerminator());

    // FIXME: What if we had multiple pre-existing conditional branches?
    MachineBasicBlock::iterator End = MBB.end();
    while (I != End && !I->isUnconditionalBranch())
      ++I;
    return I;
  }

  // Remove redundant SI_END_CF instructions.
  void optimizeEndCf();

public:
  static char ID;

  SILowerControlFlow() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addUsedIfAvailable<LiveIntervals>();
    // Should preserve the same set that TwoAddressInstructions does.
    AU.addUsedIfAvailable<LiveVariables>();
    AU.addPreserved<LiveVariables>();
    AU.addPreservedID(LiveVariablesID);
    AU.addUsedIfAvailable<SlotIndexes>();
    AU.addPreserved<SlotIndexes>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

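// Mark (or unmark) the implicit def of SCC on a newly built mask operation as
// dead. The S_AND/S_XOR instructions created during lowering always carry the
// implicit SCC def as their fourth operand.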
static void setImpSCCDefDead(MachineInstr &MI, bool IsDead) {
  MachineOperand &ImpDefSCC = MI.getOperand(3);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  ImpDefSCC.setIsDead(IsDead);
}

char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;

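// Search all blocks reachable from the successors of Begin, stopping at End,
// for a block recorded in KillBlocks (i.e. one containing a kill or demote).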
bool SILowerControlFlow::hasKill(const MachineBasicBlock *Begin,
                                 const MachineBasicBlock *End) {
  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(Begin->successors());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (MBB == End || !Visited.insert(MBB).second)
      continue;
    if (KillBlocks.contains(MBB))
      return true;

    Worklist.append(MBB->succ_begin(), MBB->succ_end());
  }

  return false;
}

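// A SI_IF is "simple" when the only user of the exec mask it defines is the
// matching SI_END_CF; in that case the full saved exec mask can be returned
// instead of just the cleared bits.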
static bool isSimpleIf(const MachineInstr &MI, const MachineRegisterInfo *MRI) {
  Register SaveExecReg = MI.getOperand(0).getReg();
  auto U = MRI->use_instr_nodbg_begin(SaveExecReg);

  if (U == MRI->use_instr_nodbg_end() ||
      std::next(U) != MRI->use_instr_nodbg_end() ||
      U->getOpcode() != AMDGPU::SI_END_CF)
    return false;

  return true;
}

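// Lower SI_IF. In the wave64 case
//   %dst = SI_IF %cond, %dest_bb
// becomes
//   %copy = COPY $exec
//   %tmp  = S_AND_B64 %copy, %cond
//   %dst  = S_XOR_B64 %tmp, %copy    (skipped for a simple if)
//   $exec = S_MOV_B64_term %tmp
//   S_CBRANCH_EXECZ %dest_bb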
void SILowerControlFlow::emitIf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);
  Register SaveExecReg = MI.getOperand(0).getReg();
  MachineOperand &Cond = MI.getOperand(1);
  assert(Cond.getSubReg() == AMDGPU::NoSubRegister);

  MachineOperand &ImpDefSCC = MI.getOperand(4);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  // If there is only one use of save exec register and that use is SI_END_CF,
  // we can optimize SI_IF by returning the full saved exec mask instead of
  // just cleared bits.
  bool SimpleIf = isSimpleIf(MI, MRI);

  if (SimpleIf) {
    // Check for SI_KILL_*_TERMINATOR on path from if to endif.
    // If there is any such terminator, the simplification is not safe.
    auto UseMI = MRI->use_instr_nodbg_begin(SaveExecReg);
    SimpleIf = !hasKill(MI.getParent(), UseMI->getParent());
  }

  // Add an implicit def of exec to discourage scheduling VALU after this which
  // will interfere with trying to form s_and_saveexec_b64 later.
  Register CopyReg = SimpleIf ? SaveExecReg
                              : MRI->createVirtualRegister(BoolRC);
  MachineInstr *CopyExec =
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
          .addReg(Exec)
          .addReg(Exec, RegState::ImplicitDefine);
  LoweredIf.insert(CopyReg);

  Register Tmp = MRI->createVirtualRegister(BoolRC);

  MachineInstr *And =
      BuildMI(MBB, I, DL, TII->get(AndOpc), Tmp)
          .addReg(CopyReg)
          .add(Cond);
  if (LV)
    LV->replaceKillInstruction(Cond.getReg(), MI, *And);

  setImpSCCDefDead(*And, true);

  MachineInstr *Xor = nullptr;
  if (!SimpleIf) {
    Xor =
        BuildMI(MBB, I, DL, TII->get(XorOpc), SaveExecReg)
            .addReg(Tmp)
            .addReg(CopyReg);
    setImpSCCDefDead(*Xor, ImpDefSCC.isDead());
  }

  // Use a copy that is a terminator to get correct spill code placement with
  // fast regalloc.
  MachineInstr *SetExec =
      BuildMI(MBB, I, DL, TII->get(MovTermOpc), Exec)
          .addReg(Tmp, RegState::Kill);
  if (LV)
    LV->getVarInfo(Tmp).Kills.push_back(SetExec);

  // Skip ahead to the unconditional branch in case there are other terminators
  // present.
  I = skipToUncondBrOrEnd(MBB, I);

  // Insert the S_CBRANCH_EXECZ instruction which will be optimized later
  // during SIRemoveShortExecBranches.
  MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
                            .add(MI.getOperand(2));

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->InsertMachineInstrInMaps(*CopyExec);

  // Replace with and so we don't need to fix the live interval for condition
  // register.
  LIS->ReplaceMachineInstrInMaps(MI, *And);

  if (!SimpleIf)
    LIS->InsertMachineInstrInMaps(*Xor);
  LIS->InsertMachineInstrInMaps(*SetExec);
  LIS->InsertMachineInstrInMaps(*NewBr);

  LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
  MI.eraseFromParent();

  // FIXME: Is there a better way of adjusting the liveness? It shouldn't be
  // hard to add another def here but I'm not sure how to correctly update the
  // valno.
  LIS->removeInterval(SaveExecReg);
  LIS->createAndComputeVirtRegInterval(SaveExecReg);
  if (!SimpleIf)
    LIS->createAndComputeVirtRegInterval(CopyReg);
}

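// Lower SI_ELSE. In the wave64 case
//   %dst = SI_ELSE %src, %dest_bb
// becomes
//   %save = S_OR_SAVEEXEC_B64 %src   (at the top of the block)
//   %dst  = S_AND_B64 $exec, %save
//   $exec = S_XOR_B64_term $exec, %dst
//   S_CBRANCH_EXECZ %dest_bb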
void SILowerControlFlow::emitElse(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  Register DstReg = MI.getOperand(0).getReg();

  MachineBasicBlock::iterator Start = MBB.begin();

  // This must be inserted before phis and any spill code inserted before the
  // else.
  Register SaveReg = MRI->createVirtualRegister(BoolRC);
  MachineInstr *OrSaveExec =
      BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg)
          .add(MI.getOperand(1)); // Saved EXEC
  if (LV)
    LV->replaceKillInstruction(MI.getOperand(1).getReg(), MI, *OrSaveExec);

  MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();

  MachineBasicBlock::iterator ElsePt(MI);

  // This accounts for any modification of the EXEC mask within the block and
  // can be optimized out pre-RA when not required.
  MachineInstr *And = BuildMI(MBB, ElsePt, DL, TII->get(AndOpc), DstReg)
                          .addReg(Exec)
                          .addReg(SaveReg);

  if (LIS)
    LIS->InsertMachineInstrInMaps(*And);

  MachineInstr *Xor =
      BuildMI(MBB, ElsePt, DL, TII->get(XorTermrOpc), Exec)
          .addReg(Exec)
          .addReg(DstReg);

  // Skip ahead to the unconditional branch in case there are other terminators
  // present.
  ElsePt = skipToUncondBrOrEnd(MBB, ElsePt);

  MachineInstr *Branch =
      BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
          .addMBB(DestBB);

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->RemoveMachineInstrFromMaps(MI);
  MI.eraseFromParent();

  LIS->InsertMachineInstrInMaps(*OrSaveExec);
  LIS->InsertMachineInstrInMaps(*Xor);

  LIS->InsertMachineInstrInMaps(*Branch);

  LIS->removeInterval(DstReg);
  LIS->createAndComputeVirtRegInterval(DstReg);
  LIS->createAndComputeVirtRegInterval(SaveReg);

  // Let this be recomputed.
  LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
}

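// Lower SI_IF_BREAK: OR the lanes that want to leave the loop into the
// accumulated break mask,
//   %dst = S_OR_B64 (S_AND_B64 $exec, %cond), %src
// where the AND is skipped when %cond is already masked by exec.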
void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  auto Dst = MI.getOperand(0).getReg();

  // Skip ANDing with exec if the break condition is already masked by exec
  // because it is a V_CMP in the same basic block. (We know the break
  // condition operand was an i1 in IR, so if it is a VALU instruction it must
  // be one with a carry-out.)
  bool SkipAnding = false;
  if (MI.getOperand(1).isReg()) {
    if (MachineInstr *Def = MRI->getUniqueVRegDef(MI.getOperand(1).getReg())) {
      SkipAnding = Def->getParent() == MI.getParent()
          && SIInstrInfo::isVALU(*Def);
    }
  }

  // AND the break condition operand with exec, then OR that into the "loop
  // exit" mask.
  MachineInstr *And = nullptr, *Or = nullptr;
  if (!SkipAnding) {
    Register AndReg = MRI->createVirtualRegister(BoolRC);
    And = BuildMI(MBB, &MI, DL, TII->get(AndOpc), AndReg)
             .addReg(Exec)
             .add(MI.getOperand(1));
    if (LV)
      LV->replaceKillInstruction(MI.getOperand(1).getReg(), MI, *And);
    Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
             .addReg(AndReg)
             .add(MI.getOperand(2));
    if (LIS)
      LIS->createAndComputeVirtRegInterval(AndReg);
  } else {
    Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
             .add(MI.getOperand(1))
             .add(MI.getOperand(2));
    if (LV)
      LV->replaceKillInstruction(MI.getOperand(1).getReg(), MI, *Or);
  }
  if (LV)
    LV->replaceKillInstruction(MI.getOperand(2).getReg(), MI, *Or);

  if (LIS) {
    if (And)
      LIS->InsertMachineInstrInMaps(*And);
    LIS->ReplaceMachineInstrInMaps(MI, *Or);
  }

  MI.eraseFromParent();
}

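// Lower SI_LOOP: clear the lanes recorded in the break mask from exec and
// branch back to the target block while any lane is still active:
//   $exec = S_ANDN2_B64_term $exec, %break
//   S_CBRANCH_EXECNZ %target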
void SILowerControlFlow::emitLoop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *AndN2 =
      BuildMI(MBB, &MI, DL, TII->get(Andn2TermOpc), Exec)
          .addReg(Exec)
          .add(MI.getOperand(0));
  if (LV)
    LV->replaceKillInstruction(MI.getOperand(0).getReg(), MI, *AndN2);

  auto BranchPt = skipToUncondBrOrEnd(MBB, MI.getIterator());
  MachineInstr *Branch =
      BuildMI(MBB, BranchPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
          .add(MI.getOperand(1));

  if (LIS) {
    LIS->ReplaceMachineInstrInMaps(MI, *AndN2);
    LIS->InsertMachineInstrInMaps(*Branch);
  }

  MI.eraseFromParent();
}

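// Scan forward from It for the next instruction that reads EXEC, following
// chains of single-successor blocks; gives up (returns MBB.end()) at a block
// with multiple successors or one that has already been visited.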
MachineBasicBlock::iterator
SILowerControlFlow::skipIgnoreExecInstsTrivialSucc(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {

  SmallSet<const MachineBasicBlock *, 4> Visited;
  MachineBasicBlock *B = &MBB;
  do {
    if (!Visited.insert(B).second)
      return MBB.end();

    auto E = B->end();
    for ( ; It != E; ++It) {
      if (TII->mayReadEXEC(*MRI, *It))
        break;
    }

    if (It != E)
      return It;

    if (B->succ_size() != 1)
      return MBB.end();

    // If there is one trivial successor, advance to the next block.
    MachineBasicBlock *Succ = *B->succ_begin();

    It = Succ->begin();
    B = Succ;
  } while (true);
}

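// Lower SI_END_CF: re-enable the lanes that were disabled for the structured
// control flow region by merging the saved mask back into exec:
//   $exec = S_OR_B64 $exec, %saved
// The block is split beforehand if something before the pseudo redefines the
// saved mask, so that a terminator form of the OR can be used and spills are
// placed correctly.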
MachineBasicBlock *SILowerControlFlow::emitEndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineBasicBlock::iterator InsPt = MBB.begin();

  // If we have instructions that aren't prolog instructions, split the block
  // and emit a terminator instruction. This ensures correct spill placement.
  // FIXME: We should unconditionally split the block here.
  bool NeedBlockSplit = false;
  Register DataReg = MI.getOperand(0).getReg();
  for (MachineBasicBlock::iterator I = InsPt, E = MI.getIterator();
       I != E; ++I) {
    if (I->modifiesRegister(DataReg, TRI)) {
      NeedBlockSplit = true;
      break;
    }
  }

  unsigned Opcode = OrOpc;
  MachineBasicBlock *SplitBB = &MBB;
  if (NeedBlockSplit) {
    SplitBB = MBB.splitAt(MI, /*UpdateLiveIns*/true, LIS);
    if (MDT && SplitBB != &MBB) {
      MachineDomTreeNode *MBBNode = (*MDT)[&MBB];
      SmallVector<MachineDomTreeNode *> Children(MBBNode->begin(),
                                                 MBBNode->end());
      MachineDomTreeNode *SplitBBNode = MDT->addNewBlock(SplitBB, &MBB);
      for (MachineDomTreeNode *Child : Children)
        MDT->changeImmediateDominator(Child, SplitBBNode);
    }
    Opcode = OrTermrOpc;
    InsPt = MI;
  }

  MachineInstr *NewMI =
      BuildMI(MBB, InsPt, DL, TII->get(Opcode), Exec)
          .addReg(Exec)
          .add(MI.getOperand(0));
  if (LV) {
    LV->replaceKillInstruction(DataReg, MI, *NewMI);

    if (SplitBB != &MBB) {
      // Track the set of registers defined in the original block so we don't
      // accidentally add the original block to AliveBlocks. AliveBlocks only
      // includes blocks which are live through, which excludes live outs and
      // local defs.
      DenseSet<Register> DefInOrigBlock;

      for (MachineBasicBlock *BlockPiece : {&MBB, SplitBB}) {
        for (MachineInstr &X : *BlockPiece) {
          for (MachineOperand &Op : X.operands()) {
            if (Op.isReg() && Op.isDef() && Op.getReg().isVirtual())
              DefInOrigBlock.insert(Op.getReg());
          }
        }
      }

      for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
        Register Reg = Register::index2VirtReg(i);
        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);

        if (VI.AliveBlocks.test(MBB.getNumber()))
          VI.AliveBlocks.set(SplitBB->getNumber());
        else {
          for (MachineInstr *Kill : VI.Kills) {
            if (Kill->getParent() == SplitBB && !DefInOrigBlock.contains(Reg))
              VI.AliveBlocks.set(MBB.getNumber());
          }
        }
      }
    }
  }

  LoweredEndCf.insert(NewMI);

  if (LIS)
    LIS->ReplaceMachineInstrInMaps(MI, *NewMI);

  MI.eraseFromParent();

  if (LIS)
    LIS->handleMove(*NewMI);
  return SplitBB;
}

// Collect replacement operands for a logical operation: either the single
// result for exec, or the two source operands if the value came from another
// equivalent operation.
void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
       SmallVectorImpl<MachineOperand> &Src) const {
  MachineOperand &Op = MI.getOperand(OpNo);
  if (!Op.isReg() || !Op.getReg().isVirtual()) {
    Src.push_back(Op);
    return;
  }

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getParent() != MI.getParent() ||
      !(Def->isFullCopy() || (Def->getOpcode() == MI.getOpcode())))
    return;

  // Make sure we do not modify exec between def and use.
  // The copy with an implicitly defined exec inserted earlier is an
  // exception; it does not really modify exec.
  for (auto I = Def->getIterator(); I != MI.getIterator(); ++I)
    if (I->modifiesRegister(AMDGPU::EXEC, TRI) &&
        !(I->isCopy() && I->getOperand(0).getReg() != Exec))
      return;

  for (const auto &SrcOp : Def->explicit_operands())
    if (SrcOp.isReg() && SrcOp.isUse() &&
        (SrcOp.getReg().isVirtual() || SrcOp.getReg() == Exec))
      Src.push_back(SrcOp);
}

// Search and combine pairs of equivalent instructions, like
// S_AND_B64 x, (S_AND_B64 x, y) => S_AND_B64 x, y
// S_OR_B64  x, (S_OR_B64  x, y) => S_OR_B64  x, y
// One of the operands is the exec mask.
void SILowerControlFlow::combineMasks(MachineInstr &MI) {
  assert(MI.getNumExplicitOperands() == 3);
  SmallVector<MachineOperand, 4> Ops;
  unsigned OpToReplace = 1;
  findMaskOperands(MI, 1, Ops);
  if (Ops.size() == 1) OpToReplace = 2; // First operand can be exec or its copy
  findMaskOperands(MI, 2, Ops);
  if (Ops.size() != 3) return;

  unsigned UniqueOpndIdx;
  if (Ops[0].isIdenticalTo(Ops[1])) UniqueOpndIdx = 2;
  else if (Ops[0].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
  else if (Ops[1].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
  else return;

  Register Reg = MI.getOperand(OpToReplace).getReg();
  MI.removeOperand(OpToReplace);
  MI.addOperand(Ops[UniqueOpndIdx]);
  if (MRI->use_empty(Reg))
    MRI->getUniqueVRegDef(Reg)->eraseFromParent();
}

void SILowerControlFlow::optimizeEndCf() {
  // If the only instruction immediately following this END_CF is another
  // END_CF in the only successor, we can avoid emitting the exec mask restore
  // here.
  if (!EnableOptimizeEndCf)
    return;

  for (MachineInstr *MI : reverse(LoweredEndCf)) {
    MachineBasicBlock &MBB = *MI->getParent();
    auto Next =
        skipIgnoreExecInstsTrivialSucc(MBB, std::next(MI->getIterator()));
    if (Next == MBB.end() || !LoweredEndCf.count(&*Next))
      continue;
    // Only skip the inner END_CF if the outer END_CF belongs to a SI_IF.
    // If it belongs to a SI_ELSE, the saved mask has an inverted value.
    Register SavedExec
        = TII->getNamedOperand(*Next, AMDGPU::OpName::src1)->getReg();
    assert(SavedExec.isVirtual() && "Expected saved exec to be src1!");

    const MachineInstr *Def = MRI->getUniqueVRegDef(SavedExec);
    if (Def && LoweredIf.count(SavedExec)) {
      LLVM_DEBUG(dbgs() << "Skip redundant "; MI->dump());
      if (LIS)
        LIS->RemoveMachineInstrFromMaps(*MI);
      Register Reg;
      if (LV)
        Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::src1)->getReg();
      MI->eraseFromParent();
      if (LV)
        LV->recomputeForSingleDefVirtReg(Reg);
      removeMBBifRedundant(MBB);
    }
  }
}

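// Dispatch one control flow pseudo to its lowering routine, then clean up
// redundant exec-mask AND/OR chains the expansion may have exposed. Returns
// the block containing the code that followed the pseudo, which may be a new
// block if lowering split the original one.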
MachineBasicBlock *SILowerControlFlow::process(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineBasicBlock::iterator I(MI);
  MachineInstr *Prev = (I != MBB.begin()) ? &*(std::prev(I)) : nullptr;

  MachineBasicBlock *SplitBB = &MBB;

  switch (MI.getOpcode()) {
  case AMDGPU::SI_IF:
    emitIf(MI);
    break;

  case AMDGPU::SI_ELSE:
    emitElse(MI);
    break;

  case AMDGPU::SI_IF_BREAK:
    emitIfBreak(MI);
    break;

  case AMDGPU::SI_LOOP:
    emitLoop(MI);
    break;

  case AMDGPU::SI_WATERFALL_LOOP:
    MI.setDesc(TII->get(AMDGPU::S_CBRANCH_EXECNZ));
    break;

  case AMDGPU::SI_END_CF:
    SplitBB = emitEndCf(MI);
    break;

  default:
    assert(false && "Attempt to process unsupported instruction");
    break;
  }

  MachineBasicBlock::iterator Next;
  for (I = Prev ? Prev->getIterator() : MBB.begin(); I != MBB.end(); I = Next) {
    Next = std::next(I);
    MachineInstr &MaskMI = *I;
    switch (MaskMI.getOpcode()) {
    case AMDGPU::S_AND_B64:
    case AMDGPU::S_OR_B64:
    case AMDGPU::S_AND_B32:
    case AMDGPU::S_OR_B32:
      // Cleanup bit manipulations on exec mask
      combineMasks(MaskMI);
      break;
    default:
      I = MBB.end();
      break;
    }
  }

  return SplitBB;
}

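// Lower SI_INIT_EXEC (set exec to a literal mask) and SI_INIT_EXEC_FROM_INPUT
// (enable the low N lanes, where N is taken from a bitfield of an SGPR
// argument). Both sequences are inserted at the top of the block, ahead of
// any vector instructions.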
void SILowerControlFlow::lowerInitExec(MachineBasicBlock *MBB,
                                       MachineInstr &MI) {
  MachineFunction &MF = *MBB->getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  bool IsWave32 = ST.isWave32();

  if (MI.getOpcode() == AMDGPU::SI_INIT_EXEC) {
    // This should be before all vector instructions.
    BuildMI(*MBB, MBB->begin(), MI.getDebugLoc(),
            TII->get(IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64), Exec)
        .addImm(MI.getOperand(0).getImm());
    if (LIS)
      LIS->RemoveMachineInstrFromMaps(MI);
    MI.eraseFromParent();
    return;
  }

  // Extract the thread count from an SGPR input and set EXEC accordingly.
  // Since BFM can't shift by 64, handle that case with CMP + CMOV.
  //
  // S_BFE_U32 count, input, {shift, 7}
  // S_BFM_B64 exec, count, 0
  // S_CMP_EQ_U32 count, 64
  // S_CMOV_B64 exec, -1
  Register InputReg = MI.getOperand(0).getReg();
  MachineInstr *FirstMI = &*MBB->begin();
  if (InputReg.isVirtual()) {
    MachineInstr *DefInstr = MRI->getVRegDef(InputReg);
    assert(DefInstr && DefInstr->isCopy());
    if (DefInstr->getParent() == MBB) {
      if (DefInstr != FirstMI) {
        // If the `InputReg` is defined in the current block, we also need to
        // move that instruction to the beginning of the block.
        DefInstr->removeFromParent();
        MBB->insert(FirstMI, DefInstr);
        if (LIS)
          LIS->handleMove(*DefInstr);
      } else {
        // If the first instruction is the definition, move the pointer past
        // it.
        FirstMI = &*std::next(FirstMI->getIterator());
      }
    }
  }

  // Insert instruction sequence at block beginning (before vector operations).
  const DebugLoc DL = MI.getDebugLoc();
  const unsigned WavefrontSize = ST.getWavefrontSize();
  const unsigned Mask = (WavefrontSize << 1) - 1;
  Register CountReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  auto BfeMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_BFE_U32), CountReg)
                   .addReg(InputReg)
                   .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
  if (LV)
    LV->recomputeForSingleDefVirtReg(InputReg);
  auto BfmMI =
      BuildMI(*MBB, FirstMI, DL,
              TII->get(IsWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64), Exec)
          .addReg(CountReg)
          .addImm(0);
  auto CmpMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_CMP_EQ_U32))
                   .addReg(CountReg, RegState::Kill)
                   .addImm(WavefrontSize);
  if (LV)
    LV->getVarInfo(CountReg).Kills.push_back(CmpMI);
  auto CmovMI =
      BuildMI(*MBB, FirstMI, DL,
              TII->get(IsWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
              Exec)
          .addImm(-1);

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->RemoveMachineInstrFromMaps(MI);
  MI.eraseFromParent();

  LIS->InsertMachineInstrInMaps(*BfeMI);
  LIS->InsertMachineInstrInMaps(*BfmMI);
  LIS->InsertMachineInstrInMaps(*CmpMI);
  LIS->InsertMachineInstrInMaps(*CmovMI);

  LIS->removeInterval(InputReg);
  LIS->createAndComputeVirtRegInterval(InputReg);
  LIS->createAndComputeVirtRegInterval(CountReg);
}

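// Erase MBB if it contains nothing but debug instructions and an
// unconditional branch, redirecting its predecessors to its single successor
// and preserving any fallthrough.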
bool SILowerControlFlow::removeMBBifRedundant(MachineBasicBlock &MBB) {
  for (auto &I : MBB.instrs()) {
    if (!I.isDebugInstr() && !I.isUnconditionalBranch())
      return false;
  }

  assert(MBB.succ_size() == 1 && "MBB has more than one successor");

  MachineBasicBlock *Succ = *MBB.succ_begin();
  MachineBasicBlock *FallThrough = nullptr;

  while (!MBB.predecessors().empty()) {
    MachineBasicBlock *P = *MBB.pred_begin();
    if (P->getFallThrough() == &MBB)
      FallThrough = P;
    P->ReplaceUsesOfBlockWith(&MBB, Succ);
  }
  MBB.removeSuccessor(Succ);
  if (LIS) {
    for (auto &I : MBB.instrs())
      LIS->RemoveMachineInstrFromMaps(I);
  }
  if (MDT) {
    // If Succ, the single successor of MBB, is dominated by MBB, MDT needs
    // updating by changing Succ's idom to the one of MBB; otherwise, MBB must
    // be a leaf node in MDT and could be erased directly.
    if (MDT->dominates(&MBB, Succ))
      MDT->changeImmediateDominator(MDT->getNode(Succ),
                                    MDT->getNode(&MBB)->getIDom());
    MDT->eraseNode(&MBB);
  }
  MBB.clear();
  MBB.eraseFromParent();
  if (FallThrough && !FallThrough->isLayoutSuccessor(Succ)) {
    if (!Succ->canFallThrough()) {
      MachineFunction *MF = FallThrough->getParent();
      MachineFunction::iterator FallThroughPos(FallThrough);
      MF->splice(std::next(FallThroughPos), Succ);
    } else
      BuildMI(*FallThrough, FallThrough->end(),
              FallThrough->findBranchDebugLoc(), TII->get(AMDGPU::S_BRANCH))
          .addMBB(Succ);
  }

  return true;
}

bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  EnableOptimizeEndCf =
      RemoveRedundantEndcf && MF.getTarget().getOptLevel() > CodeGenOpt::None;

  // This doesn't actually need LiveIntervals, but we can preserve them.
  LIS = getAnalysisIfAvailable<LiveIntervals>();
  // This doesn't actually need LiveVariables, but we can preserve them.
  LV = getAnalysisIfAvailable<LiveVariables>();
  MDT = getAnalysisIfAvailable<MachineDominatorTree>();
  MRI = &MF.getRegInfo();
  BoolRC = TRI->getBoolRC();

  if (ST.isWave32()) {
    AndOpc = AMDGPU::S_AND_B32;
    OrOpc = AMDGPU::S_OR_B32;
    XorOpc = AMDGPU::S_XOR_B32;
    MovTermOpc = AMDGPU::S_MOV_B32_term;
    Andn2TermOpc = AMDGPU::S_ANDN2_B32_term;
    XorTermrOpc = AMDGPU::S_XOR_B32_term;
    OrTermrOpc = AMDGPU::S_OR_B32_term;
    OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B32;
    Exec = AMDGPU::EXEC_LO;
  } else {
    AndOpc = AMDGPU::S_AND_B64;
    OrOpc = AMDGPU::S_OR_B64;
    XorOpc = AMDGPU::S_XOR_B64;
    MovTermOpc = AMDGPU::S_MOV_B64_term;
    Andn2TermOpc = AMDGPU::S_ANDN2_B64_term;
    XorTermrOpc = AMDGPU::S_XOR_B64_term;
    OrTermrOpc = AMDGPU::S_OR_B64_term;
    OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B64;
    Exec = AMDGPU::EXEC;
  }

  // Compute the set of blocks with kills.
  const bool CanDemote =
      MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS;
  for (auto &MBB : MF) {
    bool IsKillBlock = false;
    for (auto &Term : MBB.terminators()) {
      if (TII->isKillTerminator(Term.getOpcode())) {
        KillBlocks.insert(&MBB);
        IsKillBlock = true;
        break;
      }
    }
    if (CanDemote && !IsKillBlock) {
      for (auto &MI : MBB) {
        if (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1) {
          KillBlocks.insert(&MBB);
          break;
        }
      }
    }
  }

  bool Changed = false;
  MachineFunction::iterator NextBB;
  for (MachineFunction::iterator BI = MF.begin();
       BI != MF.end(); BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock *MBB = &*BI;

    MachineBasicBlock::iterator I, Next,
                                E = MBB->end();
    for (I = MBB->begin(); I != E; I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;
      MachineBasicBlock *SplitMBB = MBB;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_IF:
      case AMDGPU::SI_ELSE:
      case AMDGPU::SI_IF_BREAK:
      case AMDGPU::SI_WATERFALL_LOOP:
      case AMDGPU::SI_LOOP:
      case AMDGPU::SI_END_CF:
        SplitMBB = process(MI);
        Changed = true;
        break;

      // FIXME: find a better place for this
      case AMDGPU::SI_INIT_EXEC:
      case AMDGPU::SI_INIT_EXEC_FROM_INPUT:
        lowerInitExec(MBB, MI);
        if (LIS)
          LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
        Changed = true;
        break;

      default:
        break;
      }

      if (SplitMBB != MBB) {
        MBB = Next->getParent();
        E = MBB->end();
      }
    }
  }

  optimizeEndCf();

  LoweredEndCf.clear();
  LoweredIf.clear();
  KillBlocks.clear();

  return Changed;
}