LLVM 20.0.0git
SILateBranchLowering.cpp
//===-- SILateBranchLowering.cpp - Final preparation of branches ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass mainly lowers early terminate pseudo instructions.
//
//===----------------------------------------------------------------------===//

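// Note (illustrative, not part of the original source): the pass is registered
// under the name given by DEBUG_TYPE below, so its effect on MIR can typically
// be inspected in isolation with something like:
//   llc -mtriple=amdgcn -run-pass=si-late-branch-lowering -o - input.mir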
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-late-branch-lowering"

namespace {

class SILateBranchLowering : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  MachineDominatorTree *MDT = nullptr;

  void expandChainCall(MachineInstr &MI);
  void earlyTerm(MachineInstr &MI, MachineBasicBlock *EarlyExitBlock);

public:
  static char ID;

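  // Wave-size dependent move opcode and exec register, initialized in
  // runOnMachineFunction; used both to clear exec in the shared early-exit
  // block and to install the exec-mask operand of chain-call pseudos.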
  unsigned MovOpc;
  Register ExecReg;

  SILateBranchLowering() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI Final Branch Preparation";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addPreserved<MachineDominatorTreeWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SILateBranchLowering::ID = 0;

INITIALIZE_PASS_BEGIN(SILateBranchLowering, DEBUG_TYPE,
                      "SI insert s_cbranch_execz instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_END(SILateBranchLowering, DEBUG_TYPE,
                    "SI insert s_cbranch_execz instructions", false, false)

char &llvm::SILateBranchLoweringPassID = SILateBranchLowering::ID;

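// Terminate the program in the given block: if this is a pixel shader that the
// hardware expects to export (always the case before GFX10, otherwise only when
// color or depth exports are enabled), emit a final "done" export to a null or
// MRT0/MRTZ target first, then s_endpgm.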
static void generateEndPgm(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, DebugLoc DL,
                           const SIInstrInfo *TII, MachineFunction &MF) {
  const Function &F = MF.getFunction();
  bool IsPS = F.getCallingConv() == CallingConv::AMDGPU_PS;

  // Check if hardware has been configured to expect color or depth exports.
  bool HasColorExports = AMDGPU::getHasColorExport(F);
  bool HasDepthExports = AMDGPU::getHasDepthExport(F);
  bool HasExports = HasColorExports || HasDepthExports;

  // Prior to GFX10, hardware always expects at least one export for PS.
  bool MustExport = !AMDGPU::isGFX10Plus(TII->getSubtarget());

  if (IsPS && (HasExports || MustExport)) {
    // Generate "null export" if hardware is expecting PS to export.
    const GCNSubtarget &ST = MBB.getParent()->getSubtarget<GCNSubtarget>();
    int Target =
        ST.hasNullExportTarget()
            ? AMDGPU::Exp::ET_NULL
            : (HasColorExports ? AMDGPU::Exp::ET_MRT0 : AMDGPU::Exp::ET_MRTZ);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::EXP_DONE))
        .addImm(Target)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addImm(1) // vm
        .addImm(0) // compr
        .addImm(0); // en
  }

  // s_endpgm
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ENDPGM)).addImm(0);
}

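// Split MBB at MI and incrementally update the dominator tree: the newly
// created block takes over MBB's successors, so the corresponding edges are
// moved in a single batch of updates.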
static void splitBlock(MachineBasicBlock &MBB, MachineInstr &MI,
                       MachineDominatorTree *MDT) {
  MachineBasicBlock *SplitBB = MBB.splitAt(MI, /*UpdateLiveIns*/ true);

  // Update dominator tree
  using DomTreeT = DomTreeBase<MachineBasicBlock>;
  SmallVector<DomTreeT::UpdateType, 16> DTUpdates;
  for (MachineBasicBlock *Succ : SplitBB->successors()) {
    DTUpdates.push_back({DomTreeT::Insert, SplitBB, Succ});
    DTUpdates.push_back({DomTreeT::Delete, &MBB, Succ});
  }
  DTUpdates.push_back({DomTreeT::Insert, &MBB, SplitBB});
  MDT->applyUpdates(DTUpdates);
}

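// Expand an SI_CS_CHAIN_TC_W32/W64 pseudo. Illustrative effect (sketch, not
// from the original source), wave64 case: the exec-mask operand (index 3) is
// peeled off into
//   $exec = S_MOV_B64 <exec mask>
// and the remaining pseudo is rewritten in place to SI_TCRETURN.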
void SILateBranchLowering::expandChainCall(MachineInstr &MI) {
  // This is a tail call that needs to be expanded into at least
  // 2 instructions, one for setting EXEC and one for the actual tail call.
  constexpr unsigned ExecIdx = 3;

  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(MovOpc), ExecReg)
      ->addOperand(MI.getOperand(ExecIdx));
  MI.removeOperand(ExecIdx);

  MI.setDesc(TII->get(AMDGPU::SI_TCRETURN));
}

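// Lower one SI_EARLY_TERMINATE_SCC0: branch to the shared early-exit block when
// SCC is clear, splitting the current block if the pseudo is not followed by a
// terminator, and record the new CFG edge in the dominator tree.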
void SILateBranchLowering::earlyTerm(MachineInstr &MI,
                                     MachineBasicBlock *EarlyExitBlock) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc DL = MI.getDebugLoc();

  auto BranchMI = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC0))
                      .addMBB(EarlyExitBlock);
  auto Next = std::next(MI.getIterator());

  if (Next != MBB.end() && !Next->isTerminator())
    splitBlock(MBB, *BranchMI, MDT);

  MBB.addSuccessor(EarlyExitBlock);
  MDT->insertEdge(&MBB, EarlyExitBlock);
}

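// Walk the function once to fold trivial fall-through branches, expand chain
// calls in place, and collect SI_EARLY_TERMINATE_SCC0 / SI_RETURN_TO_EPILOG
// pseudos; the collected pseudos are then lowered with block-level rewrites.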
bool SILateBranchLowering::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();

  MovOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
  ExecReg = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

  SmallVector<MachineInstr *, 4> EarlyTermInstrs;
  SmallVector<MachineInstr *, 1> EpilogInstrs;
  bool MadeChange = false;

  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
      switch (MI.getOpcode()) {
      case AMDGPU::S_BRANCH:
        // Optimize out branches to the next block.
        // This only occurs in -O0 when BranchFolding is not executed.
        if (MBB.isLayoutSuccessor(MI.getOperand(0).getMBB())) {
          assert(&MI == &MBB.back());
          MI.eraseFromParent();
          MadeChange = true;
        }
        break;

      case AMDGPU::SI_CS_CHAIN_TC_W32:
      case AMDGPU::SI_CS_CHAIN_TC_W64:
        expandChainCall(MI);
        MadeChange = true;
        break;

      case AMDGPU::SI_EARLY_TERMINATE_SCC0:
        EarlyTermInstrs.push_back(&MI);
        break;

      case AMDGPU::SI_RETURN_TO_EPILOG:
        EpilogInstrs.push_back(&MI);
        break;

      default:
        break;
      }
    }
  }

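  // Illustrative shape of the shared early-exit block built below (sketch, not
  // from the original source), for a wave64 pixel shader that must export:
  //   s_mov_b64 exec, 0
  //   exp null off, off, off, off done vm
  //   s_endpgm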
  // Lower any early exit branches first
  if (!EarlyTermInstrs.empty()) {
    MachineBasicBlock *EarlyExitBlock = MF.CreateMachineBasicBlock();
    DebugLoc DL;

    MF.insert(MF.end(), EarlyExitBlock);
    BuildMI(*EarlyExitBlock, EarlyExitBlock->end(), DL, TII->get(MovOpc),
            ExecReg)
        .addImm(0);
    generateEndPgm(*EarlyExitBlock, EarlyExitBlock->end(), DL, TII, MF);

    for (MachineInstr *Instr : EarlyTermInstrs) {
      // Early termination in GS does nothing
      if (MF.getFunction().getCallingConv() != CallingConv::AMDGPU_GS)
        earlyTerm(*Instr, EarlyExitBlock);
      Instr->eraseFromParent();
    }

    EarlyTermInstrs.clear();
    MadeChange = true;
  }

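  // SI_RETURN_TO_EPILOG must end the function; any occurrence that is not
  // already the final instruction of the last block is redirected below via an
  // unconditional branch to a shared empty block appended at the end.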
  // Now check return to epilog instructions occur at function end
  if (!EpilogInstrs.empty()) {
    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

    // If there are multiple returns to epilog then all will
    // become jumps to new empty end block.
    if (EpilogInstrs.size() > 1) {
      EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
      MF.insert(MF.end(), EmptyMBBAtEnd);
    }

    for (auto *MI : EpilogInstrs) {
      auto *MBB = MI->getParent();
      if (MBB == &MF.back() && MI == &MBB->back())
        continue;

      // SI_RETURN_TO_EPILOG is not the last instruction.
      // Jump to empty block at function end.
      if (!EmptyMBBAtEnd) {
        EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
        MF.insert(MF.end(), EmptyMBBAtEnd);
      }

      MBB->addSuccessor(EmptyMBBAtEnd);
      MDT->insertEdge(MBB, EmptyMBBAtEnd);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
          .addMBB(EmptyMBBAtEnd);
      MI->eraseFromParent();
      MadeChange = true;
    }

    EpilogInstrs.clear();
  }

  return MadeChange;
}