LLVM 22.0.0git
SILateBranchLowering.cpp
Go to the documentation of this file.
1//===-- SILateBranchLowering.cpp - Final preparation of branches ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This pass mainly lowers early terminate pseudo instructions.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPU.h"
15#include "AMDGPULaneMaskUtils.h"
16#include "GCNSubtarget.h"
22
23using namespace llvm;
24
25#define DEBUG_TYPE "si-late-branch-lowering"
26
27namespace {
28
// Implementation class shared by the legacy and new-PM pass wrappers.
// Lowers late-stage branch pseudos: early-terminate pseudos, chain tail
// calls, and redundant S_BRANCHes (see run()).
29class SILateBranchLowering {
30private:
 // Cached subtarget and instruction/register info used by all lowering
 // helpers below.
31 const GCNSubtarget &ST;
32 const SIInstrInfo *TII;
33 const SIRegisterInfo *TRI;
 // NOTE(review): original lines 34-35 are missing from this capture —
 // presumably the MachineDominatorTree* MDT and lane-mask-constants (LMC)
 // members referenced by the constructor and by expandChainCall(); confirm
 // against the upstream file.
36
 // Expand an SI_CS_CHAIN_TC_* pseudo into EXEC setup + SI_TCRETURN.
37 void expandChainCall(MachineInstr &MI, const GCNSubtarget &ST,
38 bool DynamicVGPR);
 // Insert a conditional branch to the shared early-exit block before MI.
39 void earlyTerm(MachineInstr &MI, MachineBasicBlock *EarlyExitBlock);
40
41public:
42 SILateBranchLowering(const GCNSubtarget &ST, MachineDominatorTree *MDT)
 // NOTE(review): the initializer list is truncated here (original line 44
 // missing) — it continues past MDT(MDT), likely initializing LMC.
43 : ST(ST), TII(ST.getInstrInfo()), TRI(&TII->getRegisterInfo()), MDT(MDT),
45
 // Run the lowering over the whole function; returns true if the MIR
 // was modified.
46 bool run(MachineFunction &MF);
47};
48
// Legacy pass-manager wrapper: fetches the subtarget and dominator tree,
// then delegates all work to SILateBranchLowering::run().
49class SILateBranchLoweringLegacy : public MachineFunctionPass {
50public:
51 static char ID;
52 SILateBranchLoweringLegacy() : MachineFunctionPass(ID) {}
53
54 bool runOnMachineFunction(MachineFunction &MF) override {
55 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
56 auto *MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
57 return SILateBranchLowering(ST, MDT).run(MF);
58 }
59
60 StringRef getPassName() const override {
61 return "SI Final Branch Preparation";
62 }
63
64 void getAnalysisUsage(AnalysisUsage &AU) const override {
 // NOTE(review): original lines 65-67 are missing from this capture —
 // presumably AU.addRequired/addPreserved of the dominator-tree wrapper
 // plus the base-class call; confirm against the upstream file.
68 }
69};
70
71} // end anonymous namespace
72
73char SILateBranchLoweringLegacy::ID = 0;
74
// Register the legacy pass. NOTE(review): original line 77 is missing from
// this capture — likely an INITIALIZE_PASS_DEPENDENCY on the dominator-tree
// wrapper; confirm upstream. (The registered name string predates the pass
// rename and does not match the file's current purpose.)
75INITIALIZE_PASS_BEGIN(SILateBranchLoweringLegacy, DEBUG_TYPE,
76 "SI insert s_cbranch_execz instructions", false, false)
78INITIALIZE_PASS_END(SILateBranchLoweringLegacy, DEBUG_TYPE,
79 "SI insert s_cbranch_execz instructions", false, false)
80
// Exported ID other passes use to reference this one by identity.
81char &llvm::SILateBranchLoweringPassID = SILateBranchLoweringLegacy::ID;
82
// Body of static generateEndPgm(MBB, I, DL, TII, MF) — the signature
// (original lines 83-85) is missing from this capture. Emits the program
// epilogue for an early-exit block: for pixel shaders that the hardware
// expects to export, a "null export" is emitted first, then s_endpgm.
86 const Function &F = MF.getFunction();
87 bool IsPS = F.getCallingConv() == CallingConv::AMDGPU_PS;
88
89 // Check if hardware has been configured to expect color or depth exports.
90 bool HasColorExports = AMDGPU::getHasColorExport(F);
91 bool HasDepthExports = AMDGPU::getHasDepthExport(F);
92 bool HasExports = HasColorExports || HasDepthExports;
93
94 // Prior to GFX10, hardware always expects at least one export for PS.
95 bool MustExport = !AMDGPU::isGFX10Plus(TII->getSubtarget());
96
97 if (IsPS && (HasExports || MustExport)) {
98 // Generate "null export" if hardware is expecting PS to export.
99 const GCNSubtarget &ST = MBB.getParent()->getSubtarget<GCNSubtarget>();
100 int Target =
101 ST.hasNullExportTarget()
 // NOTE(review): original line 102 (the '?' arm) is missing — presumably
 // AMDGPU::Exp::ET_NULL; confirm against the upstream file.
103 : (HasColorExports ? AMDGPU::Exp::ET_MRT0 : AMDGPU::Exp::ET_MRTZ);
 // Export with all four VGPR operands undef and enable-mask 0: nothing
 // meaningful is written, the export only satisfies the hardware contract.
104 BuildMI(MBB, I, DL, TII->get(AMDGPU::EXP_DONE))
105 .addImm(Target)
106 .addReg(AMDGPU::VGPR0, RegState::Undef)
107 .addReg(AMDGPU::VGPR0, RegState::Undef)
108 .addReg(AMDGPU::VGPR0, RegState::Undef)
109 .addReg(AMDGPU::VGPR0, RegState::Undef)
110 .addImm(1) // vm
111 .addImm(0) // compr
112 .addImm(0); // en
113 }
114
115 // s_endpgm
116 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ENDPGM)).addImm(0);
117}
118
// Body of static splitBlock(MBB, MI, MDT) — the signature (original lines
// 119-120) is missing from this capture. Splits MBB after MI and patches
// the dominator tree: MBB's old successor edges move to the new SplitBB,
// and an MBB -> SplitBB edge is inserted.
121 MachineBasicBlock *SplitBB = MBB.splitAt(MI, /*UpdateLiveIns*/ true);
122
123 // Update dominator tree
124 using DomTreeT = DomTreeBase<MachineBasicBlock>;
 // NOTE(review): original line 125 is missing — presumably the declaration
 // of the DTUpdates SmallVector used below; confirm upstream.
126 for (MachineBasicBlock *Succ : SplitBB->successors()) {
127 DTUpdates.push_back({DomTreeT::Insert, SplitBB, Succ});
128 DTUpdates.push_back({DomTreeT::Delete, &MBB, Succ});
129 }
130 DTUpdates.push_back({DomTreeT::Insert, &MBB, SplitBB});
 // Apply all edge changes in one batch update.
131 MDT->applyUpdates(DTUpdates);
132}
133
// Body of static copyOpWithoutRegFlags(MIB, Op) — the signature (original
// lines 134-135) is missing from this capture. Appends Op to MIB; for
// register operands only the register itself is copied (addReg with default
// flags), deliberately dropping flags such as kill — see the comment in
// expandChainCall about not duplicating kill flags across the expansion.
136 if (Op.isReg())
137 MIB.addReg(Op.getReg())
138 else
139 MIB.add(Op);
140}
141
/// Expand an SI_CS_CHAIN_TC_* pseudo into explicit EXEC setup followed by a
/// plain SI_TCRETURN. With \p DynamicVGPR, the expansion additionally tries
/// to reallocate VGPRs and selects between the primary and fallback
/// callee/EXEC operands (S_CSELECT presumably keys off SCC set by
/// S_ALLOC_VGPR — confirm against the ISA docs).
142void SILateBranchLowering::expandChainCall(MachineInstr &MI,
143 const GCNSubtarget &ST,
144 bool DynamicVGPR) {
145 // This is a tail call that needs to be expanded into at least
146 // 2 instructions, one for setting EXEC and one for the actual tail call.
147 int ExecIdx =
148 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::exec);
149 assert(ExecIdx != -1 && "Missing EXEC operand");
150 const DebugLoc &DL = MI.getDebugLoc();
151 if (DynamicVGPR) {
152 // We have 3 extra operands and we need to:
153 // * Try to change the VGPR allocation
154 // * Select the callee based on the result of the reallocation attempt
155 // * Select the EXEC mask based on the result of the reallocation attempt
156 // If any of the register operands of the chain pseudo is used in more than
157 // one of these instructions, we need to make sure that the kill flags
158 // aren't copied along.
159 auto AllocMI =
160 BuildMI(*MI.getParent(), MI, DL, TII->get(AMDGPU::S_ALLOC_VGPR));
161 copyOpWithoutRegFlags(AllocMI,
162 *TII->getNamedOperand(MI, AMDGPU::OpName::numvgprs));
163
 // Callee = alloc succeeded ? src0 : fbcallee. The select writes its
 // result back into the src0 register so SI_TCRETURN below reads it.
164 auto SelectCallee =
165 BuildMI(*MI.getParent(), MI, DL, TII->get(AMDGPU::S_CSELECT_B64))
166 .addDef(TII->getNamedOperand(MI, AMDGPU::OpName::src0)->getReg());
167 copyOpWithoutRegFlags(SelectCallee,
168 *TII->getNamedOperand(MI, AMDGPU::OpName::src0));
169 copyOpWithoutRegFlags(SelectCallee,
170 *TII->getNamedOperand(MI, AMDGPU::OpName::fbcallee));
171
 // EXEC = alloc succeeded ? exec : fbexec, using the wave-size-correct
 // cselect opcode and EXEC register from the lane-mask constants.
172 auto SelectExec = BuildMI(*MI.getParent(), MI, DL, TII->get(LMC.CSelectOpc))
173 .addDef(LMC.ExecReg);
174
175 copyOpWithoutRegFlags(SelectExec,
176 *TII->getNamedOperand(MI, AMDGPU::OpName::exec));
177 copyOpWithoutRegFlags(SelectExec,
178 *TII->getNamedOperand(MI, AMDGPU::OpName::fbexec));
179 } else {
 // Static-VGPR form: simply move the requested mask into EXEC.
180 auto SetExec =
181 BuildMI(*MI.getParent(), MI, DL, TII->get(LMC.MovOpc), LMC.ExecReg);
182 copyOpWithoutRegFlags(SetExec,
183 *TII->getNamedOperand(MI, AMDGPU::OpName::exec));
184 }
185
 // Strip the now-consumed EXEC (and any following) operands off the pseudo,
 // from the back so operand indices stay valid, then rewrite it in place as
 // a regular tail-call return.
186 for (int OpIdx = MI.getNumExplicitOperands() - 1; OpIdx >= ExecIdx; --OpIdx)
187 MI.removeOperand(OpIdx);
188
189 MI.setDesc(TII->get(AMDGPU::SI_TCRETURN));
190}
191
/// Lower one SI_EARLY_TERMINATE_SCC0: branch to \p EarlyExitBlock when SCC
/// is 0. The caller erases \p MI afterwards (see run()).
192void SILateBranchLowering::earlyTerm(MachineInstr &MI,
193 MachineBasicBlock *EarlyExitBlock) {
194 MachineBasicBlock &MBB = *MI.getParent();
195 const DebugLoc DL = MI.getDebugLoc();
196
 // The conditional exit branch is inserted immediately before the pseudo.
197 auto BranchMI = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC0))
198 .addMBB(EarlyExitBlock);
199 auto Next = std::next(MI.getIterator());
200
 // If non-terminator instructions follow, the new branch may not sit in the
 // middle of the block: split the block right after it (splitBlock also
 // keeps the dominator tree up to date).
201 if (Next != MBB.end() && !Next->isTerminator())
202 splitBlock(MBB, *BranchMI, MDT);
203
 // Record the new CFG edge to the early-exit block in both the successor
 // list and the dominator tree.
204 MBB.addSuccessor(EarlyExitBlock);
205 MDT->insertEdge(&MBB, EarlyExitBlock);
206}
207
208PreservedAnalyses
219
/// Main driver: one scan over all blocks collecting/lowering the pseudos,
/// then two fix-up phases (early-terminate lowering, return-to-epilog
/// canonicalization). Returns true if any MIR was changed.
220bool SILateBranchLowering::run(MachineFunction &MF) {
221 SmallVector<MachineInstr *, 4> EarlyTermInstrs;
 // NOTE(review): original line 222 is missing from this capture —
 // presumably the SmallVector declaration of EpilogInstrs used below;
 // confirm upstream.
223 bool MadeChange = false;
224
225 for (MachineBasicBlock &MBB : MF) {
 // NOTE(review): original line 226 is missing — presumably the inner loop
 // over MBB's instructions (likely via make_early_inc_range so erasures
 // below are safe); confirm upstream.
227 switch (MI.getOpcode()) {
228 case AMDGPU::S_BRANCH:
229 // Optimize out branches to the next block.
230 // This only occurs in -O0 when BranchFolding is not executed.
231 if (MBB.isLayoutSuccessor(MI.getOperand(0).getMBB())) {
232 assert(&MI == &MBB.back());
233 MI.eraseFromParent();
234 MadeChange = true;
235 }
236 break;
237
 // Chain tail calls are expanded in place; the _DVGPR variants request a
 // dynamic VGPR reallocation as part of the call.
238 case AMDGPU::SI_CS_CHAIN_TC_W32:
239 case AMDGPU::SI_CS_CHAIN_TC_W64:
240 expandChainCall(MI, ST, /*DynamicVGPR=*/false);
241 MadeChange = true;
242 break;
243 case AMDGPU::SI_CS_CHAIN_TC_W32_DVGPR:
244 case AMDGPU::SI_CS_CHAIN_TC_W64_DVGPR:
245 expandChainCall(MI, ST, /*DynamicVGPR=*/true);
246 MadeChange = true;
247 break;
248
 // These two are only collected here; lowering happens after the scan so
 // a single shared exit/end block can serve all of them.
249 case AMDGPU::SI_EARLY_TERMINATE_SCC0:
250 EarlyTermInstrs.push_back(&MI);
251 break;
252
253 case AMDGPU::SI_RETURN_TO_EPILOG:
254 EpilogInstrs.push_back(&MI);
255 break;
256
257 default:
258 break;
259 }
260 }
261 }
262
263 // Lower any early exit branches first
264 if (!EarlyTermInstrs.empty()) {
 // One shared exit block for the whole function: EXEC := 0, then the
 // end-of-program sequence (null export if needed + s_endpgm).
265 MachineBasicBlock *EarlyExitBlock = MF.CreateMachineBasicBlock();
266 DebugLoc DL;
267
268 MF.insert(MF.end(), EarlyExitBlock);
269 BuildMI(*EarlyExitBlock, EarlyExitBlock->end(), DL, TII->get(LMC.MovOpc),
270 LMC.ExecReg)
271 .addImm(0);
272 generateEndPgm(*EarlyExitBlock, EarlyExitBlock->end(), DL, TII, MF);
273
274 for (MachineInstr *Instr : EarlyTermInstrs) {
275 // Early termination in GS does nothing
276 if (MF.getFunction().getCallingConv() != CallingConv::AMDGPU_GS)
277 earlyTerm(*Instr, EarlyExitBlock);
 // The pseudo is erased either way, even in the GS no-op case.
278 Instr->eraseFromParent();
279 }
280
281 EarlyTermInstrs.clear();
282 MadeChange = true;
283 }
284
285 // Now check return to epilog instructions occur at function end
286 if (!EpilogInstrs.empty()) {
287 MachineBasicBlock *EmptyMBBAtEnd = nullptr;
288 assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());
289
290 // If there are multiple returns to epilog then all will
291 // become jumps to new empty end block.
292 if (EpilogInstrs.size() > 1) {
293 EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
294 MF.insert(MF.end(), EmptyMBBAtEnd);
295 }
296
297 for (auto *MI : EpilogInstrs) {
298 auto *MBB = MI->getParent();
 // Already the last instruction of the last block: nothing to do.
299 if (MBB == &MF.back() && MI == &MBB->back())
300 continue;
301
302 // SI_RETURN_TO_EPILOG is not the last instruction.
303 // Jump to empty block at function end.
304 if (!EmptyMBBAtEnd) {
305 EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
306 MF.insert(MF.end(), EmptyMBBAtEnd);
307 }
308
 // Rewrite the pseudo as an unconditional jump to the shared end block,
 // keeping successor list and dominator tree consistent.
309 MBB->addSuccessor(EmptyMBBAtEnd);
310 MDT->insertEdge(MBB, EmptyMBBAtEnd);
311 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
312 .addMBB(EmptyMBBAtEnd);
313 MI->eraseFromParent();
314 MadeChange = true;
315 }
316
317 EpilogInstrs.clear();
318 }
319
320 return MadeChange;
321}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
aarch64 promote const
Provides AMDGPU specific target descriptions.
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
AMD GCN specific subclass of TargetSubtarget.
#define DEBUG_TYPE
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
Register const TargetRegisterInfo * TRI
MachineInstr unsigned OpIdx
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
static void generateEndPgm(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, const SIInstrInfo *TII, MachineFunction &MF)
static void copyOpWithoutRegFlags(MachineInstrBuilder &MIB, MachineOperand &Op)
static void splitBlock(MachineBasicBlock &MBB, MachineInstr &MI, MachineDominatorTree *MDT)
static const LaneMaskConstants & get(const GCNSubtarget &ST)
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
A debug info location.
Definition DebugLoc.h:124
void applyUpdates(ArrayRef< UpdateType > Updates)
Inform the dominator tree about a sequence of CFG edge insertions and deletions and perform a batch u...
void insertEdge(NodeT *From, NodeT *To)
Inform the dominator tree about a CFG edge insertion and update the tree.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator_range< succ_iterator > successors()
Analysis pass which computes a MachineDominatorTree.
Analysis pass which computes a MachineDominatorTree.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Target - Wrapper for Target specific information.
bool getHasColorExport(const Function &F)
bool getHasDepthExport(const Function &F)
bool isGFX10Plus(const MCSubtargetInfo &STI)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ Undef
Value of the register doesn't matter.
NodeAddr< InstrNode * > Instr
Definition RDFGraph.h:389
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
char & SILateBranchLoweringPassID
DominatorTreeBase< T, false > DomTreeBase
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
DWARFExpression::Operation Op