AMDGPULowerIntrinsics.cpp
//===-- AMDGPULowerIntrinsics.cpp -------------------------------------------=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Lower intrinsics that would otherwise require separate handling in both
// SelectionDAG and GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/InitializePasses.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#define DEBUG_TYPE "amdgpu-lower-intrinsics"

using namespace llvm;

namespace {

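// Shared implementation of the lowering, invoked by both the legacy and new
// pass manager entry points defined below.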
class AMDGPULowerIntrinsicsImpl {
public:
  Module &M;
  const AMDGPUTargetMachine &TM;

  AMDGPULowerIntrinsicsImpl(Module &M, const AMDGPUTargetMachine &TM)
      : M(M), TM(TM) {}

  bool run();

private:
  bool visitBarrier(IntrinsicInst &I);
};

class AMDGPULowerIntrinsicsLegacy : public ModulePass {
public:
  static char ID;

  AMDGPULowerIntrinsicsLegacy() : ModulePass(ID) {}

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
  }
};

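// Apply Callback to each call of the given intrinsic declaration. Iterating
// with make_early_inc_range lets the callback erase the visited call without
// invalidating the loop.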
template <class T> static void forEachCall(Function &Intrin, T Callback) {
  for (User *U : make_early_inc_range(Intrin.users())) {
    if (auto *CI = dyn_cast<IntrinsicInst>(U))
      Callback(CI);
  }
}

} // anonymous namespace

bool AMDGPULowerIntrinsicsImpl::run() {
  bool Changed = false;

  for (Function &F : M) {
    switch (F.getIntrinsicID()) {
    default:
      continue;
    case Intrinsic::amdgcn_s_barrier:
    case Intrinsic::amdgcn_s_barrier_signal:
    case Intrinsic::amdgcn_s_barrier_signal_isfirst:
    case Intrinsic::amdgcn_s_barrier_wait:
    case Intrinsic::amdgcn_s_cluster_barrier:
      forEachCall(F, [&](IntrinsicInst *II) { Changed |= visitBarrier(*II); });
      break;
    }
  }

  return Changed;
}

// Optimize barriers and lower s_(cluster_)barrier to a sequence of split
// barrier intrinsics.
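//
// For example, on a subtarget with split barriers, a plain workgroup barrier
//   call void @llvm.amdgcn.s.barrier()
// becomes (illustrative sketch; see the lowering at the end of this function):
//   call void @llvm.amdgcn.s.barrier.signal(i32 <workgroup barrier id>)
//   call void @llvm.amdgcn.s.barrier.wait(i16 <workgroup barrier id>)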
bool AMDGPULowerIntrinsicsImpl::visitBarrier(IntrinsicInst &I) {
  assert(I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier ||
         I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier_signal ||
         I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier_signal_isfirst ||
         I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier_wait ||
         I.getIntrinsicID() == Intrinsic::amdgcn_s_cluster_barrier);

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(*I.getFunction());
  bool IsSingleWaveWG = false;

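  // When optimizing, determine whether the workgroup is guaranteed to fit in
  // a single wave. If so, there is no cross-wave synchronization to perform,
  // and workgroup-scope barriers can be relaxed below.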
  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    unsigned WGMaxSize = ST.getFlatWorkGroupSizes(*I.getFunction()).second;
    IsSingleWaveWG = WGMaxSize <= ST.getWavefrontSize();
  }

  IRBuilder<> B(&I);

  // Lower the s_cluster_barrier intrinsic first. There is no corresponding
  // hardware instruction in any subtarget.
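  //
  // Shape of the expansion (sketch): synchronize the workgroup, then have
  // exactly one wave per workgroup signal the cluster barrier while every
  // wave waits on it:
  //   <workgroup barrier>
  //   if (this wave signaled the workgroup barrier first)
  //     s_barrier_signal(CLUSTER)
  //   s_barrier_wait(CLUSTER)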
  if (I.getIntrinsicID() == Intrinsic::amdgcn_s_cluster_barrier) {
    // The default cluster barrier expects one signal per workgroup. So we need
    // a workgroup barrier first.
    if (IsSingleWaveWG) {
      B.CreateIntrinsic(B.getVoidTy(), Intrinsic::amdgcn_wave_barrier, {});
    } else {
      Value *BarrierID_32 = B.getInt32(AMDGPU::Barrier::WORKGROUP);
      Value *BarrierID_16 = B.getInt16(AMDGPU::Barrier::WORKGROUP);
      Value *IsFirst = B.CreateIntrinsic(
          B.getInt1Ty(), Intrinsic::amdgcn_s_barrier_signal_isfirst,
          {BarrierID_32});
      B.CreateIntrinsic(B.getVoidTy(), Intrinsic::amdgcn_s_barrier_wait,
                        {BarrierID_16});

      Instruction *ThenTerm =
          SplitBlockAndInsertIfThen(IsFirst, I.getIterator(), false);
      B.SetInsertPoint(ThenTerm);
    }

    // Now we can signal the cluster barrier from a single wave and wait for the
    // barrier in all waves.
    Value *BarrierID_32 = B.getInt32(AMDGPU::Barrier::CLUSTER);
    Value *BarrierID_16 = B.getInt16(AMDGPU::Barrier::CLUSTER);
    B.CreateIntrinsic(B.getVoidTy(), Intrinsic::amdgcn_s_barrier_signal,
                      {BarrierID_32});

    B.SetInsertPoint(&I);
    B.CreateIntrinsic(B.getVoidTy(), Intrinsic::amdgcn_s_barrier_wait,
                      {BarrierID_16});

    I.eraseFromParent();
    return true;
  }

  bool IsWorkgroupScope = false;

  if (I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier_wait ||
      I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier_signal ||
      I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier_signal_isfirst) {
    int BarrierID = cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
    // Named barriers are workgroup scope as well; the NAMED_BARRIER_FIRST/
    // NAMED_BARRIER_LAST bounds are assumed names for the named-barrier ID
    // range.
    if (BarrierID == AMDGPU::Barrier::TRAP ||
        BarrierID == AMDGPU::Barrier::WORKGROUP ||
        (BarrierID >= AMDGPU::Barrier::NAMED_BARRIER_FIRST &&
         BarrierID <= AMDGPU::Barrier::NAMED_BARRIER_LAST))
      IsWorkgroupScope = true;
  } else {
    assert(I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier);
    IsWorkgroupScope = true;
  }

  if (IsWorkgroupScope && IsSingleWaveWG) {
    // Down-grade waits, remove split signals.
    if (I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier ||
        I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier_wait) {
      B.CreateIntrinsic(B.getVoidTy(), Intrinsic::amdgcn_wave_barrier, {});
    } else if (I.getIntrinsicID() ==
               Intrinsic::amdgcn_s_barrier_signal_isfirst) {
      // If we're the only wave of the workgroup, we're always first.
      I.replaceAllUsesWith(B.getInt1(true));
    }
    I.eraseFromParent();
    return true;
  }

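  // Note the operand widths used by the split-barrier intrinsics: the signal
  // intrinsics take the barrier ID as an i32, while s_barrier_wait takes an
  // i16, so a constant of each width is materialized per barrier.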
  if (I.getIntrinsicID() == Intrinsic::amdgcn_s_barrier &&
      ST.hasSplitBarriers()) {
    // Lower to split barriers.
    Value *BarrierID_32 = B.getInt32(AMDGPU::Barrier::WORKGROUP);
    Value *BarrierID_16 = B.getInt16(AMDGPU::Barrier::WORKGROUP);
    B.CreateIntrinsic(B.getVoidTy(), Intrinsic::amdgcn_s_barrier_signal,
                      {BarrierID_32});
    B.CreateIntrinsic(B.getVoidTy(), Intrinsic::amdgcn_s_barrier_wait,
                      {BarrierID_16});
    I.eraseFromParent();
    return true;
  }

  return false;
}

PreservedAnalyses AMDGPULowerIntrinsicsPass::run(Module &M,
                                                 ModuleAnalysisManager &MAM) {
  AMDGPULowerIntrinsicsImpl Impl(M, TM);
  if (!Impl.run())
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}

bool AMDGPULowerIntrinsicsLegacy::runOnModule(Module &M) {
  auto &TPC = getAnalysis<TargetPassConfig>();
  const AMDGPUTargetMachine &TM = TPC.getTM<AMDGPUTargetMachine>();

  AMDGPULowerIntrinsicsImpl Impl(M, TM);
  return Impl.run();
}

#define PASS_DESC "AMDGPU lower intrinsics"
INITIALIZE_PASS_BEGIN(AMDGPULowerIntrinsicsLegacy, DEBUG_TYPE, PASS_DESC, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPULowerIntrinsicsLegacy, DEBUG_TYPE, PASS_DESC, false,
                    false)

char AMDGPULowerIntrinsicsLegacy::ID = 0;

ModulePass *llvm::createAMDGPULowerIntrinsicsLegacyPass() {
  return new AMDGPULowerIntrinsicsLegacy;
}