Bug Summary

File: build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
Warning: line 458, column 21
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUPromoteAlloca.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-10-03-140002-15933-1 -x c++ /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

1//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass eliminates allocas by either converting them into vectors or
10// by migrating them to local address space.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPU.h"
15#include "GCNSubtarget.h"
16#include "Utils/AMDGPUBaseInfo.h"
17#include "llvm/Analysis/CaptureTracking.h"
18#include "llvm/Analysis/ValueTracking.h"
19#include "llvm/CodeGen/TargetPassConfig.h"
20#include "llvm/IR/IRBuilder.h"
21#include "llvm/IR/IntrinsicInst.h"
22#include "llvm/IR/IntrinsicsAMDGPU.h"
23#include "llvm/IR/IntrinsicsR600.h"
24#include "llvm/Pass.h"
25#include "llvm/Target/TargetMachine.h"
26
#define DEBUG_TYPE "amdgpu-promote-alloca"
28
29using namespace llvm;
30
31namespace {
32
33static cl::opt<bool> DisablePromoteAllocaToVector(
34 "disable-promote-alloca-to-vector",
35 cl::desc("Disable promote alloca to vector"),
36 cl::init(false));
37
38static cl::opt<bool> DisablePromoteAllocaToLDS(
39 "disable-promote-alloca-to-lds",
40 cl::desc("Disable promote alloca to LDS"),
41 cl::init(false));
42
43static cl::opt<unsigned> PromoteAllocaToVectorLimit(
44 "amdgpu-promote-alloca-to-vector-limit",
45 cl::desc("Maximum byte size to consider promote alloca to vector"),
46 cl::init(0));
47
48// FIXME: This can create globals so should be a module pass.
49class AMDGPUPromoteAlloca : public FunctionPass {
50public:
51 static char ID;
52
53 AMDGPUPromoteAlloca() : FunctionPass(ID) {}
54
55 bool runOnFunction(Function &F) override;
56
57 StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
58
59 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
60
61 void getAnalysisUsage(AnalysisUsage &AU) const override {
62 AU.setPreservesCFG();
63 FunctionPass::getAnalysisUsage(AU);
64 }
65};
66
67class AMDGPUPromoteAllocaImpl {
68private:
69 const TargetMachine &TM;
70 Module *Mod = nullptr;
71 const DataLayout *DL = nullptr;
72
73 // FIXME: This should be per-kernel.
74 uint32_t LocalMemLimit = 0;
75 uint32_t CurrentLocalMemUsage = 0;
76 unsigned MaxVGPRs;
77
78 bool IsAMDGCN = false;
79 bool IsAMDHSA = false;
80
81 std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
82 Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
83
84 /// BaseAlloca is the alloca root the search started from.
85 /// Val may be that alloca or a recursive user of it.
86 bool collectUsesWithPtrTypes(Value *BaseAlloca,
87 Value *Val,
88 std::vector<Value*> &WorkList) const;
89
90 /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
91 /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
92 /// Returns true if both operands are derived from the same alloca. Val should
93 /// be the same value as one of the input operands of UseInst.
94 bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
95 Instruction *UseInst,
96 int OpIdx0, int OpIdx1) const;
97
98 /// Check whether we have enough local memory for promotion.
99 bool hasSufficientLocalMem(const Function &F);
100
101 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
102
103public:
104 AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
105 bool run(Function &F);
106};
107
108class AMDGPUPromoteAllocaToVector : public FunctionPass {
109public:
110 static char ID;
111
112 AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
113
114 bool runOnFunction(Function &F) override;
115
116 StringRef getPassName() const override {
117 return "AMDGPU Promote Alloca to vector";
118 }
119
120 void getAnalysisUsage(AnalysisUsage &AU) const override {
121 AU.setPreservesCFG();
122 FunctionPass::getAnalysisUsage(AU);
123 }
124};
125
126} // end anonymous namespace
127
128char AMDGPUPromoteAlloca::ID = 0;
129char AMDGPUPromoteAllocaToVector::ID = 0;
130
131INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,static void *initializeAMDGPUPromoteAllocaPassOnce(PassRegistry
&Registry) {
132 "AMDGPU promote alloca to vector or LDS", false, false)static void *initializeAMDGPUPromoteAllocaPassOnce(PassRegistry
&Registry) {
133// Move LDS uses from functions to kernels before promote alloca for accurate
134// estimation of LDS available
135INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)initializeAMDGPULowerModuleLDSPass(Registry);
136INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,PassInfo *PI = new PassInfo( "AMDGPU promote alloca to vector or LDS"
, "amdgpu-promote-alloca", &AMDGPUPromoteAlloca::ID, PassInfo
::NormalCtor_t(callDefaultCtor<AMDGPUPromoteAlloca>), false
, false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeAMDGPUPromoteAllocaPassFlag; void llvm
::initializeAMDGPUPromoteAllocaPass(PassRegistry &Registry
) { llvm::call_once(InitializeAMDGPUPromoteAllocaPassFlag, initializeAMDGPUPromoteAllocaPassOnce
, std::ref(Registry)); }
137 "AMDGPU promote alloca to vector or LDS", false, false)PassInfo *PI = new PassInfo( "AMDGPU promote alloca to vector or LDS"
, "amdgpu-promote-alloca", &AMDGPUPromoteAlloca::ID, PassInfo
::NormalCtor_t(callDefaultCtor<AMDGPUPromoteAlloca>), false
, false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeAMDGPUPromoteAllocaPassFlag; void llvm
::initializeAMDGPUPromoteAllocaPass(PassRegistry &Registry
) { llvm::call_once(InitializeAMDGPUPromoteAllocaPassFlag, initializeAMDGPUPromoteAllocaPassOnce
, std::ref(Registry)); }
138
139INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",static void *initializeAMDGPUPromoteAllocaToVectorPassOnce(PassRegistry
&Registry) { PassInfo *PI = new PassInfo( "AMDGPU promote alloca to vector"
, "amdgpu-promote-alloca" "-to-vector", &AMDGPUPromoteAllocaToVector
::ID, PassInfo::NormalCtor_t(callDefaultCtor<AMDGPUPromoteAllocaToVector
>), false, false); Registry.registerPass(*PI, true); return
PI; } static llvm::once_flag InitializeAMDGPUPromoteAllocaToVectorPassFlag
; void llvm::initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry
&Registry) { llvm::call_once(InitializeAMDGPUPromoteAllocaToVectorPassFlag
, initializeAMDGPUPromoteAllocaToVectorPassOnce, std::ref(Registry
)); }
140 "AMDGPU promote alloca to vector", false, false)static void *initializeAMDGPUPromoteAllocaToVectorPassOnce(PassRegistry
&Registry) { PassInfo *PI = new PassInfo( "AMDGPU promote alloca to vector"
, "amdgpu-promote-alloca" "-to-vector", &AMDGPUPromoteAllocaToVector
::ID, PassInfo::NormalCtor_t(callDefaultCtor<AMDGPUPromoteAllocaToVector
>), false, false); Registry.registerPass(*PI, true); return
PI; } static llvm::once_flag InitializeAMDGPUPromoteAllocaToVectorPassFlag
; void llvm::initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry
&Registry) { llvm::call_once(InitializeAMDGPUPromoteAllocaToVectorPassFlag
, initializeAMDGPUPromoteAllocaToVectorPassOnce, std::ref(Registry
)); }
141
142char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
143char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
144
145bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
146 if (skipFunction(F))
147 return false;
148
149 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
150 return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
151 }
152 return false;
153}
154
155PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
156 FunctionAnalysisManager &AM) {
157 bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
158 if (Changed) {
159 PreservedAnalyses PA;
160 PA.preserveSet<CFGAnalyses>();
161 return PA;
162 }
163 return PreservedAnalyses::all();
164}
165
166bool AMDGPUPromoteAllocaImpl::run(Function &F) {
167 Mod = F.getParent();
168 DL = &Mod->getDataLayout();
169
170 const Triple &TT = TM.getTargetTriple();
171 IsAMDGCN = TT.getArch() == Triple::amdgcn;
172 IsAMDHSA = TT.getOS() == Triple::AMDHSA;
173
174 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
175 if (!ST.isPromoteAllocaEnabled())
176 return false;
177
178 if (IsAMDGCN) {
179 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
180 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
181 // A non-entry function has only 32 caller preserved registers.
182 // Do not promote alloca which will force spilling.
183 if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
184 MaxVGPRs = std::min(MaxVGPRs, 32u);
185 } else {
186 MaxVGPRs = 128;
187 }
188
189 bool SufficientLDS = hasSufficientLocalMem(F);
190 bool Changed = false;
191 BasicBlock &EntryBB = *F.begin();
192
193 SmallVector<AllocaInst *, 16> Allocas;
194 for (Instruction &I : EntryBB) {
195 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
196 Allocas.push_back(AI);
197 }
198
199 for (AllocaInst *AI : Allocas) {
200 if (handleAlloca(*AI, SufficientLDS))
201 Changed = true;
202 }
203
204 return Changed;
205}
206
207std::pair<Value *, Value *>
208AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
209 Function &F = *Builder.GetInsertBlock()->getParent();
210 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
211
212 if (!IsAMDHSA) {
213 Function *LocalSizeYFn
214 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
215 Function *LocalSizeZFn
216 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
217
218 CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
219 CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
220
221 ST.makeLIDRangeMetadata(LocalSizeY);
222 ST.makeLIDRangeMetadata(LocalSizeZ);
223
224 return std::make_pair(LocalSizeY, LocalSizeZ);
225 }
226
227 // We must read the size out of the dispatch pointer.
228 assert(IsAMDGCN)(static_cast <bool> (IsAMDGCN) ? void (0) : __assert_fail
("IsAMDGCN", "llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp"
, 228, __extension__ __PRETTY_FUNCTION__))
;
229
230 // We are indexing into this struct, and want to extract the workgroup_size_*
231 // fields.
232 //
233 // typedef struct hsa_kernel_dispatch_packet_s {
234 // uint16_t header;
235 // uint16_t setup;
236 // uint16_t workgroup_size_x ;
237 // uint16_t workgroup_size_y;
238 // uint16_t workgroup_size_z;
239 // uint16_t reserved0;
240 // uint32_t grid_size_x ;
241 // uint32_t grid_size_y ;
242 // uint32_t grid_size_z;
243 //
244 // uint32_t private_segment_size;
245 // uint32_t group_segment_size;
246 // uint64_t kernel_object;
247 //
248 // #ifdef HSA_LARGE_MODEL
249 // void *kernarg_address;
250 // #elif defined HSA_LITTLE_ENDIAN
251 // void *kernarg_address;
252 // uint32_t reserved1;
253 // #else
254 // uint32_t reserved1;
255 // void *kernarg_address;
256 // #endif
257 // uint64_t reserved2;
258 // hsa_signal_t completion_signal; // uint64_t wrapper
259 // } hsa_kernel_dispatch_packet_t
260 //
261 Function *DispatchPtrFn
262 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
263
264 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
265 DispatchPtr->addRetAttr(Attribute::NoAlias);
266 DispatchPtr->addRetAttr(Attribute::NonNull);
267 F.removeFnAttr("amdgpu-no-dispatch-ptr");
268
269 // Size of the dispatch packet struct.
270 DispatchPtr->addDereferenceableRetAttr(64);
271
272 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
273 Value *CastDispatchPtr = Builder.CreateBitCast(
274 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
275
276 // We could do a single 64-bit load here, but it's likely that the basic
277 // 32-bit and extract sequence is already present, and it is probably easier
278 // to CSE this. The loads should be mergeable later anyway.
279 Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
280 LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
281
282 Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
283 LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
284
285 MDNode *MD = MDNode::get(Mod->getContext(), None);
286 LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
287 LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
288 ST.makeLIDRangeMetadata(LoadZU);
289
290 // Extract y component. Upper half of LoadZU should be zero already.
291 Value *Y = Builder.CreateLShr(LoadXY, 16);
292
293 return std::make_pair(Y, LoadZU);
294}
295
296Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
297 unsigned N) {
298 Function *F = Builder.GetInsertBlock()->getParent();
299 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
300 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
301 StringRef AttrName;
302
303 switch (N) {
304 case 0:
305 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
306 : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
307 AttrName = "amdgpu-no-workitem-id-x";
308 break;
309 case 1:
310 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
311 : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
312 AttrName = "amdgpu-no-workitem-id-y";
313 break;
314
315 case 2:
316 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
317 : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
318 AttrName = "amdgpu-no-workitem-id-z";
319 break;
320 default:
321 llvm_unreachable("invalid dimension")::llvm::llvm_unreachable_internal("invalid dimension", "llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp"
, 321)
;
322 }
323
324 Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
325 CallInst *CI = Builder.CreateCall(WorkitemIdFn);
326 ST.makeLIDRangeMetadata(CI);
327 F->removeFnAttr(AttrName);
328
329 return CI;
330}
331
332static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
333 return FixedVectorType::get(ArrayTy->getElementType(),
334 ArrayTy->getNumElements());
335}
336
337static Value *
338calculateVectorIndex(Value *Ptr,
339 const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
340 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
341 if (!GEP)
342 return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));
343
344 auto I = GEPIdx.find(GEP);
345 assert(I != GEPIdx.end() && "Must have entry for GEP!")(static_cast <bool> (I != GEPIdx.end() && "Must have entry for GEP!"
) ? void (0) : __assert_fail ("I != GEPIdx.end() && \"Must have entry for GEP!\""
, "llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp", 345, __extension__
__PRETTY_FUNCTION__))
;
346 return I->second;
347}
348
349static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
350 Type *VecElemTy, const DataLayout &DL) {
351 // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
352 // helper.
353 unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
354 MapVector<Value *, APInt> VarOffsets;
355 APInt ConstOffset(BW, 0);
356 if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
357 !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
358 return nullptr;
359
360 unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
361 if (VarOffsets.size() > 1)
362 return nullptr;
363
364 if (VarOffsets.size() == 1) {
365 // Only handle cases where we don't need to insert extra arithmetic
366 // instructions.
367 const auto &VarOffset = VarOffsets.front();
368 if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
369 return nullptr;
370 return VarOffset.first;
371 }
372
373 APInt Quot;
374 uint64_t Rem;
375 APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
376 if (Rem != 0)
377 return nullptr;
378
379 return ConstantInt::get(GEP->getContext(), Quot);
380}
381
382static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
383 unsigned MaxVGPRs) {
384
385 if (DisablePromoteAllocaToVector) {
1
Assuming the condition is false
2
Taking false branch
386 LLVM_DEBUG(dbgs() << " Promotion alloca to vector is disabled\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " Promotion alloca to vector is disabled\n"
; } } while (false)
;
387 return false;
388 }
389
390 Type *AllocaTy = Alloca->getAllocatedType();
391 auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
3
Assuming 'AllocaTy' is not a 'CastReturnType'
392 if (auto *ArrayTy
4.1
'ArrayTy' is non-null
4.1
'ArrayTy' is non-null
= dyn_cast<ArrayType>(AllocaTy)) {
4
Assuming 'AllocaTy' is a 'CastReturnType'
393 if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
5
Assuming the condition is true
7
Taking true branch
394 ArrayTy->getNumElements() > 0)
6
Assuming the condition is true
395 VectorTy = arrayTypeToVecType(ArrayTy);
396 }
397
398 // Use up to 1/4 of available register budget for vectorization.
399 unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
8
Assuming the condition is false
9
'?' condition is false
400 : (MaxVGPRs * 32);
401
402 if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
10
Assuming the condition is false
403 LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " Alloca too big for vectorization with "
<< MaxVGPRs << " registers available\n"; } } while
(false)
404 << MaxVGPRs << " registers available\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " Alloca too big for vectorization with "
<< MaxVGPRs << " registers available\n"; } } while
(false)
;
405 return false;
406 }
407
408 LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Alloca candidate for vectorization\n"
; } } while (false)
;
11
Taking false branch
12
Assuming 'DebugFlag' is false
409
410 // FIXME: There is no reason why we can't support larger arrays, we
411 // are just being conservative for now.
412 // FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or equivalent. Potentially these
413 // could also be promoted but we don't currently handle this case
414 if (!VectorTy || VectorTy->getNumElements() > 16 ||
13
Assuming 'VectorTy' is non-null
14
Assuming the condition is false
16
Taking false branch
415 VectorTy->getNumElements() < 2) {
15
Assuming the condition is false
416 LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " Cannot convert type to vector\n"
; } } while (false)
;
417 return false;
418 }
419
420 std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
421 SmallVector<Instruction *> WorkList;
422 SmallVector<Use *, 8> Uses;
423 for (Use &U : Alloca->uses())
424 Uses.push_back(&U);
425
426 Type *VecEltTy = VectorTy->getElementType();
427 while (!Uses.empty()) {
17
Loop condition is true. Entering loop body
428 Use *U = Uses.pop_back_val();
429 Instruction *Inst = dyn_cast<Instruction>(U->getUser());
18
Assuming the object is not a 'CastReturnType'
19
'Inst' initialized to a null pointer value
430
431 if (Value *Ptr
29.1
'Ptr' is null
29.1
'Ptr' is null
= getLoadStorePointerOperand(Inst)) {
20
Calling 'getLoadStorePointerOperand'
29
Returning from 'getLoadStorePointerOperand'
30
Taking false branch
432 // This is a store of the pointer, not to the pointer.
433 if (isa<StoreInst>(Inst) &&
434 U->getOperandNo() != StoreInst::getPointerOperandIndex())
435 return false;
436
437 Type *AccessTy = getLoadStoreType(Inst);
438 Ptr = Ptr->stripPointerCasts();
439
440 // Alloca already accessed as vector, leave alone.
441 if (Ptr == Alloca && DL.getTypeStoreSize(Alloca->getAllocatedType()) ==
442 DL.getTypeStoreSize(AccessTy))
443 continue;
444
445 // Check that this is a simple access of a vector element.
446 bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
447 : cast<StoreInst>(Inst)->isSimple();
448 if (!IsSimple ||
449 !CastInst::isBitOrNoopPointerCastable(VecEltTy, AccessTy, DL))
450 return false;
451
452 WorkList.push_back(Inst);
453 continue;
454 }
455
456 if (isa<BitCastInst>(Inst)) {
31
Assuming 'Inst' is a 'class llvm::BitCastInst &'
32
Taking true branch
457 // Look through bitcasts.
458 for (Use &U : Inst->uses())
33
Called C++ object pointer is null
459 Uses.push_back(&U);
460 continue;
461 }
462
463 if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
464 // If we can't compute a vector index from this GEP, then we can't
465 // promote this alloca to vector.
466 Value *Index = GEPToVectorIndex(GEP, Alloca, VecEltTy, DL);
467 if (!Index) {
468 LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEPdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " Cannot compute vector index for GEP "
<< *GEP << '\n'; } } while (false)
469 << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " Cannot compute vector index for GEP "
<< *GEP << '\n'; } } while (false)
;
470 return false;
471 }
472
473 GEPVectorIdx[GEP] = Index;
474 for (Use &U : Inst->uses())
475 Uses.push_back(&U);
476 continue;
477 }
478
479 // Ignore assume-like intrinsics and comparisons used in assumes.
480 if (isAssumeLikeIntrinsic(Inst))
481 continue;
482
483 if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
484 return isAssumeLikeIntrinsic(cast<Instruction>(U));
485 }))
486 continue;
487
488 // Unknown user.
489 return false;
490 }
491
492 LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " Converting alloca to vector "
<< *AllocaTy << " -> " << *VectorTy <<
'\n'; } } while (false)
493 << *VectorTy << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " Converting alloca to vector "
<< *AllocaTy << " -> " << *VectorTy <<
'\n'; } } while (false)
;
494
495 for (Instruction *Inst : WorkList) {
496 IRBuilder<> Builder(Inst);
497 switch (Inst->getOpcode()) {
498 case Instruction::Load: {
499 Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
500 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
501 Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
502 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
503 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
504 Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
505 if (Inst->getType() != VecEltTy)
506 ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
507 Inst->replaceAllUsesWith(ExtractElement);
508 Inst->eraseFromParent();
509 break;
510 }
511 case Instruction::Store: {
512 StoreInst *SI = cast<StoreInst>(Inst);
513 Value *Ptr = SI->getPointerOperand();
514 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
515 Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
516 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
517 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
518 Value *Elt = SI->getValueOperand();
519 if (Elt->getType() != VecEltTy)
520 Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
521 Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
522 Builder.CreateStore(NewVecValue, BitCast);
523 Inst->eraseFromParent();
524 break;
525 }
526
527 default:
528 llvm_unreachable("Inconsistency in instructions promotable to vector")::llvm::llvm_unreachable_internal("Inconsistency in instructions promotable to vector"
, "llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp", 528)
;
529 }
530 }
531 return true;
532}
533
534static bool isCallPromotable(CallInst *CI) {
535 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
536 if (!II)
537 return false;
538
539 switch (II->getIntrinsicID()) {
540 case Intrinsic::memcpy:
541 case Intrinsic::memmove:
542 case Intrinsic::memset:
543 case Intrinsic::lifetime_start:
544 case Intrinsic::lifetime_end:
545 case Intrinsic::invariant_start:
546 case Intrinsic::invariant_end:
547 case Intrinsic::launder_invariant_group:
548 case Intrinsic::strip_invariant_group:
549 case Intrinsic::objectsize:
550 return true;
551 default:
552 return false;
553 }
554}
555
556bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
557 Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
558 int OpIdx1) const {
559 // Figure out which operand is the one we might not be promoting.
560 Value *OtherOp = Inst->getOperand(OpIdx0);
561 if (Val == OtherOp)
562 OtherOp = Inst->getOperand(OpIdx1);
563
564 if (isa<ConstantPointerNull>(OtherOp))
565 return true;
566
567 Value *OtherObj = getUnderlyingObject(OtherOp);
568 if (!isa<AllocaInst>(OtherObj))
569 return false;
570
571 // TODO: We should be able to replace undefs with the right pointer type.
572
573 // TODO: If we know the other base object is another promotable
574 // alloca, not necessarily this alloca, we can do this. The
575 // important part is both must have the same address space at
576 // the end.
577 if (OtherObj != BaseAlloca) {
578 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Found a binary instruction with another alloca object\n"
; } } while (false)
579 dbgs() << "Found a binary instruction with another alloca object\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Found a binary instruction with another alloca object\n"
; } } while (false)
;
580 return false;
581 }
582
583 return true;
584}
585
586bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
587 Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
588
589 for (User *User : Val->users()) {
590 if (is_contained(WorkList, User))
591 continue;
592
593 if (CallInst *CI = dyn_cast<CallInst>(User)) {
594 if (!isCallPromotable(CI))
595 return false;
596
597 WorkList.push_back(User);
598 continue;
599 }
600
601 Instruction *UseInst = cast<Instruction>(User);
602 if (UseInst->getOpcode() == Instruction::PtrToInt)
603 return false;
604
605 if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
606 if (LI->isVolatile())
607 return false;
608
609 continue;
610 }
611
612 if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
613 if (SI->isVolatile())
614 return false;
615
616 // Reject if the stored value is not the pointer operand.
617 if (SI->getPointerOperand() != Val)
618 return false;
619 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
620 if (RMW->isVolatile())
621 return false;
622 } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
623 if (CAS->isVolatile())
624 return false;
625 }
626
627 // Only promote a select if we know that the other select operand
628 // is from another pointer that will also be promoted.
629 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
630 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
631 return false;
632
633 // May need to rewrite constant operands.
634 WorkList.push_back(ICmp);
635 }
636
637 if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
638 // Give up if the pointer may be captured.
639 if (PointerMayBeCaptured(UseInst, true, true))
640 return false;
641 // Don't collect the users of this.
642 WorkList.push_back(User);
643 continue;
644 }
645
646 // Do not promote vector/aggregate type instructions. It is hard to track
647 // their users.
648 if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
649 return false;
650
651 if (!User->getType()->isPointerTy())
652 continue;
653
654 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
655 // Be conservative if an address could be computed outside the bounds of
656 // the alloca.
657 if (!GEP->isInBounds())
658 return false;
659 }
660
661 // Only promote a select if we know that the other select operand is from
662 // another pointer that will also be promoted.
663 if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
664 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
665 return false;
666 }
667
668 // Repeat for phis.
669 if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
670 // TODO: Handle more complex cases. We should be able to replace loops
671 // over arrays.
672 switch (Phi->getNumIncomingValues()) {
673 case 1:
674 break;
675 case 2:
676 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
677 return false;
678 break;
679 default:
680 return false;
681 }
682 }
683
684 WorkList.push_back(User);
685 if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
686 return false;
687 }
688
689 return true;
690}
691
692bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
693
694 FunctionType *FTy = F.getFunctionType();
695 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
696
697 // If the function has any arguments in the local address space, then it's
698 // possible these arguments require the entire local memory space, so
699 // we cannot use local memory in the pass.
700 for (Type *ParamTy : FTy->params()) {
701 PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
702 if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
703 LocalMemLimit = 0;
704 LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Function has local memory argument. Promoting to "
"local memory disabled.\n"; } } while (false)
705 "local memory disabled.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Function has local memory argument. Promoting to "
"local memory disabled.\n"; } } while (false)
;
706 return false;
707 }
708 }
709
710 LocalMemLimit = ST.getLocalMemorySize();
711 if (LocalMemLimit == 0)
712 return false;
713
714 SmallVector<const Constant *, 16> Stack;
715 SmallPtrSet<const Constant *, 8> VisitedConstants;
716 SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
717
718 auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
719 for (const User *U : Val->users()) {
720 if (const Instruction *Use = dyn_cast<Instruction>(U)) {
721 if (Use->getParent()->getParent() == &F)
722 return true;
723 } else {
724 const Constant *C = cast<Constant>(U);
725 if (VisitedConstants.insert(C).second)
726 Stack.push_back(C);
727 }
728 }
729
730 return false;
731 };
732
733 for (GlobalVariable &GV : Mod->globals()) {
734 if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
735 continue;
736
737 if (visitUsers(&GV, &GV)) {
738 UsedLDS.insert(&GV);
739 Stack.clear();
740 continue;
741 }
742
743 // For any ConstantExpr uses, we need to recursively search the users until
744 // we see a function.
745 while (!Stack.empty()) {
746 const Constant *C = Stack.pop_back_val();
747 if (visitUsers(&GV, C)) {
748 UsedLDS.insert(&GV);
749 Stack.clear();
750 break;
751 }
752 }
753 }
754
755 const DataLayout &DL = Mod->getDataLayout();
756 SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
757 AllocatedSizes.reserve(UsedLDS.size());
758
759 for (const GlobalVariable *GV : UsedLDS) {
760 Align Alignment =
761 DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
762 uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
763
764 // HIP uses an extern unsized array in local address space for dynamically
765 // allocated shared memory. In that case, we have to disable the promotion.
766 if (GV->hasExternalLinkage() && AllocSize == 0) {
767 LocalMemLimit = 0;
768 LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Function has a reference to externally allocated "
"local memory. Promoting to local memory " "disabled.\n"; } }
while (false)
769 "local memory. Promoting to local memory "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Function has a reference to externally allocated "
"local memory. Promoting to local memory " "disabled.\n"; } }
while (false)
770 "disabled.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Function has a reference to externally allocated "
"local memory. Promoting to local memory " "disabled.\n"; } }
while (false)
;
771 return false;
772 }
773
774 AllocatedSizes.emplace_back(AllocSize, Alignment);
775 }
776
777 // Sort to try to estimate the worst case alignment padding
778 //
779 // FIXME: We should really do something to fix the addresses to a more optimal
780 // value instead
781 llvm::sort(AllocatedSizes, llvm::less_second());
782
783 // Check how much local memory is being used by global objects
784 CurrentLocalMemUsage = 0;
785
786 // FIXME: Try to account for padding here. The real padding and address is
787 // currently determined from the inverse order of uses in the function when
788 // legalizing, which could also potentially change. We try to estimate the
789 // worst case here, but we probably should fix the addresses earlier.
790 for (auto Alloc : AllocatedSizes) {
791 CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
792 CurrentLocalMemUsage += Alloc.first;
793 }
794
795 unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
796 F);
797
798 // Restrict local memory usage so that we don't drastically reduce occupancy,
799 // unless it is already significantly reduced.
800
801 // TODO: Have some sort of hint or other heuristics to guess occupancy based
802 // on other factors..
803 unsigned OccupancyHint = ST.getWavesPerEU(F).second;
804 if (OccupancyHint == 0)
805 OccupancyHint = 7;
806
807 // Clamp to max value.
808 OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
809
810 // Check the hint but ignore it if it's obviously wrong from the existing LDS
811 // usage.
812 MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
813
814
815 // Round up to the next tier of usage.
816 unsigned MaxSizeWithWaveCount
817 = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
818
819 // Program is possibly broken by using more local mem than available.
820 if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
821 return false;
822
823 LocalMemLimit = MaxSizeWithWaveCount;
824
825 LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsagedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << F.getName() <<
" uses " << CurrentLocalMemUsage << " bytes of LDS\n"
<< " Rounding size to " << MaxSizeWithWaveCount
<< " with a maximum occupancy of " << MaxOccupancy
<< '\n' << " and " << (LocalMemLimit - CurrentLocalMemUsage
) << " available for promotion\n"; } } while (false)
826 << " bytes of LDS\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << F.getName() <<
" uses " << CurrentLocalMemUsage << " bytes of LDS\n"
<< " Rounding size to " << MaxSizeWithWaveCount
<< " with a maximum occupancy of " << MaxOccupancy
<< '\n' << " and " << (LocalMemLimit - CurrentLocalMemUsage
) << " available for promotion\n"; } } while (false)
827 << " Rounding size to " << MaxSizeWithWaveCountdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << F.getName() <<
" uses " << CurrentLocalMemUsage << " bytes of LDS\n"
<< " Rounding size to " << MaxSizeWithWaveCount
<< " with a maximum occupancy of " << MaxOccupancy
<< '\n' << " and " << (LocalMemLimit - CurrentLocalMemUsage
) << " available for promotion\n"; } } while (false)
828 << " with a maximum occupancy of " << MaxOccupancy << '\n'do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << F.getName() <<
" uses " << CurrentLocalMemUsage << " bytes of LDS\n"
<< " Rounding size to " << MaxSizeWithWaveCount
<< " with a maximum occupancy of " << MaxOccupancy
<< '\n' << " and " << (LocalMemLimit - CurrentLocalMemUsage
) << " available for promotion\n"; } } while (false)
829 << " and " << (LocalMemLimit - CurrentLocalMemUsage)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << F.getName() <<
" uses " << CurrentLocalMemUsage << " bytes of LDS\n"
<< " Rounding size to " << MaxSizeWithWaveCount
<< " with a maximum occupancy of " << MaxOccupancy
<< '\n' << " and " << (LocalMemLimit - CurrentLocalMemUsage
) << " available for promotion\n"; } } while (false)
830 << " available for promotion\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << F.getName() <<
" uses " << CurrentLocalMemUsage << " bytes of LDS\n"
<< " Rounding size to " << MaxSizeWithWaveCount
<< " with a maximum occupancy of " << MaxOccupancy
<< '\n' << " and " << (LocalMemLimit - CurrentLocalMemUsage
) << " available for promotion\n"; } } while (false)
;
831
832 return true;
833}
834
835// FIXME: Should try to pick the most likely to be profitable allocas first.
836bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
837 // Array allocations are probably not worth handling, since an allocation of
838 // the array type is the canonical form.
839 if (!I.isStaticAlloca() || I.isArrayAllocation())
840 return false;
841
842 const DataLayout &DL = Mod->getDataLayout();
843 IRBuilder<> Builder(&I);
844
845 // First try to replace the alloca with a vector
846 Type *AllocaTy = I.getAllocatedType();
847
848 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Trying to promote "
<< I << '\n'; } } while (false)
;
849
850 if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
851 return true; // Promoted to vector.
852
853 if (DisablePromoteAllocaToLDS)
854 return false;
855
856 const Function &ContainingFunction = *I.getParent()->getParent();
857 CallingConv::ID CC = ContainingFunction.getCallingConv();
858
859 // Don't promote the alloca to LDS for shader calling conventions as the work
860 // item ID intrinsics are not supported for these calling conventions.
861 // Furthermore not all LDS is available for some of the stages.
862 switch (CC) {
863 case CallingConv::AMDGPU_KERNEL:
864 case CallingConv::SPIR_KERNEL:
865 break;
866 default:
867 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " promote alloca to LDS not supported with calling convention.\n"
; } } while (false)
868 dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " promote alloca to LDS not supported with calling convention.\n"
; } } while (false)
869 << " promote alloca to LDS not supported with calling convention.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " promote alloca to LDS not supported with calling convention.\n"
; } } while (false)
;
870 return false;
871 }
872
873 // Not likely to have sufficient local memory for promotion.
874 if (!SufficientLDS)
875 return false;
876
877 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
878 unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
879
880 Align Alignment =
881 DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
882
883 // FIXME: This computed padding is likely wrong since it depends on inverse
884 // usage order.
885 //
886 // FIXME: It is also possible that if we're allowed to use all of the memory
887 // could end up using more than the maximum due to alignment padding.
888
889 uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
890 uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
891 NewSize += AllocSize;
892
893 if (NewSize > LocalMemLimit) {
894 LLVM_DEBUG(dbgs() << " " << AllocSizedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " " << AllocSize
<< " bytes of local memory not available to promote\n"
; } } while (false)
895 << " bytes of local memory not available to promote\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " " << AllocSize
<< " bytes of local memory not available to promote\n"
; } } while (false)
;
896 return false;
897 }
898
899 CurrentLocalMemUsage = NewSize;
900
901 std::vector<Value*> WorkList;
902
903 if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
904 LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << " Do not know how to convert all uses\n"
; } } while (false)
;
905 return false;
906 }
907
908 LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Promoting alloca to local memory\n"
; } } while (false)
;
909
910 Function *F = I.getParent()->getParent();
911
912 Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
913 GlobalVariable *GV = new GlobalVariable(
914 *Mod, GVTy, false, GlobalValue::InternalLinkage,
915 UndefValue::get(GVTy),
916 Twine(F->getName()) + Twine('.') + I.getName(),
917 nullptr,
918 GlobalVariable::NotThreadLocal,
919 AMDGPUAS::LOCAL_ADDRESS);
920 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
921 GV->setAlignment(I.getAlign());
922
923 Value *TCntY, *TCntZ;
924
925 std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
926 Value *TIdX = getWorkitemID(Builder, 0);
927 Value *TIdY = getWorkitemID(Builder, 1);
928 Value *TIdZ = getWorkitemID(Builder, 2);
929
930 Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
931 Tmp0 = Builder.CreateMul(Tmp0, TIdX);
932 Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
933 Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
934 TID = Builder.CreateAdd(TID, TIdZ);
935
936 Value *Indices[] = {
937 Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
938 TID
939 };
940
941 Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
942 I.mutateType(Offset->getType());
943 I.replaceAllUsesWith(Offset);
944 I.eraseFromParent();
945
946 SmallVector<IntrinsicInst *> DeferredIntrs;
947
948 for (Value *V : WorkList) {
949 CallInst *Call = dyn_cast<CallInst>(V);
950 if (!Call) {
951 if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
952 Value *Src0 = CI->getOperand(0);
953 PointerType *NewTy = PointerType::getWithSamePointeeType(
954 cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);
955
956 if (isa<ConstantPointerNull>(CI->getOperand(0)))
957 CI->setOperand(0, ConstantPointerNull::get(NewTy));
958
959 if (isa<ConstantPointerNull>(CI->getOperand(1)))
960 CI->setOperand(1, ConstantPointerNull::get(NewTy));
961
962 continue;
963 }
964
965 // The operand's value should be corrected on its own and we don't want to
966 // touch the users.
967 if (isa<AddrSpaceCastInst>(V))
968 continue;
969
970 PointerType *NewTy = PointerType::getWithSamePointeeType(
971 cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);
972
973 // FIXME: It doesn't really make sense to try to do this for all
974 // instructions.
975 V->mutateType(NewTy);
976
977 // Adjust the types of any constant operands.
978 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
979 if (isa<ConstantPointerNull>(SI->getOperand(1)))
980 SI->setOperand(1, ConstantPointerNull::get(NewTy));
981
982 if (isa<ConstantPointerNull>(SI->getOperand(2)))
983 SI->setOperand(2, ConstantPointerNull::get(NewTy));
984 } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
985 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
986 if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
987 Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
988 }
989 }
990
991 continue;
992 }
993
994 IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
995 Builder.SetInsertPoint(Intr);
996 switch (Intr->getIntrinsicID()) {
997 case Intrinsic::lifetime_start:
998 case Intrinsic::lifetime_end:
999 // These intrinsics are for address space 0 only
1000 Intr->eraseFromParent();
1001 continue;
1002 case Intrinsic::memcpy:
1003 case Intrinsic::memmove:
1004 // These have 2 pointer operands. In case if second pointer also needs
1005 // to be replaced we defer processing of these intrinsics until all
1006 // other values are processed.
1007 DeferredIntrs.push_back(Intr);
1008 continue;
1009 case Intrinsic::memset: {
1010 MemSetInst *MemSet = cast<MemSetInst>(Intr);
1011 Builder.CreateMemSet(
1012 MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
1013 MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
1014 Intr->eraseFromParent();
1015 continue;
1016 }
1017 case Intrinsic::invariant_start:
1018 case Intrinsic::invariant_end:
1019 case Intrinsic::launder_invariant_group:
1020 case Intrinsic::strip_invariant_group:
1021 Intr->eraseFromParent();
1022 // FIXME: I think the invariant marker should still theoretically apply,
1023 // but the intrinsics need to be changed to accept pointers with any
1024 // address space.
1025 continue;
1026 case Intrinsic::objectsize: {
1027 Value *Src = Intr->getOperand(0);
1028 Function *ObjectSize = Intrinsic::getDeclaration(
1029 Mod, Intrinsic::objectsize,
1030 {Intr->getType(),
1031 PointerType::getWithSamePointeeType(
1032 cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});
1033
1034 CallInst *NewCall = Builder.CreateCall(
1035 ObjectSize,
1036 {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1037 Intr->replaceAllUsesWith(NewCall);
1038 Intr->eraseFromParent();
1039 continue;
1040 }
1041 default:
1042 Intr->print(errs());
1043 llvm_unreachable("Don't know how to promote alloca intrinsic use.")::llvm::llvm_unreachable_internal("Don't know how to promote alloca intrinsic use."
, "llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp", 1043)
;
1044 }
1045 }
1046
1047 for (IntrinsicInst *Intr : DeferredIntrs) {
1048 Builder.SetInsertPoint(Intr);
1049 Intrinsic::ID ID = Intr->getIntrinsicID();
1050 assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove)(static_cast <bool> (ID == Intrinsic::memcpy || ID == Intrinsic
::memmove) ? void (0) : __assert_fail ("ID == Intrinsic::memcpy || ID == Intrinsic::memmove"
, "llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp", 1050, __extension__
__PRETTY_FUNCTION__))
;
1051
1052 MemTransferInst *MI = cast<MemTransferInst>(Intr);
1053 auto *B =
1054 Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
1055 MI->getRawSource(), MI->getSourceAlign(),
1056 MI->getLength(), MI->isVolatile());
1057
1058 for (unsigned I = 0; I != 2; ++I) {
1059 if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1060 B->addDereferenceableParamAttr(I, Bytes);
1061 }
1062 }
1063
1064 Intr->eraseFromParent();
1065 }
1066
1067 return true;
1068}
1069
1070bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
1071 // Array allocations are probably not worth handling, since an allocation of
1072 // the array type is the canonical form.
1073 if (!I.isStaticAlloca() || I.isArrayAllocation())
1074 return false;
1075
1076 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-promote-alloca")) { dbgs() << "Trying to promote "
<< I << '\n'; } } while (false)
;
1077
1078 Module *Mod = I.getParent()->getParent()->getParent();
1079 return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
1080}
1081
1082bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
1083 if (DisablePromoteAllocaToVector)
1084 return false;
1085
1086 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
1087 if (!ST.isPromoteAllocaEnabled())
1088 return false;
1089
1090 unsigned MaxVGPRs;
1091 if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
1092 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
1093 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
1094 // A non-entry function has only 32 caller preserved registers.
1095 // Do not promote alloca which will force spilling.
1096 if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
1097 MaxVGPRs = std::min(MaxVGPRs, 32u);
1098 } else {
1099 MaxVGPRs = 128;
1100 }
1101
1102 bool Changed = false;
1103 BasicBlock &EntryBB = *F.begin();
1104
1105 SmallVector<AllocaInst *, 16> Allocas;
1106 for (Instruction &I : EntryBB) {
1107 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
1108 Allocas.push_back(AI);
1109 }
1110
1111 for (AllocaInst *AI : Allocas) {
1112 if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
1113 Changed = true;
1114 }
1115
1116 return Changed;
1117}
1118
1119bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
1120 if (skipFunction(F))
1121 return false;
1122 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
1123 return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
1124 }
1125 return false;
1126}
1127
1128PreservedAnalyses
1129AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
1130 bool Changed = promoteAllocasToVector(F, TM);
1131 if (Changed) {
1132 PreservedAnalyses PA;
1133 PA.preserveSet<CFGAnalyses>();
1134 return PA;
1135 }
1136 return PreservedAnalyses::all();
1137}
1138
1139FunctionPass *llvm::createAMDGPUPromoteAlloca() {
1140 return new AMDGPUPromoteAlloca();
1141}
1142
1143FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
1144 return new AMDGPUPromoteAllocaToVector();
1145}

/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/CFG.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/InstrTypes.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/OperandTraits.h"
33#include "llvm/IR/Use.h"
34#include "llvm/IR/User.h"
35#include "llvm/Support/AtomicOrdering.h"
36#include "llvm/Support/ErrorHandling.h"
37#include <cassert>
38#include <cstddef>
39#include <cstdint>
40#include <iterator>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class BlockAddress;
48class ConstantInt;
49class DataLayout;
50class StringRef;
51class Type;
52class Value;
53
54//===----------------------------------------------------------------------===//
55// AllocaInst Class
56//===----------------------------------------------------------------------===//
57
58/// an instruction to allocate memory on the stack
59class AllocaInst : public UnaryInstruction {
60 Type *AllocatedType;
61
62 using AlignmentField = AlignmentBitfieldElementT<0>;
63 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
64 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
65 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
66 SwiftErrorField>(),
67 "Bitfields must be contiguous");
68
69protected:
70 // Note: Instruction needs to be a friend here to call cloneImpl.
71 friend class Instruction;
72
73 AllocaInst *cloneImpl() const;
74
75public:
76 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
77 const Twine &Name, Instruction *InsertBefore);
78 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, BasicBlock *InsertAtEnd);
80
81 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
82 Instruction *InsertBefore);
83 AllocaInst(Type *Ty, unsigned AddrSpace,
84 const Twine &Name, BasicBlock *InsertAtEnd);
85
86 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
87 const Twine &Name = "", Instruction *InsertBefore = nullptr);
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name, BasicBlock *InsertAtEnd);
90
91 /// Return true if there is an allocation size parameter to the allocation
92 /// instruction that is not 1.
93 bool isArrayAllocation() const;
94
95 /// Get the number of elements allocated. For a simple allocation of a single
96 /// element, this will return a constant 1 value.
97 const Value *getArraySize() const { return getOperand(0); }
98 Value *getArraySize() { return getOperand(0); }
99
100 /// Overload to return most specific pointer type.
101 PointerType *getType() const {
102 return cast<PointerType>(Instruction::getType());
103 }
104
105 /// Return the address space for the allocation.
106 unsigned getAddressSpace() const {
107 return getType()->getAddressSpace();
108 }
109
110 /// Get allocation size in bits. Returns None if size can't be determined,
111 /// e.g. in case of a VLA.
112 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
113
114 /// Return the type that is being allocated by the instruction.
115 Type *getAllocatedType() const { return AllocatedType; }
116 /// for use only in special circumstances that need to generically
117 /// transform a whole instruction (eg: IR linking and vectorization).
118 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
119
120 /// Return the alignment of the memory that is being allocated by the
121 /// instruction.
122 Align getAlign() const {
123 return Align(1ULL << getSubclassData<AlignmentField>());
124 }
125
126 void setAlignment(Align Align) {
127 setSubclassData<AlignmentField>(Log2(Align));
128 }
129
130 /// Return true if this alloca is in the entry block of the function and is a
131 /// constant size. If so, the code generator will fold it into the
132 /// prolog/epilog code, so it is basically free.
133 bool isStaticAlloca() const;
134
135 /// Return true if this alloca is used as an inalloca argument to a call. Such
136 /// allocas are never considered static even if they are in the entry block.
137 bool isUsedWithInAlloca() const {
138 return getSubclassData<UsedWithInAllocaField>();
139 }
140
141 /// Specify whether this alloca is used to represent the arguments to a call.
142 void setUsedWithInAlloca(bool V) {
143 setSubclassData<UsedWithInAllocaField>(V);
144 }
145
146 /// Return true if this alloca is used as a swifterror argument to a call.
147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148 /// Specify whether this alloca is used to represent a swifterror.
149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 template <typename Bitfield>
163 void setSubclassData(typename Bitfield::Type Value) {
164 Instruction::setSubclassData<Bitfield>(Value);
165 }
166};
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
172/// An instruction for reading from memory. This uses the SubclassData field in
173/// Value to store whether or not the load is volatile.
174class LoadInst : public UnaryInstruction {
175 using VolatileField = BoolBitfieldElementT<0>;
176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
178 static_assert(
179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
180 "Bitfields must be contiguous");
181
182 void AssertOK();
183
184protected:
185 // Note: Instruction needs to be a friend here to call cloneImpl.
186 friend class Instruction;
187
188 LoadInst *cloneImpl() const;
189
190public:
191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
192 Instruction *InsertBefore);
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 BasicBlock *InsertAtEnd);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 Align Align, Instruction *InsertBefore = nullptr);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, BasicBlock *InsertAtEnd);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, AtomicOrdering Order,
204 SyncScope::ID SSID = SyncScope::System,
205 Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
208 BasicBlock *InsertAtEnd);
209
210 /// Return true if this is a load from a volatile memory location.
211 bool isVolatile() const { return getSubclassData<VolatileField>(); }
212
213 /// Specify whether this is a volatile load or not.
214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
215
216 /// Return the alignment of the access that is being performed.
217 Align getAlign() const {
218 return Align(1ULL << (getSubclassData<AlignmentField>()));
219 }
220
221 void setAlignment(Align Align) {
222 setSubclassData<AlignmentField>(Log2(Align));
223 }
224
225 /// Returns the ordering constraint of this load instruction.
226 AtomicOrdering getOrdering() const {
227 return getSubclassData<OrderingField>();
228 }
229 /// Sets the ordering constraint of this load instruction. May not be Release
230 /// or AcquireRelease.
231 void setOrdering(AtomicOrdering Ordering) {
232 setSubclassData<OrderingField>(Ordering);
233 }
234
235 /// Returns the synchronization scope ID of this load instruction.
236 SyncScope::ID getSyncScopeID() const {
237 return SSID;
238 }
239
240 /// Sets the synchronization scope ID of this load instruction.
241 void setSyncScopeID(SyncScope::ID SSID) {
242 this->SSID = SSID;
243 }
244
245 /// Sets the ordering constraint and the synchronization scope ID of this load
246 /// instruction.
247 void setAtomic(AtomicOrdering Ordering,
248 SyncScope::ID SSID = SyncScope::System) {
249 setOrdering(Ordering);
250 setSyncScopeID(SSID);
251 }
252
253 bool isSimple() const { return !isAtomic() && !isVolatile(); }
254
255 bool isUnordered() const {
256 return (getOrdering() == AtomicOrdering::NotAtomic ||
257 getOrdering() == AtomicOrdering::Unordered) &&
258 !isVolatile();
259 }
260
261 Value *getPointerOperand() { return getOperand(0); }
262 const Value *getPointerOperand() const { return getOperand(0); }
263 static unsigned getPointerOperandIndex() { return 0U; }
264 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
265
266 /// Returns the address space of the pointer operand.
267 unsigned getPointerAddressSpace() const {
268 return getPointerOperandType()->getPointerAddressSpace();
269 }
270
271 // Methods for support type inquiry through isa, cast, and dyn_cast:
272 static bool classof(const Instruction *I) {
273 return I->getOpcode() == Instruction::Load;
274 }
275 static bool classof(const Value *V) {
276 return isa<Instruction>(V) && classof(cast<Instruction>(V));
277 }
278
279private:
280 // Shadow Instruction::setInstructionSubclassData with a private forwarding
281 // method so that subclasses cannot accidentally use it.
282 template <typename Bitfield>
283 void setSubclassData(typename Bitfield::Type Value) {
284 Instruction::setSubclassData<Bitfield>(Value);
285 }
286
287 /// The synchronization scope ID of this load instruction. Not quite enough
288 /// room in SubClassData for everything, so synchronization scope ID gets its
289 /// own field.
290 SyncScope::ID SSID;
291};
292
293//===----------------------------------------------------------------------===//
294// StoreInst Class
295//===----------------------------------------------------------------------===//
296
297/// An instruction for storing to memory.
298class StoreInst : public Instruction {
299 using VolatileField = BoolBitfieldElementT<0>;
300 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
301 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
302 static_assert(
303 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
304 "Bitfields must be contiguous");
305
306 void AssertOK();
307
308protected:
309 // Note: Instruction needs to be a friend here to call cloneImpl.
310 friend class Instruction;
311
312 StoreInst *cloneImpl() const;
313
314public:
315 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
316 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
317 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
318 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
319 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
320 Instruction *InsertBefore = nullptr);
321 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
322 BasicBlock *InsertAtEnd);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
324 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
325 Instruction *InsertBefore = nullptr);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
328
329 // allocate space for exactly two operands
330 void *operator new(size_t S) { return User::operator new(S, 2); }
331 void operator delete(void *Ptr) { User::operator delete(Ptr); }
332
333 /// Return true if this is a store to a volatile memory location.
334 bool isVolatile() const { return getSubclassData<VolatileField>(); }
335
336 /// Specify whether this is a volatile store or not.
337 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
338
339 /// Transparently provide more efficient getOperand methods.
340 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
341
342 Align getAlign() const {
343 return Align(1ULL << (getSubclassData<AlignmentField>()));
344 }
345
346 void setAlignment(Align Align) {
347 setSubclassData<AlignmentField>(Log2(Align));
348 }
349
350 /// Returns the ordering constraint of this store instruction.
351 AtomicOrdering getOrdering() const {
352 return getSubclassData<OrderingField>();
353 }
354
355 /// Sets the ordering constraint of this store instruction. May not be
356 /// Acquire or AcquireRelease.
357 void setOrdering(AtomicOrdering Ordering) {
358 setSubclassData<OrderingField>(Ordering);
359 }
360
361 /// Returns the synchronization scope ID of this store instruction.
362 SyncScope::ID getSyncScopeID() const {
363 return SSID;
364 }
365
366 /// Sets the synchronization scope ID of this store instruction.
367 void setSyncScopeID(SyncScope::ID SSID) {
368 this->SSID = SSID;
369 }
370
371 /// Sets the ordering constraint and the synchronization scope ID of this
372 /// store instruction.
373 void setAtomic(AtomicOrdering Ordering,
374 SyncScope::ID SSID = SyncScope::System) {
375 setOrdering(Ordering);
376 setSyncScopeID(SSID);
377 }
378
379 bool isSimple() const { return !isAtomic() && !isVolatile(); }
380
381 bool isUnordered() const {
382 return (getOrdering() == AtomicOrdering::NotAtomic ||
383 getOrdering() == AtomicOrdering::Unordered) &&
384 !isVolatile();
385 }
386
387 Value *getValueOperand() { return getOperand(0); }
388 const Value *getValueOperand() const { return getOperand(0); }
389
390 Value *getPointerOperand() { return getOperand(1); }
391 const Value *getPointerOperand() const { return getOperand(1); }
392 static unsigned getPointerOperandIndex() { return 1U; }
393 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
394
395 /// Returns the address space of the pointer operand.
396 unsigned getPointerAddressSpace() const {
397 return getPointerOperandType()->getPointerAddressSpace();
398 }
399
400 // Methods for support type inquiry through isa, cast, and dyn_cast:
401 static bool classof(const Instruction *I) {
402 return I->getOpcode() == Instruction::Store;
403 }
404 static bool classof(const Value *V) {
405 return isa<Instruction>(V) && classof(cast<Instruction>(V));
406 }
407
408private:
409 // Shadow Instruction::setInstructionSubclassData with a private forwarding
410 // method so that subclasses cannot accidentally use it.
411 template <typename Bitfield>
412 void setSubclassData(typename Bitfield::Type Value) {
413 Instruction::setSubclassData<Bitfield>(Value);
414 }
415
416 /// The synchronization scope ID of this store instruction. Not quite enough
417 /// room in SubClassData for everything, so synchronization scope ID gets its
418 /// own field.
419 SyncScope::ID SSID;
420};
421
422template <>
423struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
424};
425
426DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<StoreInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 426, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this))[i_nocapture
].get()); } void StoreInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<StoreInst>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 426, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<StoreInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned StoreInst::getNumOperands() const
{ return OperandTraits<StoreInst>::operands(this); } template
<int Idx_nocapture> Use &StoreInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &StoreInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
427
428//===----------------------------------------------------------------------===//
429// FenceInst Class
430//===----------------------------------------------------------------------===//
431
432/// An instruction for ordering other memory operations.
433class FenceInst : public Instruction {
434 using OrderingField = AtomicOrderingBitfieldElementT<0>;
435
436 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
437
438protected:
439 // Note: Instruction needs to be a friend here to call cloneImpl.
440 friend class Instruction;
441
442 FenceInst *cloneImpl() const;
443
444public:
445 // Ordering may only be Acquire, Release, AcquireRelease, or
446 // SequentiallyConsistent.
447 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
448 SyncScope::ID SSID = SyncScope::System,
449 Instruction *InsertBefore = nullptr);
450 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
451 BasicBlock *InsertAtEnd);
452
453 // allocate space for exactly zero operands
454 void *operator new(size_t S) { return User::operator new(S, 0); }
455 void operator delete(void *Ptr) { User::operator delete(Ptr); }
456
457 /// Returns the ordering constraint of this fence instruction.
458 AtomicOrdering getOrdering() const {
459 return getSubclassData<OrderingField>();
460 }
461
462 /// Sets the ordering constraint of this fence instruction. May only be
463 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
464 void setOrdering(AtomicOrdering Ordering) {
465 setSubclassData<OrderingField>(Ordering);
466 }
467
468 /// Returns the synchronization scope ID of this fence instruction.
469 SyncScope::ID getSyncScopeID() const {
470 return SSID;
471 }
472
473 /// Sets the synchronization scope ID of this fence instruction.
474 void setSyncScopeID(SyncScope::ID SSID) {
475 this->SSID = SSID;
476 }
477
478 // Methods for support type inquiry through isa, cast, and dyn_cast:
479 static bool classof(const Instruction *I) {
480 return I->getOpcode() == Instruction::Fence;
481 }
482 static bool classof(const Value *V) {
483 return isa<Instruction>(V) && classof(cast<Instruction>(V));
484 }
485
486private:
487 // Shadow Instruction::setInstructionSubclassData with a private forwarding
488 // method so that subclasses cannot accidentally use it.
489 template <typename Bitfield>
490 void setSubclassData(typename Bitfield::Type Value) {
491 Instruction::setSubclassData<Bitfield>(Value);
492 }
493
494 /// The synchronization scope ID of this fence instruction. Not quite enough
495 /// room in SubClassData for everything, so synchronization scope ID gets its
496 /// own field.
497 SyncScope::ID SSID;
498};
499
500//===----------------------------------------------------------------------===//
501// AtomicCmpXchgInst Class
502//===----------------------------------------------------------------------===//
503
504/// An instruction that atomically checks whether a
505/// specified value is in a memory location, and, if it is, stores a new value
506/// there. The value returned by this instruction is a pair containing the
507/// original value as first element, and an i1 indicating success (true) or
508/// failure (false) as second element.
509///
510class AtomicCmpXchgInst : public Instruction {
511 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
512 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
513 SyncScope::ID SSID);
514
515 template <unsigned Offset>
516 using AtomicOrderingBitfieldElement =
517 typename Bitfield::Element<AtomicOrdering, Offset, 3,
518 AtomicOrdering::LAST>;
519
520protected:
521 // Note: Instruction needs to be a friend here to call cloneImpl.
522 friend class Instruction;
523
524 AtomicCmpXchgInst *cloneImpl() const;
525
526public:
527 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
528 AtomicOrdering SuccessOrdering,
529 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
530 Instruction *InsertBefore = nullptr);
531 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
532 AtomicOrdering SuccessOrdering,
533 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
534 BasicBlock *InsertAtEnd);
535
536 // allocate space for exactly three operands
537 void *operator new(size_t S) { return User::operator new(S, 3); }
538 void operator delete(void *Ptr) { User::operator delete(Ptr); }
539
540 using VolatileField = BoolBitfieldElementT<0>;
541 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
542 using SuccessOrderingField =
543 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
544 using FailureOrderingField =
545 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
546 using AlignmentField =
547 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
548 static_assert(
549 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
550 FailureOrderingField, AlignmentField>(),
551 "Bitfields must be contiguous");
552
553 /// Return the alignment of the memory that is being allocated by the
554 /// instruction.
555 Align getAlign() const {
556 return Align(1ULL << getSubclassData<AlignmentField>());
557 }
558
559 void setAlignment(Align Align) {
560 setSubclassData<AlignmentField>(Log2(Align));
561 }
562
563 /// Return true if this is a cmpxchg from a volatile memory
564 /// location.
565 ///
566 bool isVolatile() const { return getSubclassData<VolatileField>(); }
567
568 /// Specify whether this is a volatile cmpxchg.
569 ///
570 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
571
572 /// Return true if this cmpxchg may spuriously fail.
573 bool isWeak() const { return getSubclassData<WeakField>(); }
574
575 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
576
577 /// Transparently provide more efficient getOperand methods.
578 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
579
580 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
581 return Ordering != AtomicOrdering::NotAtomic &&
582 Ordering != AtomicOrdering::Unordered;
583 }
584
585 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
586 return Ordering != AtomicOrdering::NotAtomic &&
587 Ordering != AtomicOrdering::Unordered &&
588 Ordering != AtomicOrdering::AcquireRelease &&
589 Ordering != AtomicOrdering::Release;
590 }
591
592 /// Returns the success ordering constraint of this cmpxchg instruction.
593 AtomicOrdering getSuccessOrdering() const {
594 return getSubclassData<SuccessOrderingField>();
595 }
596
597 /// Sets the success ordering constraint of this cmpxchg instruction.
598 void setSuccessOrdering(AtomicOrdering Ordering) {
599 assert(isValidSuccessOrdering(Ordering) &&(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "llvm/include/llvm/IR/Instructions.h", 600, __extension__ __PRETTY_FUNCTION__
))
600 "invalid CmpXchg success ordering")(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "llvm/include/llvm/IR/Instructions.h", 600, __extension__ __PRETTY_FUNCTION__
))
;
601 setSubclassData<SuccessOrderingField>(Ordering);
602 }
603
604 /// Returns the failure ordering constraint of this cmpxchg instruction.
605 AtomicOrdering getFailureOrdering() const {
606 return getSubclassData<FailureOrderingField>();
607 }
608
609 /// Sets the failure ordering constraint of this cmpxchg instruction.
610 void setFailureOrdering(AtomicOrdering Ordering) {
611 assert(isValidFailureOrdering(Ordering) &&(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "llvm/include/llvm/IR/Instructions.h", 612, __extension__ __PRETTY_FUNCTION__
))
612 "invalid CmpXchg failure ordering")(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "llvm/include/llvm/IR/Instructions.h", 612, __extension__ __PRETTY_FUNCTION__
))
;
613 setSubclassData<FailureOrderingField>(Ordering);
614 }
615
616 /// Returns a single ordering which is at least as strong as both the
617 /// success and failure orderings for this cmpxchg.
618 AtomicOrdering getMergedOrdering() const {
619 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
620 return AtomicOrdering::SequentiallyConsistent;
621 if (getFailureOrdering() == AtomicOrdering::Acquire) {
622 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
623 return AtomicOrdering::Acquire;
624 if (getSuccessOrdering() == AtomicOrdering::Release)
625 return AtomicOrdering::AcquireRelease;
626 }
627 return getSuccessOrdering();
628 }
629
630 /// Returns the synchronization scope ID of this cmpxchg instruction.
631 SyncScope::ID getSyncScopeID() const {
632 return SSID;
633 }
634
635 /// Sets the synchronization scope ID of this cmpxchg instruction.
636 void setSyncScopeID(SyncScope::ID SSID) {
637 this->SSID = SSID;
638 }
639
640 Value *getPointerOperand() { return getOperand(0); }
641 const Value *getPointerOperand() const { return getOperand(0); }
642 static unsigned getPointerOperandIndex() { return 0U; }
643
644 Value *getCompareOperand() { return getOperand(1); }
645 const Value *getCompareOperand() const { return getOperand(1); }
646
647 Value *getNewValOperand() { return getOperand(2); }
648 const Value *getNewValOperand() const { return getOperand(2); }
649
650 /// Returns the address space of the pointer operand.
651 unsigned getPointerAddressSpace() const {
652 return getPointerOperand()->getType()->getPointerAddressSpace();
653 }
654
655 /// Returns the strongest permitted ordering on failure, given the
656 /// desired ordering on success.
657 ///
658 /// If the comparison in a cmpxchg operation fails, there is no atomic store
659 /// so release semantics cannot be provided. So this function drops explicit
660 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
661 /// operation would remain SequentiallyConsistent.
662 static AtomicOrdering
663 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
664 switch (SuccessOrdering) {
665 default:
666 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "llvm/include/llvm/IR/Instructions.h", 666)
;
667 case AtomicOrdering::Release:
668 case AtomicOrdering::Monotonic:
669 return AtomicOrdering::Monotonic;
670 case AtomicOrdering::AcquireRelease:
671 case AtomicOrdering::Acquire:
672 return AtomicOrdering::Acquire;
673 case AtomicOrdering::SequentiallyConsistent:
674 return AtomicOrdering::SequentiallyConsistent;
675 }
676 }
677
678 // Methods for support type inquiry through isa, cast, and dyn_cast:
679 static bool classof(const Instruction *I) {
680 return I->getOpcode() == Instruction::AtomicCmpXchg;
681 }
682 static bool classof(const Value *V) {
683 return isa<Instruction>(V) && classof(cast<Instruction>(V));
684 }
685
686private:
687 // Shadow Instruction::setInstructionSubclassData with a private forwarding
688 // method so that subclasses cannot accidentally use it.
689 template <typename Bitfield>
690 void setSubclassData(typename Bitfield::Type Value) {
691 Instruction::setSubclassData<Bitfield>(Value);
692 }
693
694 /// The synchronization scope ID of this cmpxchg instruction. Not quite
695 /// enough room in SubClassData for everything, so synchronization scope ID
696 /// gets its own field.
697 SyncScope::ID SSID;
698};
699
700template <>
701struct OperandTraits<AtomicCmpXchgInst> :
702 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
703};
704
705DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 705, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<AtomicCmpXchgInst
>::op_begin(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture
].get()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 705, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<AtomicCmpXchgInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned AtomicCmpXchgInst::getNumOperands
() const { return OperandTraits<AtomicCmpXchgInst>::operands
(this); } template <int Idx_nocapture> Use &AtomicCmpXchgInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &AtomicCmpXchgInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
706
707//===----------------------------------------------------------------------===//
708// AtomicRMWInst Class
709//===----------------------------------------------------------------------===//
710
711/// an instruction that atomically reads a memory location,
712/// combines it with another value, and then stores the result back. Returns
713/// the old value.
714///
715class AtomicRMWInst : public Instruction {
716protected:
717 // Note: Instruction needs to be a friend here to call cloneImpl.
718 friend class Instruction;
719
720 AtomicRMWInst *cloneImpl() const;
721
722public:
723 /// This enumeration lists the possible modifications atomicrmw can make. In
724 /// the descriptions, 'p' is the pointer to the instruction's memory location,
725 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
726 /// instruction. These instructions always return 'old'.
727 enum BinOp : unsigned {
728 /// *p = v
729 Xchg,
730 /// *p = old + v
731 Add,
732 /// *p = old - v
733 Sub,
734 /// *p = old & v
735 And,
736 /// *p = ~(old & v)
737 Nand,
738 /// *p = old | v
739 Or,
740 /// *p = old ^ v
741 Xor,
742 /// *p = old >signed v ? old : v
743 Max,
744 /// *p = old <signed v ? old : v
745 Min,
746 /// *p = old >unsigned v ? old : v
747 UMax,
748 /// *p = old <unsigned v ? old : v
749 UMin,
750
751 /// *p = old + v
752 FAdd,
753
754 /// *p = old - v
755 FSub,
756
757 /// *p = maxnum(old, v)
758 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
759 FMax,
760
761 /// *p = minnum(old, v)
762 /// \p minnum matches the behavior of \p llvm.minnum.*.
763 FMin,
764
765 FIRST_BINOP = Xchg,
766 LAST_BINOP = FMin,
767 BAD_BINOP
768 };
769
770private:
771 template <unsigned Offset>
772 using AtomicOrderingBitfieldElement =
773 typename Bitfield::Element<AtomicOrdering, Offset, 3,
774 AtomicOrdering::LAST>;
775
776 template <unsigned Offset>
777 using BinOpBitfieldElement =
778 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
779
780public:
781 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
782 AtomicOrdering Ordering, SyncScope::ID SSID,
783 Instruction *InsertBefore = nullptr);
784 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
785 AtomicOrdering Ordering, SyncScope::ID SSID,
786 BasicBlock *InsertAtEnd);
787
788 // allocate space for exactly two operands
789 void *operator new(size_t S) { return User::operator new(S, 2); }
790 void operator delete(void *Ptr) { User::operator delete(Ptr); }
791
792 using VolatileField = BoolBitfieldElementT<0>;
793 using AtomicOrderingField =
794 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
795 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
796 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
797 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
798 OperationField, AlignmentField>(),
799 "Bitfields must be contiguous");
800
801 BinOp getOperation() const { return getSubclassData<OperationField>(); }
802
803 static StringRef getOperationName(BinOp Op);
804
805 static bool isFPOperation(BinOp Op) {
806 switch (Op) {
807 case AtomicRMWInst::FAdd:
808 case AtomicRMWInst::FSub:
809 case AtomicRMWInst::FMax:
810 case AtomicRMWInst::FMin:
811 return true;
812 default:
813 return false;
814 }
815 }
816
817 void setOperation(BinOp Operation) {
818 setSubclassData<OperationField>(Operation);
819 }
820
821 /// Return the alignment of the memory that is being allocated by the
822 /// instruction.
823 Align getAlign() const {
824 return Align(1ULL << getSubclassData<AlignmentField>());
825 }
826
827 void setAlignment(Align Align) {
828 setSubclassData<AlignmentField>(Log2(Align));
829 }
830
831 /// Return true if this is a RMW on a volatile memory location.
832 ///
833 bool isVolatile() const { return getSubclassData<VolatileField>(); }
834
835 /// Specify whether this is a volatile RMW or not.
836 ///
837 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
838
839 /// Transparently provide more efficient getOperand methods.
840 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
841
842 /// Returns the ordering constraint of this rmw instruction.
843 AtomicOrdering getOrdering() const {
844 return getSubclassData<AtomicOrderingField>();
845 }
846
847 /// Sets the ordering constraint of this rmw instruction.
848 void setOrdering(AtomicOrdering Ordering) {
849 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "llvm/include/llvm/IR/Instructions.h", 850, __extension__ __PRETTY_FUNCTION__
))
850 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "llvm/include/llvm/IR/Instructions.h", 850, __extension__ __PRETTY_FUNCTION__
))
;
851 assert(Ordering != AtomicOrdering::Unordered &&(static_cast <bool> (Ordering != AtomicOrdering::Unordered
&& "atomicrmw instructions cannot be unordered.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::Unordered && \"atomicrmw instructions cannot be unordered.\""
, "llvm/include/llvm/IR/Instructions.h", 852, __extension__ __PRETTY_FUNCTION__
))
852 "atomicrmw instructions cannot be unordered.")(static_cast <bool> (Ordering != AtomicOrdering::Unordered
&& "atomicrmw instructions cannot be unordered.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::Unordered && \"atomicrmw instructions cannot be unordered.\""
, "llvm/include/llvm/IR/Instructions.h", 852, __extension__ __PRETTY_FUNCTION__
))
;
853 setSubclassData<AtomicOrderingField>(Ordering);
854 }
855
856 /// Returns the synchronization scope ID of this rmw instruction.
857 SyncScope::ID getSyncScopeID() const {
858 return SSID;
859 }
860
861 /// Sets the synchronization scope ID of this rmw instruction.
862 void setSyncScopeID(SyncScope::ID SSID) {
863 this->SSID = SSID;
864 }
865
866 Value *getPointerOperand() { return getOperand(0); }
867 const Value *getPointerOperand() const { return getOperand(0); }
868 static unsigned getPointerOperandIndex() { return 0U; }
869
870 Value *getValOperand() { return getOperand(1); }
871 const Value *getValOperand() const { return getOperand(1); }
872
873 /// Returns the address space of the pointer operand.
874 unsigned getPointerAddressSpace() const {
875 return getPointerOperand()->getType()->getPointerAddressSpace();
876 }
877
878 bool isFloatingPointOperation() const {
879 return isFPOperation(getOperation());
880 }
881
882 // Methods for support type inquiry through isa, cast, and dyn_cast:
883 static bool classof(const Instruction *I) {
884 return I->getOpcode() == Instruction::AtomicRMW;
885 }
886 static bool classof(const Value *V) {
887 return isa<Instruction>(V) && classof(cast<Instruction>(V));
888 }
889
890private:
891 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
892 AtomicOrdering Ordering, SyncScope::ID SSID);
893
894 // Shadow Instruction::setInstructionSubclassData with a private forwarding
895 // method so that subclasses cannot accidentally use it.
896 template <typename Bitfield>
897 void setSubclassData(typename Bitfield::Type Value) {
898 Instruction::setSubclassData<Bitfield>(Value);
899 }
900
901 /// The synchronization scope ID of this rmw instruction. Not quite enough
902 /// room in SubClassData for everything, so synchronization scope ID gets its
903 /// own field.
904 SyncScope::ID SSID;
905};
906
907template <>
908struct OperandTraits<AtomicRMWInst>
909 : public FixedNumOperandTraits<AtomicRMWInst,2> {
910};
911
912DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 912, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<AtomicRMWInst
>::op_begin(const_cast<AtomicRMWInst*>(this))[i_nocapture
].get()); } void AtomicRMWInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 912, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<AtomicRMWInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned AtomicRMWInst::getNumOperands()
const { return OperandTraits<AtomicRMWInst>::operands(
this); } template <int Idx_nocapture> Use &AtomicRMWInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &AtomicRMWInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
913
914//===----------------------------------------------------------------------===//
915// GetElementPtrInst Class
916//===----------------------------------------------------------------------===//
917
918// checkGEPType - Simple wrapper function to give a better assertion failure
919// message on bad indexes for a gep instruction.
920//
921inline Type *checkGEPType(Type *Ty) {
922 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "llvm/include/llvm/IR/Instructions.h", 922, __extension__ __PRETTY_FUNCTION__
))
;
923 return Ty;
924}
925
926/// an instruction for type-safe pointer arithmetic to
927/// access elements of arrays and structs
928///
929class GetElementPtrInst : public Instruction {
930 Type *SourceElementType;
931 Type *ResultElementType;
932
933 GetElementPtrInst(const GetElementPtrInst &GEPI);
934
935 /// Constructors - Create a getelementptr instruction with a base pointer an
936 /// list of indices. The first ctor can optionally insert before an existing
937 /// instruction, the second appends the new instruction to the specified
938 /// BasicBlock.
939 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
940 ArrayRef<Value *> IdxList, unsigned Values,
941 const Twine &NameStr, Instruction *InsertBefore);
942 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
943 ArrayRef<Value *> IdxList, unsigned Values,
944 const Twine &NameStr, BasicBlock *InsertAtEnd);
945
946 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
947
948protected:
949 // Note: Instruction needs to be a friend here to call cloneImpl.
950 friend class Instruction;
951
952 GetElementPtrInst *cloneImpl() const;
953
954public:
955 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
956 ArrayRef<Value *> IdxList,
957 const Twine &NameStr = "",
958 Instruction *InsertBefore = nullptr) {
959 unsigned Values = 1 + unsigned(IdxList.size());
960 assert(PointeeType && "Must specify element type")(static_cast <bool> (PointeeType && "Must specify element type"
) ? void (0) : __assert_fail ("PointeeType && \"Must specify element type\""
, "llvm/include/llvm/IR/Instructions.h", 960, __extension__ __PRETTY_FUNCTION__
))
;
961 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "llvm/include/llvm/IR/Instructions.h", 962, __extension__ __PRETTY_FUNCTION__
))
962 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "llvm/include/llvm/IR/Instructions.h", 962, __extension__ __PRETTY_FUNCTION__
))
;
963 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
964 NameStr, InsertBefore);
965 }
966
967 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
968 ArrayRef<Value *> IdxList,
969 const Twine &NameStr,
970 BasicBlock *InsertAtEnd) {
971 unsigned Values = 1 + unsigned(IdxList.size());
972 assert(PointeeType && "Must specify element type")(static_cast <bool> (PointeeType && "Must specify element type"
) ? void (0) : __assert_fail ("PointeeType && \"Must specify element type\""
, "llvm/include/llvm/IR/Instructions.h", 972, __extension__ __PRETTY_FUNCTION__
))
;
973 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "llvm/include/llvm/IR/Instructions.h", 974, __extension__ __PRETTY_FUNCTION__
))
974 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "llvm/include/llvm/IR/Instructions.h", 974, __extension__ __PRETTY_FUNCTION__
))
;
975 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
976 NameStr, InsertAtEnd);
977 }
978
979 /// Create an "inbounds" getelementptr. See the documentation for the
980 /// "inbounds" flag in LangRef.html for details.
981 static GetElementPtrInst *
982 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
983 const Twine &NameStr = "",
984 Instruction *InsertBefore = nullptr) {
985 GetElementPtrInst *GEP =
986 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
987 GEP->setIsInBounds(true);
988 return GEP;
989 }
990
991 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
992 ArrayRef<Value *> IdxList,
993 const Twine &NameStr,
994 BasicBlock *InsertAtEnd) {
995 GetElementPtrInst *GEP =
996 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
997 GEP->setIsInBounds(true);
998 return GEP;
999 }
1000
1001 /// Transparently provide more efficient getOperand methods.
1002 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1003
1004 Type *getSourceElementType() const { return SourceElementType; }
1005
1006 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1007 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1008
1009 Type *getResultElementType() const {
1010 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1011, __extension__ __PRETTY_FUNCTION__
))
1011 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1011, __extension__ __PRETTY_FUNCTION__
))
;
1012 return ResultElementType;
1013 }
1014
1015 /// Returns the address space of this instruction's pointer type.
1016 unsigned getAddressSpace() const {
1017 // Note that this is always the same as the pointer operand's address space
1018 // and that is cheaper to compute, so cheat here.
1019 return getPointerAddressSpace();
1020 }
1021
1022 /// Returns the result type of a getelementptr with the given source
1023 /// element type and indexes.
1024 ///
1025 /// Null is returned if the indices are invalid for the specified
1026 /// source element type.
1027 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1028 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1029 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1030
1031 /// Return the type of the element at the given index of an indexable
1032 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1033 ///
1034 /// Returns null if the type can't be indexed, or the given index is not
1035 /// legal for the given type.
1036 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1037 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1038
1039 inline op_iterator idx_begin() { return op_begin()+1; }
1040 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1041 inline op_iterator idx_end() { return op_end(); }
1042 inline const_op_iterator idx_end() const { return op_end(); }
1043
1044 inline iterator_range<op_iterator> indices() {
1045 return make_range(idx_begin(), idx_end());
1046 }
1047
1048 inline iterator_range<const_op_iterator> indices() const {
1049 return make_range(idx_begin(), idx_end());
1050 }
1051
1052 Value *getPointerOperand() {
1053 return getOperand(0);
1054 }
1055 const Value *getPointerOperand() const {
1056 return getOperand(0);
1057 }
1058 static unsigned getPointerOperandIndex() {
1059 return 0U; // get index for modifying correct operand.
1060 }
1061
1062 /// Method to return the pointer operand as a
1063 /// PointerType.
1064 Type *getPointerOperandType() const {
1065 return getPointerOperand()->getType();
1066 }
1067
1068 /// Returns the address space of the pointer operand.
1069 unsigned getPointerAddressSpace() const {
1070 return getPointerOperandType()->getPointerAddressSpace();
1071 }
1072
1073 /// Returns the pointer type returned by the GEP
1074 /// instruction, which may be a vector of pointers.
1075 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1076 ArrayRef<Value *> IdxList) {
1077 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1078 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1079 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1080 Type *PtrTy = OrigPtrTy->isOpaque()
1081 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1082 : PointerType::get(ResultElemTy, AddrSpace);
1083 // Vector GEP
1084 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1085 ElementCount EltCount = PtrVTy->getElementCount();
1086 return VectorType::get(PtrTy, EltCount);
1087 }
1088 for (Value *Index : IdxList)
1089 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1090 ElementCount EltCount = IndexVTy->getElementCount();
1091 return VectorType::get(PtrTy, EltCount);
1092 }
1093 // Scalar GEP
1094 return PtrTy;
1095 }
1096
1097 unsigned getNumIndices() const { // Note: always non-negative
1098 return getNumOperands() - 1;
1099 }
1100
1101 bool hasIndices() const {
1102 return getNumOperands() > 1;
1103 }
1104
1105 /// Return true if all of the indices of this GEP are
1106 /// zeros. If so, the result pointer and the first operand have the same
1107 /// value, just potentially different types.
1108 bool hasAllZeroIndices() const;
1109
1110 /// Return true if all of the indices of this GEP are
1111 /// constant integers. If so, the result pointer and the first operand have
1112 /// a constant offset between them.
1113 bool hasAllConstantIndices() const;
1114
1115 /// Set or clear the inbounds flag on this GEP instruction.
1116 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1117 void setIsInBounds(bool b = true);
1118
1119 /// Determine whether the GEP has the inbounds flag.
1120 bool isInBounds() const;
1121
1122 /// Accumulate the constant address offset of this GEP if possible.
1123 ///
1124 /// This routine accepts an APInt into which it will accumulate the constant
1125 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1126 /// all-constant, it returns false and the value of the offset APInt is
1127 /// undefined (it is *not* preserved!). The APInt passed into this routine
1128 /// must be at least as wide as the IntPtr type for the address space of
1129 /// the base GEP pointer.
1130 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1131 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1132 MapVector<Value *, APInt> &VariableOffsets,
1133 APInt &ConstantOffset) const;
1134 // Methods for support type inquiry through isa, cast, and dyn_cast:
1135 static bool classof(const Instruction *I) {
1136 return (I->getOpcode() == Instruction::GetElementPtr);
1137 }
1138 static bool classof(const Value *V) {
1139 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1140 }
1141};
1142
1143template <>
1144struct OperandTraits<GetElementPtrInst> :
1145 public VariadicOperandTraits<GetElementPtrInst, 1> {
1146};
1147
1148GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1149 ArrayRef<Value *> IdxList, unsigned Values,
1150 const Twine &NameStr,
1151 Instruction *InsertBefore)
1152 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1153 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1154 Values, InsertBefore),
1155 SourceElementType(PointeeType),
1156 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1157 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1158, __extension__ __PRETTY_FUNCTION__
))
1158 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1158, __extension__ __PRETTY_FUNCTION__
))
;
1159 init(Ptr, IdxList, NameStr);
1160}
1161
1162GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1163 ArrayRef<Value *> IdxList, unsigned Values,
1164 const Twine &NameStr,
1165 BasicBlock *InsertAtEnd)
1166 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1167 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1168 Values, InsertAtEnd),
1169 SourceElementType(PointeeType),
1170 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1171 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1172, __extension__ __PRETTY_FUNCTION__
))
1172 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1172, __extension__ __PRETTY_FUNCTION__
))
;
1173 init(Ptr, IdxList, NameStr);
1174}
1175
1176DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1176, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<GetElementPtrInst
>::op_begin(const_cast<GetElementPtrInst*>(this))[i_nocapture
].get()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1176, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<GetElementPtrInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned GetElementPtrInst::getNumOperands
() const { return OperandTraits<GetElementPtrInst>::operands
(this); } template <int Idx_nocapture> Use &GetElementPtrInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &GetElementPtrInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1177
1178//===----------------------------------------------------------------------===//
1179// ICmpInst Class
1180//===----------------------------------------------------------------------===//
1181
1182/// This instruction compares its operands according to the predicate given
1183/// to the constructor. It only operates on integers or pointers. The operands
1184/// must be identical types.
1185/// Represent an integer comparison operator.
1186class ICmpInst: public CmpInst {
1187 void AssertOK() {
1188 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "llvm/include/llvm/IR/Instructions.h", 1189, __extension__ __PRETTY_FUNCTION__
))
1189 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "llvm/include/llvm/IR/Instructions.h", 1189, __extension__ __PRETTY_FUNCTION__
))
;
1190 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "llvm/include/llvm/IR/Instructions.h", 1191, __extension__ __PRETTY_FUNCTION__
))
1191 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "llvm/include/llvm/IR/Instructions.h", 1191, __extension__ __PRETTY_FUNCTION__
))
;
1192 // Check that the operands are the right type
1193 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1195, __extension__ __PRETTY_FUNCTION__
))
1194 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1195, __extension__ __PRETTY_FUNCTION__
))
1195 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1195, __extension__ __PRETTY_FUNCTION__
))
;
1196 }
1197
1198protected:
1199 // Note: Instruction needs to be a friend here to call cloneImpl.
1200 friend class Instruction;
1201
1202 /// Clone an identical ICmpInst
1203 ICmpInst *cloneImpl() const;
1204
1205public:
1206 /// Constructor with insert-before-instruction semantics.
1207 ICmpInst(
1208 Instruction *InsertBefore, ///< Where to insert
1209 Predicate pred, ///< The predicate to use for the comparison
1210 Value *LHS, ///< The left-hand-side of the expression
1211 Value *RHS, ///< The right-hand-side of the expression
1212 const Twine &NameStr = "" ///< Name of the instruction
1213 ) : CmpInst(makeCmpResultType(LHS->getType()),
1214 Instruction::ICmp, pred, LHS, RHS, NameStr,
1215 InsertBefore) {
1216#ifndef NDEBUG
1217 AssertOK();
1218#endif
1219 }
1220
1221 /// Constructor with insert-at-end semantics.
1222 ICmpInst(
1223 BasicBlock &InsertAtEnd, ///< Block to insert into.
1224 Predicate pred, ///< The predicate to use for the comparison
1225 Value *LHS, ///< The left-hand-side of the expression
1226 Value *RHS, ///< The right-hand-side of the expression
1227 const Twine &NameStr = "" ///< Name of the instruction
1228 ) : CmpInst(makeCmpResultType(LHS->getType()),
1229 Instruction::ICmp, pred, LHS, RHS, NameStr,
1230 &InsertAtEnd) {
1231#ifndef NDEBUG
1232 AssertOK();
1233#endif
1234 }
1235
1236 /// Constructor with no-insertion semantics
1237 ICmpInst(
1238 Predicate pred, ///< The predicate to use for the comparison
1239 Value *LHS, ///< The left-hand-side of the expression
1240 Value *RHS, ///< The right-hand-side of the expression
1241 const Twine &NameStr = "" ///< Name of the instruction
1242 ) : CmpInst(makeCmpResultType(LHS->getType()),
1243 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1244#ifndef NDEBUG
1245 AssertOK();
1246#endif
1247 }
1248
1249 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1250 /// @returns the predicate that would be the result if the operand were
1251 /// regarded as signed.
1252 /// Return the signed version of the predicate
1253 Predicate getSignedPredicate() const {
1254 return getSignedPredicate(getPredicate());
1255 }
1256
1257 /// This is a static version that you can use without an instruction.
1258 /// Return the signed version of the predicate.
1259 static Predicate getSignedPredicate(Predicate pred);
1260
1261 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1262 /// @returns the predicate that would be the result if the operand were
1263 /// regarded as unsigned.
1264 /// Return the unsigned version of the predicate
1265 Predicate getUnsignedPredicate() const {
1266 return getUnsignedPredicate(getPredicate());
1267 }
1268
1269 /// This is a static version that you can use without an instruction.
1270 /// Return the unsigned version of the predicate.
1271 static Predicate getUnsignedPredicate(Predicate pred);
1272
1273 /// Return true if this predicate is either EQ or NE. This also
1274 /// tests for commutativity.
1275 static bool isEquality(Predicate P) {
1276 return P == ICMP_EQ || P == ICMP_NE;
1277 }
1278
1279 /// Return true if this predicate is either EQ or NE. This also
1280 /// tests for commutativity.
1281 bool isEquality() const {
1282 return isEquality(getPredicate());
1283 }
1284
1285 /// @returns true if the predicate of this ICmpInst is commutative
1286 /// Determine if this relation is commutative.
1287 bool isCommutative() const { return isEquality(); }
1288
1289 /// Return true if the predicate is relational (not EQ or NE).
1290 ///
1291 bool isRelational() const {
1292 return !isEquality();
1293 }
1294
1295 /// Return true if the predicate is relational (not EQ or NE).
1296 ///
1297 static bool isRelational(Predicate P) {
1298 return !isEquality(P);
1299 }
1300
1301 /// Return true if the predicate is SGT or UGT.
1302 ///
1303 static bool isGT(Predicate P) {
1304 return P == ICMP_SGT || P == ICMP_UGT;
1305 }
1306
1307 /// Return true if the predicate is SLT or ULT.
1308 ///
1309 static bool isLT(Predicate P) {
1310 return P == ICMP_SLT || P == ICMP_ULT;
1311 }
1312
1313 /// Return true if the predicate is SGE or UGE.
1314 ///
1315 static bool isGE(Predicate P) {
1316 return P == ICMP_SGE || P == ICMP_UGE;
1317 }
1318
1319 /// Return true if the predicate is SLE or ULE.
1320 ///
1321 static bool isLE(Predicate P) {
1322 return P == ICMP_SLE || P == ICMP_ULE;
1323 }
1324
1325 /// Returns the sequence of all ICmp predicates.
1326 ///
1327 static auto predicates() { return ICmpPredicates(); }
1328
1329 /// Exchange the two operands to this instruction in such a way that it does
1330 /// not modify the semantics of the instruction. The predicate value may be
1331 /// changed to retain the same result if the predicate is order dependent
1332 /// (e.g. ult).
1333 /// Swap operands and adjust predicate.
1334 void swapOperands() {
1335 setPredicate(getSwappedPredicate());
1336 Op<0>().swap(Op<1>());
1337 }
1338
1339 /// Return result of `LHS Pred RHS` comparison.
1340 static bool compare(const APInt &LHS, const APInt &RHS,
1341 ICmpInst::Predicate Pred);
1342
1343 // Methods for support type inquiry through isa, cast, and dyn_cast:
1344 static bool classof(const Instruction *I) {
1345 return I->getOpcode() == Instruction::ICmp;
1346 }
1347 static bool classof(const Value *V) {
1348 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1349 }
1350};
1351
1352//===----------------------------------------------------------------------===//
1353// FCmpInst Class
1354//===----------------------------------------------------------------------===//
1355
1356/// This instruction compares its operands according to the predicate given
1357/// to the constructor. It only operates on floating point values or packed
1358/// vectors of floating point values. The operands must be identical types.
1359/// Represents a floating point comparison operator.
1360class FCmpInst: public CmpInst {
1361 void AssertOK() {
1362 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "llvm/include/llvm/IR/Instructions.h", 1362, __extension__ __PRETTY_FUNCTION__
))
;
1363 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "llvm/include/llvm/IR/Instructions.h", 1364, __extension__ __PRETTY_FUNCTION__
))
1364 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "llvm/include/llvm/IR/Instructions.h", 1364, __extension__ __PRETTY_FUNCTION__
))
;
1365 // Check that the operands are the right type
1366 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1367, __extension__ __PRETTY_FUNCTION__
))
1367 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1367, __extension__ __PRETTY_FUNCTION__
))
;
1368 }
1369
1370protected:
1371 // Note: Instruction needs to be a friend here to call cloneImpl.
1372 friend class Instruction;
1373
1374 /// Clone an identical FCmpInst
1375 FCmpInst *cloneImpl() const;
1376
1377public:
1378 /// Constructor with insert-before-instruction semantics.
1379 FCmpInst(
1380 Instruction *InsertBefore, ///< Where to insert
1381 Predicate pred, ///< The predicate to use for the comparison
1382 Value *LHS, ///< The left-hand-side of the expression
1383 Value *RHS, ///< The right-hand-side of the expression
1384 const Twine &NameStr = "" ///< Name of the instruction
1385 ) : CmpInst(makeCmpResultType(LHS->getType()),
1386 Instruction::FCmp, pred, LHS, RHS, NameStr,
1387 InsertBefore) {
1388 AssertOK();
1389 }
1390
1391 /// Constructor with insert-at-end semantics.
1392 FCmpInst(
1393 BasicBlock &InsertAtEnd, ///< Block to insert into.
1394 Predicate pred, ///< The predicate to use for the comparison
1395 Value *LHS, ///< The left-hand-side of the expression
1396 Value *RHS, ///< The right-hand-side of the expression
1397 const Twine &NameStr = "" ///< Name of the instruction
1398 ) : CmpInst(makeCmpResultType(LHS->getType()),
1399 Instruction::FCmp, pred, LHS, RHS, NameStr,
1400 &InsertAtEnd) {
1401 AssertOK();
1402 }
1403
1404 /// Constructor with no-insertion semantics
1405 FCmpInst(
1406 Predicate Pred, ///< The predicate to use for the comparison
1407 Value *LHS, ///< The left-hand-side of the expression
1408 Value *RHS, ///< The right-hand-side of the expression
1409 const Twine &NameStr = "", ///< Name of the instruction
1410 Instruction *FlagsSource = nullptr
1411 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1412 RHS, NameStr, nullptr, FlagsSource) {
1413 AssertOK();
1414 }
1415
1416 /// @returns true if the predicate of this instruction is EQ or NE.
1417 /// Determine if this is an equality predicate.
1418 static bool isEquality(Predicate Pred) {
1419 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1420 Pred == FCMP_UNE;
1421 }
1422
1423 /// @returns true if the predicate of this instruction is EQ or NE.
1424 /// Determine if this is an equality predicate.
1425 bool isEquality() const { return isEquality(getPredicate()); }
1426
1427 /// @returns true if the predicate of this instruction is commutative.
1428 /// Determine if this is a commutative predicate.
1429 bool isCommutative() const {
1430 return isEquality() ||
1431 getPredicate() == FCMP_FALSE ||
1432 getPredicate() == FCMP_TRUE ||
1433 getPredicate() == FCMP_ORD ||
1434 getPredicate() == FCMP_UNO;
1435 }
1436
1437 /// @returns true if the predicate is relational (not EQ or NE).
1438 /// Determine if this a relational predicate.
1439 bool isRelational() const { return !isEquality(); }
1440
1441 /// Exchange the two operands to this instruction in such a way that it does
1442 /// not modify the semantics of the instruction. The predicate value may be
1443 /// changed to retain the same result if the predicate is order dependent
1444 /// (e.g. ult).
1445 /// Swap operands and adjust predicate.
1446 void swapOperands() {
1447 setPredicate(getSwappedPredicate());
1448 Op<0>().swap(Op<1>());
1449 }
1450
1451 /// Returns the sequence of all FCmp predicates.
1452 ///
1453 static auto predicates() { return FCmpPredicates(); }
1454
1455 /// Return result of `LHS Pred RHS` comparison.
1456 static bool compare(const APFloat &LHS, const APFloat &RHS,
1457 FCmpInst::Predicate Pred);
1458
1459 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1460 static bool classof(const Instruction *I) {
1461 return I->getOpcode() == Instruction::FCmp;
1462 }
1463 static bool classof(const Value *V) {
1464 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1465 }
1466};
1467
1468//===----------------------------------------------------------------------===//
1469/// This class represents a function call, abstracting a target
1470/// machine's calling convention. This class uses low bit of the SubClassData
1471/// field to indicate whether or not this is a tail call. The rest of the bits
1472/// hold the calling convention of the call.
1473///
1474class CallInst : public CallBase {
1475 CallInst(const CallInst &CI);
1476
1477 /// Construct a CallInst given a range of arguments.
1478 /// Construct a CallInst from a range of arguments
1479 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1480 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1481 Instruction *InsertBefore);
1482
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 const Twine &NameStr, Instruction *InsertBefore)
1485 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1486
1487 /// Construct a CallInst given a range of arguments.
1488 /// Construct a CallInst from a range of arguments
1489 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1490 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1491 BasicBlock *InsertAtEnd);
1492
1493 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1494 Instruction *InsertBefore);
1495
1496 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1497 BasicBlock *InsertAtEnd);
1498
1499 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1500 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1501 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1502
1503 /// Compute the number of operands to allocate.
1504 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1505 // We need one operand for the called function, plus the input operand
1506 // counts provided.
1507 return 1 + NumArgs + NumBundleInputs;
1508 }
1509
1510protected:
1511 // Note: Instruction needs to be a friend here to call cloneImpl.
1512 friend class Instruction;
1513
1514 CallInst *cloneImpl() const;
1515
1516public:
1517 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1518 Instruction *InsertBefore = nullptr) {
1519 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1520 }
1521
1522 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1523 const Twine &NameStr,
1524 Instruction *InsertBefore = nullptr) {
1525 return new (ComputeNumOperands(Args.size()))
1526 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1527 }
1528
1529 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1530 ArrayRef<OperandBundleDef> Bundles = None,
1531 const Twine &NameStr = "",
1532 Instruction *InsertBefore = nullptr) {
1533 const int NumOperands =
1534 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1535 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1536
1537 return new (NumOperands, DescriptorBytes)
1538 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1539 }
1540
1541 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1542 BasicBlock *InsertAtEnd) {
1543 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1544 }
1545
1546 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1547 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1548 return new (ComputeNumOperands(Args.size()))
1549 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1550 }
1551
1552 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1553 ArrayRef<OperandBundleDef> Bundles,
1554 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1555 const int NumOperands =
1556 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1557 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1558
1559 return new (NumOperands, DescriptorBytes)
1560 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1561 }
1562
1563 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1564 Instruction *InsertBefore = nullptr) {
1565 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1566 InsertBefore);
1567 }
1568
1569 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1570 ArrayRef<OperandBundleDef> Bundles = None,
1571 const Twine &NameStr = "",
1572 Instruction *InsertBefore = nullptr) {
1573 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1574 NameStr, InsertBefore);
1575 }
1576
1577 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1578 const Twine &NameStr,
1579 Instruction *InsertBefore = nullptr) {
1580 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1581 InsertBefore);
1582 }
1583
1584 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1585 BasicBlock *InsertAtEnd) {
1586 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1587 InsertAtEnd);
1588 }
1589
1590 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1591 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1592 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1593 InsertAtEnd);
1594 }
1595
1596 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1597 ArrayRef<OperandBundleDef> Bundles,
1598 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1599 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1600 NameStr, InsertAtEnd);
1601 }
1602
1603 /// Create a clone of \p CI with a different set of operand bundles and
1604 /// insert it before \p InsertPt.
1605 ///
1606 /// The returned call instruction is identical \p CI in every way except that
1607 /// the operand bundles for the new instruction are set to the operand bundles
1608 /// in \p Bundles.
1609 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1610 Instruction *InsertPt = nullptr);
1611
1612 /// Generate the IR for a call to malloc:
1613 /// 1. Compute the malloc call's argument as the specified type's size,
1614 /// possibly multiplied by the array size if the array size is not
1615 /// constant 1.
1616 /// 2. Call malloc with that argument.
1617 /// 3. Bitcast the result of the malloc call to the specified type.
1618 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1619 Type *AllocTy, Value *AllocSize,
1620 Value *ArraySize = nullptr,
1621 Function *MallocF = nullptr,
1622 const Twine &Name = "");
1623 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1624 Type *AllocTy, Value *AllocSize,
1625 Value *ArraySize = nullptr,
1626 Function *MallocF = nullptr,
1627 const Twine &Name = "");
1628 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1629 Type *AllocTy, Value *AllocSize,
1630 Value *ArraySize = nullptr,
1631 ArrayRef<OperandBundleDef> Bundles = None,
1632 Function *MallocF = nullptr,
1633 const Twine &Name = "");
1634 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1635 Type *AllocTy, Value *AllocSize,
1636 Value *ArraySize = nullptr,
1637 ArrayRef<OperandBundleDef> Bundles = None,
1638 Function *MallocF = nullptr,
1639 const Twine &Name = "");
1640 /// Generate the IR for a call to the builtin free function.
1641 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1642 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1643 static Instruction *CreateFree(Value *Source,
1644 ArrayRef<OperandBundleDef> Bundles,
1645 Instruction *InsertBefore);
1646 static Instruction *CreateFree(Value *Source,
1647 ArrayRef<OperandBundleDef> Bundles,
1648 BasicBlock *InsertAtEnd);
1649
1650 // Note that 'musttail' implies 'tail'.
1651 enum TailCallKind : unsigned {
1652 TCK_None = 0,
1653 TCK_Tail = 1,
1654 TCK_MustTail = 2,
1655 TCK_NoTail = 3,
1656 TCK_LAST = TCK_NoTail
1657 };
1658
1659 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1660 static_assert(
1661 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1662 "Bitfields must be contiguous");
1663
1664 TailCallKind getTailCallKind() const {
1665 return getSubclassData<TailCallKindField>();
1666 }
1667
1668 bool isTailCall() const {
1669 TailCallKind Kind = getTailCallKind();
1670 return Kind == TCK_Tail || Kind == TCK_MustTail;
1671 }
1672
1673 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1674
1675 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1676
1677 void setTailCallKind(TailCallKind TCK) {
1678 setSubclassData<TailCallKindField>(TCK);
1679 }
1680
1681 void setTailCall(bool IsTc = true) {
1682 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1683 }
1684
1685 /// Return true if the call can return twice
1686 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1687 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1688
1689 // Methods for support type inquiry through isa, cast, and dyn_cast:
1690 static bool classof(const Instruction *I) {
1691 return I->getOpcode() == Instruction::Call;
1692 }
1693 static bool classof(const Value *V) {
1694 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1695 }
1696
1697 /// Updates profile metadata by scaling it by \p S / \p T.
1698 void updateProfWeight(uint64_t S, uint64_t T);
1699
1700private:
1701 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1702 // method so that subclasses cannot accidentally use it.
1703 template <typename Bitfield>
1704 void setSubclassData(typename Bitfield::Type Value) {
1705 Instruction::setSubclassData<Bitfield>(Value);
1706 }
1707};
1708
1709CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1710 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1711 BasicBlock *InsertAtEnd)
1712 : CallBase(Ty->getReturnType(), Instruction::Call,
1713 OperandTraits<CallBase>::op_end(this) -
1714 (Args.size() + CountBundleInputs(Bundles) + 1),
1715 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1716 InsertAtEnd) {
1717 init(Ty, Func, Args, Bundles, NameStr);
1718}
1719
1720CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1721 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1722 Instruction *InsertBefore)
1723 : CallBase(Ty->getReturnType(), Instruction::Call,
1724 OperandTraits<CallBase>::op_end(this) -
1725 (Args.size() + CountBundleInputs(Bundles) + 1),
1726 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1727 InsertBefore) {
1728 init(Ty, Func, Args, Bundles, NameStr);
1729}
1730
1731//===----------------------------------------------------------------------===//
1732// SelectInst Class
1733//===----------------------------------------------------------------------===//
1734
1735/// This class represents the LLVM 'select' instruction.
1736///
1737class SelectInst : public Instruction {
1738 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1739 Instruction *InsertBefore)
1740 : Instruction(S1->getType(), Instruction::Select,
1741 &Op<0>(), 3, InsertBefore) {
1742 init(C, S1, S2);
1743 setName(NameStr);
1744 }
1745
1746 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1747 BasicBlock *InsertAtEnd)
1748 : Instruction(S1->getType(), Instruction::Select,
1749 &Op<0>(), 3, InsertAtEnd) {
1750 init(C, S1, S2);
1751 setName(NameStr);
1752 }
1753
1754 void init(Value *C, Value *S1, Value *S2) {
1755 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "llvm/include/llvm/IR/Instructions.h", 1755, __extension__ __PRETTY_FUNCTION__
))
;
1756 Op<0>() = C;
1757 Op<1>() = S1;
1758 Op<2>() = S2;
1759 }
1760
1761protected:
1762 // Note: Instruction needs to be a friend here to call cloneImpl.
1763 friend class Instruction;
1764
1765 SelectInst *cloneImpl() const;
1766
1767public:
1768 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1769 const Twine &NameStr = "",
1770 Instruction *InsertBefore = nullptr,
1771 Instruction *MDFrom = nullptr) {
1772 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1773 if (MDFrom)
1774 Sel->copyMetadata(*MDFrom);
1775 return Sel;
1776 }
1777
1778 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1779 const Twine &NameStr,
1780 BasicBlock *InsertAtEnd) {
1781 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1782 }
1783
1784 const Value *getCondition() const { return Op<0>(); }
1785 const Value *getTrueValue() const { return Op<1>(); }
1786 const Value *getFalseValue() const { return Op<2>(); }
1787 Value *getCondition() { return Op<0>(); }
1788 Value *getTrueValue() { return Op<1>(); }
1789 Value *getFalseValue() { return Op<2>(); }
1790
1791 void setCondition(Value *V) { Op<0>() = V; }
1792 void setTrueValue(Value *V) { Op<1>() = V; }
1793 void setFalseValue(Value *V) { Op<2>() = V; }
1794
1795 /// Swap the true and false values of the select instruction.
1796 /// This doesn't swap prof metadata.
1797 void swapValues() { Op<1>().swap(Op<2>()); }
1798
1799 /// Return a string if the specified operands are invalid
1800 /// for a select operation, otherwise return null.
1801 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1802
1803 /// Transparently provide more efficient getOperand methods.
1804 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1805
1806 OtherOps getOpcode() const {
1807 return static_cast<OtherOps>(Instruction::getOpcode());
1808 }
1809
1810 // Methods for support type inquiry through isa, cast, and dyn_cast:
1811 static bool classof(const Instruction *I) {
1812 return I->getOpcode() == Instruction::Select;
1813 }
1814 static bool classof(const Value *V) {
1815 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1816 }
1817};
1818
1819template <>
1820struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1821};
1822
1823DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SelectInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1823, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this))[i_nocapture
].get()); } void SelectInst::setOperand(unsigned i_nocapture,
Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<SelectInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1823, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<SelectInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned SelectInst::getNumOperands() const
{ return OperandTraits<SelectInst>::operands(this); } template
<int Idx_nocapture> Use &SelectInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &SelectInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
1824
1825//===----------------------------------------------------------------------===//
1826// VAArgInst Class
1827//===----------------------------------------------------------------------===//
1828
1829/// This class represents the va_arg llvm instruction, which returns
1830/// an argument of the specified type given a va_list and increments that list
1831///
1832class VAArgInst : public UnaryInstruction {
1833protected:
1834 // Note: Instruction needs to be a friend here to call cloneImpl.
1835 friend class Instruction;
1836
1837 VAArgInst *cloneImpl() const;
1838
1839public:
1840 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1841 Instruction *InsertBefore = nullptr)
1842 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1843 setName(NameStr);
1844 }
1845
1846 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1847 BasicBlock *InsertAtEnd)
1848 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1849 setName(NameStr);
1850 }
1851
1852 Value *getPointerOperand() { return getOperand(0); }
1853 const Value *getPointerOperand() const { return getOperand(0); }
1854 static unsigned getPointerOperandIndex() { return 0U; }
1855
1856 // Methods for support type inquiry through isa, cast, and dyn_cast:
1857 static bool classof(const Instruction *I) {
1858 return I->getOpcode() == VAArg;
1859 }
1860 static bool classof(const Value *V) {
1861 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1862 }
1863};
1864
1865//===----------------------------------------------------------------------===//
1866// ExtractElementInst Class
1867//===----------------------------------------------------------------------===//
1868
1869/// This instruction extracts a single (scalar)
1870/// element from a VectorType value
1871///
1872class ExtractElementInst : public Instruction {
1873 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1874 Instruction *InsertBefore = nullptr);
1875 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1876 BasicBlock *InsertAtEnd);
1877
1878protected:
1879 // Note: Instruction needs to be a friend here to call cloneImpl.
1880 friend class Instruction;
1881
1882 ExtractElementInst *cloneImpl() const;
1883
1884public:
1885 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1886 const Twine &NameStr = "",
1887 Instruction *InsertBefore = nullptr) {
1888 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1889 }
1890
1891 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1892 const Twine &NameStr,
1893 BasicBlock *InsertAtEnd) {
1894 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1895 }
1896
1897 /// Return true if an extractelement instruction can be
1898 /// formed with the specified operands.
1899 static bool isValidOperands(const Value *Vec, const Value *Idx);
1900
1901 Value *getVectorOperand() { return Op<0>(); }
1902 Value *getIndexOperand() { return Op<1>(); }
1903 const Value *getVectorOperand() const { return Op<0>(); }
1904 const Value *getIndexOperand() const { return Op<1>(); }
1905
1906 VectorType *getVectorOperandType() const {
1907 return cast<VectorType>(getVectorOperand()->getType());
1908 }
1909
1910 /// Transparently provide more efficient getOperand methods.
1911 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1912
1913 // Methods for support type inquiry through isa, cast, and dyn_cast:
1914 static bool classof(const Instruction *I) {
1915 return I->getOpcode() == Instruction::ExtractElement;
1916 }
1917 static bool classof(const Value *V) {
1918 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1919 }
1920};
1921
1922template <>
1923struct OperandTraits<ExtractElementInst> :
1924 public FixedNumOperandTraits<ExtractElementInst, 2> {
1925};
1926
1927DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast <bool> (i_nocapture < OperandTraits<
ExtractElementInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1927, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this))[i_nocapture
].get()); } void ExtractElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ExtractElementInst>::operands(this)
&& "setOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1927, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<ExtractElementInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ExtractElementInst::getNumOperands
() const { return OperandTraits<ExtractElementInst>::operands
(this); } template <int Idx_nocapture> Use &ExtractElementInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &ExtractElementInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1928
1929//===----------------------------------------------------------------------===//
1930// InsertElementInst Class
1931//===----------------------------------------------------------------------===//
1932
1933/// This instruction inserts a single (scalar)
1934/// element into a VectorType value
1935///
1936class InsertElementInst : public Instruction {
1937 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1938 const Twine &NameStr = "",
1939 Instruction *InsertBefore = nullptr);
1940 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1941 BasicBlock *InsertAtEnd);
1942
1943protected:
1944 // Note: Instruction needs to be a friend here to call cloneImpl.
1945 friend class Instruction;
1946
1947 InsertElementInst *cloneImpl() const;
1948
1949public:
1950 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1951 const Twine &NameStr = "",
1952 Instruction *InsertBefore = nullptr) {
1953 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1954 }
1955
1956 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1957 const Twine &NameStr,
1958 BasicBlock *InsertAtEnd) {
1959 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1960 }
1961
1962 /// Return true if an insertelement instruction can be
1963 /// formed with the specified operands.
1964 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1965 const Value *Idx);
1966
1967 /// Overload to return most specific vector type.
1968 ///
1969 VectorType *getType() const {
1970 return cast<VectorType>(Instruction::getType());
1971 }
1972
1973 /// Transparently provide more efficient getOperand methods.
1974 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1975
1976 // Methods for support type inquiry through isa, cast, and dyn_cast:
1977 static bool classof(const Instruction *I) {
1978 return I->getOpcode() == Instruction::InsertElement;
1979 }
1980 static bool classof(const Value *V) {
1981 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1982 }
1983};
1984
1985template <>
1986struct OperandTraits<InsertElementInst> :
1987 public FixedNumOperandTraits<InsertElementInst, 3> {
1988};
1989
// Out-of-line definitions of InsertElementInst's transparent operand
// accessors (op_begin/op_end/getOperand/setOperand/getNumOperands/Op<N>),
// produced by the DEFINE_TRANSPARENT_OPERAND_ACCESSORS macro and shown here
// in analyzer-expanded form. They delegate to
// OperandTraits<InsertElementInst> (3 fixed operands: vector, element, index)
// to index directly into the co-allocated Use array.
1990DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1990, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<InsertElementInst
>::op_begin(const_cast<InsertElementInst*>(this))[i_nocapture
].get()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1990, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<InsertElementInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned InsertElementInst::getNumOperands
() const { return OperandTraits<InsertElementInst>::operands
(this); } template <int Idx_nocapture> Use &InsertElementInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &InsertElementInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1991
1992//===----------------------------------------------------------------------===//
1993// ShuffleVectorInst Class
1994//===----------------------------------------------------------------------===//
1995
// Sentinel shuffle-mask element: -1 marks a result lane whose value is
// undefined (no source element selected); see the ShuffleVectorInst docs.
1996constexpr int UndefMaskElem = -1;
1997
1998/// This instruction constructs a fixed permutation of two
1999/// input vectors.
2000///
2001/// For each element of the result vector, the shuffle mask selects an element
2002/// from one of the input vectors to copy to the result. Non-negative elements
2003/// in the mask represent an index into the concatenated pair of input vectors.
2004/// UndefMaskElem (-1) specifies that the result element is undefined.
2005///
2006/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2007/// requirement may be relaxed in the future.
2008class ShuffleVectorInst : public Instruction {
  // The shuffle mask, stored out-of-line as integers rather than as a third
  // Value operand; UndefMaskElem (-1) marks undefined lanes.
2009 SmallVector<int, 4> ShuffleMask;
  // Cached constant-vector form of the mask, kept only so bitcode emission
  // can round-trip the historical encoding (see getShuffleMaskForBitcode).
2010 Constant *ShuffleMaskForBitcode;
2011
2012protected:
2013 // Note: Instruction needs to be a friend here to call cloneImpl.
2014 friend class Instruction;
2015
2016 ShuffleVectorInst *cloneImpl() const;
2017
2018public:
2019 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2020 Instruction *InsertBefore = nullptr);
2021 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2022 BasicBlock *InsertAtEnd);
2023 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2024 Instruction *InsertBefore = nullptr);
2025 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2026 BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2028 const Twine &NameStr = "",
2029 Instruction *InsertBefor = nullptr);
2030 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2031 const Twine &NameStr, BasicBlock *InsertAtEnd);
2032 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2033 const Twine &NameStr = "",
2034 Instruction *InsertBefor = nullptr);
2035 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2036 const Twine &NameStr, BasicBlock *InsertAtEnd);
2037
 // Allocate space for exactly two operands (the two source vectors); the
 // mask is not an operand.
2038 void *operator new(size_t S) { return User::operator new(S, 2); }
2039 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2040
2041 /// Swap the operands and adjust the mask to preserve the semantics
2042 /// of the instruction.
2043 void commute();
2044
2045 /// Return true if a shufflevector instruction can be
2046 /// formed with the specified operands.
2047 static bool isValidOperands(const Value *V1, const Value *V2,
2048 const Value *Mask);
2049 static bool isValidOperands(const Value *V1, const Value *V2,
2050 ArrayRef<int> Mask);
2051
2052 /// Overload to return most specific vector type.
2053 ///
2054 VectorType *getType() const {
2055 return cast<VectorType>(Instruction::getType());
2056 }
2057
2058 /// Transparently provide more efficient getOperand methods.
2059 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2060
2061 /// Return the shuffle mask value of this instruction for the given element
2062 /// index. Return UndefMaskElem if the element is undef.
2063 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2064
2065 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 static void getShuffleMask(const Constant *Mask,
2068 SmallVectorImpl<int> &Result);
2069
2070 /// Return the mask for this instruction as a vector of integers. Undefined
2071 /// elements of the mask are returned as UndefMaskElem.
2072 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2073 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2074 }
2075
2076 /// Return the mask for this instruction, for use in bitcode.
2077 ///
2078 /// TODO: This is temporary until we decide a new bitcode encoding for
2079 /// shufflevector.
2080 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2081
2082 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2083 Type *ResultTy);
2084
2085 void setShuffleMask(ArrayRef<int> Mask);
2086
2087 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2088
2089 /// Return true if this shuffle returns a vector with a different number of
2090 /// elements than its source vectors.
2091 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2092 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2093 bool changesLength() const {
2094 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2095 ->getElementCount()
2096 .getKnownMinValue();
2097 unsigned NumMaskElts = ShuffleMask.size();
2098 return NumSourceElts != NumMaskElts;
2099 }
2100
2101 /// Return true if this shuffle returns a vector with a greater number of
2102 /// elements than its source vectors.
2103 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2104 bool increasesLength() const {
2105 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2106 ->getElementCount()
2107 .getKnownMinValue();
2108 unsigned NumMaskElts = ShuffleMask.size();
2109 return NumSourceElts < NumMaskElts;
2110 }
2111
2112 /// Return true if this shuffle mask chooses elements from exactly one source
2113 /// vector.
2114 /// Example: <7,5,undef,7>
2115 /// This assumes that vector operands are the same length as the mask.
2116 static bool isSingleSourceMask(ArrayRef<int> Mask);
2117 static bool isSingleSourceMask(const Constant *Mask) {
2118 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2118, __extension__ __PRETTY_FUNCTION__
))
;
2119 SmallVector<int, 16> MaskAsInts;
2120 getShuffleMask(Mask, MaskAsInts);
2121 return isSingleSourceMask(MaskAsInts);
2122 }
2123
2124 /// Return true if this shuffle chooses elements from exactly one source
2125 /// vector without changing the length of that vector.
2126 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2127 /// TODO: Optionally allow length-changing shuffles.
2128 bool isSingleSource() const {
2129 return !changesLength() && isSingleSourceMask(ShuffleMask);
2130 }
2131
2132 /// Return true if this shuffle mask chooses elements from exactly one source
2133 /// vector without lane crossings. A shuffle using this mask is not
2134 /// necessarily a no-op because it may change the number of elements from its
2135 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2136 /// Example: <undef,undef,2,3>
2137 static bool isIdentityMask(ArrayRef<int> Mask);
2138 static bool isIdentityMask(const Constant *Mask) {
2139 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2139, __extension__ __PRETTY_FUNCTION__
))
;
2140
2141 // Not possible to express a shuffle mask for a scalable vector for this
2142 // case.
2143 if (isa<ScalableVectorType>(Mask->getType()))
2144 return false;
2145
2146 SmallVector<int, 16> MaskAsInts;
2147 getShuffleMask(Mask, MaskAsInts);
2148 return isIdentityMask(MaskAsInts);
2149 }
2150
2151 /// Return true if this shuffle chooses elements from exactly one source
2152 /// vector without lane crossings and does not change the number of elements
2153 /// from its input vectors.
2154 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2155 bool isIdentity() const {
2156 // Not possible to express a shuffle mask for a scalable vector for this
2157 // case.
2158 if (isa<ScalableVectorType>(getType()))
2159 return false;
2160
2161 return !changesLength() && isIdentityMask(ShuffleMask);
2162 }
2163
2164 /// Return true if this shuffle lengthens exactly one source vector with
2165 /// undefs in the high elements.
2166 bool isIdentityWithPadding() const;
2167
2168 /// Return true if this shuffle extracts the first N elements of exactly one
2169 /// source vector.
2170 bool isIdentityWithExtract() const;
2171
2172 /// Return true if this shuffle concatenates its 2 source vectors. This
2173 /// returns false if either input is undefined. In that case, the shuffle is
2174 /// better classified as an identity with padding operation.
2175 bool isConcat() const;
2176
2177 /// Return true if this shuffle mask chooses elements from its source vectors
2178 /// without lane crossings. A shuffle using this mask would be
2179 /// equivalent to a vector select with a constant condition operand.
2180 /// Example: <4,1,6,undef>
2181 /// This returns false if the mask does not choose from both input vectors.
2182 /// In that case, the shuffle is better classified as an identity shuffle.
2183 /// This assumes that vector operands are the same length as the mask
2184 /// (a length-changing shuffle can never be equivalent to a vector select).
2185 static bool isSelectMask(ArrayRef<int> Mask);
2186 static bool isSelectMask(const Constant *Mask) {
2187 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2187, __extension__ __PRETTY_FUNCTION__
))
;
2188 SmallVector<int, 16> MaskAsInts;
2189 getShuffleMask(Mask, MaskAsInts);
2190 return isSelectMask(MaskAsInts);
2191 }
2192
2193 /// Return true if this shuffle chooses elements from its source vectors
2194 /// without lane crossings and all operands have the same number of elements.
2195 /// In other words, this shuffle is equivalent to a vector select with a
2196 /// constant condition operand.
2197 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2198 /// This returns false if the mask does not choose from both input vectors.
2199 /// In that case, the shuffle is better classified as an identity shuffle.
2200 /// TODO: Optionally allow length-changing shuffles.
2201 bool isSelect() const {
2202 return !changesLength() && isSelectMask(ShuffleMask);
2203 }
2204
2205 /// Return true if this shuffle mask swaps the order of elements from exactly
2206 /// one source vector.
2207 /// Example: <7,6,undef,4>
2208 /// This assumes that vector operands are the same length as the mask.
2209 static bool isReverseMask(ArrayRef<int> Mask);
2210 static bool isReverseMask(const Constant *Mask) {
2211 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2211, __extension__ __PRETTY_FUNCTION__
))
;
2212 SmallVector<int, 16> MaskAsInts;
2213 getShuffleMask(Mask, MaskAsInts);
2214 return isReverseMask(MaskAsInts);
2215 }
2216
2217 /// Return true if this shuffle swaps the order of elements from exactly
2218 /// one source vector.
2219 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2220 /// TODO: Optionally allow length-changing shuffles.
2221 bool isReverse() const {
2222 return !changesLength() && isReverseMask(ShuffleMask);
2223 }
2224
2225 /// Return true if this shuffle mask chooses all elements with the same value
2226 /// as the first element of exactly one source vector.
2227 /// Example: <4,undef,undef,4>
2228 /// This assumes that vector operands are the same length as the mask.
2229 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2230 static bool isZeroEltSplatMask(const Constant *Mask) {
2231 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2231, __extension__ __PRETTY_FUNCTION__
))
;
2232 SmallVector<int, 16> MaskAsInts;
2233 getShuffleMask(Mask, MaskAsInts);
2234 return isZeroEltSplatMask(MaskAsInts);
2235 }
2236
2237 /// Return true if all elements of this shuffle are the same value as the
2238 /// first element of exactly one source vector without changing the length
2239 /// of that vector.
2240 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2241 /// TODO: Optionally allow length-changing shuffles.
2242 /// TODO: Optionally allow splats from other elements.
2243 bool isZeroEltSplat() const {
2244 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2245 }
2246
2247 /// Return true if this shuffle mask is a transpose mask.
2248 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2249 /// even- or odd-numbered vector elements from two n-dimensional source
2250 /// vectors and write each result into consecutive elements of an
2251 /// n-dimensional destination vector. Two shuffles are necessary to complete
2252 /// the transpose, one for the even elements and another for the odd elements.
2253 /// This description closely follows how the TRN1 and TRN2 AArch64
2254 /// instructions operate.
2255 ///
2256 /// For example, a simple 2x2 matrix can be transposed with:
2257 ///
2258 /// ; Original matrix
2259 /// m0 = < a, b >
2260 /// m1 = < c, d >
2261 ///
2262 /// ; Transposed matrix
2263 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2264 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2265 ///
2266 /// For matrices having greater than n columns, the resulting nx2 transposed
2267 /// matrix is stored in two result vectors such that one vector contains
2268 /// interleaved elements from all the even-numbered rows and the other vector
2269 /// contains interleaved elements from all the odd-numbered rows. For example,
2270 /// a 2x4 matrix can be transposed with:
2271 ///
2272 /// ; Original matrix
2273 /// m0 = < a, b, c, d >
2274 /// m1 = < e, f, g, h >
2275 ///
2276 /// ; Transposed matrix
2277 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2278 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2279 static bool isTransposeMask(ArrayRef<int> Mask);
2280 static bool isTransposeMask(const Constant *Mask) {
2281 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2281, __extension__ __PRETTY_FUNCTION__
))
;
2282 SmallVector<int, 16> MaskAsInts;
2283 getShuffleMask(Mask, MaskAsInts);
2284 return isTransposeMask(MaskAsInts);
2285 }
2286
2287 /// Return true if this shuffle transposes the elements of its inputs without
2288 /// changing the length of the vectors. This operation may also be known as a
2289 /// merge or interleave. See the description for isTransposeMask() for the
2290 /// exact specification.
2291 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2292 bool isTranspose() const {
2293 return !changesLength() && isTransposeMask(ShuffleMask);
2294 }
2295
2296 /// Return true if this shuffle mask is a splice mask, concatenating the two
2297 /// inputs together and then extracts an original width vector starting from
2298 /// the splice index.
2299 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2300 static bool isSpliceMask(ArrayRef<int> Mask, int &Index);
2301 static bool isSpliceMask(const Constant *Mask, int &Index) {
2302 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2302, __extension__ __PRETTY_FUNCTION__
))
;
2303 SmallVector<int, 16> MaskAsInts;
2304 getShuffleMask(Mask, MaskAsInts);
2305 return isSpliceMask(MaskAsInts, Index);
2306 }
2307
2308 /// Return true if this shuffle splices two inputs without changing the length
2309 /// of the vectors. This operation concatenates the two inputs together and
2310 /// then extracts an original width vector starting from the splice index.
2311 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2312 bool isSplice(int &Index) const {
2313 return !changesLength() && isSpliceMask(ShuffleMask, Index);
2314 }
2315
2316 /// Return true if this shuffle mask is an extract subvector mask.
2317 /// A valid extract subvector mask returns a smaller vector from a single
2318 /// source operand. The base extraction index is returned as well.
2319 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2320 int &Index);
2321 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2322 int &Index) {
2323 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2323, __extension__ __PRETTY_FUNCTION__
))
;
2324 // Not possible to express a shuffle mask for a scalable vector for this
2325 // case.
2326 if (isa<ScalableVectorType>(Mask->getType()))
2327 return false;
2328 SmallVector<int, 16> MaskAsInts;
2329 getShuffleMask(Mask, MaskAsInts);
2330 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2331 }
2332
2333 /// Return true if this shuffle mask is an extract subvector mask.
2334 bool isExtractSubvectorMask(int &Index) const {
2335 // Not possible to express a shuffle mask for a scalable vector for this
2336 // case.
2337 if (isa<ScalableVectorType>(getType()))
2338 return false;
2339
2340 int NumSrcElts =
2341 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2342 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2343 }
2344
2345 /// Return true if this shuffle mask is an insert subvector mask.
2346 /// A valid insert subvector mask inserts the lowest elements of a second
2347 /// source operand into an in-place first source operand.
2348 /// Both the sub vector width and the insertion index are returned.
2349 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2350 int &NumSubElts, int &Index);
2351 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2352 int &NumSubElts, int &Index) {
2353 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2353, __extension__ __PRETTY_FUNCTION__
))
;
2354 // Not possible to express a shuffle mask for a scalable vector for this
2355 // case.
2356 if (isa<ScalableVectorType>(Mask->getType()))
2357 return false;
2358 SmallVector<int, 16> MaskAsInts;
2359 getShuffleMask(Mask, MaskAsInts);
2360 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2361 }
2362
2363 /// Return true if this shuffle mask is an insert subvector mask.
2364 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2365 // Not possible to express a shuffle mask for a scalable vector for this
2366 // case.
2367 if (isa<ScalableVectorType>(getType()))
2368 return false;
2369
2370 int NumSrcElts =
2371 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2372 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2373 }
2374
2375 /// Return true if this shuffle mask replicates each of the \p VF elements
2376 /// in a vector \p ReplicationFactor times.
2377 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2378 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2379 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2380 int &VF);
2381 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2382 int &VF) {
2383 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2383, __extension__ __PRETTY_FUNCTION__
))
;
2384 // Not possible to express a shuffle mask for a scalable vector for this
2385 // case.
2386 if (isa<ScalableVectorType>(Mask->getType()))
2387 return false;
2388 SmallVector<int, 16> MaskAsInts;
2389 getShuffleMask(Mask, MaskAsInts);
2390 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2391 }
2392
2393 /// Return true if this shuffle mask is a replication mask.
2394 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2395
2396 /// Return true if this shuffle mask represents "clustered" mask of size VF,
2397 /// i.e. each index between [0..VF) is used exactly once in each submask of
2398 /// size VF.
2399 /// For example, the mask for \p VF=4 is:
2400 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2401 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2402 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2403 /// element 3 is used twice in the second submask
2404 /// (3,3,1,0) and index 2 is not used at all.
2405 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2406
2407 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2408 /// mask.
2409 bool isOneUseSingleSourceMask(int VF) const;
2410
2411 /// Change values in a shuffle permute mask assuming the two vector operands
2412 /// of length InVecNumElts have swapped position.
2413 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2414 unsigned InVecNumElts) {
2415 for (int &Idx : Mask) {
2416 if (Idx == -1)
2417 continue;
2418 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2419 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "llvm/include/llvm/IR/Instructions.h", 2420, __extension__ __PRETTY_FUNCTION__
))
2420 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "llvm/include/llvm/IR/Instructions.h", 2420, __extension__ __PRETTY_FUNCTION__
))
;
2421 }
2422 }
2423
2424 // Methods for support type inquiry through isa, cast, and dyn_cast:
2425 static bool classof(const Instruction *I) {
2426 return I->getOpcode() == Instruction::ShuffleVector;
2427 }
2428 static bool classof(const Value *V) {
2429 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2430 }
2431};
2432
// ShuffleVectorInst has exactly 2 Use operands (the two input vectors);
// the shuffle mask is stored out-of-line in the instruction, not as an
// operand.
2433template <>
2434struct OperandTraits<ShuffleVectorInst>
2435 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2436
// Out-of-line definitions of ShuffleVectorInst's transparent operand
// accessors (op_begin/op_end/getOperand/setOperand/getNumOperands/Op<N>),
// produced by the DEFINE_TRANSPARENT_OPERAND_ACCESSORS macro and shown here
// in analyzer-expanded form. They delegate to
// OperandTraits<ShuffleVectorInst> (2 fixed operands) to index directly into
// the co-allocated Use array.
2437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2437, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<ShuffleVectorInst
>::op_begin(const_cast<ShuffleVectorInst*>(this))[i_nocapture
].get()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2437, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<ShuffleVectorInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ShuffleVectorInst::getNumOperands
() const { return OperandTraits<ShuffleVectorInst>::operands
(this); } template <int Idx_nocapture> Use &ShuffleVectorInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &ShuffleVectorInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2438
2439//===----------------------------------------------------------------------===//
2440// ExtractValueInst Class
2441//===----------------------------------------------------------------------===//
2442
2443/// This instruction extracts a struct member or array
2444/// element value from an aggregate value.
2445///
2446class ExtractValueInst : public UnaryInstruction {
  // The chain of aggregate indices, stored out-of-line; indices are not
  // operands (the only operand is the aggregate value).
2447 SmallVector<unsigned, 4> Indices;
2448
 // Copy construction is reserved for cloneImpl(); not publicly copyable.
2449 ExtractValueInst(const ExtractValueInst &EVI);
2450
2451 /// Constructors - Create an extractvalue instruction with a base aggregate
2452 /// value and a list of indices. The first ctor can optionally insert before
2453 /// an existing instruction, the second appends the new instruction to the
2454 /// specified BasicBlock.
2455 inline ExtractValueInst(Value *Agg,
2456 ArrayRef<unsigned> Idxs,
2457 const Twine &NameStr,
2458 Instruction *InsertBefore);
2459 inline ExtractValueInst(Value *Agg,
2460 ArrayRef<unsigned> Idxs,
2461 const Twine &NameStr, BasicBlock *InsertAtEnd);
2462
2463 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2464
2465protected:
2466 // Note: Instruction needs to be a friend here to call cloneImpl.
2467 friend class Instruction;
2468
2469 ExtractValueInst *cloneImpl() const;
2470
2471public:
2472 static ExtractValueInst *Create(Value *Agg,
2473 ArrayRef<unsigned> Idxs,
2474 const Twine &NameStr = "",
2475 Instruction *InsertBefore = nullptr) {
2476 return new
2477 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2478 }
2479
2480 static ExtractValueInst *Create(Value *Agg,
2481 ArrayRef<unsigned> Idxs,
2482 const Twine &NameStr,
2483 BasicBlock *InsertAtEnd) {
2484 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2485 }
2486
2487 /// Returns the type of the element that would be extracted
2488 /// with an extractvalue instruction with the specified parameters.
2489 ///
2490 /// Null is returned if the indices are invalid for the specified type.
2491 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2492
2493 using idx_iterator = const unsigned*;
2494
2495 inline idx_iterator idx_begin() const { return Indices.begin(); }
2496 inline idx_iterator idx_end() const { return Indices.end(); }
2497 inline iterator_range<idx_iterator> indices() const {
2498 return make_range(idx_begin(), idx_end());
2499 }
2500
2501 Value *getAggregateOperand() {
2502 return getOperand(0);
2503 }
2504 const Value *getAggregateOperand() const {
2505 return getOperand(0);
2506 }
2507 static unsigned getAggregateOperandIndex() {
2508 return 0U; // get index for modifying correct operand
2509 }
2510
2511 ArrayRef<unsigned> getIndices() const {
2512 return Indices;
2513 }
2514
2515 unsigned getNumIndices() const {
2516 return (unsigned)Indices.size();
2517 }
2518
 // Unconditionally true: an extractvalue instruction always carries indices.
2519 bool hasIndices() const {
2520 return true;
2521 }
2522
2523 // Methods for support type inquiry through isa, cast, and dyn_cast:
2524 static bool classof(const Instruction *I) {
2525 return I->getOpcode() == Instruction::ExtractValue;
2526 }
2527 static bool classof(const Value *V) {
2528 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2529 }
2530};
2531
// Inline ctor: computes the result type from the aggregate type and the
// index list via getIndexedType(); checkGEPType asserts the indices are
// valid. Inserts before an existing instruction (may be null).
2532ExtractValueInst::ExtractValueInst(Value *Agg,
2533 ArrayRef<unsigned> Idxs,
2534 const Twine &NameStr,
2535 Instruction *InsertBefore)
2536 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2537 ExtractValue, Agg, InsertBefore) {
2538 init(Idxs, NameStr);
2539}
2540
// Inline ctor variant: same result-type computation as above, but appends
// the new instruction to the end of the given BasicBlock.
2541ExtractValueInst::ExtractValueInst(Value *Agg,
2542 ArrayRef<unsigned> Idxs,
2543 const Twine &NameStr,
2544 BasicBlock *InsertAtEnd)
2545 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2546 ExtractValue, Agg, InsertAtEnd) {
2547 init(Idxs, NameStr);
2548}
2549
2550//===----------------------------------------------------------------------===//
2551// InsertValueInst Class
2552//===----------------------------------------------------------------------===//
2553
2554/// This instruction inserts a struct field or array element
2555/// value into an aggregate value.
2556///
2557class InsertValueInst : public Instruction {
  // The chain of aggregate indices, stored out-of-line; the two operands are
  // the aggregate (0) and the value to insert (1).
2558 SmallVector<unsigned, 4> Indices;
2559
 // Copy construction is reserved for cloneImpl(); not publicly copyable.
2560 InsertValueInst(const InsertValueInst &IVI);
2561
2562 /// Constructors - Create an insertvalue instruction with a base aggregate
2563 /// value, a value to insert, and a list of indices. The first ctor can
2564 /// optionally insert before an existing instruction, the second appends
2565 /// the new instruction to the specified BasicBlock.
2566 inline InsertValueInst(Value *Agg, Value *Val,
2567 ArrayRef<unsigned> Idxs,
2568 const Twine &NameStr,
2569 Instruction *InsertBefore);
2570 inline InsertValueInst(Value *Agg, Value *Val,
2571 ArrayRef<unsigned> Idxs,
2572 const Twine &NameStr, BasicBlock *InsertAtEnd);
2573
2574 /// Constructors - These two constructors are convenience methods because one
2575 /// and two index insertvalue instructions are so common.
2576 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2577 const Twine &NameStr = "",
2578 Instruction *InsertBefore = nullptr);
2579 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2580 BasicBlock *InsertAtEnd);
2581
2582 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2583 const Twine &NameStr);
2584
2585protected:
2586 // Note: Instruction needs to be a friend here to call cloneImpl.
2587 friend class Instruction;
2588
2589 InsertValueInst *cloneImpl() const;
2590
2591public:
2592 // allocate space for exactly two operands
2593 void *operator new(size_t S) { return User::operator new(S, 2); }
2594 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2595
2596 static InsertValueInst *Create(Value *Agg, Value *Val,
2597 ArrayRef<unsigned> Idxs,
2598 const Twine &NameStr = "",
2599 Instruction *InsertBefore = nullptr) {
2600 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2601 }
2602
2603 static InsertValueInst *Create(Value *Agg, Value *Val,
2604 ArrayRef<unsigned> Idxs,
2605 const Twine &NameStr,
2606 BasicBlock *InsertAtEnd) {
2607 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2608 }
2609
2610 /// Transparently provide more efficient getOperand methods.
2611 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2612
2613 using idx_iterator = const unsigned*;
2614
2615 inline idx_iterator idx_begin() const { return Indices.begin(); }
2616 inline idx_iterator idx_end() const { return Indices.end(); }
2617 inline iterator_range<idx_iterator> indices() const {
2618 return make_range(idx_begin(), idx_end());
2619 }
2620
2621 Value *getAggregateOperand() {
2622 return getOperand(0);
2623 }
2624 const Value *getAggregateOperand() const {
2625 return getOperand(0);
2626 }
2627 static unsigned getAggregateOperandIndex() {
2628 return 0U; // get index for modifying correct operand
2629 }
2630
2631 Value *getInsertedValueOperand() {
2632 return getOperand(1);
2633 }
2634 const Value *getInsertedValueOperand() const {
2635 return getOperand(1);
2636 }
2637 static unsigned getInsertedValueOperandIndex() {
2638 return 1U; // get index for modifying correct operand
2639 }
2640
2641 ArrayRef<unsigned> getIndices() const {
2642 return Indices;
2643 }
2644
2645 unsigned getNumIndices() const {
2646 return (unsigned)Indices.size();
2647 }
2648
 // Unconditionally true: an insertvalue instruction always carries indices.
2649 bool hasIndices() const {
2650 return true;
2651 }
2652
2653 // Methods for support type inquiry through isa, cast, and dyn_cast:
2654 static bool classof(const Instruction *I) {
2655 return I->getOpcode() == Instruction::InsertValue;
2656 }
2657 static bool classof(const Value *V) {
2658 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2659 }
2660};
2661
2662template <>
2663struct OperandTraits<InsertValueInst> :
2664 public FixedNumOperandTraits<InsertValueInst, 2> {
2665};
2666
2667InsertValueInst::InsertValueInst(Value *Agg,
2668 Value *Val,
2669 ArrayRef<unsigned> Idxs,
2670 const Twine &NameStr,
2671 Instruction *InsertBefore)
2672 : Instruction(Agg->getType(), InsertValue,
2673 OperandTraits<InsertValueInst>::op_begin(this),
2674 2, InsertBefore) {
2675 init(Agg, Val, Idxs, NameStr);
2676}
2677
2678InsertValueInst::InsertValueInst(Value *Agg,
2679 Value *Val,
2680 ArrayRef<unsigned> Idxs,
2681 const Twine &NameStr,
2682 BasicBlock *InsertAtEnd)
2683 : Instruction(Agg->getType(), InsertValue,
2684 OperandTraits<InsertValueInst>::op_begin(this),
2685 2, InsertAtEnd) {
2686 init(Agg, Val, Idxs, NameStr);
2687}
2688
2689DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InsertValueInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2689, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<InsertValueInst
>::op_begin(const_cast<InsertValueInst*>(this))[i_nocapture
].get()); } void InsertValueInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertValueInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2689, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<InsertValueInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned InsertValueInst::getNumOperands
() const { return OperandTraits<InsertValueInst>::operands
(this); } template <int Idx_nocapture> Use &InsertValueInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &InsertValueInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2690
2691//===----------------------------------------------------------------------===//
2692// PHINode Class
2693//===----------------------------------------------------------------------===//
2694
2695// PHINode - The PHINode class is used to represent the magical mystical PHI
2696// node, that can not exist in nature, but can be synthesized in a computer
2697// scientist's overactive imagination.
2698//
2699class PHINode : public Instruction {
2700 /// The number of operands actually allocated. NumOperands is
2701 /// the number actually in use.
2702 unsigned ReservedSpace;
2703
2704 PHINode(const PHINode &PN);
2705
2706 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2707 const Twine &NameStr = "",
2708 Instruction *InsertBefore = nullptr)
2709 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2710 ReservedSpace(NumReservedValues) {
2711 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "llvm/include/llvm/IR/Instructions.h", 2711, __extension__ __PRETTY_FUNCTION__
))
;
2712 setName(NameStr);
2713 allocHungoffUses(ReservedSpace);
2714 }
2715
2716 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2717 BasicBlock *InsertAtEnd)
2718 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2719 ReservedSpace(NumReservedValues) {
2720 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "llvm/include/llvm/IR/Instructions.h", 2720, __extension__ __PRETTY_FUNCTION__
))
;
2721 setName(NameStr);
2722 allocHungoffUses(ReservedSpace);
2723 }
2724
2725protected:
2726 // Note: Instruction needs to be a friend here to call cloneImpl.
2727 friend class Instruction;
2728
2729 PHINode *cloneImpl() const;
2730
2731 // allocHungoffUses - this is more complicated than the generic
2732 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2733 // values and pointers to the incoming blocks, all in one allocation.
2734 void allocHungoffUses(unsigned N) {
2735 User::allocHungoffUses(N, /* IsPhi */ true);
2736 }
2737
2738public:
2739 /// Constructors - NumReservedValues is a hint for the number of incoming
2740 /// edges that this phi node will have (use 0 if you really have no idea).
2741 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2742 const Twine &NameStr = "",
2743 Instruction *InsertBefore = nullptr) {
2744 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2745 }
2746
2747 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2748 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2749 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2750 }
2751
2752 /// Provide fast operand accessors
2753 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2754
2755 // Block iterator interface. This provides access to the list of incoming
2756 // basic blocks, which parallels the list of incoming values.
2757
2758 using block_iterator = BasicBlock **;
2759 using const_block_iterator = BasicBlock * const *;
2760
2761 block_iterator block_begin() {
2762 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2763 }
2764
2765 const_block_iterator block_begin() const {
2766 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2767 }
2768
2769 block_iterator block_end() {
2770 return block_begin() + getNumOperands();
2771 }
2772
2773 const_block_iterator block_end() const {
2774 return block_begin() + getNumOperands();
2775 }
2776
2777 iterator_range<block_iterator> blocks() {
2778 return make_range(block_begin(), block_end());
2779 }
2780
2781 iterator_range<const_block_iterator> blocks() const {
2782 return make_range(block_begin(), block_end());
2783 }
2784
2785 op_range incoming_values() { return operands(); }
2786
2787 const_op_range incoming_values() const { return operands(); }
2788
2789 /// Return the number of incoming edges
2790 ///
2791 unsigned getNumIncomingValues() const { return getNumOperands(); }
2792
2793 /// Return incoming value number x
2794 ///
2795 Value *getIncomingValue(unsigned i) const {
2796 return getOperand(i);
2797 }
2798 void setIncomingValue(unsigned i, Value *V) {
2799 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "llvm/include/llvm/IR/Instructions.h", 2799, __extension__ __PRETTY_FUNCTION__
))
;
2800 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "llvm/include/llvm/IR/Instructions.h", 2801, __extension__ __PRETTY_FUNCTION__
))
2801 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "llvm/include/llvm/IR/Instructions.h", 2801, __extension__ __PRETTY_FUNCTION__
))
;
2802 setOperand(i, V);
2803 }
2804
2805 static unsigned getOperandNumForIncomingValue(unsigned i) {
2806 return i;
2807 }
2808
2809 static unsigned getIncomingValueNumForOperand(unsigned i) {
2810 return i;
2811 }
2812
2813 /// Return incoming basic block number @p i.
2814 ///
2815 BasicBlock *getIncomingBlock(unsigned i) const {
2816 return block_begin()[i];
2817 }
2818
2819 /// Return incoming basic block corresponding
2820 /// to an operand of the PHI.
2821 ///
2822 BasicBlock *getIncomingBlock(const Use &U) const {
2823 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "llvm/include/llvm/IR/Instructions.h", 2823, __extension__ __PRETTY_FUNCTION__
))
;
2824 return getIncomingBlock(unsigned(&U - op_begin()));
2825 }
2826
2827 /// Return incoming basic block corresponding
2828 /// to value use iterator.
2829 ///
2830 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2831 return getIncomingBlock(I.getUse());
2832 }
2833
2834 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2835 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "llvm/include/llvm/IR/Instructions.h", 2835, __extension__ __PRETTY_FUNCTION__
))
;
2836 block_begin()[i] = BB;
2837 }
2838
2839 /// Replace every incoming basic block \p Old to basic block \p New.
2840 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2841 assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "llvm/include/llvm/IR/Instructions.h", 2841, __extension__ __PRETTY_FUNCTION__
))
;
2842 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2843 if (getIncomingBlock(Op) == Old)
2844 setIncomingBlock(Op, New);
2845 }
2846
2847 /// Add an incoming value to the end of the PHI list
2848 ///
2849 void addIncoming(Value *V, BasicBlock *BB) {
2850 if (getNumOperands() == ReservedSpace)
2851 growOperands(); // Get more space!
2852 // Initialize some new operands.
2853 setNumHungOffUseOperands(getNumOperands() + 1);
2854 setIncomingValue(getNumOperands() - 1, V);
2855 setIncomingBlock(getNumOperands() - 1, BB);
2856 }
2857
2858 /// Remove an incoming value. This is useful if a
2859 /// predecessor basic block is deleted. The value removed is returned.
2860 ///
2861 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2862 /// is true), the PHI node is destroyed and any uses of it are replaced with
2863 /// dummy values. The only time there should be zero incoming values to a PHI
2864 /// node is when the block is dead, so this strategy is sound.
2865 ///
2866 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2867
2868 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2869 int Idx = getBasicBlockIndex(BB);
2870 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "llvm/include/llvm/IR/Instructions.h", 2870, __extension__ __PRETTY_FUNCTION__
))
;
2871 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2872 }
2873
2874 /// Return the first index of the specified basic
2875 /// block in the value list for this PHI. Returns -1 if no instance.
2876 ///
2877 int getBasicBlockIndex(const BasicBlock *BB) const {
2878 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2879 if (block_begin()[i] == BB)
2880 return i;
2881 return -1;
2882 }
2883
2884 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2885 int Idx = getBasicBlockIndex(BB);
2886 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "llvm/include/llvm/IR/Instructions.h", 2886, __extension__ __PRETTY_FUNCTION__
))
;
2887 return getIncomingValue(Idx);
2888 }
2889
2890 /// Set every incoming value(s) for block \p BB to \p V.
2891 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2892 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "llvm/include/llvm/IR/Instructions.h", 2892, __extension__ __PRETTY_FUNCTION__
))
;
2893 bool Found = false;
2894 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2895 if (getIncomingBlock(Op) == BB) {
2896 Found = true;
2897 setIncomingValue(Op, V);
2898 }
2899 (void)Found;
2900 assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!"
) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "llvm/include/llvm/IR/Instructions.h", 2900, __extension__ __PRETTY_FUNCTION__
))
;
2901 }
2902
2903 /// If the specified PHI node always merges together the
2904 /// same value, return the value, otherwise return null.
2905 Value *hasConstantValue() const;
2906
2907 /// Whether the specified PHI node always merges
2908 /// together the same value, assuming undefs are equal to a unique
2909 /// non-undef value.
2910 bool hasConstantOrUndefValue() const;
2911
2912 /// If the PHI node is complete which means all of its parent's predecessors
2913 /// have incoming value in this PHI, return true, otherwise return false.
2914 bool isComplete() const {
2915 return llvm::all_of(predecessors(getParent()),
2916 [this](const BasicBlock *Pred) {
2917 return getBasicBlockIndex(Pred) >= 0;
2918 });
2919 }
2920
2921 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2922 static bool classof(const Instruction *I) {
2923 return I->getOpcode() == Instruction::PHI;
2924 }
2925 static bool classof(const Value *V) {
2926 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2927 }
2928
2929private:
2930 void growOperands();
2931};
2932
2933template <>
2934struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2935};
2936
2937DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<PHINode>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2937, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<PHINode
>::op_begin(const_cast<PHINode*>(this))[i_nocapture]
.get()); } void PHINode::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<PHINode>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2937, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<PHINode>::op_begin(this)[i_nocapture]
= Val_nocapture; } unsigned PHINode::getNumOperands() const {
return OperandTraits<PHINode>::operands(this); } template
<int Idx_nocapture> Use &PHINode::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &PHINode::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
2938
2939//===----------------------------------------------------------------------===//
2940// LandingPadInst Class
2941//===----------------------------------------------------------------------===//
2942
2943//===---------------------------------------------------------------------------
2944/// The landingpad instruction holds all of the information
2945/// necessary to generate correct exception handling. The landingpad instruction
2946/// cannot be moved from the top of a landing pad block, which itself is
2947/// accessible only from the 'unwind' edge of an invoke. This uses the
2948/// SubclassData field in Value to store whether or not the landingpad is a
2949/// cleanup.
2950///
2951class LandingPadInst : public Instruction {
2952 using CleanupField = BoolBitfieldElementT<0>;
2953
2954 /// The number of operands actually allocated. NumOperands is
2955 /// the number actually in use.
2956 unsigned ReservedSpace;
2957
2958 LandingPadInst(const LandingPadInst &LP);
2959
2960public:
2961 enum ClauseType { Catch, Filter };
2962
2963private:
2964 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2965 const Twine &NameStr, Instruction *InsertBefore);
2966 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2967 const Twine &NameStr, BasicBlock *InsertAtEnd);
2968
2969 // Allocate space for exactly zero operands.
2970 void *operator new(size_t S) { return User::operator new(S); }
2971
2972 void growOperands(unsigned Size);
2973 void init(unsigned NumReservedValues, const Twine &NameStr);
2974
2975protected:
2976 // Note: Instruction needs to be a friend here to call cloneImpl.
2977 friend class Instruction;
2978
2979 LandingPadInst *cloneImpl() const;
2980
2981public:
2982 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2983
2984 /// Constructors - NumReservedClauses is a hint for the number of incoming
2985 /// clauses that this landingpad will have (use 0 if you really have no idea).
2986 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2987 const Twine &NameStr = "",
2988 Instruction *InsertBefore = nullptr);
2989 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2990 const Twine &NameStr, BasicBlock *InsertAtEnd);
2991
2992 /// Provide fast operand accessors
2993 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2994
2995 /// Return 'true' if this landingpad instruction is a
2996 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2997 /// doesn't catch the exception.
2998 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2999
3000 /// Indicate that this landingpad instruction is a cleanup.
3001 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3002
3003 /// Add a catch or filter clause to the landing pad.
3004 void addClause(Constant *ClauseVal);
3005
3006 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3007 /// determine what type of clause this is.
3008 Constant *getClause(unsigned Idx) const {
3009 return cast<Constant>(getOperandList()[Idx]);
3010 }
3011
3012 /// Return 'true' if the clause and index Idx is a catch clause.
3013 bool isCatch(unsigned Idx) const {
3014 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3015 }
3016
3017 /// Return 'true' if the clause and index Idx is a filter clause.
3018 bool isFilter(unsigned Idx) const {
3019 return isa<ArrayType>(getOperandList()[Idx]->getType());
3020 }
3021
3022 /// Get the number of clauses for this landing pad.
3023 unsigned getNumClauses() const { return getNumOperands(); }
3024
3025 /// Grow the size of the operand list to accommodate the new
3026 /// number of clauses.
3027 void reserveClauses(unsigned Size) { growOperands(Size); }
3028
3029 // Methods for support type inquiry through isa, cast, and dyn_cast:
3030 static bool classof(const Instruction *I) {
3031 return I->getOpcode() == Instruction::LandingPad;
3032 }
3033 static bool classof(const Value *V) {
3034 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3035 }
3036};
3037
3038template <>
3039struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
3040};
3041
// Out-of-line definitions (shown here macro-expanded by the analyzer) of the
// operand accessors declared inside LandingPadInst: op_begin/op_end in const
// and non-const flavors, getOperand/setOperand with range assertions, and
// getNumOperands/Op<Idx>(), all routed through OperandTraits<LandingPadInst>.
3042DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3042, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<LandingPadInst
>::op_begin(const_cast<LandingPadInst*>(this))[i_nocapture
].get()); } void LandingPadInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<LandingPadInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3042, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<LandingPadInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned LandingPadInst::getNumOperands(
) const { return OperandTraits<LandingPadInst>::operands
(this); } template <int Idx_nocapture> Use &LandingPadInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &LandingPadInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
3043
3044//===----------------------------------------------------------------------===//
3045// ReturnInst Class
3046//===----------------------------------------------------------------------===//
3047
3048//===---------------------------------------------------------------------------
3049/// Return a value (possibly void), from a function. Execution
3050/// does not continue in this function any longer.
3051///
// Terminator instruction that returns control (and optionally a value) to the
// caller.  A ReturnInst has either zero operands ('ret void') or exactly one
// operand (the returned value); operand storage is co-allocated with the
// instruction by the placement operator new(NumOps) calls in Create().
3052class ReturnInst : public Instruction {
3053 ReturnInst(const ReturnInst &RI);
3054
3055private:
3056 // ReturnInst constructors:
3057 // ReturnInst() - 'ret void' instruction
3058 // ReturnInst( null) - 'ret void' instruction
3059 // ReturnInst(Value* X) - 'ret X' instruction
3060 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3061 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3062 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3063 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3064 //
3065 // NOTE: If the Value* passed is of type void then the constructor behaves as
3066 // if it was passed NULL.
3067 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3068 Instruction *InsertBefore = nullptr);
3069 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3070 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3071
3072protected:
3073 // Note: Instruction needs to be a friend here to call cloneImpl.
3074 friend class Instruction;
3075
3076 ReturnInst *cloneImpl() const;
3077
3078public:
 // !!retVal is 1 when a value is returned and 0 for 'ret void'; placement
 // new(NumOps) reserves exactly that many co-allocated operand slots.
3079 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3080 Instruction *InsertBefore = nullptr) {
3081 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3082 }
3083
3084 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3085 BasicBlock *InsertAtEnd) {
3086 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3087 }
3088
 // 'ret void' form: zero operands are allocated.
3089 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3090 return new(0) ReturnInst(C, InsertAtEnd);
3091 }
3092
3093 /// Provide fast operand accessors
3094 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3095
3096 /// Convenience accessor. Returns null if there is no return value.
3097 Value *getReturnValue() const {
3098 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3099 }
3100
 // A return transfers control out of the function, so it has no successors.
3101 unsigned getNumSuccessors() const { return 0; }
3102
3103 // Methods for support type inquiry through isa, cast, and dyn_cast:
3104 static bool classof(const Instruction *I) {
3105 return (I->getOpcode() == Instruction::Ret);
3106 }
3107 static bool classof(const Value *V) {
3108 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3109 }
3110
 // Private successor accessors satisfy the terminator interface but must
 // never be reached; calling either is a fatal error.
3111private:
3112 BasicBlock *getSuccessor(unsigned idx) const {
3113 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "llvm/include/llvm/IR/Instructions.h", 3113)
;
3114 }
3115
3116 void setSuccessor(unsigned idx, BasicBlock *B) {
3117 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "llvm/include/llvm/IR/Instructions.h", 3117)
;
3118 }
3119};
3120
// Operand traits for ReturnInst: a variadic (per-instance) operand count,
// matching the 0-or-1 operands allocated by ReturnInst::Create.
3121template <>
3122struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3123};
3124
// Out-of-line definitions (analyzer-expanded) of ReturnInst's operand
// accessors: op_begin/op_end (const and non-const), range-asserting
// getOperand/setOperand, getNumOperands, and the Op<Idx>() helpers, all
// delegating to OperandTraits<ReturnInst>.
3125DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ReturnInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3125, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this))[i_nocapture
].get()); } void ReturnInst::setOperand(unsigned i_nocapture,
Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ReturnInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3125, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<ReturnInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ReturnInst::getNumOperands() const
{ return OperandTraits<ReturnInst>::operands(this); } template
<int Idx_nocapture> Use &ReturnInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &ReturnInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
3126
3127//===----------------------------------------------------------------------===//
3128// BranchInst Class
3129//===----------------------------------------------------------------------===//
3130
3131//===---------------------------------------------------------------------------
3132/// Conditional or Unconditional Branch instruction.
3133///
// Conditional or unconditional branch terminator.  An unconditional branch
// has 1 operand (TrueDest); a conditional branch has 3, laid out as
// [Cond, FalseDest, TrueDest] so accessors can index backwards from op_end()
// (Op<-1> = TrueDest, Op<-2> = FalseDest, Op<-3> = Cond) without first
// checking conditionality.
3134class BranchInst : public Instruction {
3135 /// Ops list - Branches are strange. The operands are ordered:
3136 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3137 /// they don't have to check for cond/uncond branchness. These are mostly
3138 /// accessed relative from op_end().
3139 BranchInst(const BranchInst &BI);
3140 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3141 // BranchInst(BB *B) - 'br B'
3142 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3143 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3144 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3145 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3146 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3147 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3148 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3149 Instruction *InsertBefore = nullptr);
3150 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3151 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3152 BasicBlock *InsertAtEnd);
3153
3154 void AssertOK();
3155
3156protected:
3157 // Note: Instruction needs to be a friend here to call cloneImpl.
3158 friend class Instruction;
3159
3160 BranchInst *cloneImpl() const;
3161
3162public:
3163 /// Iterator type that casts an operand to a basic block.
3164 ///
3165 /// This only makes sense because the successors are stored as adjacent
3166 /// operands for branch instructions.
3167 struct succ_op_iterator
3168 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3169 std::random_access_iterator_tag, BasicBlock *,
3170 ptrdiff_t, BasicBlock *, BasicBlock *> {
3171 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3172
3173 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3174 BasicBlock *operator->() const { return operator*(); }
3175 };
3176
3177 /// The const version of `succ_op_iterator`.
3178 struct const_succ_op_iterator
3179 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3180 std::random_access_iterator_tag,
3181 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3182 const BasicBlock *> {
3183 explicit const_succ_op_iterator(const_value_op_iterator I)
3184 : iterator_adaptor_base(I) {}
3185
3186 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3187 const BasicBlock *operator->() const { return operator*(); }
3188 };
3189
 // Factory methods: placement new(1) for unconditional, new(3) for
 // conditional, matching the co-allocated operand counts described above.
3190 static BranchInst *Create(BasicBlock *IfTrue,
3191 Instruction *InsertBefore = nullptr) {
3192 return new(1) BranchInst(IfTrue, InsertBefore);
3193 }
3194
3195 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3196 Value *Cond, Instruction *InsertBefore = nullptr) {
3197 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3198 }
3199
3200 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3201 return new(1) BranchInst(IfTrue, InsertAtEnd);
3202 }
3203
3204 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3205 Value *Cond, BasicBlock *InsertAtEnd) {
3206 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3207 }
3208
3209 /// Transparently provide more efficient getOperand methods.
3210 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3211
 // Conditionality is encoded purely in the operand count: 1 vs 3.
3212 bool isUnconditional() const { return getNumOperands() == 1; }
3213 bool isConditional() const { return getNumOperands() == 3; }
3214
3215 Value *getCondition() const {
3216 assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "llvm/include/llvm/IR/Instructions.h", 3216, __extension__ __PRETTY_FUNCTION__
))
;
3217 return Op<-3>();
3218 }
3219
3220 void setCondition(Value *V) {
3221 assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "llvm/include/llvm/IR/Instructions.h", 3221, __extension__ __PRETTY_FUNCTION__
))
;
3222 Op<-3>() = V;
3223 }
3224
3225 unsigned getNumSuccessors() const { return 1+isConditional(); }
3226
 // Successor i counts backwards from the last operand: i==0 -> Op<-1>
 // (TrueDest), i==1 -> Op<-2> (FalseDest, conditional branches only).
3227 BasicBlock *getSuccessor(unsigned i) const {
3228 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "llvm/include/llvm/IR/Instructions.h", 3228, __extension__ __PRETTY_FUNCTION__
))
;
3229 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3230 }
3231
3232 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3233 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "llvm/include/llvm/IR/Instructions.h", 3233, __extension__ __PRETTY_FUNCTION__
))
;
3234 *(&Op<-1>() - idx) = NewSucc;
3235 }
3236
3237 /// Swap the successors of this branch instruction.
3238 ///
3239 /// Swaps the successors of the branch instruction. This also swaps any
3240 /// branch weight metadata associated with the instruction so that it
3241 /// continues to map correctly to each operand.
3242 void swapSuccessors();
3243
 // Successor ranges skip the condition operand (index 0) when conditional.
3244 iterator_range<succ_op_iterator> successors() {
3245 return make_range(
3246 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3247 succ_op_iterator(value_op_end()));
3248 }
3249
3250 iterator_range<const_succ_op_iterator> successors() const {
3251 return make_range(const_succ_op_iterator(
3252 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3253 const_succ_op_iterator(value_op_end()));
3254 }
3255
3256 // Methods for support type inquiry through isa, cast, and dyn_cast:
3257 static bool classof(const Instruction *I) {
3258 return (I->getOpcode() == Instruction::Br);
3259 }
3260 static bool classof(const Value *V) {
3261 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3262 }
3263};
3264
// Operand traits for BranchInst: variadic operand count with a minimum of 1,
// matching the 1 (unconditional) or 3 (conditional) operands its factories
// allocate.
3265template <>
3266struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3267};
3268
// Out-of-line definitions (analyzer-expanded) of BranchInst's operand
// accessors: op_begin/op_end (const and non-const), range-asserting
// getOperand/setOperand, getNumOperands, and the Op<Idx>() helpers, all
// delegating to OperandTraits<BranchInst>.
3269DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<BranchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3269, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this))[i_nocapture
].get()); } void BranchInst::setOperand(unsigned i_nocapture,
Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<BranchInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3269, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<BranchInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned BranchInst::getNumOperands() const
{ return OperandTraits<BranchInst>::operands(this); } template
<int Idx_nocapture> Use &BranchInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &BranchInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
3270
3271//===----------------------------------------------------------------------===//
3272// SwitchInst Class
3273//===----------------------------------------------------------------------===//
3274
3275//===---------------------------------------------------------------------------
3276/// Multiway switch
3277///
3278class SwitchInst : public Instruction {
3279 unsigned ReservedSpace;
3280
3281 // Operand[0] = Value to switch on
3282 // Operand[1] = Default basic block destination
3283 // Operand[2n ] = Value to match
3284 // Operand[2n+1] = BasicBlock to go to on match
3285 SwitchInst(const SwitchInst &SI);
3286
3287 /// Create a new switch instruction, specifying a value to switch on and a
3288 /// default destination. The number of additional cases can be specified here
3289 /// to make memory allocation more efficient. This constructor can also
3290 /// auto-insert before another instruction.
3291 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3292 Instruction *InsertBefore);
3293
3294 /// Create a new switch instruction, specifying a value to switch on and a
3295 /// default destination. The number of additional cases can be specified here
3296 /// to make memory allocation more efficient. This constructor also
3297 /// auto-inserts at the end of the specified BasicBlock.
3298 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3299 BasicBlock *InsertAtEnd);
3300
3301 // allocate space for exactly zero operands
3302 void *operator new(size_t S) { return User::operator new(S); }
3303
3304 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3305 void growOperands();
3306
3307protected:
3308 // Note: Instruction needs to be a friend here to call cloneImpl.
3309 friend class Instruction;
3310
3311 SwitchInst *cloneImpl() const;
3312
3313public:
3314 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3315
3316 // -2
3317 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3318
3319 template <typename CaseHandleT> class CaseIteratorImpl;
3320
3321 /// A handle to a particular switch case. It exposes a convenient interface
3322 /// to both the case value and the successor block.
3323 ///
3324 /// We define this as a template and instantiate it to form both a const and
3325 /// non-const handle.
3326 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3327 class CaseHandleImpl {
3328 // Directly befriend both const and non-const iterators.
3329 friend class SwitchInst::CaseIteratorImpl<
3330 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3331
3332 protected:
3333 // Expose the switch type we're parameterized with to the iterator.
3334 using SwitchInstType = SwitchInstT;
3335
3336 SwitchInstT *SI;
3337 ptrdiff_t Index;
3338
3339 CaseHandleImpl() = default;
3340 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3341
3342 public:
3343 /// Resolves case value for current case.
3344 ConstantIntT *getCaseValue() const {
3345 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3346, __extension__ __PRETTY_FUNCTION__
))
3346 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3346, __extension__ __PRETTY_FUNCTION__
))
;
3347 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3348 }
3349
3350 /// Resolves successor for current case.
3351 BasicBlockT *getCaseSuccessor() const {
3352 assert(((unsigned)Index < SI->getNumCases() ||(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3354, __extension__ __PRETTY_FUNCTION__
))
3353 (unsigned)Index == DefaultPseudoIndex) &&(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3354, __extension__ __PRETTY_FUNCTION__
))
3354 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3354, __extension__ __PRETTY_FUNCTION__
))
;
3355 return SI->getSuccessor(getSuccessorIndex());
3356 }
3357
3358 /// Returns number of current case.
3359 unsigned getCaseIndex() const { return Index; }
3360
3361 /// Returns successor index for current case successor.
3362 unsigned getSuccessorIndex() const {
3363 assert(((unsigned)Index == DefaultPseudoIndex ||(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3365, __extension__ __PRETTY_FUNCTION__
))
3364 (unsigned)Index < SI->getNumCases()) &&(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3365, __extension__ __PRETTY_FUNCTION__
))
3365 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3365, __extension__ __PRETTY_FUNCTION__
))
;
3366 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3367 }
3368
3369 bool operator==(const CaseHandleImpl &RHS) const {
3370 assert(SI == RHS.SI && "Incompatible operators.")(static_cast <bool> (SI == RHS.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "llvm/include/llvm/IR/Instructions.h", 3370, __extension__ __PRETTY_FUNCTION__
))
;
3371 return Index == RHS.Index;
3372 }
3373 };
3374
3375 using ConstCaseHandle =
3376 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3377
3378 class CaseHandle
3379 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3380 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3381
3382 public:
3383 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3384
3385 /// Sets the new value for current case.
3386 void setValue(ConstantInt *V) const {
3387 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3388, __extension__ __PRETTY_FUNCTION__
))
3388 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3388, __extension__ __PRETTY_FUNCTION__
))
;
3389 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3390 }
3391
3392 /// Sets the new successor for current case.
3393 void setSuccessor(BasicBlock *S) const {
3394 SI->setSuccessor(getSuccessorIndex(), S);
3395 }
3396 };
3397
3398 template <typename CaseHandleT>
3399 class CaseIteratorImpl
3400 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3401 std::random_access_iterator_tag,
3402 const CaseHandleT> {
3403 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3404
3405 CaseHandleT Case;
3406
3407 public:
3408 /// Default constructed iterator is in an invalid state until assigned to
3409 /// a case for a particular switch.
3410 CaseIteratorImpl() = default;
3411
3412 /// Initializes case iterator for given SwitchInst and for given
3413 /// case number.
3414 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3415
3416 /// Initializes case iterator for given SwitchInst and for given
3417 /// successor index.
3418 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3419 unsigned SuccessorIndex) {
3420 assert(SuccessorIndex < SI->getNumSuccessors() &&(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3421, __extension__ __PRETTY_FUNCTION__
))
3421 "Successor index # out of range!")(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3421, __extension__ __PRETTY_FUNCTION__
))
;
3422 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3423 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3424 }
3425
3426 /// Support converting to the const variant. This will be a no-op for const
3427 /// variant.
3428 operator CaseIteratorImpl<ConstCaseHandle>() const {
3429 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3430 }
3431
3432 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3433 // Check index correctness after addition.
3434 // Note: Index == getNumCases() means end().
3435 assert(Case.Index + N >= 0 &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3437, __extension__ __PRETTY_FUNCTION__
))
3436 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3437, __extension__ __PRETTY_FUNCTION__
))
3437 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3437, __extension__ __PRETTY_FUNCTION__
))
;
3438 Case.Index += N;
3439 return *this;
3440 }
3441 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3442 // Check index correctness after subtraction.
3443 // Note: Case.Index == getNumCases() means end().
3444 assert(Case.Index - N >= 0 &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3446, __extension__ __PRETTY_FUNCTION__
))
3445 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3446, __extension__ __PRETTY_FUNCTION__
))
3446 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3446, __extension__ __PRETTY_FUNCTION__
))
;
3447 Case.Index -= N;
3448 return *this;
3449 }
3450 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3451 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "llvm/include/llvm/IR/Instructions.h", 3451, __extension__ __PRETTY_FUNCTION__
))
;
3452 return Case.Index - RHS.Case.Index;
3453 }
3454 bool operator==(const CaseIteratorImpl &RHS) const {
3455 return Case == RHS.Case;
3456 }
3457 bool operator<(const CaseIteratorImpl &RHS) const {
3458 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "llvm/include/llvm/IR/Instructions.h", 3458, __extension__ __PRETTY_FUNCTION__
))
;
3459 return Case.Index < RHS.Case.Index;
3460 }
3461 const CaseHandleT &operator*() const { return Case; }
3462 };
3463
3464 using CaseIt = CaseIteratorImpl<CaseHandle>;
3465 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3466
3467 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3468 unsigned NumCases,
3469 Instruction *InsertBefore = nullptr) {
3470 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3471 }
3472
3473 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3474 unsigned NumCases, BasicBlock *InsertAtEnd) {
3475 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3476 }
3477
3478 /// Provide fast operand accessors
3479 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3480
3481 // Accessor Methods for Switch stmt
3482 Value *getCondition() const { return getOperand(0); }
3483 void setCondition(Value *V) { setOperand(0, V); }
3484
3485 BasicBlock *getDefaultDest() const {
3486 return cast<BasicBlock>(getOperand(1));
3487 }
3488
3489 void setDefaultDest(BasicBlock *DefaultCase) {
3490 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3491 }
3492
3493 /// Return the number of 'cases' in this switch instruction, excluding the
3494 /// default case.
3495 unsigned getNumCases() const {
3496 return getNumOperands()/2 - 1;
3497 }
3498
3499 /// Returns a read/write iterator that points to the first case in the
3500 /// SwitchInst.
3501 CaseIt case_begin() {
3502 return CaseIt(this, 0);
3503 }
3504
3505 /// Returns a read-only iterator that points to the first case in the
3506 /// SwitchInst.
3507 ConstCaseIt case_begin() const {
3508 return ConstCaseIt(this, 0);
3509 }
3510
3511 /// Returns a read/write iterator that points one past the last in the
3512 /// SwitchInst.
3513 CaseIt case_end() {
3514 return CaseIt(this, getNumCases());
3515 }
3516
3517 /// Returns a read-only iterator that points one past the last in the
3518 /// SwitchInst.
3519 ConstCaseIt case_end() const {
3520 return ConstCaseIt(this, getNumCases());
3521 }
3522
3523 /// Iteration adapter for range-for loops.
3524 iterator_range<CaseIt> cases() {
3525 return make_range(case_begin(), case_end());
3526 }
3527
3528 /// Constant iteration adapter for range-for loops.
3529 iterator_range<ConstCaseIt> cases() const {
3530 return make_range(case_begin(), case_end());
3531 }
3532
3533 /// Returns an iterator that points to the default case.
3534 /// Note: this iterator allows to resolve successor only. Attempt
3535 /// to resolve case value causes an assertion.
3536 /// Also note, that increment and decrement also causes an assertion and
3537 /// makes iterator invalid.
3538 CaseIt case_default() {
3539 return CaseIt(this, DefaultPseudoIndex);
3540 }
3541 ConstCaseIt case_default() const {
3542 return ConstCaseIt(this, DefaultPseudoIndex);
3543 }
3544
3545 /// Search all of the case values for the specified constant. If it is
3546 /// explicitly handled, return the case iterator of it, otherwise return
3547 /// default case iterator to indicate that it is handled by the default
3548 /// handler.
3549 CaseIt findCaseValue(const ConstantInt *C) {
3550 return CaseIt(
3551 this,
3552 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3553 }
3554 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3555 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3556 return Case.getCaseValue() == C;
3557 });
3558 if (I != case_end())
3559 return I;
3560
3561 return case_default();
3562 }
3563
3564 /// Finds the unique case value for a given successor. Returns null if the
3565 /// successor is not found, not unique, or is the default case.
3566 ConstantInt *findCaseDest(BasicBlock *BB) {
3567 if (BB == getDefaultDest())
3568 return nullptr;
3569
3570 ConstantInt *CI = nullptr;
3571 for (auto Case : cases()) {
3572 if (Case.getCaseSuccessor() != BB)
3573 continue;
3574
3575 if (CI)
3576 return nullptr; // Multiple cases lead to BB.
3577
3578 CI = Case.getCaseValue();
3579 }
3580
3581 return CI;
3582 }
3583
3584 /// Add an entry to the switch instruction.
3585 /// Note:
3586 /// This action invalidates case_end(). Old case_end() iterator will
3587 /// point to the added case.
3588 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3589
3590 /// This method removes the specified case and its successor from the switch
3591 /// instruction. Note that this operation may reorder the remaining cases at
3592 /// index idx and above.
3593 /// Note:
3594 /// This action invalidates iterators for all cases following the one removed,
3595 /// including the case_end() iterator. It returns an iterator for the next
3596 /// case.
3597 CaseIt removeCase(CaseIt I);
3598
3599 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3600 BasicBlock *getSuccessor(unsigned idx) const {
3601 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor idx out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "llvm/include/llvm/IR/Instructions.h", 3601, __extension__ __PRETTY_FUNCTION__
))
;
3602 return cast<BasicBlock>(getOperand(idx*2+1));
3603 }
3604 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3605 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for switch!\""
, "llvm/include/llvm/IR/Instructions.h", 3605, __extension__ __PRETTY_FUNCTION__
))
;
3606 setOperand(idx * 2 + 1, NewSucc);
3607 }
3608
3609 // Methods for support type inquiry through isa, cast, and dyn_cast:
3610 static bool classof(const Instruction *I) {
3611 return I->getOpcode() == Instruction::Switch;
3612 }
3613 static bool classof(const Value *V) {
3614 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3615 }
3616};
3617
3618/// A wrapper class to simplify modification of SwitchInst cases along with
3619/// their prof branch_weights metadata.
3620class SwitchInstProfUpdateWrapper {
3621 SwitchInst &SI;
3622 Optional<SmallVector<uint32_t, 8> > Weights = None;
3623 bool Changed = false;
3624
3625protected:
3626 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3627
3628 MDNode *buildProfBranchWeightsMD();
3629
3630 void init();
3631
3632public:
3633 using CaseWeightOpt = Optional<uint32_t>;
3634 SwitchInst *operator->() { return &SI; }
3635 SwitchInst &operator*() { return SI; }
3636 operator SwitchInst *() { return &SI; }
3637
3638 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3639
3640 ~SwitchInstProfUpdateWrapper() {
3641 if (Changed)
3642 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3643 }
3644
3645 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3646 /// correspondent branch weight.
3647 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3648
3649 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3650 /// specified branch weight for the added case.
3651 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3652
3653 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3654 /// this object to not touch the underlying SwitchInst in destructor.
3655 SymbolTableList<Instruction>::iterator eraseFromParent();
3656
3657 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3658 CaseWeightOpt getSuccessorWeight(unsigned idx);
3659
3660 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3661};
3662
3663template <>
3664struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3665};
3666
3667DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits
<SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator
SwitchInst::op_begin() const { return OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst
::op_iterator SwitchInst::op_end() { return OperandTraits<
SwitchInst>::op_end(this); } SwitchInst::const_op_iterator
SwitchInst::op_end() const { return OperandTraits<SwitchInst
>::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SwitchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3667, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this))[i_nocapture
].get()); } void SwitchInst::setOperand(unsigned i_nocapture,
Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<SwitchInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3667, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<SwitchInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned SwitchInst::getNumOperands() const
{ return OperandTraits<SwitchInst>::operands(this); } template
<int Idx_nocapture> Use &SwitchInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &SwitchInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
3668
3669//===----------------------------------------------------------------------===//
3670// IndirectBrInst Class
3671//===----------------------------------------------------------------------===//
3672
3673//===---------------------------------------------------------------------------
3674/// Indirect Branch Instruction.
3675///
3676class IndirectBrInst : public Instruction {
3677 unsigned ReservedSpace;
3678
3679 // Operand[0] = Address to jump to
3680 // Operand[n+1] = n-th destination
3681 IndirectBrInst(const IndirectBrInst &IBI);
3682
3683 /// Create a new indirectbr instruction, specifying an
3684 /// Address to jump to. The number of expected destinations can be specified
3685 /// here to make memory allocation more efficient. This constructor can also
3686 /// autoinsert before another instruction.
3687 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3688
3689 /// Create a new indirectbr instruction, specifying an
3690 /// Address to jump to. The number of expected destinations can be specified
3691 /// here to make memory allocation more efficient. This constructor also
3692 /// autoinserts at the end of the specified BasicBlock.
3693 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3694
3695 // allocate space for exactly zero operands
3696 void *operator new(size_t S) { return User::operator new(S); }
3697
3698 void init(Value *Address, unsigned NumDests);
3699 void growOperands();
3700
3701protected:
3702 // Note: Instruction needs to be a friend here to call cloneImpl.
3703 friend class Instruction;
3704
3705 IndirectBrInst *cloneImpl() const;
3706
3707public:
3708 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3709
3710 /// Iterator type that casts an operand to a basic block.
3711 ///
3712 /// This only makes sense because the successors are stored as adjacent
3713 /// operands for indirectbr instructions.
3714 struct succ_op_iterator
3715 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3716 std::random_access_iterator_tag, BasicBlock *,
3717 ptrdiff_t, BasicBlock *, BasicBlock *> {
3718 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3719
3720 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3721 BasicBlock *operator->() const { return operator*(); }
3722 };
3723
3724 /// The const version of `succ_op_iterator`.
3725 struct const_succ_op_iterator
3726 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3727 std::random_access_iterator_tag,
3728 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3729 const BasicBlock *> {
3730 explicit const_succ_op_iterator(const_value_op_iterator I)
3731 : iterator_adaptor_base(I) {}
3732
3733 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3734 const BasicBlock *operator->() const { return operator*(); }
3735 };
3736
3737 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3738 Instruction *InsertBefore = nullptr) {
3739 return new IndirectBrInst(Address, NumDests, InsertBefore);
3740 }
3741
3742 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3743 BasicBlock *InsertAtEnd) {
3744 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3745 }
3746
3747 /// Provide fast operand accessors.
3748 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3749
3750 // Accessor Methods for IndirectBrInst instruction.
3751 Value *getAddress() { return getOperand(0); }
3752 const Value *getAddress() const { return getOperand(0); }
3753 void setAddress(Value *V) { setOperand(0, V); }
3754
3755 /// return the number of possible destinations in this
3756 /// indirectbr instruction.
3757 unsigned getNumDestinations() const { return getNumOperands()-1; }
3758
3759 /// Return the specified destination.
3760 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3761 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3762
3763 /// Add a destination.
3764 ///
3765 void addDestination(BasicBlock *Dest);
3766
3767 /// This method removes the specified successor from the
3768 /// indirectbr instruction.
3769 void removeDestination(unsigned i);
3770
3771 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3772 BasicBlock *getSuccessor(unsigned i) const {
3773 return cast<BasicBlock>(getOperand(i+1));
3774 }
3775 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3776 setOperand(i + 1, NewSucc);
3777 }
3778
3779 iterator_range<succ_op_iterator> successors() {
3780 return make_range(succ_op_iterator(std::next(value_op_begin())),
3781 succ_op_iterator(value_op_end()));
3782 }
3783
3784 iterator_range<const_succ_op_iterator> successors() const {
3785 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3786 const_succ_op_iterator(value_op_end()));
3787 }
3788
3789 // Methods for support type inquiry through isa, cast, and dyn_cast:
3790 static bool classof(const Instruction *I) {
3791 return I->getOpcode() == Instruction::IndirectBr;
3792 }
3793 static bool classof(const Value *V) {
3794 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3795 }
3796};
3797
3798template <>
3799struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3800};
3801
3802DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return
OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst
::const_op_iterator IndirectBrInst::op_begin() const { return
OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst
::op_end() { return OperandTraits<IndirectBrInst>::op_end
(this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end
() const { return OperandTraits<IndirectBrInst>::op_end
(const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<IndirectBrInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3802, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<IndirectBrInst
>::op_begin(const_cast<IndirectBrInst*>(this))[i_nocapture
].get()); } void IndirectBrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<IndirectBrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3802, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<IndirectBrInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned IndirectBrInst::getNumOperands(
) const { return OperandTraits<IndirectBrInst>::operands
(this); } template <int Idx_nocapture> Use &IndirectBrInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &IndirectBrInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
3803
3804//===----------------------------------------------------------------------===//
3805// InvokeInst Class
3806//===----------------------------------------------------------------------===//
3807
3808/// Invoke instruction. The SubclassData field is used to hold the
3809/// calling convention of the call.
3810///
3811class InvokeInst : public CallBase {
3812 /// The number of operands for this call beyond the called function,
3813 /// arguments, and operand bundles.
3814 static constexpr int NumExtraOperands = 2;
3815
3816 /// The index from the end of the operand array to the normal destination.
3817 static constexpr int NormalDestOpEndIdx = -3;
3818
3819 /// The index from the end of the operand array to the unwind destination.
3820 static constexpr int UnwindDestOpEndIdx = -2;
3821
3822 InvokeInst(const InvokeInst &BI);
3823
3824 /// Construct an InvokeInst given a range of arguments.
3825 ///
3826 /// Construct an InvokeInst from a range of arguments
3827 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3828 BasicBlock *IfException, ArrayRef<Value *> Args,
3829 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3830 const Twine &NameStr, Instruction *InsertBefore);
3831
3832 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3833 BasicBlock *IfException, ArrayRef<Value *> Args,
3834 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3835 const Twine &NameStr, BasicBlock *InsertAtEnd);
3836
3837 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3838 BasicBlock *IfException, ArrayRef<Value *> Args,
3839 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3840
3841 /// Compute the number of operands to allocate.
3842 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3843 // We need one operand for the called function, plus our extra operands and
3844 // the input operand counts provided.
3845 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3846 }
3847
3848protected:
3849 // Note: Instruction needs to be a friend here to call cloneImpl.
3850 friend class Instruction;
3851
3852 InvokeInst *cloneImpl() const;
3853
3854public:
3855 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3856 BasicBlock *IfException, ArrayRef<Value *> Args,
3857 const Twine &NameStr,
3858 Instruction *InsertBefore = nullptr) {
3859 int NumOperands = ComputeNumOperands(Args.size());
3860 return new (NumOperands)
3861 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3862 NameStr, InsertBefore);
3863 }
3864
3865 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3866 BasicBlock *IfException, ArrayRef<Value *> Args,
3867 ArrayRef<OperandBundleDef> Bundles = None,
3868 const Twine &NameStr = "",
3869 Instruction *InsertBefore = nullptr) {
3870 int NumOperands =
3871 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3872 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3873
3874 return new (NumOperands, DescriptorBytes)
3875 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3876 NameStr, InsertBefore);
3877 }
3878
3879 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3880 BasicBlock *IfException, ArrayRef<Value *> Args,
3881 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3882 int NumOperands = ComputeNumOperands(Args.size());
3883 return new (NumOperands)
3884 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3885 NameStr, InsertAtEnd);
3886 }
3887
3888 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3889 BasicBlock *IfException, ArrayRef<Value *> Args,
3890 ArrayRef<OperandBundleDef> Bundles,
3891 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3892 int NumOperands =
3893 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3894 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3895
3896 return new (NumOperands, DescriptorBytes)
3897 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3898 NameStr, InsertAtEnd);
3899 }
3900
3901 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3902 BasicBlock *IfException, ArrayRef<Value *> Args,
3903 const Twine &NameStr,
3904 Instruction *InsertBefore = nullptr) {
3905 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3906 IfException, Args, None, NameStr, InsertBefore);
3907 }
3908
3909 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3910 BasicBlock *IfException, ArrayRef<Value *> Args,
3911 ArrayRef<OperandBundleDef> Bundles = None,
3912 const Twine &NameStr = "",
3913 Instruction *InsertBefore = nullptr) {
3914 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3915 IfException, Args, Bundles, NameStr, InsertBefore);
3916 }
3917
3918 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3919 BasicBlock *IfException, ArrayRef<Value *> Args,
3920 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3921 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3922 IfException, Args, NameStr, InsertAtEnd);
3923 }
3924
3925 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3926 BasicBlock *IfException, ArrayRef<Value *> Args,
3927 ArrayRef<OperandBundleDef> Bundles,
3928 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3929 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3930 IfException, Args, Bundles, NameStr, InsertAtEnd);
3931 }
3932
3933 /// Create a clone of \p II with a different set of operand bundles and
3934 /// insert it before \p InsertPt.
3935 ///
3936 /// The returned invoke instruction is identical to \p II in every way except
3937 /// that the operand bundles for the new instruction are set to the operand
3938 /// bundles in \p Bundles.
3939 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3940 Instruction *InsertPt = nullptr);
3941
3942 // get*Dest - Return the destination basic blocks...
3943 BasicBlock *getNormalDest() const {
3944 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3945 }
3946 BasicBlock *getUnwindDest() const {
3947 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3948 }
3949 void setNormalDest(BasicBlock *B) {
3950 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3951 }
3952 void setUnwindDest(BasicBlock *B) {
3953 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3954 }
3955
3956 /// Get the landingpad instruction from the landing pad
3957 /// block (the unwind destination).
3958 LandingPadInst *getLandingPadInst() const;
3959
3960 BasicBlock *getSuccessor(unsigned i) const {
3961 assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "llvm/include/llvm/IR/Instructions.h", 3961, __extension__ __PRETTY_FUNCTION__
))
;
3962 return i == 0 ? getNormalDest() : getUnwindDest();
3963 }
3964
3965 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3966 assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "llvm/include/llvm/IR/Instructions.h", 3966, __extension__ __PRETTY_FUNCTION__
))
;
3967 if (i == 0)
3968 setNormalDest(NewSucc);
3969 else
3970 setUnwindDest(NewSucc);
3971 }
3972
3973 unsigned getNumSuccessors() const { return 2; }
3974
3975 // Methods for support type inquiry through isa, cast, and dyn_cast:
3976 static bool classof(const Instruction *I) {
3977 return (I->getOpcode() == Instruction::Invoke);
3978 }
3979 static bool classof(const Value *V) {
3980 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3981 }
3982
3983private:
3984 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3985 // method so that subclasses cannot accidentally use it.
3986 template <typename Bitfield>
3987 void setSubclassData(typename Bitfield::Type Value) {
3988 Instruction::setSubclassData<Bitfield>(Value);
3989 }
3990};
3991
3992InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3993 BasicBlock *IfException, ArrayRef<Value *> Args,
3994 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3995 const Twine &NameStr, Instruction *InsertBefore)
3996 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3997 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3998 InsertBefore) {
3999 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4000}
4001
4002InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4003 BasicBlock *IfException, ArrayRef<Value *> Args,
4004 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4005 const Twine &NameStr, BasicBlock *InsertAtEnd)
4006 : CallBase(Ty->getReturnType(), Instruction::Invoke,
4007 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4008 InsertAtEnd) {
4009 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4010}
4011
4012//===----------------------------------------------------------------------===//
4013// CallBrInst Class
4014//===----------------------------------------------------------------------===//
4015
4016/// CallBr instruction, tracking function calls that may not return control but
4017/// instead transfer it to a third location. The SubclassData field is used to
4018/// hold the calling convention of the call.
4019///
4020class CallBrInst : public CallBase {
4021
4022 unsigned NumIndirectDests;
4023
4024 CallBrInst(const CallBrInst &BI);
4025
4026 /// Construct a CallBrInst given a range of arguments.
4027 ///
4028 /// Construct a CallBrInst from a range of arguments
4029 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4030 ArrayRef<BasicBlock *> IndirectDests,
4031 ArrayRef<Value *> Args,
4032 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4033 const Twine &NameStr, Instruction *InsertBefore);
4034
4035 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4036 ArrayRef<BasicBlock *> IndirectDests,
4037 ArrayRef<Value *> Args,
4038 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4039 const Twine &NameStr, BasicBlock *InsertAtEnd);
4040
4041 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4042 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4043 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4044
4045 /// Compute the number of operands to allocate.
4046 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4047 int NumBundleInputs = 0) {
4048 // We need one operand for the called function, plus our extra operands and
4049 // the input operand counts provided.
4050 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4051 }
4052
4053protected:
4054 // Note: Instruction needs to be a friend here to call cloneImpl.
4055 friend class Instruction;
4056
4057 CallBrInst *cloneImpl() const;
4058
4059public:
4060 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4061 BasicBlock *DefaultDest,
4062 ArrayRef<BasicBlock *> IndirectDests,
4063 ArrayRef<Value *> Args, const Twine &NameStr,
4064 Instruction *InsertBefore = nullptr) {
4065 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4066 return new (NumOperands)
4067 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4068 NumOperands, NameStr, InsertBefore);
4069 }
4070
4071 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4072 BasicBlock *DefaultDest,
4073 ArrayRef<BasicBlock *> IndirectDests,
4074 ArrayRef<Value *> Args,
4075 ArrayRef<OperandBundleDef> Bundles = None,
4076 const Twine &NameStr = "",
4077 Instruction *InsertBefore = nullptr) {
4078 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4079 CountBundleInputs(Bundles));
4080 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4081
4082 return new (NumOperands, DescriptorBytes)
4083 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4084 NumOperands, NameStr, InsertBefore);
4085 }
4086
4087 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4088 BasicBlock *DefaultDest,
4089 ArrayRef<BasicBlock *> IndirectDests,
4090 ArrayRef<Value *> Args, const Twine &NameStr,
4091 BasicBlock *InsertAtEnd) {
4092 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4093 return new (NumOperands)
4094 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4095 NumOperands, NameStr, InsertAtEnd);
4096 }
4097
4098 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4099 BasicBlock *DefaultDest,
4100 ArrayRef<BasicBlock *> IndirectDests,
4101 ArrayRef<Value *> Args,
4102 ArrayRef<OperandBundleDef> Bundles,
4103 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4104 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4105 CountBundleInputs(Bundles));
4106 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4107
4108 return new (NumOperands, DescriptorBytes)
4109 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4110 NumOperands, NameStr, InsertAtEnd);
4111 }
4112
4113 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4114 ArrayRef<BasicBlock *> IndirectDests,
4115 ArrayRef<Value *> Args, const Twine &NameStr,
4116 Instruction *InsertBefore = nullptr) {
4117 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4118 IndirectDests, Args, NameStr, InsertBefore);
4119 }
4120
4121 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4122 ArrayRef<BasicBlock *> IndirectDests,
4123 ArrayRef<Value *> Args,
4124 ArrayRef<OperandBundleDef> Bundles = None,
4125 const Twine &NameStr = "",
4126 Instruction *InsertBefore = nullptr) {
4127 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4128 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4129 }
4130
4131 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4132 ArrayRef<BasicBlock *> IndirectDests,
4133 ArrayRef<Value *> Args, const Twine &NameStr,
4134 BasicBlock *InsertAtEnd) {
4135 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4136 IndirectDests, Args, NameStr, InsertAtEnd);
4137 }
4138
4139 static CallBrInst *Create(FunctionCallee Func,
4140 BasicBlock *DefaultDest,
4141 ArrayRef<BasicBlock *> IndirectDests,
4142 ArrayRef<Value *> Args,
4143 ArrayRef<OperandBundleDef> Bundles,
4144 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4145 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4146 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4147 }
4148
4149 /// Create a clone of \p CBI with a different set of operand bundles and
4150 /// insert it before \p InsertPt.
4151 ///
4152 /// The returned callbr instruction is identical to \p CBI in every way
4153 /// except that the operand bundles for the new instruction are set to the
4154 /// operand bundles in \p Bundles.
4155 static CallBrInst *Create(CallBrInst *CBI,
4156 ArrayRef<OperandBundleDef> Bundles,
4157 Instruction *InsertPt = nullptr);
4158
4159 /// Return the number of callbr indirect dest labels.
4160 ///
4161 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4162
4163 /// getIndirectDestLabel - Return the i-th indirect dest label.
4164 ///
4165 Value *getIndirectDestLabel(unsigned i) const {
4166 assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "llvm/include/llvm/IR/Instructions.h", 4166, __extension__ __PRETTY_FUNCTION__
))
;
4167 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4168 }
4169
4170 Value *getIndirectDestLabelUse(unsigned i) const {
4171 assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "llvm/include/llvm/IR/Instructions.h", 4171, __extension__ __PRETTY_FUNCTION__
))
;
4172 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4173 }
4174
4175 // Return the destination basic blocks...
4176 BasicBlock *getDefaultDest() const {
4177 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4178 }
4179 BasicBlock *getIndirectDest(unsigned i) const {
4180 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4181 }
4182 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4183 SmallVector<BasicBlock *, 16> IndirectDests;
4184 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4185 IndirectDests.push_back(getIndirectDest(i));
4186 return IndirectDests;
4187 }
4188 void setDefaultDest(BasicBlock *B) {
4189 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4190 }
4191 void setIndirectDest(unsigned i, BasicBlock *B) {
4192 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4193 }
4194
4195 BasicBlock *getSuccessor(unsigned i) const {
4196 assert(i < getNumSuccessors() + 1 &&(static_cast <bool> (i < getNumSuccessors() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "llvm/include/llvm/IR/Instructions.h", 4197, __extension__ __PRETTY_FUNCTION__
))
4197 "Successor # out of range for callbr!")(static_cast <bool> (i < getNumSuccessors() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "llvm/include/llvm/IR/Instructions.h", 4197, __extension__ __PRETTY_FUNCTION__
))
;
4198 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4199 }
4200
4201 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4202 assert(i < getNumIndirectDests() + 1 &&(static_cast <bool> (i < getNumIndirectDests() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "llvm/include/llvm/IR/Instructions.h", 4203, __extension__ __PRETTY_FUNCTION__
))
4203 "Successor # out of range for callbr!")(static_cast <bool> (i < getNumIndirectDests() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "llvm/include/llvm/IR/Instructions.h", 4203, __extension__ __PRETTY_FUNCTION__
))
;
4204 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4205 }
4206
4207 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4208
4209 // Methods for support type inquiry through isa, cast, and dyn_cast:
4210 static bool classof(const Instruction *I) {
4211 return (I->getOpcode() == Instruction::CallBr);
4212 }
4213 static bool classof(const Value *V) {
4214 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4215 }
4216
4217private:
4218 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4219 // method so that subclasses cannot accidentally use it.
 // (The bitfield storage itself lives in Instruction; keeping this wrapper
 // private means only CallBrInst internals can touch CallBr subclass data.)
4220 template <typename Bitfield>
4221 void setSubclassData(typename Bitfield::Type Value) {
4222 Instruction::setSubclassData<Bitfield>(Value);
4223 }
4224};
4225
// Construct a callbr instruction and insert it before \p InsertBefore.
// The operand list is co-allocated immediately before `this`
// (op_end(this) - NumOperands); init() then fills in the callee, args,
// bundles, default destination and indirect destinations.
4226CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4227 ArrayRef<BasicBlock *> IndirectDests,
4228 ArrayRef<Value *> Args,
4229 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4230 const Twine &NameStr, Instruction *InsertBefore)
4231 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4232 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4233 InsertBefore) {
4234 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4235}
4236
// Construct a callbr instruction and append it at the end of basic block
// \p InsertAtEnd. Same co-allocated operand layout as the InsertBefore
// overload; init() populates all operands.
4237CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4238 ArrayRef<BasicBlock *> IndirectDests,
4239 ArrayRef<Value *> Args,
4240 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4241 const Twine &NameStr, BasicBlock *InsertAtEnd)
4242 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4243 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4244 InsertAtEnd) {
4245 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4246}
4247
4248//===----------------------------------------------------------------------===//
4249// ResumeInst Class
4250//===----------------------------------------------------------------------===//
4251
4252//===---------------------------------------------------------------------------
4253/// Resume the propagation of an exception.
4254///
4255class ResumeInst : public Instruction {
4256 ResumeInst(const ResumeInst &RI);
4257
4258 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4259 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4260
4261protected: