Bug Summary

File: /build/source/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
Warning: line 474, column 21
Called C++ object pointer is null
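The analyzer's path through tryPromoteAllocaToVector (steps 17-49 below) shows how this can happen: the worklist loop fetches each user with dyn_cast<Instruction>(U->getUser()), which yields a null pointer when the user is not an Instruction (a ConstantExpr, for instance), and, as modeled by the analyzer, none of the intervening checks rejects that null before Inst->uses() is evaluated at line 474. A minimal sketch of the flagged shape, using real LLVM APIs but a hypothetical helper name:

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Sketch only: mirrors the shape of the code flagged below, not the pass itself.
    static bool visitAllocaUse(Use *U) {
      // dyn_cast returns nullptr when the user is not an Instruction.
      Instruction *Inst = dyn_cast<Instruction>(U->getUser());
      // ... on the reported path every intervening branch is skipped ...
      for (Use &UU : Inst->uses()) // null 'this' pointer if Inst == nullptr
        (void)UU;
      return true;
    }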

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUPromoteAlloca.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16 -I lib/Target/AMDGPU -I /build/source/llvm/lib/Target/AMDGPU -I include -I /build/source/llvm/include -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1672697298 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-01-03-004743-16233-1 -x c++ /build/source/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

/build/source/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

1//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass eliminates allocas by either converting them into vectors or
10// by migrating them to local address space.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPU.h"
15#include "GCNSubtarget.h"
16#include "Utils/AMDGPUBaseInfo.h"
17#include "llvm/Analysis/CaptureTracking.h"
18#include "llvm/Analysis/ValueTracking.h"
19#include "llvm/CodeGen/TargetPassConfig.h"
20#include "llvm/IR/IRBuilder.h"
21#include "llvm/IR/IntrinsicInst.h"
22#include "llvm/IR/IntrinsicsAMDGPU.h"
23#include "llvm/IR/IntrinsicsR600.h"
24#include "llvm/Pass.h"
25#include "llvm/Target/TargetMachine.h"
26
27#define DEBUG_TYPE "amdgpu-promote-alloca"
28
29using namespace llvm;
30
31namespace {
32
33static cl::opt<bool> DisablePromoteAllocaToVector(
34 "disable-promote-alloca-to-vector",
35 cl::desc("Disable promote alloca to vector"),
36 cl::init(false));
37
38static cl::opt<bool> DisablePromoteAllocaToLDS(
39 "disable-promote-alloca-to-lds",
40 cl::desc("Disable promote alloca to LDS"),
41 cl::init(false));
42
43static cl::opt<unsigned> PromoteAllocaToVectorLimit(
44 "amdgpu-promote-alloca-to-vector-limit",
45 cl::desc("Maximum byte size to consider promote alloca to vector"),
46 cl::init(0));
47
48// FIXME: This can create globals so should be a module pass.
49class AMDGPUPromoteAlloca : public FunctionPass {
50public:
51 static char ID;
52
53 AMDGPUPromoteAlloca() : FunctionPass(ID) {}
54
55 bool runOnFunction(Function &F) override;
56
57 StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
58
59 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
60
61 void getAnalysisUsage(AnalysisUsage &AU) const override {
62 AU.setPreservesCFG();
63 FunctionPass::getAnalysisUsage(AU);
64 }
65};
66
67class AMDGPUPromoteAllocaImpl {
68private:
69 const TargetMachine &TM;
70 Module *Mod = nullptr;
71 const DataLayout *DL = nullptr;
72
73 // FIXME: This should be per-kernel.
74 uint32_t LocalMemLimit = 0;
75 uint32_t CurrentLocalMemUsage = 0;
76 unsigned MaxVGPRs;
77
78 bool IsAMDGCN = false;
79 bool IsAMDHSA = false;
80
81 std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
82 Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
83
84 /// BaseAlloca is the alloca root the search started from.
85 /// Val may be that alloca or a recursive user of it.
86 bool collectUsesWithPtrTypes(Value *BaseAlloca,
87 Value *Val,
88 std::vector<Value*> &WorkList) const;
89
90 /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
91 /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
92 /// Returns true if both operands are derived from the same alloca. Val should
93 /// be the same value as one of the input operands of UseInst.
94 bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
95 Instruction *UseInst,
96 int OpIdx0, int OpIdx1) const;
97
98 /// Check whether we have enough local memory for promotion.
99 bool hasSufficientLocalMem(const Function &F);
100
101 bool handleAlloca(AllocaInst &I, bool SufficientLDS);
102
103public:
104 AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
105 bool run(Function &F);
106};
107
108class AMDGPUPromoteAllocaToVector : public FunctionPass {
109public:
110 static char ID;
111
112 AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
113
114 bool runOnFunction(Function &F) override;
115
116 StringRef getPassName() const override {
117 return "AMDGPU Promote Alloca to vector";
118 }
119
120 void getAnalysisUsage(AnalysisUsage &AU) const override {
121 AU.setPreservesCFG();
122 FunctionPass::getAnalysisUsage(AU);
123 }
124};
125
126} // end anonymous namespace
127
128char AMDGPUPromoteAlloca::ID = 0;
129char AMDGPUPromoteAllocaToVector::ID = 0;
130
131INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
132 "AMDGPU promote alloca to vector or LDS", false, false)
133// Move LDS uses from functions to kernels before promote alloca for accurate
134// estimation of LDS available
135INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
136INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
137 "AMDGPU promote alloca to vector or LDS", false, false)
138
139INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
140 "AMDGPU promote alloca to vector", false, false)
141
142char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
143char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
144
145bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
146 if (skipFunction(F))
147 return false;
148
149 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
150 return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
151 }
152 return false;
153}
154
155PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
156 FunctionAnalysisManager &AM) {
157 bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
158 if (Changed) {
159 PreservedAnalyses PA;
160 PA.preserveSet<CFGAnalyses>();
161 return PA;
162 }
163 return PreservedAnalyses::all();
164}
165
166bool AMDGPUPromoteAllocaImpl::run(Function &F) {
167 Mod = F.getParent();
168 DL = &Mod->getDataLayout();
169
170 const Triple &TT = TM.getTargetTriple();
171 IsAMDGCN = TT.getArch() == Triple::amdgcn;
172 IsAMDHSA = TT.getOS() == Triple::AMDHSA;
173
174 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
175 if (!ST.isPromoteAllocaEnabled())
176 return false;
177
178 if (IsAMDGCN) {
179 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
180 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
181 // A non-entry function has only 32 caller preserved registers.
182 // Do not promote alloca which will force spilling.
183 if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
184 MaxVGPRs = std::min(MaxVGPRs, 32u);
185 } else {
186 MaxVGPRs = 128;
187 }
188
189 bool SufficientLDS = hasSufficientLocalMem(F);
190 bool Changed = false;
191 BasicBlock &EntryBB = *F.begin();
192
193 SmallVector<AllocaInst *, 16> Allocas;
194 for (Instruction &I : EntryBB) {
195 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
196 Allocas.push_back(AI);
197 }
198
199 for (AllocaInst *AI : Allocas) {
200 if (handleAlloca(*AI, SufficientLDS))
201 Changed = true;
202 }
203
204 return Changed;
205}
206
207std::pair<Value *, Value *>
208AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
209 Function &F = *Builder.GetInsertBlock()->getParent();
210 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
211
212 if (!IsAMDHSA) {
213 Function *LocalSizeYFn
214 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
215 Function *LocalSizeZFn
216 = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
217
218 CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
219 CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
220
221 ST.makeLIDRangeMetadata(LocalSizeY);
222 ST.makeLIDRangeMetadata(LocalSizeZ);
223
224 return std::pair(LocalSizeY, LocalSizeZ);
225 }
226
227 // We must read the size out of the dispatch pointer.
228 assert(IsAMDGCN);
229
230 // We are indexing into this struct, and want to extract the workgroup_size_*
231 // fields.
232 //
233 // typedef struct hsa_kernel_dispatch_packet_s {
234 // uint16_t header;
235 // uint16_t setup;
236 // uint16_t workgroup_size_x ;
237 // uint16_t workgroup_size_y;
238 // uint16_t workgroup_size_z;
239 // uint16_t reserved0;
240 // uint32_t grid_size_x ;
241 // uint32_t grid_size_y ;
242 // uint32_t grid_size_z;
243 //
244 // uint32_t private_segment_size;
245 // uint32_t group_segment_size;
246 // uint64_t kernel_object;
247 //
248 // #ifdef HSA_LARGE_MODEL
249 // void *kernarg_address;
250 // #elif defined HSA_LITTLE_ENDIAN
251 // void *kernarg_address;
252 // uint32_t reserved1;
253 // #else
254 // uint32_t reserved1;
255 // void *kernarg_address;
256 // #endif
257 // uint64_t reserved2;
258 // hsa_signal_t completion_signal; // uint64_t wrapper
259 // } hsa_kernel_dispatch_packet_t
260 //
261 Function *DispatchPtrFn
262 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
263
264 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
265 DispatchPtr->addRetAttr(Attribute::NoAlias);
266 DispatchPtr->addRetAttr(Attribute::NonNull);
267 F.removeFnAttr("amdgpu-no-dispatch-ptr");
268
269 // Size of the dispatch packet struct.
270 DispatchPtr->addDereferenceableRetAttr(64);
271
272 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
273 Value *CastDispatchPtr = Builder.CreateBitCast(
274 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
275
276 // We could do a single 64-bit load here, but it's likely that the basic
277 // 32-bit and extract sequence is already present, and it is probably easier
278 // to CSE this. The loads should be mergeable later anyway.
279 Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
280 LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
281
282 Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
283 LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
284
285 MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt);
286 LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
287 LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
288 ST.makeLIDRangeMetadata(LoadZU);
289
290 // Extract y component. Upper half of LoadZU should be zero already.
291 Value *Y = Builder.CreateLShr(LoadXY, 16);
292
293 return std::pair(Y, LoadZU);
294}
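A note on the two loads above: LoadXY is the 32-bit word at byte offset 4 of the dispatch packet, so on this little-endian target its low half is workgroup_size_x and its high half is workgroup_size_y, which the LShr by 16 extracts. LoadZU is the word at offset 8, whose low half is workgroup_size_z; the pass returns it unmasked because its upper half (reserved0) is expected to be zero. An illustrative scalar equivalent, with hypothetical names:

    #include <cstdint>

    // Illustrative unpacking, mirroring the IR built in getLocalSizeYZ above.
    uint32_t unpackY(uint32_t LoadXY) { return LoadXY >> 16; } // workgroup_size_y
    uint32_t unpackZ(uint32_t LoadZU) { return LoadZU; } // workgroup_size_z; reserved0 (upper half) assumed zero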
295
296Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
297 unsigned N) {
298 Function *F = Builder.GetInsertBlock()->getParent();
299 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
300 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
301 StringRef AttrName;
302
303 switch (N) {
304 case 0:
305 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
306 : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
307 AttrName = "amdgpu-no-workitem-id-x";
308 break;
309 case 1:
310 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
311 : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
312 AttrName = "amdgpu-no-workitem-id-y";
313 break;
314
315 case 2:
316 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
317 : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
318 AttrName = "amdgpu-no-workitem-id-z";
319 break;
320 default:
321 llvm_unreachable("invalid dimension");
322 }
323
324 Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
325 CallInst *CI = Builder.CreateCall(WorkitemIdFn);
326 ST.makeLIDRangeMetadata(CI);
327 F->removeFnAttr(AttrName);
328
329 return CI;
330}
331
332static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
333 return FixedVectorType::get(ArrayTy->getElementType(),
334 ArrayTy->getNumElements());
335}
336
337static Value *
338calculateVectorIndex(Value *Ptr,
339 const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
340 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
341 if (!GEP)
342 return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));
343
344 auto I = GEPIdx.find(GEP);
345 assert(I != GEPIdx.end() && "Must have entry for GEP!");
346 return I->second;
347}
348
349static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
350 Type *VecElemTy, const DataLayout &DL) {
351 // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
352 // helper.
353 unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
354 MapVector<Value *, APInt> VarOffsets;
355 APInt ConstOffset(BW, 0);
356 if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
36: Assuming the condition is false
38: Taking false branch
357 !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
37: Assuming the condition is false
358 return nullptr;
359
360 unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
361 if (VarOffsets.size() > 1)
39: Assuming the condition is false
40: Taking false branch
362 return nullptr;
363
364 if (VarOffsets.size() == 1) {
41: Assuming the condition is false
42: Taking false branch
365 // Only handle cases where we don't need to insert extra arithmetic
366 // instructions.
367 const auto &VarOffset = VarOffsets.front();
368 if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
369 return nullptr;
370 return VarOffset.first;
371 }
372
373 APInt Quot;
374 uint64_t Rem;
375 APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
376 if (Rem != 0)
43: Assuming 'Rem' is equal to 0
44: Taking false branch
377 return nullptr;
378
379 return ConstantInt::get(GEP->getContext(), Quot);
45: Returning pointer, which participates in a condition later
380}
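GEPToVectorIndex therefore succeeds in exactly two cases: a single variable offset whose scale equals the element size, or a constant byte offset that divides evenly by it. For example, with an i32 element type (alloc size 4), a constant offset of 12 bytes maps to lane 3, while an offset of 6 bytes is rejected because the remainder is nonzero. A self-contained sketch of the constant-offset arithmetic, with a hypothetical helper name:

    #include <cstdint>
    #include <optional>

    // Sketch of the udivrem-based computation at the end of GEPToVectorIndex.
    std::optional<uint64_t> constOffsetToIndex(uint64_t ByteOffset, uint64_t ElemSize) {
      if (ByteOffset % ElemSize != 0) // corresponds to 'Rem != 0' above
        return std::nullopt; // offset is not on an element boundary
      return ByteOffset / ElemSize; // vector lane index
    }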
381
382static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
383 unsigned MaxVGPRs) {
384
385 if (DisablePromoteAllocaToVector) {
1: Assuming the condition is false
2: Taking false branch
386 LLVM_DEBUG(dbgs() << " Promotion alloca to vector is disabled\n");
387 return false;
388 }
389
390 Type *AllocaTy = Alloca->getAllocatedType();
391 auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
3: Assuming 'AllocaTy' is not a 'CastReturnType'
392 if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
4: Assuming 'AllocaTy' is a 'CastReturnType'
4.1: 'ArrayTy' is non-null
393 if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
5: Assuming the condition is true
7: Taking true branch
394 ArrayTy->getNumElements() > 0)
6: Assuming the condition is true
395 VectorTy = arrayTypeToVecType(ArrayTy);
396 }
397
398 // Use up to 1/4 of available register budget for vectorization.
399 unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
8: Assuming the condition is false
9: '?' condition is false
400 : (MaxVGPRs * 32);
401
402 if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
10: Assuming the condition is false
403 LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with "
404 << MaxVGPRs << " registers available\n");
405 return false;
406 }
407
408 LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
11: Taking false branch
12: Assuming 'DebugFlag' is false
409
410 // FIXME: There is no reason why we can't support larger arrays, we
411 // are just being conservative for now.
412// FIXME: We also reject allocas of the form [2 x [2 x i32]] or equivalent.
413// Potentially these could also be promoted, but we don't currently handle this case.
414 if (!VectorTy || VectorTy->getNumElements() > 16 ||
13: Assuming 'VectorTy' is non-null
14: Assuming the condition is false
16: Taking false branch
415 VectorTy->getNumElements() < 2) {
15: Assuming the condition is false
416 LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
417 return false;
418 }
419
420 std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
421 SmallVector<Instruction *> WorkList;
422 SmallVector<Use *, 8> Uses;
423 for (Use &U : Alloca->uses())
424 Uses.push_back(&U);
425
426 Type *VecEltTy = VectorTy->getElementType();
427 while (!Uses.empty()) {
17: Loop condition is true. Entering loop body
428 Use *U = Uses.pop_back_val();
429 Instruction *Inst = dyn_cast<Instruction>(U->getUser());
18: Assuming the object is not a 'CastReturnType'
19: 'Inst' initialized to a null pointer value
430
431 if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
20: Calling 'getLoadStorePointerOperand'
29: Returning from 'getLoadStorePointerOperand'
29.1: 'Ptr' is null
30: Taking false branch
432 // This is a store of the pointer, not to the pointer.
433 if (isa<StoreInst>(Inst) &&
434 U->getOperandNo() != StoreInst::getPointerOperandIndex())
435 return false;
436
437 Type *AccessTy = getLoadStoreType(Inst);
438 Ptr = Ptr->stripPointerCasts();
439
440 // Alloca already accessed as vector, leave alone.
441 if (Ptr == Alloca && DL.getTypeStoreSize(Alloca->getAllocatedType()) ==
442 DL.getTypeStoreSize(AccessTy))
443 continue;
444
445 // Check that this is a simple access of a vector element.
446 bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
447 : cast<StoreInst>(Inst)->isSimple();
448 if (!IsSimple ||
449 !CastInst::isBitOrNoopPointerCastable(VecEltTy, AccessTy, DL))
450 return false;
451
452 WorkList.push_back(Inst);
453 continue;
454 }
455
456 if (isa<BitCastInst>(Inst)) {
31: Assuming 'Inst' is not a 'BitCastInst'
32: Taking false branch
457 // Look through bitcasts.
458 for (Use &U : Inst->uses())
459 Uses.push_back(&U);
460 continue;
461 }
462
463 if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
33: Assuming 'GEP' is non-null
34: Taking true branch
464 // If we can't compute a vector index from this GEP, then we can't
465 // promote this alloca to vector.
466 Value *Index = GEPToVectorIndex(GEP, Alloca, VecEltTy, DL);
35: Calling 'GEPToVectorIndex'
46: Returning from 'GEPToVectorIndex'
467 if (!Index) {
47: Assuming 'Index' is non-null
48: Taking false branch
468 LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP
469 << '\n');
470 return false;
471 }
472
473 GEPVectorIdx[GEP] = Index;
474 for (Use &U : Inst->uses())
49: Called C++ object pointer is null
475 Uses.push_back(&U);
476 continue;
477 }
478
479 // Ignore assume-like intrinsics and comparisons used in assumes.
480 if (isAssumeLikeIntrinsic(Inst))
481 continue;
482
483 if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
484 return isAssumeLikeIntrinsic(cast<Instruction>(U));
485 }))
486 continue;
487
488 // Unknown user.
489 return false;
490 }
491
492 LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
493 << *VectorTy << '\n');
494
495 for (Instruction *Inst : WorkList) {
496 IRBuilder<> Builder(Inst);
497 switch (Inst->getOpcode()) {
498 case Instruction::Load: {
499 Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
500 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
501 Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
502 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
503 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
504 Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
505 if (Inst->getType() != VecEltTy)
506 ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
507 Inst->replaceAllUsesWith(ExtractElement);
508 Inst->eraseFromParent();
509 break;
510 }
511 case Instruction::Store: {
512 StoreInst *SI = cast<StoreInst>(Inst);
513 Value *Ptr = SI->getPointerOperand();
514 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
515 Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
516 Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
517 Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
518 Value *Elt = SI->getValueOperand();
519 if (Elt->getType() != VecEltTy)
520 Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
521 Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
522 Builder.CreateStore(NewVecValue, BitCast);
523 Inst->eraseFromParent();
524 break;
525 }
526
527 default:
528 llvm_unreachable("Inconsistency in instructions promotable to vector");
529 }
530 }
531 return true;
532}
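On the register-budget check near the top of this function: Limit is measured in bits, MaxVGPRs * 32 being the full VGPR file (each VGPR is 32 bits wide per lane), and multiplying the alloca size by 4 enforces the one-quarter budget the comment describes. With MaxVGPRs == 256, for instance, the file is 8192 bits, so any alloca over 256 bytes is rejected. A compile-time sketch under those assumed example values:

    #include <cstdint>

    // Assumed example value; each VGPR holds 32 bits.
    constexpr unsigned MaxVGPRs = 256;
    constexpr unsigned LimitBits = MaxVGPRs * 32; // 8192-bit budget
    constexpr bool rejected(uint64_t AllocaBits) { return AllocaBits * 4 > LimitBits; }
    static_assert(!rejected(256 * 8), "a 256-byte alloca still fits");
    static_assert(rejected(257 * 8), "anything larger is rejected");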
533
534static bool isCallPromotable(CallInst *CI) {
535 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
536 if (!II)
537 return false;
538
539 switch (II->getIntrinsicID()) {
540 case Intrinsic::memcpy:
541 case Intrinsic::memmove:
542 case Intrinsic::memset:
543 case Intrinsic::lifetime_start:
544 case Intrinsic::lifetime_end:
545 case Intrinsic::invariant_start:
546 case Intrinsic::invariant_end:
547 case Intrinsic::launder_invariant_group:
548 case Intrinsic::strip_invariant_group:
549 case Intrinsic::objectsize:
550 return true;
551 default:
552 return false;
553 }
554}
555
556bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
557 Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
558 int OpIdx1) const {
559 // Figure out which operand is the one we might not be promoting.
560 Value *OtherOp = Inst->getOperand(OpIdx0);
561 if (Val == OtherOp)
562 OtherOp = Inst->getOperand(OpIdx1);
563
564 if (isa<ConstantPointerNull>(OtherOp))
565 return true;
566
567 Value *OtherObj = getUnderlyingObject(OtherOp);
568 if (!isa<AllocaInst>(OtherObj))
569 return false;
570
571 // TODO: We should be able to replace undefs with the right pointer type.
572
573 // TODO: If we know the other base object is another promotable
574 // alloca, not necessarily this alloca, we can do this. The
575 // important part is both must have the same address space at
576 // the end.
577 if (OtherObj != BaseAlloca) {
578 LLVM_DEBUG(
579 dbgs() << "Found a binary instruction with another alloca object\n");
580 return false;
581 }
582
583 return true;
584}
585
586bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
587 Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
588
589 for (User *User : Val->users()) {
590 if (is_contained(WorkList, User))
591 continue;
592
593 if (CallInst *CI = dyn_cast<CallInst>(User)) {
594 if (!isCallPromotable(CI))
595 return false;
596
597 WorkList.push_back(User);
598 continue;
599 }
600
601 Instruction *UseInst = cast<Instruction>(User);
602 if (UseInst->getOpcode() == Instruction::PtrToInt)
603 return false;
604
605 if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
606 if (LI->isVolatile())
607 return false;
608
609 continue;
610 }
611
612 if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
613 if (SI->isVolatile())
614 return false;
615
616 // Reject if the stored value is not the pointer operand.
617 if (SI->getPointerOperand() != Val)
618 return false;
619 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
620 if (RMW->isVolatile())
621 return false;
622 } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
623 if (CAS->isVolatile())
624 return false;
625 }
626
627 // Only promote a select if we know that the other select operand
628 // is from another pointer that will also be promoted.
629 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
630 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
631 return false;
632
633 // May need to rewrite constant operands.
634 WorkList.push_back(ICmp);
635 }
636
637 if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
638 // Give up if the pointer may be captured.
639 if (PointerMayBeCaptured(UseInst, true, true))
640 return false;
641 // Don't collect the users of this.
642 WorkList.push_back(User);
643 continue;
644 }
645
646 // Do not promote vector/aggregate type instructions. It is hard to track
647 // their users.
648 if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
649 return false;
650
651 if (!User->getType()->isPointerTy())
652 continue;
653
654 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
655 // Be conservative if an address could be computed outside the bounds of
656 // the alloca.
657 if (!GEP->isInBounds())
658 return false;
659 }
660
661 // Only promote a select if we know that the other select operand is from
662 // another pointer that will also be promoted.
663 if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
664 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
665 return false;
666 }
667
668 // Repeat for phis.
669 if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
670 // TODO: Handle more complex cases. We should be able to replace loops
671 // over arrays.
672 switch (Phi->getNumIncomingValues()) {
673 case 1:
674 break;
675 case 2:
676 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
677 return false;
678 break;
679 default:
680 return false;
681 }
682 }
683
684 WorkList.push_back(User);
685 if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
686 return false;
687 }
688
689 return true;
690}
691
692bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
693
694 FunctionType *FTy = F.getFunctionType();
695 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
696
697 // If the function has any arguments in the local address space, then it's
698 // possible these arguments require the entire local memory space, so
699 // we cannot use local memory in the pass.
700 for (Type *ParamTy : FTy->params()) {
701 PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
702 if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
703 LocalMemLimit = 0;
704 LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
705 "local memory disabled.\n");
706 return false;
707 }
708 }
709
710 LocalMemLimit = ST.getLocalMemorySize();
711 if (LocalMemLimit == 0)
712 return false;
713
714 SmallVector<const Constant *, 16> Stack;
715 SmallPtrSet<const Constant *, 8> VisitedConstants;
716 SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
717
718 auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
719 for (const User *U : Val->users()) {
720 if (const Instruction *Use = dyn_cast<Instruction>(U)) {
721 if (Use->getParent()->getParent() == &F)
722 return true;
723 } else {
724 const Constant *C = cast<Constant>(U);
725 if (VisitedConstants.insert(C).second)
726 Stack.push_back(C);
727 }
728 }
729
730 return false;
731 };
732
733 for (GlobalVariable &GV : Mod->globals()) {
734 if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
735 continue;
736
737 if (visitUsers(&GV, &GV)) {
738 UsedLDS.insert(&GV);
739 Stack.clear();
740 continue;
741 }
742
743 // For any ConstantExpr uses, we need to recursively search the users until
744 // we see a function.
745 while (!Stack.empty()) {
746 const Constant *C = Stack.pop_back_val();
747 if (visitUsers(&GV, C)) {
748 UsedLDS.insert(&GV);
749 Stack.clear();
750 break;
751 }
752 }
753 }
754
755 const DataLayout &DL = Mod->getDataLayout();
756 SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
757 AllocatedSizes.reserve(UsedLDS.size());
758
759 for (const GlobalVariable *GV : UsedLDS) {
760 Align Alignment =
761 DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
762 uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
763
764 // HIP uses an extern unsized array in local address space for dynamically
765 // allocated shared memory. In that case, we have to disable the promotion.
766 if (GV->hasExternalLinkage() && AllocSize == 0) {
767 LocalMemLimit = 0;
768 LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
769 "local memory. Promoting to local memory "
770 "disabled.\n");
771 return false;
772 }
773
774 AllocatedSizes.emplace_back(AllocSize, Alignment);
775 }
776
777 // Sort to try to estimate the worst case alignment padding
778 //
779 // FIXME: We should really do something to fix the addresses to a more optimal
780 // value instead
781 llvm::sort(AllocatedSizes, llvm::less_second());
782
783 // Check how much local memory is being used by global objects
784 CurrentLocalMemUsage = 0;
785
786 // FIXME: Try to account for padding here. The real padding and address is
787 // currently determined from the inverse order of uses in the function when
788 // legalizing, which could also potentially change. We try to estimate the
789 // worst case here, but we probably should fix the addresses earlier.
790 for (auto Alloc : AllocatedSizes) {
791 CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
792 CurrentLocalMemUsage += Alloc.first;
793 }
794
795 unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
796 F);
797
798 // Restrict local memory usage so that we don't drastically reduce occupancy,
799 // unless it is already significantly reduced.
800
801 // TODO: Have some sort of hint or other heuristics to guess occupancy based
802 // on other factors..
803 unsigned OccupancyHint = ST.getWavesPerEU(F).second;
804 if (OccupancyHint == 0)
805 OccupancyHint = 7;
806
807 // Clamp to max value.
808 OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
809
810 // Check the hint but ignore it if it's obviously wrong from the existing LDS
811 // usage.
812 MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
813
814
815 // Round up to the next tier of usage.
816 unsigned MaxSizeWithWaveCount
817 = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
818
819 // Program is possibly broken by using more local mem than available.
820 if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
821 return false;
822
823 LocalMemLimit = MaxSizeWithWaveCount;
824
825 LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
826 << " bytes of LDS\n"
827 << " Rounding size to " << MaxSizeWithWaveCount
828 << " with a maximum occupancy of " << MaxOccupancy << '\n'
829 << " and " << (LocalMemLimit - CurrentLocalMemUsage)
830 << " available for promotion\n");
831
832 return true;
833}
834
835// FIXME: Should try to pick the most likely to be profitable allocas first.
836bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
837 // Array allocations are probably not worth handling, since an allocation of
838 // the array type is the canonical form.
839 if (!I.isStaticAlloca() || I.isArrayAllocation())
840 return false;
841
842 const DataLayout &DL = Mod->getDataLayout();
843 IRBuilder<> Builder(&I);
844
845 // First try to replace the alloca with a vector
846 Type *AllocaTy = I.getAllocatedType();
847
848 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
849
850 if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
851 return true; // Promoted to vector.
852
853 if (DisablePromoteAllocaToLDS)
854 return false;
855
856 const Function &ContainingFunction = *I.getParent()->getParent();
857 CallingConv::ID CC = ContainingFunction.getCallingConv();
858
859 // Don't promote the alloca to LDS for shader calling conventions as the work
860 // item ID intrinsics are not supported for these calling conventions.
861 // Furthermore not all LDS is available for some of the stages.
862 switch (CC) {
863 case CallingConv::AMDGPU_KERNEL:
864 case CallingConv::SPIR_KERNEL:
865 break;
866 default:
867 LLVM_DEBUG(
868 dbgs()
869 << " promote alloca to LDS not supported with calling convention.\n");
870 return false;
871 }
872
873 // Not likely to have sufficient local memory for promotion.
874 if (!SufficientLDS)
875 return false;
876
877 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
878 unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
879
880 Align Alignment =
881 DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
882
883 // FIXME: This computed padding is likely wrong since it depends on inverse
884 // usage order.
885 //
886 // FIXME: It is also possible that if we're allowed to use all of the memory
887 // could end up using more than the maximum due to alignment padding.
888
889 uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
890 uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
891 NewSize += AllocSize;
892
893 if (NewSize > LocalMemLimit) {
894 LLVM_DEBUG(dbgs() << " " << AllocSize
895 << " bytes of local memory not available to promote\n");
896 return false;
897 }
898
899 CurrentLocalMemUsage = NewSize;
900
901 std::vector<Value*> WorkList;
902
903 if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
904 LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
905 return false;
906 }
907
908 LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
909
910 Function *F = I.getParent()->getParent();
911
912 Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
913 GlobalVariable *GV = new GlobalVariable(
914 *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
915 Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
916 GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
917 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
918 GV->setAlignment(I.getAlign());
919
920 Value *TCntY, *TCntZ;
921
922 std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
923 Value *TIdX = getWorkitemID(Builder, 0);
924 Value *TIdY = getWorkitemID(Builder, 1);
925 Value *TIdZ = getWorkitemID(Builder, 2);
926
927 Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
928 Tmp0 = Builder.CreateMul(Tmp0, TIdX);
929 Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
930 Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
931 TID = Builder.CreateAdd(TID, TIdZ);
932
933 Value *Indices[] = {
934 Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
935 TID
936 };
937
938 Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
939 I.mutateType(Offset->getType());
940 I.replaceAllUsesWith(Offset);
941 I.eraseFromParent();
942
943 SmallVector<IntrinsicInst *> DeferredIntrs;
944
945 for (Value *V : WorkList) {
946 CallInst *Call = dyn_cast<CallInst>(V);
947 if (!Call) {
948 if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
949 Value *Src0 = CI->getOperand(0);
950 PointerType *NewTy = PointerType::getWithSamePointeeType(
951 cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);
952
953 if (isa<ConstantPointerNull>(CI->getOperand(0)))
954 CI->setOperand(0, ConstantPointerNull::get(NewTy));
955
956 if (isa<ConstantPointerNull>(CI->getOperand(1)))
957 CI->setOperand(1, ConstantPointerNull::get(NewTy));
958
959 continue;
960 }
961
962 // The operand's value should be corrected on its own and we don't want to
963 // touch the users.
964 if (isa<AddrSpaceCastInst>(V))
965 continue;
966
967 PointerType *NewTy = PointerType::getWithSamePointeeType(
968 cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);
969
970 // FIXME: It doesn't really make sense to try to do this for all
971 // instructions.
972 V->mutateType(NewTy);
973
974 // Adjust the types of any constant operands.
975 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
976 if (isa<ConstantPointerNull>(SI->getOperand(1)))
977 SI->setOperand(1, ConstantPointerNull::get(NewTy));
978
979 if (isa<ConstantPointerNull>(SI->getOperand(2)))
980 SI->setOperand(2, ConstantPointerNull::get(NewTy));
981 } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
982 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
983 if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
984 Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
985 }
986 }
987
988 continue;
989 }
990
991 IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
992 Builder.SetInsertPoint(Intr);
993 switch (Intr->getIntrinsicID()) {
994 case Intrinsic::lifetime_start:
995 case Intrinsic::lifetime_end:
996 // These intrinsics are for address space 0 only
997 Intr->eraseFromParent();
998 continue;
999 case Intrinsic::memcpy:
1000 case Intrinsic::memmove:
1001 // These have 2 pointer operands. In case if second pointer also needs
1002 // to be replaced we defer processing of these intrinsics until all
1003 // other values are processed.
1004 DeferredIntrs.push_back(Intr);
1005 continue;
1006 case Intrinsic::memset: {
1007 MemSetInst *MemSet = cast<MemSetInst>(Intr);
1008 Builder.CreateMemSet(
1009 MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
1010 MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
1011 Intr->eraseFromParent();
1012 continue;
1013 }
1014 case Intrinsic::invariant_start:
1015 case Intrinsic::invariant_end:
1016 case Intrinsic::launder_invariant_group:
1017 case Intrinsic::strip_invariant_group:
1018 Intr->eraseFromParent();
1019 // FIXME: I think the invariant marker should still theoretically apply,
1020 // but the intrinsics need to be changed to accept pointers with any
1021 // address space.
1022 continue;
1023 case Intrinsic::objectsize: {
1024 Value *Src = Intr->getOperand(0);
1025 Function *ObjectSize = Intrinsic::getDeclaration(
1026 Mod, Intrinsic::objectsize,
1027 {Intr->getType(),
1028 PointerType::getWithSamePointeeType(
1029 cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});
1030
1031 CallInst *NewCall = Builder.CreateCall(
1032 ObjectSize,
1033 {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1034 Intr->replaceAllUsesWith(NewCall);
1035 Intr->eraseFromParent();
1036 continue;
1037 }
1038 default:
1039 Intr->print(errs());
1040 llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1041 }
1042 }
1043
1044 for (IntrinsicInst *Intr : DeferredIntrs) {
1045 Builder.SetInsertPoint(Intr);
1046 Intrinsic::ID ID = Intr->getIntrinsicID();
1047 assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1048
1049 MemTransferInst *MI = cast<MemTransferInst>(Intr);
1050 auto *B =
1051 Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
1052 MI->getRawSource(), MI->getSourceAlign(),
1053 MI->getLength(), MI->isVolatile());
1054
1055 for (unsigned I = 0; I != 2; ++I) {
1056 if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1057 B->addDereferenceableParamAttr(I, Bytes);
1058 }
1059 }
1060
1061 Intr->eraseFromParent();
1062 }
1063
1064 return true;
1065}
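The Tmp0/Tmp1/TID sequence above linearizes the three workitem ids so that each thread indexes its own slot of the [WorkGroupSize x Ty] LDS array: TID = TIdX * TCntY * TCntZ + TIdY * TCntZ + TIdZ. A scalar sketch of the same computation, with illustrative names:

    // Illustrative: the linear index handleAlloca builds with IRBuilder above.
    unsigned linearWorkitemId(unsigned TIdX, unsigned TIdY, unsigned TIdZ,
                              unsigned TCntY, unsigned TCntZ) {
      return TCntY * TCntZ * TIdX + TIdY * TCntZ + TIdZ;
    }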
1066
1067bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
1068 // Array allocations are probably not worth handling, since an allocation of
1069 // the array type is the canonical form.
1070 if (!I.isStaticAlloca() || I.isArrayAllocation())
1071 return false;
1072
1073 LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');
1074
1075 Module *Mod = I.getParent()->getParent()->getParent();
1076 return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
1077}
1078
1079bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
1080 if (DisablePromoteAllocaToVector)
1081 return false;
1082
1083 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
1084 if (!ST.isPromoteAllocaEnabled())
1085 return false;
1086
1087 unsigned MaxVGPRs;
1088 if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
1089 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
1090 MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
1091 // A non-entry function has only 32 caller preserved registers.
1092 // Do not promote alloca which will force spilling.
1093 if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
1094 MaxVGPRs = std::min(MaxVGPRs, 32u);
1095 } else {
1096 MaxVGPRs = 128;
1097 }
1098
1099 bool Changed = false;
1100 BasicBlock &EntryBB = *F.begin();
1101
1102 SmallVector<AllocaInst *, 16> Allocas;
1103 for (Instruction &I : EntryBB) {
1104 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
1105 Allocas.push_back(AI);
1106 }
1107
1108 for (AllocaInst *AI : Allocas) {
1109 if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
1110 Changed = true;
1111 }
1112
1113 return Changed;
1114}
1115
1116bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
1117 if (skipFunction(F))
1118 return false;
1119 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
1120 return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
1121 }
1122 return false;
1123}
1124
1125PreservedAnalyses
1126AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
1127 bool Changed = promoteAllocasToVector(F, TM);
1128 if (Changed) {
1129 PreservedAnalyses PA;
1130 PA.preserveSet<CFGAnalyses>();
1131 return PA;
1132 }
1133 return PreservedAnalyses::all();
1134}
1135
1136FunctionPass *llvm::createAMDGPUPromoteAlloca() {
1137 return new AMDGPUPromoteAlloca();
1138}
1139
1140FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
1141 return new AMDGPUPromoteAllocaToVector();
1142}
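Returning to the defect reported at line 474: Inst comes from dyn_cast<Instruction>(U->getUser()) and is never tested against null before Inst->uses() is evaluated. One possible hardening, offered as a sketch rather than as the upstream fix, is to reject non-instruction users as soon as they are popped from the worklist:

    // Sketch: bail out of tryPromoteAllocaToVector for non-instruction users,
    // so every branch below may assume a non-null Inst.
    Instruction *Inst = dyn_cast<Instruction>(U->getUser());
    if (!Inst)
      return false; // e.g. a ConstantExpr user: conservatively refuse to promote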

/build/source/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/CFG.h"
27#include "llvm/IR/Constant.h"
28#include "llvm/IR/DerivedTypes.h"
29#include "llvm/IR/InstrTypes.h"
30#include "llvm/IR/Instruction.h"
31#include "llvm/IR/OperandTraits.h"
32#include "llvm/IR/Use.h"
33#include "llvm/IR/User.h"
34#include "llvm/Support/AtomicOrdering.h"
35#include "llvm/Support/ErrorHandling.h"
36#include <cassert>
37#include <cstddef>
38#include <cstdint>
39#include <iterator>
40#include <optional>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class ConstantInt;
48class DataLayout;
49class StringRef;
50class Type;
51class Value;
52
53//===----------------------------------------------------------------------===//
54// AllocaInst Class
55//===----------------------------------------------------------------------===//
56
57/// an instruction to allocate memory on the stack
58class AllocaInst : public UnaryInstruction {
59 Type *AllocatedType;
60
61 using AlignmentField = AlignmentBitfieldElementT<0>;
62 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
63 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
64 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
65 SwiftErrorField>(),
66 "Bitfields must be contiguous");
67
68protected:
69 // Note: Instruction needs to be a friend here to call cloneImpl.
70 friend class Instruction;
71
72 AllocaInst *cloneImpl() const;
73
74public:
75 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
76 const Twine &Name, Instruction *InsertBefore);
77 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
78 const Twine &Name, BasicBlock *InsertAtEnd);
79
80 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
81 Instruction *InsertBefore);
82 AllocaInst(Type *Ty, unsigned AddrSpace,
83 const Twine &Name, BasicBlock *InsertAtEnd);
84
85 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
86 const Twine &Name = "", Instruction *InsertBefore = nullptr);
87 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
88 const Twine &Name, BasicBlock *InsertAtEnd);
89
90 /// Return true if there is an allocation size parameter to the allocation
91 /// instruction that is not 1.
92 bool isArrayAllocation() const;
93
94 /// Get the number of elements allocated. For a simple allocation of a single
95 /// element, this will return a constant 1 value.
96 const Value *getArraySize() const { return getOperand(0); }
97 Value *getArraySize() { return getOperand(0); }
98
99 /// Overload to return most specific pointer type.
100 PointerType *getType() const {
101 return cast<PointerType>(Instruction::getType());
102 }
103
104 /// Return the address space for the allocation.
105 unsigned getAddressSpace() const {
106 return getType()->getAddressSpace();
107 }
108
109 /// Get allocation size in bits. Returns std::nullopt if size can't be
110 /// determined, e.g. in case of a VLA.
111 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
112
113 /// Return the type that is being allocated by the instruction.
114 Type *getAllocatedType() const { return AllocatedType; }
115 /// For use only in special circumstances that need to generically
116 /// transform a whole instruction (e.g., IR linking and vectorization).
117 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
118
119 /// Return the alignment of the memory that is being allocated by the
120 /// instruction.
121 Align getAlign() const {
122 return Align(1ULL << getSubclassData<AlignmentField>());
123 }
124
125 void setAlignment(Align Align) {
126 setSubclassData<AlignmentField>(Log2(Align));
127 }
128
129 /// Return true if this alloca is in the entry block of the function and is a
130 /// constant size. If so, the code generator will fold it into the
131 /// prolog/epilog code, so it is basically free.
132 bool isStaticAlloca() const;
133
134 /// Return true if this alloca is used as an inalloca argument to a call. Such
135 /// allocas are never considered static even if they are in the entry block.
136 bool isUsedWithInAlloca() const {
137 return getSubclassData<UsedWithInAllocaField>();
138 }
139
140 /// Specify whether this alloca is used to represent the arguments to a call.
141 void setUsedWithInAlloca(bool V) {
142 setSubclassData<UsedWithInAllocaField>(V);
143 }
144
145 /// Return true if this alloca is used as a swifterror argument to a call.
146 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
147 /// Specify whether this alloca is used to represent a swifterror.
148 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
149
150 // Methods for support type inquiry through isa, cast, and dyn_cast:
151 static bool classof(const Instruction *I) {
152 return (I->getOpcode() == Instruction::Alloca);
153 }
154 static bool classof(const Value *V) {
155 return isa<Instruction>(V) && classof(cast<Instruction>(V));
156 }
157
158private:
159 // Shadow Instruction::setInstructionSubclassData with a private forwarding
160 // method so that subclasses cannot accidentally use it.
161 template <typename Bitfield>
162 void setSubclassData(typename Bitfield::Type Value) {
163 Instruction::setSubclassData<Bitfield>(Value);
164 }
165};
166
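// Usage sketch (illustrative, not part of this header): creating a 4-byte
// aligned i32 alloca. `Ctx` (an LLVMContext) and `InsertPt` (an insertion
// point instruction) are assumed to be provided by the caller.
//
//   AllocaInst *Slot =
//       new AllocaInst(Type::getInt32Ty(Ctx), /*AddrSpace=*/0,
//                      /*ArraySize=*/nullptr, Align(4), "slot", InsertPt);
//   // A null ArraySize means a single element, so this is not an array
//   // allocation; placed in the entry block, it would also be static.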
167//===----------------------------------------------------------------------===//
168// LoadInst Class
169//===----------------------------------------------------------------------===//
170
171/// An instruction for reading from memory. This uses the SubclassData field in
172/// Value to store whether or not the load is volatile.
173class LoadInst : public UnaryInstruction {
174 using VolatileField = BoolBitfieldElementT<0>;
175 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
176 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
177 static_assert(
178 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
179 "Bitfields must be contiguous");
180
181 void AssertOK();
182
183protected:
184 // Note: Instruction needs to be a friend here to call cloneImpl.
185 friend class Instruction;
186
187 LoadInst *cloneImpl() const;
188
189public:
190 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
191 Instruction *InsertBefore);
192 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
194 Instruction *InsertBefore);
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196 BasicBlock *InsertAtEnd);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 Align Align, Instruction *InsertBefore = nullptr);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200 Align Align, BasicBlock *InsertAtEnd);
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 Align Align, AtomicOrdering Order,
203 SyncScope::ID SSID = SyncScope::System,
204 Instruction *InsertBefore = nullptr);
205 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
206 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
207 BasicBlock *InsertAtEnd);
208
209 /// Return true if this is a load from a volatile memory location.
210 bool isVolatile() const { return getSubclassData<VolatileField>(); }
211
212 /// Specify whether this is a volatile load or not.
213 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
214
215 /// Return the alignment of the access that is being performed.
216 Align getAlign() const {
217 return Align(1ULL << (getSubclassData<AlignmentField>()));
218 }
219
220 void setAlignment(Align Align) {
221 setSubclassData<AlignmentField>(Log2(Align));
222 }
223
224 /// Returns the ordering constraint of this load instruction.
225 AtomicOrdering getOrdering() const {
226 return getSubclassData<OrderingField>();
227 }
228 /// Sets the ordering constraint of this load instruction. May not be Release
229 /// or AcquireRelease.
230 void setOrdering(AtomicOrdering Ordering) {
231 setSubclassData<OrderingField>(Ordering);
232 }
233
234 /// Returns the synchronization scope ID of this load instruction.
235 SyncScope::ID getSyncScopeID() const {
236 return SSID;
237 }
238
239 /// Sets the synchronization scope ID of this load instruction.
240 void setSyncScopeID(SyncScope::ID SSID) {
241 this->SSID = SSID;
242 }
243
244 /// Sets the ordering constraint and the synchronization scope ID of this load
245 /// instruction.
246 void setAtomic(AtomicOrdering Ordering,
247 SyncScope::ID SSID = SyncScope::System) {
248 setOrdering(Ordering);
249 setSyncScopeID(SSID);
250 }
251
252 bool isSimple() const { return !isAtomic() && !isVolatile(); }
253
254 bool isUnordered() const {
255 return (getOrdering() == AtomicOrdering::NotAtomic ||
256 getOrdering() == AtomicOrdering::Unordered) &&
257 !isVolatile();
258 }
259
260 Value *getPointerOperand() { return getOperand(0); }
261 const Value *getPointerOperand() const { return getOperand(0); }
262 static unsigned getPointerOperandIndex() { return 0U; }
263 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
264
265 /// Returns the address space of the pointer operand.
266 unsigned getPointerAddressSpace() const {
267 return getPointerOperandType()->getPointerAddressSpace();
268 }
269
270 // Methods for support type inquiry through isa, cast, and dyn_cast:
271 static bool classof(const Instruction *I) {
272 return I->getOpcode() == Instruction::Load;
273 }
274 static bool classof(const Value *V) {
275 return isa<Instruction>(V) && classof(cast<Instruction>(V));
276 }
277
278private:
279 // Shadow Instruction::setInstructionSubclassData with a private forwarding
280 // method so that subclasses cannot accidentally use it.
281 template <typename Bitfield>
282 void setSubclassData(typename Bitfield::Type Value) {
283 Instruction::setSubclassData<Bitfield>(Value);
284 }
285
286 /// The synchronization scope ID of this load instruction. Not quite enough
287 /// room in SubClassData for everything, so synchronization scope ID gets its
288 /// own field.
289 SyncScope::ID SSID;
290};
291
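// Usage sketch (illustrative, not part of this header): an acquire load.
// `Ctx`, `Ptr` (a pointer-typed Value), and `InsertPt` are assumed inputs.
//
//   LoadInst *LI =
//       new LoadInst(Type::getInt32Ty(Ctx), Ptr, "val", /*isVolatile=*/false,
//                    Align(4), AtomicOrdering::Acquire, SyncScope::System,
//                    InsertPt);
//   // The load is atomic, so both isSimple() and isUnordered() are false.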
292//===----------------------------------------------------------------------===//
293// StoreInst Class
294//===----------------------------------------------------------------------===//
295
296/// An instruction for storing to memory.
297class StoreInst : public Instruction {
298 using VolatileField = BoolBitfieldElementT<0>;
299 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
300 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
301 static_assert(
302 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
303 "Bitfields must be contiguous");
304
305 void AssertOK();
306
307protected:
308 // Note: Instruction needs to be a friend here to call cloneImpl.
309 friend class Instruction;
310
311 StoreInst *cloneImpl() const;
312
313public:
314 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
315 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
316 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
317 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
318 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
319 Instruction *InsertBefore = nullptr);
320 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
321 BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
323 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
324 Instruction *InsertBefore = nullptr);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
326 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
327
328 // allocate space for exactly two operands
329 void *operator new(size_t S) { return User::operator new(S, 2); }
330 void operator delete(void *Ptr) { User::operator delete(Ptr); }
331
332 /// Return true if this is a store to a volatile memory location.
333 bool isVolatile() const { return getSubclassData<VolatileField>(); }
334
335 /// Specify whether this is a volatile store or not.
336 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
337
338 /// Transparently provide more efficient getOperand methods.
339 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
340
341 Align getAlign() const {
342 return Align(1ULL << (getSubclassData<AlignmentField>()));
343 }
344
345 void setAlignment(Align Align) {
346 setSubclassData<AlignmentField>(Log2(Align));
347 }
348
349 /// Returns the ordering constraint of this store instruction.
350 AtomicOrdering getOrdering() const {
351 return getSubclassData<OrderingField>();
352 }
353
354 /// Sets the ordering constraint of this store instruction. May not be
355 /// Acquire or AcquireRelease.
356 void setOrdering(AtomicOrdering Ordering) {
357 setSubclassData<OrderingField>(Ordering);
358 }
359
360 /// Returns the synchronization scope ID of this store instruction.
361 SyncScope::ID getSyncScopeID() const {
362 return SSID;
363 }
364
365 /// Sets the synchronization scope ID of this store instruction.
366 void setSyncScopeID(SyncScope::ID SSID) {
367 this->SSID = SSID;
368 }
369
370 /// Sets the ordering constraint and the synchronization scope ID of this
371 /// store instruction.
372 void setAtomic(AtomicOrdering Ordering,
373 SyncScope::ID SSID = SyncScope::System) {
374 setOrdering(Ordering);
375 setSyncScopeID(SSID);
376 }
377
378 bool isSimple() const { return !isAtomic() && !isVolatile(); }
379
380 bool isUnordered() const {
381 return (getOrdering() == AtomicOrdering::NotAtomic ||
382 getOrdering() == AtomicOrdering::Unordered) &&
383 !isVolatile();
384 }
385
386 Value *getValueOperand() { return getOperand(0); }
387 const Value *getValueOperand() const { return getOperand(0); }
388
389 Value *getPointerOperand() { return getOperand(1); }
390 const Value *getPointerOperand() const { return getOperand(1); }
391 static unsigned getPointerOperandIndex() { return 1U; }
392 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
393
394 /// Returns the address space of the pointer operand.
395 unsigned getPointerAddressSpace() const {
396 return getPointerOperandType()->getPointerAddressSpace();
397 }
398
399 // Methods for support type inquiry through isa, cast, and dyn_cast:
400 static bool classof(const Instruction *I) {
401 return I->getOpcode() == Instruction::Store;
402 }
403 static bool classof(const Value *V) {
404 return isa<Instruction>(V) && classof(cast<Instruction>(V));
405 }
406
407private:
408 // Shadow Instruction::setInstructionSubclassData with a private forwarding
409 // method so that subclasses cannot accidentally use it.
410 template <typename Bitfield>
411 void setSubclassData(typename Bitfield::Type Value) {
412 Instruction::setSubclassData<Bitfield>(Value);
413 }
414
415 /// The synchronization scope ID of this store instruction. Not quite enough
416 /// room in SubClassData for everything, so synchronization scope ID gets its
417 /// own field.
418 SyncScope::ID SSID;
419};
420
421template <>
422struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
423};
424
425DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
426
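// Usage sketch (illustrative, not part of this header): a plain aligned
// store. `Val` and `Ptr` are assumed Values with matching pointee type.
//
//   StoreInst *SI =
//       new StoreInst(Val, Ptr, /*isVolatile=*/false, Align(8), InsertPt);
//   // Operand 0 is the stored value and operand 1 the pointer, as exposed
//   // by getValueOperand()/getPointerOperand() above.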
427//===----------------------------------------------------------------------===//
428// FenceInst Class
429//===----------------------------------------------------------------------===//
430
431/// An instruction for ordering other memory operations.
432class FenceInst : public Instruction {
433 using OrderingField = AtomicOrderingBitfieldElementT<0>;
434
435 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
436
437protected:
438 // Note: Instruction needs to be a friend here to call cloneImpl.
439 friend class Instruction;
440
441 FenceInst *cloneImpl() const;
442
443public:
444 // Ordering may only be Acquire, Release, AcquireRelease, or
445 // SequentiallyConsistent.
446 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
447 SyncScope::ID SSID = SyncScope::System,
448 Instruction *InsertBefore = nullptr);
449 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
450 BasicBlock *InsertAtEnd);
451
452 // allocate space for exactly zero operands
453 void *operator new(size_t S) { return User::operator new(S, 0); }
454 void operator delete(void *Ptr) { User::operator delete(Ptr); }
455
456 /// Returns the ordering constraint of this fence instruction.
457 AtomicOrdering getOrdering() const {
458 return getSubclassData<OrderingField>();
459 }
460
461 /// Sets the ordering constraint of this fence instruction. May only be
462 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
463 void setOrdering(AtomicOrdering Ordering) {
464 setSubclassData<OrderingField>(Ordering);
465 }
466
467 /// Returns the synchronization scope ID of this fence instruction.
468 SyncScope::ID getSyncScopeID() const {
469 return SSID;
470 }
471
472 /// Sets the synchronization scope ID of this fence instruction.
473 void setSyncScopeID(SyncScope::ID SSID) {
474 this->SSID = SSID;
475 }
476
477 // Methods for support type inquiry through isa, cast, and dyn_cast:
478 static bool classof(const Instruction *I) {
479 return I->getOpcode() == Instruction::Fence;
480 }
481 static bool classof(const Value *V) {
482 return isa<Instruction>(V) && classof(cast<Instruction>(V));
483 }
484
485private:
486 // Shadow Instruction::setInstructionSubclassData with a private forwarding
487 // method so that subclasses cannot accidentally use it.
488 template <typename Bitfield>
489 void setSubclassData(typename Bitfield::Type Value) {
490 Instruction::setSubclassData<Bitfield>(Value);
491 }
492
493 /// The synchronization scope ID of this fence instruction. Not quite enough
494 /// room in SubClassData for everything, so synchronization scope ID gets its
495 /// own field.
496 SyncScope::ID SSID;
497};
498
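// Usage sketch (illustrative, not part of this header): a sequentially
// consistent fence at the default (system) scope. `Ctx` and `InsertPt` are
// assumed inputs.
//
//   FenceInst *F =
//       new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent,
//                     SyncScope::System, InsertPt);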
499//===----------------------------------------------------------------------===//
500// AtomicCmpXchgInst Class
501//===----------------------------------------------------------------------===//
502
503/// An instruction that atomically checks whether a
504/// specified value is in a memory location, and, if it is, stores a new value
505/// there. The value returned by this instruction is a pair containing the
506/// original value as first element, and an i1 indicating success (true) or
507/// failure (false) as second element.
508///
509class AtomicCmpXchgInst : public Instruction {
510 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
511 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
512 SyncScope::ID SSID);
513
514 template <unsigned Offset>
515 using AtomicOrderingBitfieldElement =
516 typename Bitfield::Element<AtomicOrdering, Offset, 3,
517 AtomicOrdering::LAST>;
518
519protected:
520 // Note: Instruction needs to be a friend here to call cloneImpl.
521 friend class Instruction;
522
523 AtomicCmpXchgInst *cloneImpl() const;
524
525public:
526 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
527 AtomicOrdering SuccessOrdering,
528 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
529 Instruction *InsertBefore = nullptr);
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
533 BasicBlock *InsertAtEnd);
534
535 // allocate space for exactly three operands
536 void *operator new(size_t S) { return User::operator new(S, 3); }
537 void operator delete(void *Ptr) { User::operator delete(Ptr); }
538
539 using VolatileField = BoolBitfieldElementT<0>;
540 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
541 using SuccessOrderingField =
542 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
543 using FailureOrderingField =
544 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
545 using AlignmentField =
546 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
547 static_assert(
548 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
549 FailureOrderingField, AlignmentField>(),
550 "Bitfields must be contiguous");
551
552 /// Return the alignment of the memory that is being allocated by the
553 /// instruction.
554 Align getAlign() const {
555 return Align(1ULL << getSubclassData<AlignmentField>());
556 }
557
558 void setAlignment(Align Align) {
559 setSubclassData<AlignmentField>(Log2(Align));
560 }
561
562 /// Return true if this is a cmpxchg from a volatile memory
563 /// location.
564 ///
565 bool isVolatile() const { return getSubclassData<VolatileField>(); }
566
567 /// Specify whether this is a volatile cmpxchg.
568 ///
569 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
570
571 /// Return true if this cmpxchg may spuriously fail.
572 bool isWeak() const { return getSubclassData<WeakField>(); }
573
574 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
575
576 /// Transparently provide more efficient getOperand methods.
577 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
578
579 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
580 return Ordering != AtomicOrdering::NotAtomic &&
581 Ordering != AtomicOrdering::Unordered;
582 }
583
584 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
585 return Ordering != AtomicOrdering::NotAtomic &&
586 Ordering != AtomicOrdering::Unordered &&
587 Ordering != AtomicOrdering::AcquireRelease &&
588 Ordering != AtomicOrdering::Release;
589 }
590
591 /// Returns the success ordering constraint of this cmpxchg instruction.
592 AtomicOrdering getSuccessOrdering() const {
593 return getSubclassData<SuccessOrderingField>();
594 }
595
596 /// Sets the success ordering constraint of this cmpxchg instruction.
597 void setSuccessOrdering(AtomicOrdering Ordering) {
598 assert(isValidSuccessOrdering(Ordering) &&
599 "invalid CmpXchg success ordering");
600 setSubclassData<SuccessOrderingField>(Ordering);
601 }
602
603 /// Returns the failure ordering constraint of this cmpxchg instruction.
604 AtomicOrdering getFailureOrdering() const {
605 return getSubclassData<FailureOrderingField>();
606 }
607
608 /// Sets the failure ordering constraint of this cmpxchg instruction.
609 void setFailureOrdering(AtomicOrdering Ordering) {
610 assert(isValidFailureOrdering(Ordering) &&
611 "invalid CmpXchg failure ordering");
612 setSubclassData<FailureOrderingField>(Ordering);
613 }
614
615 /// Returns a single ordering which is at least as strong as both the
616 /// success and failure orderings for this cmpxchg.
617 AtomicOrdering getMergedOrdering() const {
618 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
619 return AtomicOrdering::SequentiallyConsistent;
620 if (getFailureOrdering() == AtomicOrdering::Acquire) {
621 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
622 return AtomicOrdering::Acquire;
623 if (getSuccessOrdering() == AtomicOrdering::Release)
624 return AtomicOrdering::AcquireRelease;
625 }
626 return getSuccessOrdering();
627 }
628
629 /// Returns the synchronization scope ID of this cmpxchg instruction.
630 SyncScope::ID getSyncScopeID() const {
631 return SSID;
632 }
633
634 /// Sets the synchronization scope ID of this cmpxchg instruction.
635 void setSyncScopeID(SyncScope::ID SSID) {
636 this->SSID = SSID;
637 }
638
639 Value *getPointerOperand() { return getOperand(0); }
640 const Value *getPointerOperand() const { return getOperand(0); }
641 static unsigned getPointerOperandIndex() { return 0U; }
642
643 Value *getCompareOperand() { return getOperand(1); }
644 const Value *getCompareOperand() const { return getOperand(1); }
645
646 Value *getNewValOperand() { return getOperand(2); }
647 const Value *getNewValOperand() const { return getOperand(2); }
648
649 /// Returns the address space of the pointer operand.
650 unsigned getPointerAddressSpace() const {
651 return getPointerOperand()->getType()->getPointerAddressSpace();
652 }
653
654 /// Returns the strongest permitted ordering on failure, given the
655 /// desired ordering on success.
656 ///
657 /// If the comparison in a cmpxchg operation fails, there is no atomic store
658 /// so release semantics cannot be provided. So this function drops explicit
659 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
660 /// operation would remain SequentiallyConsistent.
661 static AtomicOrdering
662 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
663 switch (SuccessOrdering) {
664 default:
665 llvm_unreachable("invalid cmpxchg success ordering");
666 case AtomicOrdering::Release:
667 case AtomicOrdering::Monotonic:
668 return AtomicOrdering::Monotonic;
669 case AtomicOrdering::AcquireRelease:
670 case AtomicOrdering::Acquire:
671 return AtomicOrdering::Acquire;
672 case AtomicOrdering::SequentiallyConsistent:
673 return AtomicOrdering::SequentiallyConsistent;
674 }
675 }
676
677 // Methods for support type inquiry through isa, cast, and dyn_cast:
678 static bool classof(const Instruction *I) {
679 return I->getOpcode() == Instruction::AtomicCmpXchg;
680 }
681 static bool classof(const Value *V) {
682 return isa<Instruction>(V) && classof(cast<Instruction>(V));
683 }
684
685private:
686 // Shadow Instruction::setInstructionSubclassData with a private forwarding
687 // method so that subclasses cannot accidentally use it.
688 template <typename Bitfield>
689 void setSubclassData(typename Bitfield::Type Value) {
690 Instruction::setSubclassData<Bitfield>(Value);
691 }
692
693 /// The synchronization scope ID of this cmpxchg instruction. Not quite
694 /// enough room in SubClassData for everything, so synchronization scope ID
695 /// gets its own field.
696 SyncScope::ID SSID;
697};
698
699template <>
700struct OperandTraits<AtomicCmpXchgInst> :
701 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
702};
703
704DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
705
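// Usage sketch (illustrative, not part of this header): an acq_rel cmpxchg
// whose failure ordering is derived from the success ordering via
// getStrongestFailureOrdering, so acq_rel degrades to acquire on failure.
// `Ptr`, `Expected`, and `Desired` are assumed caller-provided Values.
//
//   auto Succ = AtomicOrdering::AcquireRelease;
//   auto *CX = new AtomicCmpXchgInst(
//       Ptr, Expected, Desired, Align(4), Succ,
//       AtomicCmpXchgInst::getStrongestFailureOrdering(Succ),
//       SyncScope::System, InsertPt);
//   CX->setWeak(true); // permit spurious failure, e.g. on LL/SC targets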
706//===----------------------------------------------------------------------===//
707// AtomicRMWInst Class
708//===----------------------------------------------------------------------===//
709
710/// An instruction that atomically reads a memory location,
711/// combines it with another value, and then stores the result back. Returns
712/// the old value.
713///
714class AtomicRMWInst : public Instruction {
715protected:
716 // Note: Instruction needs to be a friend here to call cloneImpl.
717 friend class Instruction;
718
719 AtomicRMWInst *cloneImpl() const;
720
721public:
722 /// This enumeration lists the possible modifications atomicrmw can make. In
723 /// the descriptions, 'p' is the pointer to the instruction's memory location,
724 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
725 /// instruction. These instructions always return 'old'.
726 enum BinOp : unsigned {
727 /// *p = v
728 Xchg,
729 /// *p = old + v
730 Add,
731 /// *p = old - v
732 Sub,
733 /// *p = old & v
734 And,
735 /// *p = ~(old & v)
736 Nand,
737 /// *p = old | v
738 Or,
739 /// *p = old ^ v
740 Xor,
741 /// *p = old >signed v ? old : v
742 Max,
743 /// *p = old <signed v ? old : v
744 Min,
745 /// *p = old >unsigned v ? old : v
746 UMax,
747 /// *p = old <unsigned v ? old : v
748 UMin,
749
750 /// *p = old + v
751 FAdd,
752
753 /// *p = old - v
754 FSub,
755
756 /// *p = maxnum(old, v)
757 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
758 FMax,
759
760 /// *p = minnum(old, v)
761 /// \p minnum matches the behavior of \p llvm.minnum.*.
762 FMin,
763
764 FIRST_BINOP = Xchg,
765 LAST_BINOP = FMin,
766 BAD_BINOP
767 };
768
769private:
770 template <unsigned Offset>
771 using AtomicOrderingBitfieldElement =
772 typename Bitfield::Element<AtomicOrdering, Offset, 3,
773 AtomicOrdering::LAST>;
774
775 template <unsigned Offset>
776 using BinOpBitfieldElement =
777 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
778
779public:
780 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
781 AtomicOrdering Ordering, SyncScope::ID SSID,
782 Instruction *InsertBefore = nullptr);
783 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
784 AtomicOrdering Ordering, SyncScope::ID SSID,
785 BasicBlock *InsertAtEnd);
786
787 // allocate space for exactly two operands
788 void *operator new(size_t S) { return User::operator new(S, 2); }
789 void operator delete(void *Ptr) { User::operator delete(Ptr); }
790
791 using VolatileField = BoolBitfieldElementT<0>;
792 using AtomicOrderingField =
793 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
794 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
795 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
796 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
797 OperationField, AlignmentField>(),
798 "Bitfields must be contiguous");
799
800 BinOp getOperation() const { return getSubclassData<OperationField>(); }
801
802 static StringRef getOperationName(BinOp Op);
803
804 static bool isFPOperation(BinOp Op) {
805 switch (Op) {
806 case AtomicRMWInst::FAdd:
807 case AtomicRMWInst::FSub:
808 case AtomicRMWInst::FMax:
809 case AtomicRMWInst::FMin:
810 return true;
811 default:
812 return false;
813 }
814 }
815
816 void setOperation(BinOp Operation) {
817 setSubclassData<OperationField>(Operation);
818 }
819
820 /// Return the alignment of the memory that is being allocated by the
821 /// instruction.
822 Align getAlign() const {
823 return Align(1ULL << getSubclassData<AlignmentField>());
824 }
825
826 void setAlignment(Align Align) {
827 setSubclassData<AlignmentField>(Log2(Align));
828 }
829
830 /// Return true if this is a RMW on a volatile memory location.
831 ///
832 bool isVolatile() const { return getSubclassData<VolatileField>(); }
833
834 /// Specify whether this is a volatile RMW or not.
835 ///
836 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
837
838 /// Transparently provide more efficient getOperand methods.
839 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
840
841 /// Returns the ordering constraint of this rmw instruction.
842 AtomicOrdering getOrdering() const {
843 return getSubclassData<AtomicOrderingField>();
844 }
845
846 /// Sets the ordering constraint of this rmw instruction.
847 void setOrdering(AtomicOrdering Ordering) {
848 assert(Ordering != AtomicOrdering::NotAtomic &&
849 "atomicrmw instructions can only be atomic.");
850 assert(Ordering != AtomicOrdering::Unordered &&
851 "atomicrmw instructions cannot be unordered.");
852 setSubclassData<AtomicOrderingField>(Ordering);
853 }
854
855 /// Returns the synchronization scope ID of this rmw instruction.
856 SyncScope::ID getSyncScopeID() const {
857 return SSID;
858 }
859
860 /// Sets the synchronization scope ID of this rmw instruction.
861 void setSyncScopeID(SyncScope::ID SSID) {
862 this->SSID = SSID;
863 }
864
865 Value *getPointerOperand() { return getOperand(0); }
866 const Value *getPointerOperand() const { return getOperand(0); }
867 static unsigned getPointerOperandIndex() { return 0U; }
868
869 Value *getValOperand() { return getOperand(1); }
870 const Value *getValOperand() const { return getOperand(1); }
871
872 /// Returns the address space of the pointer operand.
873 unsigned getPointerAddressSpace() const {
874 return getPointerOperand()->getType()->getPointerAddressSpace();
875 }
876
877 bool isFloatingPointOperation() const {
878 return isFPOperation(getOperation());
879 }
880
881 // Methods for support type inquiry through isa, cast, and dyn_cast:
882 static bool classof(const Instruction *I) {
883 return I->getOpcode() == Instruction::AtomicRMW;
884 }
885 static bool classof(const Value *V) {
886 return isa<Instruction>(V) && classof(cast<Instruction>(V));
887 }
888
889private:
890 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
891 AtomicOrdering Ordering, SyncScope::ID SSID);
892
893 // Shadow Instruction::setInstructionSubclassData with a private forwarding
894 // method so that subclasses cannot accidentally use it.
895 template <typename Bitfield>
896 void setSubclassData(typename Bitfield::Type Value) {
897 Instruction::setSubclassData<Bitfield>(Value);
898 }
899
900 /// The synchronization scope ID of this rmw instruction. Not quite enough
901 /// room in SubClassData for everything, so synchronization scope ID gets its
902 /// own field.
903 SyncScope::ID SSID;
904};
905
906template <>
907struct OperandTraits<AtomicRMWInst>
908 : public FixedNumOperandTraits<AtomicRMWInst,2> {
909};
910
911DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
912
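// Usage sketch (illustrative, not part of this header): a monotonic
// fetch-and-add. `Ptr` and `One` (an assumed i32 constant) are inputs.
//
//   auto *RMW =
//       new AtomicRMWInst(AtomicRMWInst::Add, Ptr, One, Align(4),
//                         AtomicOrdering::Monotonic, SyncScope::System,
//                         InsertPt);
//   // getOperation() == AtomicRMWInst::Add; the instruction yields the
//   // old value of *Ptr.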
913//===----------------------------------------------------------------------===//
914// GetElementPtrInst Class
915//===----------------------------------------------------------------------===//
916
917// checkGEPType - Simple wrapper function to give a better assertion failure
918// message on bad indexes for a gep instruction.
919//
920inline Type *checkGEPType(Type *Ty) {
921 assert(Ty && "Invalid GetElementPtrInst indices for type!");
922 return Ty;
923}
924
925/// An instruction for type-safe pointer arithmetic to
926/// access elements of arrays and structs.
927///
928class GetElementPtrInst : public Instruction {
929 Type *SourceElementType;
930 Type *ResultElementType;
931
932 GetElementPtrInst(const GetElementPtrInst &GEPI);
933
934 /// Constructors - Create a getelementptr instruction with a base pointer and
935 /// a list of indices. The first ctor can optionally insert before an existing
936 /// instruction, the second appends the new instruction to the specified
937 /// BasicBlock.
938 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
939 ArrayRef<Value *> IdxList, unsigned Values,
940 const Twine &NameStr, Instruction *InsertBefore);
941 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942 ArrayRef<Value *> IdxList, unsigned Values,
943 const Twine &NameStr, BasicBlock *InsertAtEnd);
944
945 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
946
947protected:
948 // Note: Instruction needs to be a friend here to call cloneImpl.
949 friend class Instruction;
950
951 GetElementPtrInst *cloneImpl() const;
952
953public:
954 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
955 ArrayRef<Value *> IdxList,
956 const Twine &NameStr = "",
957 Instruction *InsertBefore = nullptr) {
958 unsigned Values = 1 + unsigned(IdxList.size());
959 assert(PointeeType && "Must specify element type");
960 assert(cast<PointerType>(Ptr->getType()->getScalarType())
961 ->isOpaqueOrPointeeTypeMatches(PointeeType));
962 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
963 NameStr, InsertBefore);
964 }
965
966 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967 ArrayRef<Value *> IdxList,
968 const Twine &NameStr,
969 BasicBlock *InsertAtEnd) {
970 unsigned Values = 1 + unsigned(IdxList.size());
971 assert(PointeeType && "Must specify element type");
972 assert(cast<PointerType>(Ptr->getType()->getScalarType())
973 ->isOpaqueOrPointeeTypeMatches(PointeeType));
974 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
975 NameStr, InsertAtEnd);
976 }
977
978 /// Create an "inbounds" getelementptr. See the documentation for the
979 /// "inbounds" flag in LangRef.html for details.
980 static GetElementPtrInst *
981 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
982 const Twine &NameStr = "",
983 Instruction *InsertBefore = nullptr) {
984 GetElementPtrInst *GEP =
985 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
986 GEP->setIsInBounds(true);
987 return GEP;
988 }
989
990 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
991 ArrayRef<Value *> IdxList,
992 const Twine &NameStr,
993 BasicBlock *InsertAtEnd) {
994 GetElementPtrInst *GEP =
995 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
996 GEP->setIsInBounds(true);
997 return GEP;
998 }
999
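// Usage sketch (illustrative, not part of this header): an inbounds GEP
// addressing element 2 of an assumed [4 x i32] pointee through `Base`.
//
//   Value *Idx[] = {ConstantInt::get(Type::getInt64Ty(Ctx), 0),
//                   ConstantInt::get(Type::getInt64Ty(Ctx), 2)};
//   auto *GEP = GetElementPtrInst::CreateInBounds(
//       ArrayType::get(Type::getInt32Ty(Ctx), 4), Base, Idx, "elt",
//       InsertPt);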
1000 /// Transparently provide more efficient getOperand methods.
1001 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1002
1003 Type *getSourceElementType() const { return SourceElementType; }
1004
1005 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1006 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1007
1008 Type *getResultElementType() const {
1009 assert(cast<PointerType>(getType()->getScalarType())
1010 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1011 return ResultElementType;
1012 }
1013
1014 /// Returns the address space of this instruction's pointer type.
1015 unsigned getAddressSpace() const {
1016 // Note that this is always the same as the pointer operand's address space
1017 // and that is cheaper to compute, so cheat here.
1018 return getPointerAddressSpace();
1019 }
1020
1021 /// Returns the result type of a getelementptr with the given source
1022 /// element type and indexes.
1023 ///
1024 /// Null is returned if the indices are invalid for the specified
1025 /// source element type.
1026 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1027 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1028 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1029
1030 /// Return the type of the element at the given index of an indexable
1031 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1032 ///
1033 /// Returns null if the type can't be indexed, or the given index is not
1034 /// legal for the given type.
1035 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1036 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1037
1038 inline op_iterator idx_begin() { return op_begin()+1; }
1039 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1040 inline op_iterator idx_end() { return op_end(); }
1041 inline const_op_iterator idx_end() const { return op_end(); }
1042
1043 inline iterator_range<op_iterator> indices() {
1044 return make_range(idx_begin(), idx_end());
1045 }
1046
1047 inline iterator_range<const_op_iterator> indices() const {
1048 return make_range(idx_begin(), idx_end());
1049 }
1050
1051 Value *getPointerOperand() {
1052 return getOperand(0);
1053 }
1054 const Value *getPointerOperand() const {
1055 return getOperand(0);
1056 }
1057 static unsigned getPointerOperandIndex() {
1058 return 0U; // get index for modifying correct operand.
1059 }
1060
1061 /// Method to return the pointer operand as a
1062 /// PointerType.
1063 Type *getPointerOperandType() const {
1064 return getPointerOperand()->getType();
1065 }
1066
1067 /// Returns the address space of the pointer operand.
1068 unsigned getPointerAddressSpace() const {
1069 return getPointerOperandType()->getPointerAddressSpace();
1070 }
1071
1072 /// Returns the pointer type returned by the GEP
1073 /// instruction, which may be a vector of pointers.
1074 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1075 ArrayRef<Value *> IdxList) {
1076 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1077 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1078 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1079 Type *PtrTy = OrigPtrTy->isOpaque()
1080 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1081 : PointerType::get(ResultElemTy, AddrSpace);
1082 // Vector GEP
1083 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1084 ElementCount EltCount = PtrVTy->getElementCount();
1085 return VectorType::get(PtrTy, EltCount);
1086 }
1087 for (Value *Index : IdxList)
1088 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1089 ElementCount EltCount = IndexVTy->getElementCount();
1090 return VectorType::get(PtrTy, EltCount);
1091 }
1092 // Scalar GEP
1093 return PtrTy;
1094 }
1095
1096 unsigned getNumIndices() const { // Note: always non-negative
1097 return getNumOperands() - 1;
1098 }
1099
1100 bool hasIndices() const {
1101 return getNumOperands() > 1;
1102 }
1103
1104 /// Return true if all of the indices of this GEP are
1105 /// zeros. If so, the result pointer and the first operand have the same
1106 /// value, just potentially different types.
1107 bool hasAllZeroIndices() const;
1108
1109 /// Return true if all of the indices of this GEP are
1110 /// constant integers. If so, the result pointer and the first operand have
1111 /// a constant offset between them.
1112 bool hasAllConstantIndices() const;
1113
1114 /// Set or clear the inbounds flag on this GEP instruction.
1115 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1116 void setIsInBounds(bool b = true);
1117
1118 /// Determine whether the GEP has the inbounds flag.
1119 bool isInBounds() const;
1120
1121 /// Accumulate the constant address offset of this GEP if possible.
1122 ///
1123 /// This routine accepts an APInt into which it will accumulate the constant
1124 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1125 /// all-constant, it returns false and the value of the offset APInt is
1126 /// undefined (it is *not* preserved!). The APInt passed into this routine
1127 /// must be at least as wide as the IntPtr type for the address space of
1128 /// the base GEP pointer.
1129 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1130 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1131 MapVector<Value *, APInt> &VariableOffsets,
1132 APInt &ConstantOffset) const;
1133 // Methods for support type inquiry through isa, cast, and dyn_cast:
1134 static bool classof(const Instruction *I) {
1135 return (I->getOpcode() == Instruction::GetElementPtr);
1136 }
1137 static bool classof(const Value *V) {
1138 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1139 }
1140};
1141
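// Usage sketch (illustrative, not part of this header): accumulating the
// constant byte offset of the all-constant GEP built in the earlier sketch.
// `DL` is an assumed DataLayout for the module.
//
//   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   if (GEP->accumulateConstantOffset(DL, Offset))
//     ; // Offset == 8: two i32 elements past the array base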
1142template <>
1143struct OperandTraits<GetElementPtrInst> :
1144 public VariadicOperandTraits<GetElementPtrInst, 1> {
1145};
1146
1147GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1148 ArrayRef<Value *> IdxList, unsigned Values,
1149 const Twine &NameStr,
1150 Instruction *InsertBefore)
1151 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1152 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1153 Values, InsertBefore),
1154 SourceElementType(PointeeType),
1155 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1156 assert(cast<PointerType>(getType()->getScalarType())
1157 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1158 init(Ptr, IdxList, NameStr);
1159}
1160
1161GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1162 ArrayRef<Value *> IdxList, unsigned Values,
1163 const Twine &NameStr,
1164 BasicBlock *InsertAtEnd)
1165 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1166 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1167 Values, InsertAtEnd),
1168 SourceElementType(PointeeType),
1169 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1170 assert(cast<PointerType>(getType()->getScalarType())
1171 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1172 init(Ptr, IdxList, NameStr);
1173}
1174
1175DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1176
1177//===----------------------------------------------------------------------===//
1178// ICmpInst Class
1179//===----------------------------------------------------------------------===//
1180
1181/// This instruction compares its operands according to the predicate given
1182/// to the constructor. It only operates on integers or pointers. The operands
1183/// must be identical types.
1184/// Represent an integer comparison operator.
1185class ICmpInst: public CmpInst {
1186 void AssertOK() {
1187 assert(isIntPredicate() &&
1188 "Invalid ICmp predicate value");
1189 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1190 "Both operands to ICmp instruction are not of the same type!");
1191 // Check that the operands are the right type
1192 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1193 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1194 "Invalid operand types for ICmp instruction");
1195 }
1196
1197protected:
1198 // Note: Instruction needs to be a friend here to call cloneImpl.
1199 friend class Instruction;
1200
1201 /// Clone an identical ICmpInst
1202 ICmpInst *cloneImpl() const;
1203
1204public:
1205 /// Constructor with insert-before-instruction semantics.
1206 ICmpInst(
1207 Instruction *InsertBefore, ///< Where to insert
1208 Predicate pred, ///< The predicate to use for the comparison
1209 Value *LHS, ///< The left-hand-side of the expression
1210 Value *RHS, ///< The right-hand-side of the expression
1211 const Twine &NameStr = "" ///< Name of the instruction
1212 ) : CmpInst(makeCmpResultType(LHS->getType()),
1213 Instruction::ICmp, pred, LHS, RHS, NameStr,
1214 InsertBefore) {
1215#ifndef NDEBUG
1216 AssertOK();
1217#endif
1218 }
1219
1220 /// Constructor with insert-at-end semantics.
1221 ICmpInst(
1222 BasicBlock &InsertAtEnd, ///< Block to insert into.
1223 Predicate pred, ///< The predicate to use for the comparison
1224 Value *LHS, ///< The left-hand-side of the expression
1225 Value *RHS, ///< The right-hand-side of the expression
1226 const Twine &NameStr = "" ///< Name of the instruction
1227 ) : CmpInst(makeCmpResultType(LHS->getType()),
1228 Instruction::ICmp, pred, LHS, RHS, NameStr,
1229 &InsertAtEnd) {
1230#ifndef NDEBUG
1231 AssertOK();
1232#endif
1233 }
1234
1235 /// Constructor with no-insertion semantics
1236 ICmpInst(
1237 Predicate pred, ///< The predicate to use for the comparison
1238 Value *LHS, ///< The left-hand-side of the expression
1239 Value *RHS, ///< The right-hand-side of the expression
1240 const Twine &NameStr = "" ///< Name of the instruction
1241 ) : CmpInst(makeCmpResultType(LHS->getType()),
1242 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1243#ifndef NDEBUG
1244 AssertOK();
1245#endif
1246 }
1247
1248 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1249 /// @returns the predicate that would be the result if the operand were
1250 /// regarded as signed.
1251 /// Return the signed version of the predicate
1252 Predicate getSignedPredicate() const {
1253 return getSignedPredicate(getPredicate());
1254 }
1255
1256 /// This is a static version that you can use without an instruction.
1257 /// Return the signed version of the predicate.
1258 static Predicate getSignedPredicate(Predicate pred);
1259
1260 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1261 /// @returns the predicate that would be the result if the operand were
1262 /// regarded as unsigned.
1263 /// Return the unsigned version of the predicate
1264 Predicate getUnsignedPredicate() const {
1265 return getUnsignedPredicate(getPredicate());
1266 }
1267
1268 /// This is a static version that you can use without an instruction.
1269 /// Return the unsigned version of the predicate.
1270 static Predicate getUnsignedPredicate(Predicate pred);
1271
1272 /// Return true if this predicate is either EQ or NE. This also
1273 /// tests for commutativity.
1274 static bool isEquality(Predicate P) {
1275 return P == ICMP_EQ || P == ICMP_NE;
1276 }
1277
1278 /// Return true if this predicate is either EQ or NE. This also
1279 /// tests for commutativity.
1280 bool isEquality() const {
1281 return isEquality(getPredicate());
1282 }
1283
1284 /// @returns true if the predicate of this ICmpInst is commutative
1285 /// Determine if this relation is commutative.
1286 bool isCommutative() const { return isEquality(); }
1287
1288 /// Return true if the predicate is relational (not EQ or NE).
1289 ///
1290 bool isRelational() const {
1291 return !isEquality();
1292 }
1293
1294 /// Return true if the predicate is relational (not EQ or NE).
1295 ///
1296 static bool isRelational(Predicate P) {
1297 return !isEquality(P);
1298 }
1299
1300 /// Return true if the predicate is SGT or UGT.
1301 ///
1302 static bool isGT(Predicate P) {
1303 return P == ICMP_SGT || P == ICMP_UGT;
1304 }
1305
1306 /// Return true if the predicate is SLT or ULT.
1307 ///
1308 static bool isLT(Predicate P) {
1309 return P == ICMP_SLT || P == ICMP_ULT;
1310 }
1311
1312 /// Return true if the predicate is SGE or UGE.
1313 ///
1314 static bool isGE(Predicate P) {
1315 return P == ICMP_SGE || P == ICMP_UGE;
1316 }
1317
1318 /// Return true if the predicate is SLE or ULE.
1319 ///
1320 static bool isLE(Predicate P) {
1321 return P == ICMP_SLE || P == ICMP_ULE;
1322 }
1323
1324 /// Returns the sequence of all ICmp predicates.
1325 ///
1326 static auto predicates() { return ICmpPredicates(); }
1327
1328 /// Exchange the two operands to this instruction in such a way that it does
1329 /// not modify the semantics of the instruction. The predicate value may be
1330 /// changed to retain the same result if the predicate is order dependent
1331 /// (e.g. ult).
1332 /// Swap operands and adjust predicate.
1333 void swapOperands() {
1334 setPredicate(getSwappedPredicate());
1335 Op<0>().swap(Op<1>());
1336 }
1337
1338 /// Return result of `LHS Pred RHS` comparison.
1339 static bool compare(const APInt &LHS, const APInt &RHS,
1340 ICmpInst::Predicate Pred);
1341
1342 // Methods for support type inquiry through isa, cast, and dyn_cast:
1343 static bool classof(const Instruction *I) {
1344 return I->getOpcode() == Instruction::ICmp;
1345 }
1346 static bool classof(const Value *V) {
1347 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1348 }
1349};
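// A minimal usage sketch for the helpers above; LHS and RHS are assumed to
// be pre-existing i32 Values of the same type (illustrative names, not part
// of this header).
static inline void icmpPredicateSketch(Value *LHS, Value *RHS) {
  ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_ULT, LHS, RHS, "cmp");
  assert(Cmp->isRelational() && !Cmp->isEquality());
  // ICMP_ULT is order dependent, so swapping the operands flips it to UGT.
  Cmp->swapOperands();
  assert(Cmp->getPredicate() == ICmpInst::ICMP_UGT);
  // UGT->SGT, matching the getSignedPredicate mapping documented above.
  assert(ICmpInst::getSignedPredicate(ICmpInst::ICMP_UGT) ==
         ICmpInst::ICMP_SGT);
  Cmp->deleteValue();
}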
1350
1351//===----------------------------------------------------------------------===//
1352// FCmpInst Class
1353//===----------------------------------------------------------------------===//
1354
1355/// This instruction compares its operands according to the predicate given
1356/// to the constructor. It only operates on floating point values or packed
1357/// vectors of floating point values. The operands must be identical types.
1358/// Represents a floating point comparison operator.
1359class FCmpInst: public CmpInst {
1360 void AssertOK() {
1361 assert(isFPPredicate() && "Invalid FCmp predicate value");
1362 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1363 "Both operands to FCmp instruction are not of the same type!");
1364 // Check that the operands are the right type
1365 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1366 "Invalid operand types for FCmp instruction");
1367 }
1368
1369protected:
1370 // Note: Instruction needs to be a friend here to call cloneImpl.
1371 friend class Instruction;
1372
1373 /// Clone an identical FCmpInst
1374 FCmpInst *cloneImpl() const;
1375
1376public:
1377 /// Constructor with insert-before-instruction semantics.
1378 FCmpInst(
1379 Instruction *InsertBefore, ///< Where to insert
1380 Predicate pred, ///< The predicate to use for the comparison
1381 Value *LHS, ///< The left-hand-side of the expression
1382 Value *RHS, ///< The right-hand-side of the expression
1383 const Twine &NameStr = "" ///< Name of the instruction
1384 ) : CmpInst(makeCmpResultType(LHS->getType()),
1385 Instruction::FCmp, pred, LHS, RHS, NameStr,
1386 InsertBefore) {
1387 AssertOK();
1388 }
1389
1390 /// Constructor with insert-at-end semantics.
1391 FCmpInst(
1392 BasicBlock &InsertAtEnd, ///< Block to insert into.
1393 Predicate pred, ///< The predicate to use for the comparison
1394 Value *LHS, ///< The left-hand-side of the expression
1395 Value *RHS, ///< The right-hand-side of the expression
1396 const Twine &NameStr = "" ///< Name of the instruction
1397 ) : CmpInst(makeCmpResultType(LHS->getType()),
1398 Instruction::FCmp, pred, LHS, RHS, NameStr,
1399 &InsertAtEnd) {
1400 AssertOK();
1401 }
1402
1403 /// Constructor with no-insertion semantics
1404 FCmpInst(
1405 Predicate Pred, ///< The predicate to use for the comparison
1406 Value *LHS, ///< The left-hand-side of the expression
1407 Value *RHS, ///< The right-hand-side of the expression
1408 const Twine &NameStr = "", ///< Name of the instruction
1409 Instruction *FlagsSource = nullptr
1410 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1411 RHS, NameStr, nullptr, FlagsSource) {
1412 AssertOK();
1413 }
1414
1415 /// @returns true if the predicate of this instruction is EQ or NE.
1416 /// Determine if this is an equality predicate.
1417 static bool isEquality(Predicate Pred) {
1418 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1419 Pred == FCMP_UNE;
1420 }
1421
1422 /// @returns true if the predicate of this instruction is EQ or NE.
1423 /// Determine if this is an equality predicate.
1424 bool isEquality() const { return isEquality(getPredicate()); }
1425
1426 /// @returns true if the predicate of this instruction is commutative.
1427 /// Determine if this is a commutative predicate.
1428 bool isCommutative() const {
1429 return isEquality() ||
1430 getPredicate() == FCMP_FALSE ||
1431 getPredicate() == FCMP_TRUE ||
1432 getPredicate() == FCMP_ORD ||
1433 getPredicate() == FCMP_UNO;
1434 }
1435
1436 /// @returns true if the predicate is relational (not EQ or NE).
1437 /// Determine if this is a relational predicate.
1438 bool isRelational() const { return !isEquality(); }
1439
1440 /// Exchange the two operands to this instruction in such a way that it does
1441 /// not modify the semantics of the instruction. The predicate value may be
1442 /// changed to retain the same result if the predicate is order dependent
1443 /// (e.g. ult).
1444 /// Swap operands and adjust predicate.
1445 void swapOperands() {
1446 setPredicate(getSwappedPredicate());
1447 Op<0>().swap(Op<1>());
1448 }
1449
1450 /// Returns the sequence of all FCmp predicates.
1451 ///
1452 static auto predicates() { return FCmpPredicates(); }
1453
1454 /// Return result of `LHS Pred RHS` comparison.
1455 static bool compare(const APFloat &LHS, const APFloat &RHS,
1456 FCmpInst::Predicate Pred);
1457
1458 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1459 static bool classof(const Instruction *I) {
1460 return I->getOpcode() == Instruction::FCmp;
1461 }
1462 static bool classof(const Value *V) {
1463 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1464 }
1465};
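// A minimal sketch of the static compare() helper declared above: it folds
// a predicate over constant APFloats without creating any IR (assumes
// llvm/ADT/APFloat.h is available in the including file).
static inline void fcmpCompareSketch() {
  APFloat A(1.0), B(2.0);
  assert(FCmpInst::compare(A, B, FCmpInst::FCMP_OLT));  // ordered 1.0 < 2.0
  assert(!FCmpInst::compare(A, B, FCmpInst::FCMP_OEQ));
  // Note: FCMP_ORD and FCMP_UNO are commutative; the ordering predicates
  // are not.
}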
1466
1467//===----------------------------------------------------------------------===//
1468/// This class represents a function call, abstracting a target
1469/// machine's calling convention. This class uses the low bit of the SubClassData
1470/// field to indicate whether or not this is a tail call. The rest of the bits
1471/// hold the calling convention of the call.
1472///
1473class CallInst : public CallBase {
1474 CallInst(const CallInst &CI);
1475
1476 /// Construct a CallInst given a range of arguments.
1477 /// Construct a CallInst from a range of arguments
1478 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1479 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1480 Instruction *InsertBefore);
1481
1482 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1483 const Twine &NameStr, Instruction *InsertBefore)
1484 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1485
1486 /// Construct a CallInst given a range of arguments.
1487 /// Construct a CallInst from a range of arguments
1488 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1489 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1490 BasicBlock *InsertAtEnd);
1491
1492 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1493 Instruction *InsertBefore);
1494
1495 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1496 BasicBlock *InsertAtEnd);
1497
1498 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1499 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1500 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1501
1502 /// Compute the number of operands to allocate.
1503 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1504 // We need one operand for the called function, plus the input operand
1505 // counts provided.
1506 return 1 + NumArgs + NumBundleInputs;
1507 }
1508
1509protected:
1510 // Note: Instruction needs to be a friend here to call cloneImpl.
1511 friend class Instruction;
1512
1513 CallInst *cloneImpl() const;
1514
1515public:
1516 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1517 Instruction *InsertBefore = nullptr) {
1518 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1519 }
1520
1521 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1522 const Twine &NameStr,
1523 Instruction *InsertBefore = nullptr) {
1524 return new (ComputeNumOperands(Args.size()))
1525 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1526 }
1527
1528 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1529 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1530 const Twine &NameStr = "",
1531 Instruction *InsertBefore = nullptr) {
1532 const int NumOperands =
1533 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1534 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1535
1536 return new (NumOperands, DescriptorBytes)
1537 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1538 }
1539
1540 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1541 BasicBlock *InsertAtEnd) {
1542 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1543 }
1544
1545 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1546 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1547 return new (ComputeNumOperands(Args.size()))
1548 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
1549 }
1550
1551 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1552 ArrayRef<OperandBundleDef> Bundles,
1553 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1554 const int NumOperands =
1555 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1556 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1557
1558 return new (NumOperands, DescriptorBytes)
1559 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1560 }
1561
1562 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1563 Instruction *InsertBefore = nullptr) {
1564 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1565 InsertBefore);
1566 }
1567
1568 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1569 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1570 const Twine &NameStr = "",
1571 Instruction *InsertBefore = nullptr) {
1572 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1573 NameStr, InsertBefore);
1574 }
1575
1576 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1577 const Twine &NameStr,
1578 Instruction *InsertBefore = nullptr) {
1579 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1580 InsertBefore);
1581 }
1582
1583 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1584 BasicBlock *InsertAtEnd) {
1585 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1586 InsertAtEnd);
1587 }
1588
1589 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1590 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1591 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1592 InsertAtEnd);
1593 }
1594
1595 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1596 ArrayRef<OperandBundleDef> Bundles,
1597 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1598 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1599 NameStr, InsertAtEnd);
1600 }
1601
1602 /// Create a clone of \p CI with a different set of operand bundles and
1603 /// insert it before \p InsertPt.
1604 ///
1605 /// The returned call instruction is identical to \p CI in every way except that
1606 /// the operand bundles for the new instruction are set to the operand bundles
1607 /// in \p Bundles.
1608 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1609 Instruction *InsertPt = nullptr);
1610
1611 /// Generate the IR for a call to malloc:
1612 /// 1. Compute the malloc call's argument as the specified type's size,
1613 /// possibly multiplied by the array size if the array size is not
1614 /// constant 1.
1615 /// 2. Call malloc with that argument.
1616 /// 3. Bitcast the result of the malloc call to the specified type.
1617 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1618 Type *AllocTy, Value *AllocSize,
1619 Value *ArraySize = nullptr,
1620 Function *MallocF = nullptr,
1621 const Twine &Name = "");
1622 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1623 Type *AllocTy, Value *AllocSize,
1624 Value *ArraySize = nullptr,
1625 Function *MallocF = nullptr,
1626 const Twine &Name = "");
1627 static Instruction *
1628 CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, Type *AllocTy,
1629 Value *AllocSize, Value *ArraySize = nullptr,
1630 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1631 Function *MallocF = nullptr, const Twine &Name = "");
1632 static Instruction *
1633 CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, Type *AllocTy,
1634 Value *AllocSize, Value *ArraySize = nullptr,
1635 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1636 Function *MallocF = nullptr, const Twine &Name = "");
1637 /// Generate the IR for a call to the builtin free function.
1638 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1639 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1640 static Instruction *CreateFree(Value *Source,
1641 ArrayRef<OperandBundleDef> Bundles,
1642 Instruction *InsertBefore);
1643 static Instruction *CreateFree(Value *Source,
1644 ArrayRef<OperandBundleDef> Bundles,
1645 BasicBlock *InsertAtEnd);
1646
1647 // Note that 'musttail' implies 'tail'.
1648 enum TailCallKind : unsigned {
1649 TCK_None = 0,
1650 TCK_Tail = 1,
1651 TCK_MustTail = 2,
1652 TCK_NoTail = 3,
1653 TCK_LAST = TCK_NoTail
1654 };
1655
1656 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1657 static_assert(
1658 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1659 "Bitfields must be contiguous");
1660
1661 TailCallKind getTailCallKind() const {
1662 return getSubclassData<TailCallKindField>();
1663 }
1664
1665 bool isTailCall() const {
1666 TailCallKind Kind = getTailCallKind();
1667 return Kind == TCK_Tail || Kind == TCK_MustTail;
1668 }
1669
1670 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1671
1672 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1673
1674 void setTailCallKind(TailCallKind TCK) {
1675 setSubclassData<TailCallKindField>(TCK);
1676 }
1677
1678 void setTailCall(bool IsTc = true) {
1679 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1680 }
1681
1682 /// Return true if the call can return twice
1683 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1684 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1685
1686 // Methods for support type inquiry through isa, cast, and dyn_cast:
1687 static bool classof(const Instruction *I) {
1688 return I->getOpcode() == Instruction::Call;
1689 }
1690 static bool classof(const Value *V) {
1691 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1692 }
1693
1694 /// Updates profile metadata by scaling it by \p S / \p T.
1695 void updateProfWeight(uint64_t S, uint64_t T);
1696
1697private:
1698 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1699 // method so that subclasses cannot accidentally use it.
1700 template <typename Bitfield>
1701 void setSubclassData(typename Bitfield::Type Value) {
1702 Instruction::setSubclassData<Bitfield>(Value);
1703 }
1704};
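// A minimal sketch of the FunctionCallee overloads and the tail-call bits
// above; M, Ctx, Arg and InsertPt are assumed to exist in the caller, and
// llvm/IR/Module.h is assumed to be available (illustrative names only).
static inline void callInstSketch(Module *M, LLVMContext &Ctx, Value *Arg,
                                  Instruction *InsertPt) {
  FunctionCallee Callee = M->getOrInsertFunction(
      "use_i32", Type::getVoidTy(Ctx), Type::getInt32Ty(Ctx));
  CallInst *CI = CallInst::Create(Callee, {Arg}, "", InsertPt);
  CI->setTailCallKind(CallInst::TCK_Tail);
  assert(CI->isTailCall() && !CI->isMustTailCall());
}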
1705
1706CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1707 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1708 BasicBlock *InsertAtEnd)
1709 : CallBase(Ty->getReturnType(), Instruction::Call,
1710 OperandTraits<CallBase>::op_end(this) -
1711 (Args.size() + CountBundleInputs(Bundles) + 1),
1712 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1713 InsertAtEnd) {
1714 init(Ty, Func, Args, Bundles, NameStr);
1715}
1716
1717CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1718 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1719 Instruction *InsertBefore)
1720 : CallBase(Ty->getReturnType(), Instruction::Call,
1721 OperandTraits<CallBase>::op_end(this) -
1722 (Args.size() + CountBundleInputs(Bundles) + 1),
1723 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1724 InsertBefore) {
1725 init(Ty, Func, Args, Bundles, NameStr);
1726}
1727
1728//===----------------------------------------------------------------------===//
1729// SelectInst Class
1730//===----------------------------------------------------------------------===//
1731
1732/// This class represents the LLVM 'select' instruction.
1733///
1734class SelectInst : public Instruction {
1735 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1736 Instruction *InsertBefore)
1737 : Instruction(S1->getType(), Instruction::Select,
1738 &Op<0>(), 3, InsertBefore) {
1739 init(C, S1, S2);
1740 setName(NameStr);
1741 }
1742
1743 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1744 BasicBlock *InsertAtEnd)
1745 : Instruction(S1->getType(), Instruction::Select,
1746 &Op<0>(), 3, InsertAtEnd) {
1747 init(C, S1, S2);
1748 setName(NameStr);
1749 }
1750
1751 void init(Value *C, Value *S1, Value *S2) {
1752 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1753 Op<0>() = C;
1754 Op<1>() = S1;
1755 Op<2>() = S2;
1756 }
1757
1758protected:
1759 // Note: Instruction needs to be a friend here to call cloneImpl.
1760 friend class Instruction;
1761
1762 SelectInst *cloneImpl() const;
1763
1764public:
1765 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1766 const Twine &NameStr = "",
1767 Instruction *InsertBefore = nullptr,
1768 Instruction *MDFrom = nullptr) {
1769 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1770 if (MDFrom)
1771 Sel->copyMetadata(*MDFrom);
1772 return Sel;
1773 }
1774
1775 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1776 const Twine &NameStr,
1777 BasicBlock *InsertAtEnd) {
1778 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1779 }
1780
1781 const Value *getCondition() const { return Op<0>(); }
1782 const Value *getTrueValue() const { return Op<1>(); }
1783 const Value *getFalseValue() const { return Op<2>(); }
1784 Value *getCondition() { return Op<0>(); }
1785 Value *getTrueValue() { return Op<1>(); }
1786 Value *getFalseValue() { return Op<2>(); }
1787
1788 void setCondition(Value *V) { Op<0>() = V; }
1789 void setTrueValue(Value *V) { Op<1>() = V; }
1790 void setFalseValue(Value *V) { Op<2>() = V; }
1791
1792 /// Swap the true and false values of the select instruction.
1793 /// This doesn't swap prof metadata.
1794 void swapValues() { Op<1>().swap(Op<2>()); }
1795
1796 /// Return a string if the specified operands are invalid
1797 /// for a select operation, otherwise return null.
1798 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1799
1800 /// Transparently provide more efficient getOperand methods.
1801 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1802
1803 OtherOps getOpcode() const {
1804 return static_cast<OtherOps>(Instruction::getOpcode());
1805 }
1806
1807 // Methods for support type inquiry through isa, cast, and dyn_cast:
1808 static bool classof(const Instruction *I) {
1809 return I->getOpcode() == Instruction::Select;
1810 }
1811 static bool classof(const Value *V) {
1812 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1813 }
1814};
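// A minimal sketch of the accessors above; Cond is assumed to be an i1
// Value and X, Y Values of one common type (illustrative names, not part
// of this header).
static inline void selectInstSketch(Value *Cond, Value *X, Value *Y,
                                    Instruction *InsertPt) {
  assert(SelectInst::areInvalidOperands(Cond, X, Y) == nullptr);
  SelectInst *Sel = SelectInst::Create(Cond, X, Y, "sel", InsertPt);
  Sel->swapValues(); // arms swapped; prof metadata is left untouched
  assert(Sel->getTrueValue() == Y && Sel->getFalseValue() == X);
}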
1815
1816template <>
1817struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1818};
1819
1820DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1821
1822//===----------------------------------------------------------------------===//
1823// VAArgInst Class
1824//===----------------------------------------------------------------------===//
1825
1826/// This class represents the va_arg llvm instruction, which returns
1827/// an argument of the specified type given a va_list and increments that list
1828///
1829class VAArgInst : public UnaryInstruction {
1830protected:
1831 // Note: Instruction needs to be a friend here to call cloneImpl.
1832 friend class Instruction;
1833
1834 VAArgInst *cloneImpl() const;
1835
1836public:
1837 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1838 Instruction *InsertBefore = nullptr)
1839 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1840 setName(NameStr);
1841 }
1842
1843 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1844 BasicBlock *InsertAtEnd)
1845 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1846 setName(NameStr);
1847 }
1848
1849 Value *getPointerOperand() { return getOperand(0); }
1850 const Value *getPointerOperand() const { return getOperand(0); }
1851 static unsigned getPointerOperandIndex() { return 0U; }
1852
1853 // Methods for support type inquiry through isa, cast, and dyn_cast:
1854 static bool classof(const Instruction *I) {
1855 return I->getOpcode() == VAArg;
1856 }
1857 static bool classof(const Value *V) {
1858 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1859 }
1860};
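// A minimal sketch: read the next i32 argument from a va_list pointer.
// VAList and InsertPt are assumed to exist in the caller (illustrative
// names, not part of this header).
static inline VAArgInst *vaArgSketch(Value *VAList, LLVMContext &Ctx,
                                     Instruction *InsertPt) {
  // Operand 0 of the resulting instruction is the va_list pointer.
  return new VAArgInst(VAList, Type::getInt32Ty(Ctx), "arg", InsertPt);
}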
1861
1862//===----------------------------------------------------------------------===//
1863// ExtractElementInst Class
1864//===----------------------------------------------------------------------===//
1865
1866/// This instruction extracts a single (scalar)
1867/// element from a VectorType value
1868///
1869class ExtractElementInst : public Instruction {
1870 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1871 Instruction *InsertBefore = nullptr);
1872 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1873 BasicBlock *InsertAtEnd);
1874
1875protected:
1876 // Note: Instruction needs to be a friend here to call cloneImpl.
1877 friend class Instruction;
1878
1879 ExtractElementInst *cloneImpl() const;
1880
1881public:
1882 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1883 const Twine &NameStr = "",
1884 Instruction *InsertBefore = nullptr) {
1885 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1886 }
1887
1888 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1889 const Twine &NameStr,
1890 BasicBlock *InsertAtEnd) {
1891 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1892 }
1893
1894 /// Return true if an extractelement instruction can be
1895 /// formed with the specified operands.
1896 static bool isValidOperands(const Value *Vec, const Value *Idx);
1897
1898 Value *getVectorOperand() { return Op<0>(); }
1899 Value *getIndexOperand() { return Op<1>(); }
1900 const Value *getVectorOperand() const { return Op<0>(); }
1901 const Value *getIndexOperand() const { return Op<1>(); }
1902
1903 VectorType *getVectorOperandType() const {
1904 return cast<VectorType>(getVectorOperand()->getType());
1905 }
1906
1907 /// Transparently provide more efficient getOperand methods.
1908 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1909
1910 // Methods for support type inquiry through isa, cast, and dyn_cast:
1911 static bool classof(const Instruction *I) {
1912 return I->getOpcode() == Instruction::ExtractElement;
1913 }
1914 static bool classof(const Value *V) {
1915 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1916 }
1917};
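// A minimal sketch of isValidOperands()/Create() above; Vec is assumed to
// be an existing vector-typed Value, and llvm/IR/Constants.h is assumed to
// be available (illustrative names only).
static inline Value *extractLane0Sketch(Value *Vec, LLVMContext &Ctx,
                                        Instruction *InsertPt) {
  Value *Idx = ConstantInt::get(Type::getInt64Ty(Ctx), 0);
  if (!ExtractElementInst::isValidOperands(Vec, Idx))
    return nullptr;
  return ExtractElementInst::Create(Vec, Idx, "lane0", InsertPt);
}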
1918
1919template <>
1920struct OperandTraits<ExtractElementInst> :
1921 public FixedNumOperandTraits<ExtractElementInst, 2> {
1922};
1923
1924DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1925
1926//===----------------------------------------------------------------------===//
1927// InsertElementInst Class
1928//===----------------------------------------------------------------------===//
1929
1930/// This instruction inserts a single (scalar)
1931/// element into a VectorType value
1932///
1933class InsertElementInst : public Instruction {
1934 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1935 const Twine &NameStr = "",
1936 Instruction *InsertBefore = nullptr);
1937 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1938 BasicBlock *InsertAtEnd);
1939
1940protected:
1941 // Note: Instruction needs to be a friend here to call cloneImpl.
1942 friend class Instruction;
1943
1944 InsertElementInst *cloneImpl() const;
1945
1946public:
1947 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1948 const Twine &NameStr = "",
1949 Instruction *InsertBefore = nullptr) {
1950 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1951 }
1952
1953 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1954 const Twine &NameStr,
1955 BasicBlock *InsertAtEnd) {
1956 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1957 }
1958
1959 /// Return true if an insertelement instruction can be
1960 /// formed with the specified operands.
1961 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1962 const Value *Idx);
1963
1964 /// Overload to return most specific vector type.
1965 ///
1966 VectorType *getType() const {
1967 return cast<VectorType>(Instruction::getType());
1968 }
1969
1970 /// Transparently provide more efficient getOperand methods.
1971 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1972
1973 // Methods for support type inquiry through isa, cast, and dyn_cast:
1974 static bool classof(const Instruction *I) {
1975 return I->getOpcode() == Instruction::InsertElement;
1976 }
1977 static bool classof(const Value *V) {
1978 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1979 }
1980};
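// A minimal sketch: build a <2 x i32> from two scalars by chaining
// insertelement; A and B are assumed to be i32 Values and llvm/IR/Constants.h
// to be available (illustrative names only).
static inline Value *buildVec2Sketch(Value *A, Value *B, LLVMContext &Ctx,
                                     Instruction *InsertPt) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Value *Vec = PoisonValue::get(FixedVectorType::get(I32, 2));
  Vec = InsertElementInst::Create(
      Vec, A, ConstantInt::get(Type::getInt64Ty(Ctx), 0), "v0", InsertPt);
  return InsertElementInst::Create(
      Vec, B, ConstantInt::get(Type::getInt64Ty(Ctx), 1), "v01", InsertPt);
}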
1981
1982template <>
1983struct OperandTraits<InsertElementInst> :
1984 public FixedNumOperandTraits<InsertElementInst, 3> {
1985};
1986
1987DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1988
1989//===----------------------------------------------------------------------===//
1990// ShuffleVectorInst Class
1991//===----------------------------------------------------------------------===//
1992
1993constexpr int UndefMaskElem = -1;
1994
1995/// This instruction constructs a fixed permutation of two
1996/// input vectors.
1997///
1998/// For each element of the result vector, the shuffle mask selects an element
1999/// from one of the input vectors to copy to the result. Non-negative elements
2000/// in the mask represent an index into the concatenated pair of input vectors.
2001/// UndefMaskElem (-1) specifies that the result element is undefined.
2002///
2003/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2004/// requirement may be relaxed in the future.
2005class ShuffleVectorInst : public Instruction {
2006 SmallVector<int, 4> ShuffleMask;
2007 Constant *ShuffleMaskForBitcode;
2008
2009protected:
2010 // Note: Instruction needs to be a friend here to call cloneImpl.
2011 friend class Instruction;
2012
2013 ShuffleVectorInst *cloneImpl() const;
2014
2015public:
2016 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2017 Instruction *InsertBefore = nullptr);
2018 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2019 BasicBlock *InsertAtEnd);
2020 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2021 Instruction *InsertBefore = nullptr);
2022 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2023 BasicBlock *InsertAtEnd);
2024 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2025 const Twine &NameStr = "",
2026 Instruction *InsertBefor = nullptr);
2027 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2028 const Twine &NameStr, BasicBlock *InsertAtEnd);
2029 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2030 const Twine &NameStr = "",
2031 Instruction *InsertBefor = nullptr);
2032 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2033 const Twine &NameStr, BasicBlock *InsertAtEnd);
2034
2035 void *operator new(size_t S) { return User::operator new(S, 2); }
2036 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2037
2038 /// Swap the operands and adjust the mask to preserve the semantics
2039 /// of the instruction.
2040 void commute();
2041
2042 /// Return true if a shufflevector instruction can be
2043 /// formed with the specified operands.
2044 static bool isValidOperands(const Value *V1, const Value *V2,
2045 const Value *Mask);
2046 static bool isValidOperands(const Value *V1, const Value *V2,
2047 ArrayRef<int> Mask);
2048
2049 /// Overload to return most specific vector type.
2050 ///
2051 VectorType *getType() const {
2052 return cast<VectorType>(Instruction::getType());
2053 }
2054
2055 /// Transparently provide more efficient getOperand methods.
2056 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2057
2058 /// Return the shuffle mask value of this instruction for the given element
2059 /// index. Return UndefMaskElem if the element is undef.
2060 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2061
2062 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2063 /// elements of the mask are returned as UndefMaskElem.
2064 static void getShuffleMask(const Constant *Mask,
2065 SmallVectorImpl<int> &Result);
2066
2067 /// Return the mask for this instruction as a vector of integers. Undefined
2068 /// elements of the mask are returned as UndefMaskElem.
2069 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2070 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2071 }
2072
2073 /// Return the mask for this instruction, for use in bitcode.
2074 ///
2075 /// TODO: This is temporary until we decide a new bitcode encoding for
2076 /// shufflevector.
2077 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2078
2079 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2080 Type *ResultTy);
2081
2082 void setShuffleMask(ArrayRef<int> Mask);
2083
2084 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2085
2086 /// Return true if this shuffle returns a vector with a different number of
2087 /// elements than its source vectors.
2088 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2089 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2090 bool changesLength() const {
2091 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2092 ->getElementCount()
2093 .getKnownMinValue();
2094 unsigned NumMaskElts = ShuffleMask.size();
2095 return NumSourceElts != NumMaskElts;
2096 }
2097
2098 /// Return true if this shuffle returns a vector with a greater number of
2099 /// elements than its source vectors.
2100 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2101 bool increasesLength() const {
2102 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2103 ->getElementCount()
2104 .getKnownMinValue();
2105 unsigned NumMaskElts = ShuffleMask.size();
2106 return NumSourceElts < NumMaskElts;
2107 }
2108
2109 /// Return true if this shuffle mask chooses elements from exactly one source
2110 /// vector.
2111 /// Example: <7,5,undef,7>
2112 /// This assumes that vector operands are the same length as the mask.
2113 static bool isSingleSourceMask(ArrayRef<int> Mask);
2114 static bool isSingleSourceMask(const Constant *Mask) {
2115 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2116 SmallVector<int, 16> MaskAsInts;
2117 getShuffleMask(Mask, MaskAsInts);
2118 return isSingleSourceMask(MaskAsInts);
2119 }
2120
2121 /// Return true if this shuffle chooses elements from exactly one source
2122 /// vector without changing the length of that vector.
2123 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2124 /// TODO: Optionally allow length-changing shuffles.
2125 bool isSingleSource() const {
2126 return !changesLength() && isSingleSourceMask(ShuffleMask);
2127 }
2128
2129 /// Return true if this shuffle mask chooses elements from exactly one source
2130 /// vector without lane crossings. A shuffle using this mask is not
2131 /// necessarily a no-op because it may change the number of elements from its
2132 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2133 /// Example: <undef,undef,2,3>
2134 static bool isIdentityMask(ArrayRef<int> Mask);
2135 static bool isIdentityMask(const Constant *Mask) {
2136 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2137
2138 // Not possible to express a shuffle mask for a scalable vector for this
2139 // case.
2140 if (isa<ScalableVectorType>(Mask->getType()))
2141 return false;
2142
2143 SmallVector<int, 16> MaskAsInts;
2144 getShuffleMask(Mask, MaskAsInts);
2145 return isIdentityMask(MaskAsInts);
2146 }
2147
2148 /// Return true if this shuffle chooses elements from exactly one source
2149 /// vector without lane crossings and does not change the number of elements
2150 /// from its input vectors.
2151 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2152 bool isIdentity() const {
2153 // Not possible to express a shuffle mask for a scalable vector for this
2154 // case.
2155 if (isa<ScalableVectorType>(getType()))
2156 return false;
2157
2158 return !changesLength() && isIdentityMask(ShuffleMask);
2159 }
2160
2161 /// Return true if this shuffle lengthens exactly one source vector with
2162 /// undefs in the high elements.
2163 bool isIdentityWithPadding() const;
2164
2165 /// Return true if this shuffle extracts the first N elements of exactly one
2166 /// source vector.
2167 bool isIdentityWithExtract() const;
2168
2169 /// Return true if this shuffle concatenates its 2 source vectors. This
2170 /// returns false if either input is undefined. In that case, the shuffle
2171 /// is better classified as an identity with padding operation.
2172 bool isConcat() const;
2173
2174 /// Return true if this shuffle mask chooses elements from its source vectors
2175 /// without lane crossings. A shuffle using this mask would be
2176 /// equivalent to a vector select with a constant condition operand.
2177 /// Example: <4,1,6,undef>
2178 /// This returns false if the mask does not choose from both input vectors.
2179 /// In that case, the shuffle is better classified as an identity shuffle.
2180 /// This assumes that vector operands are the same length as the mask
2181 /// (a length-changing shuffle can never be equivalent to a vector select).
2182 static bool isSelectMask(ArrayRef<int> Mask);
2183 static bool isSelectMask(const Constant *Mask) {
2184 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2185 SmallVector<int, 16> MaskAsInts;
2186 getShuffleMask(Mask, MaskAsInts);
2187 return isSelectMask(MaskAsInts);
2188 }
2189
2190 /// Return true if this shuffle chooses elements from its source vectors
2191 /// without lane crossings and all operands have the same number of elements.
2192 /// In other words, this shuffle is equivalent to a vector select with a
2193 /// constant condition operand.
2194 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2195 /// This returns false if the mask does not choose from both input vectors.
2196 /// In that case, the shuffle is better classified as an identity shuffle.
2197 /// TODO: Optionally allow length-changing shuffles.
2198 bool isSelect() const {
2199 return !changesLength() && isSelectMask(ShuffleMask);
2200 }
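  // A quick sketch of the classifier above: over two 4-element sources,
  // each result lane picks from one input without crossing lanes, e.g.
  //   SmallVector<int, 4> SelMask = {4, 1, 6, 3};
  //   assert(ShuffleVectorInst::isSelectMask(SelMask));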
2201
2202 /// Return true if this shuffle mask swaps the order of elements from exactly
2203 /// one source vector.
2204 /// Example: <7,6,undef,4>
2205 /// This assumes that vector operands are the same length as the mask.
2206 static bool isReverseMask(ArrayRef<int> Mask);
2207 static bool isReverseMask(const Constant *Mask) {
2208 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2209 SmallVector<int, 16> MaskAsInts;
2210 getShuffleMask(Mask, MaskAsInts);
2211 return isReverseMask(MaskAsInts);
2212 }
2213
2214 /// Return true if this shuffle swaps the order of elements from exactly
2215 /// one source vector.
2216 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2217 /// TODO: Optionally allow length-changing shuffles.
2218 bool isReverse() const {
2219 return !changesLength() && isReverseMask(ShuffleMask);
2220 }
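  // Sketch: a full reversal of one 4-element source satisfies the
  // classifier above, e.g.
  //   SmallVector<int, 4> RevMask = {3, 2, 1, 0};
  //   assert(ShuffleVectorInst::isReverseMask(RevMask));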
2221
2222 /// Return true if this shuffle mask chooses all elements with the same value
2223 /// as the first element of exactly one source vector.
2224 /// Example: <4,undef,undef,4>
2225 /// This assumes that vector operands are the same length as the mask.
2226 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2227 static bool isZeroEltSplatMask(const Constant *Mask) {
2228 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2229 SmallVector<int, 16> MaskAsInts;
2230 getShuffleMask(Mask, MaskAsInts);
2231 return isZeroEltSplatMask(MaskAsInts);
2232 }
2233
2234 /// Return true if all elements of this shuffle are the same value as the
2235 /// first element of exactly one source vector without changing the length
2236 /// of that vector.
2237 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2238 /// TODO: Optionally allow length-changing shuffles.
2239 /// TODO: Optionally allow splats from other elements.
2240 bool isZeroEltSplat() const {
2241 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2242 }
2243
2244 /// Return true if this shuffle mask is a transpose mask.
2245 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2246 /// even- or odd-numbered vector elements from two n-dimensional source
2247 /// vectors and write each result into consecutive elements of an
2248 /// n-dimensional destination vector. Two shuffles are necessary to complete
2249 /// the transpose, one for the even elements and another for the odd elements.
2250 /// This description closely follows how the TRN1 and TRN2 AArch64
2251 /// instructions operate.
2252 ///
2253 /// For example, a simple 2x2 matrix can be transposed with:
2254 ///
2255 /// ; Original matrix
2256 /// m0 = < a, b >
2257 /// m1 = < c, d >
2258 ///
2259 /// ; Transposed matrix
2260 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2261 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2262 ///
2263 /// For matrices having greater than n columns, the resulting nx2 transposed
2264 /// matrix is stored in two result vectors such that one vector contains
2265 /// interleaved elements from all the even-numbered rows and the other vector
2266 /// contains interleaved elements from all the odd-numbered rows. For example,
2267 /// a 2x4 matrix can be transposed with:
2268 ///
2269 /// ; Original matrix
2270 /// m0 = < a, b, c, d >
2271 /// m1 = < e, f, g, h >
2272 ///
2273 /// ; Transposed matrix
2274 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2275 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2276 static bool isTransposeMask(ArrayRef<int> Mask);
2277 static bool isTransposeMask(const Constant *Mask) {
2278 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2279 SmallVector<int, 16> MaskAsInts;
2280 getShuffleMask(Mask, MaskAsInts);
2281 return isTransposeMask(MaskAsInts);
2282 }
2283
2284 /// Return true if this shuffle transposes the elements of its inputs without
2285 /// changing the length of the vectors. This operation may also be known as a
2286 /// merge or interleave. See the description for isTransposeMask() for the
2287 /// exact specification.
2288 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2289 bool isTranspose() const {
2290 return !changesLength() && isTransposeMask(ShuffleMask);
2291 }
2292
2293 /// Return true if this shuffle mask is a splice mask, concatenating the two
2294 /// inputs together and then extracting an original width vector starting from
2295 /// the splice index.
2296 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2297 static bool isSpliceMask(ArrayRef<int> Mask, int &Index);
2298 static bool isSpliceMask(const Constant *Mask, int &Index) {
2299 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2300 SmallVector<int, 16> MaskAsInts;
2301 getShuffleMask(Mask, MaskAsInts);
2302 return isSpliceMask(MaskAsInts, Index);
2303 }
2304
2305 /// Return true if this shuffle splices two inputs without changing the length
2306 /// of the vectors. This operation concatenates the two inputs together and
2307 /// then extracts an original width vector starting from the splice index.
2308 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2309 bool isSplice(int &Index) const {
2310 return !changesLength() && isSpliceMask(ShuffleMask, Index);
2311 }
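// Illustrative usage sketch (not part of Instructions.h): the mask <1,2,3,4>
// from the example above splices two 4-element vectors; Index receives the
// splice start.
//   int Index;
//   SmallVector<int, 4> M = {1, 2, 3, 4};
//   bool IsSplice = ShuffleVectorInst::isSpliceMask(M, Index);
//   // expected: IsSplice == true, Index == 1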
2312
2313 /// Return true if this shuffle mask is an extract subvector mask.
2314 /// A valid extract subvector mask returns a smaller vector from a single
2315 /// source operand. The base extraction index is returned as well.
2316 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2317 int &Index);
2318 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2319 int &Index) {
2320 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2321 // Not possible to express a shuffle mask for a scalable vector for this
2322 // case.
2323 if (isa<ScalableVectorType>(Mask->getType()))
2324 return false;
2325 SmallVector<int, 16> MaskAsInts;
2326 getShuffleMask(Mask, MaskAsInts);
2327 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2328 }
2329
2330 /// Return true if this shuffle mask is an extract subvector mask.
2331 bool isExtractSubvectorMask(int &Index) const {
2332 // Not possible to express a shuffle mask for a scalable vector for this
2333 // case.
2334 if (isa<ScalableVectorType>(getType()))
2335 return false;
2336
2337 int NumSrcElts =
2338 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2339 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2340 }
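// Illustrative usage sketch (not part of Instructions.h): a mask reading the
// consecutive lanes 2..3 of an 8-element source is an extract-subvector mask
// with base index 2.
//   int Index;
//   SmallVector<int, 2> M = {2, 3};
//   bool IsExtract =
//       ShuffleVectorInst::isExtractSubvectorMask(M, /*NumSrcElts=*/8, Index);
//   // expected: IsExtract == true, Index == 2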
2341
2342 /// Return true if this shuffle mask is an insert subvector mask.
2343 /// A valid insert subvector mask inserts the lowest elements of a second
2344 /// source operand into an in-place first source operand.
2345 /// Both the subvector width and the insertion index are returned.
2346 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2347 int &NumSubElts, int &Index);
2348 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2349 int &NumSubElts, int &Index) {
2350 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2351 // Not possible to express a shuffle mask for a scalable vector for this
2352 // case.
2353 if (isa<ScalableVectorType>(Mask->getType()))
2354 return false;
2355 SmallVector<int, 16> MaskAsInts;
2356 getShuffleMask(Mask, MaskAsInts);
2357 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2358 }
2359
2360 /// Return true if this shuffle mask is an insert subvector mask.
2361 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2362 // Not possible to express a shuffle mask for a scalable vector for this
2363 // case.
2364 if (isa<ScalableVectorType>(getType()))
2365 return false;
2366
2367 int NumSrcElts =
2368 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2369 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2370 }
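// Illustrative usage sketch (not part of Instructions.h): with 4-element
// sources, the mask <0,4,5,3> keeps the first operand in place and inserts
// the two lowest lanes of the second operand starting at position 1.
//   int NumSubElts, Index;
//   SmallVector<int, 4> M = {0, 4, 5, 3};
//   bool IsInsert = ShuffleVectorInst::isInsertSubvectorMask(
//       M, /*NumSrcElts=*/4, NumSubElts, Index);
//   // expected: IsInsert == true, NumSubElts == 2, Index == 1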
2371
2372 /// Return true if this shuffle mask replicates each of the \p VF elements
2373 /// in a vector \p ReplicationFactor times.
2374 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2375 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2376 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2377 int &VF);
2378 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2379 int &VF) {
2380 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2381 // Not possible to express a shuffle mask for a scalable vector for this
2382 // case.
2383 if (isa<ScalableVectorType>(Mask->getType()))
2384 return false;
2385 SmallVector<int, 16> MaskAsInts;
2386 getShuffleMask(Mask, MaskAsInts);
2387 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2388 }
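// Illustrative usage sketch (not part of Instructions.h), using the
// documented ReplicationFactor=3, VF=4 mask:
//   int ReplicationFactor, VF;
//   SmallVector<int, 12> M = {0,0,0, 1,1,1, 2,2,2, 3,3,3};
//   bool IsRep = ShuffleVectorInst::isReplicationMask(M, ReplicationFactor, VF);
//   // expected: IsRep == true, ReplicationFactor == 3, VF == 4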
2389
2390 /// Return true if this shuffle mask is a replication mask.
2391 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2392
2393 /// Return true if this shuffle mask represents a "clustered" mask of size VF,
2394 /// i.e. each index between [0..VF) is used exactly once in each submask of
2395 /// size VF.
2396 /// For example, the mask for \p VF=4 is:
2397 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2398 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2399 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2400 /// element 3 is used twice in the second submask
2401 /// (3,3,1,0) and index 2 is not used at all.
2402 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2403
2404 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2405 /// mask.
2406 bool isOneUseSingleSourceMask(int VF) const;
2407
2408 /// Change values in a shuffle permute mask assuming the two vector operands
2409 /// of length InVecNumElts have swapped position.
2410 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2411 unsigned InVecNumElts) {
2412 for (int &Idx : Mask) {
2413 if (Idx == -1)
2414 continue;
2415 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2416 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2417        "shufflevector mask index out of range");
2418 }
2419 }
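// Worked example (illustrative, not part of Instructions.h): with
// InVecNumElts == 4, each defined index is moved to the other operand;
// undef (-1) entries are preserved.
//   SmallVector<int, 4> M = {0, 5, -1, 2};
//   ShuffleVectorInst::commuteShuffleMask(M, /*InVecNumElts=*/4);
//   // M is now {4, 1, -1, 6}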
2420
2421 // Methods for support type inquiry through isa, cast, and dyn_cast:
2422 static bool classof(const Instruction *I) {
2423 return I->getOpcode() == Instruction::ShuffleVector;
2424 }
2425 static bool classof(const Value *V) {
2426 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2427 }
2428};
2429
2430template <>
2431struct OperandTraits<ShuffleVectorInst>
2432 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2433
2434 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2435
2436//===----------------------------------------------------------------------===//
2437// ExtractValueInst Class
2438//===----------------------------------------------------------------------===//
2439
2440/// This instruction extracts a struct member or array
2441/// element value from an aggregate value.
2442///
2443class ExtractValueInst : public UnaryInstruction {
2444 SmallVector<unsigned, 4> Indices;
2445
2446 ExtractValueInst(const ExtractValueInst &EVI);
2447
2448 /// Constructors - Create an extractvalue instruction with a base aggregate
2449 /// value and a list of indices. The first ctor can optionally insert before
2450 /// an existing instruction, the second appends the new instruction to the
2451 /// specified BasicBlock.
2452 inline ExtractValueInst(Value *Agg,
2453 ArrayRef<unsigned> Idxs,
2454 const Twine &NameStr,
2455 Instruction *InsertBefore);
2456 inline ExtractValueInst(Value *Agg,
2457 ArrayRef<unsigned> Idxs,
2458 const Twine &NameStr, BasicBlock *InsertAtEnd);
2459
2460 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2461
2462protected:
2463 // Note: Instruction needs to be a friend here to call cloneImpl.
2464 friend class Instruction;
2465
2466 ExtractValueInst *cloneImpl() const;
2467
2468public:
2469 static ExtractValueInst *Create(Value *Agg,
2470 ArrayRef<unsigned> Idxs,
2471 const Twine &NameStr = "",
2472 Instruction *InsertBefore = nullptr) {
2473 return new
2474 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2475 }
2476
2477 static ExtractValueInst *Create(Value *Agg,
2478 ArrayRef<unsigned> Idxs,
2479 const Twine &NameStr,
2480 BasicBlock *InsertAtEnd) {
2481 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2482 }
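// Illustrative usage sketch (not part of Instructions.h); Agg and InsertPt
// are hypothetical: extract field 1 of an aggregate, e.g. the float of a
// {i32, float} struct.
//   Value *F1 = ExtractValueInst::Create(Agg, {1}, "f1", InsertPt);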
2483
2484 /// Returns the type of the element that would be extracted
2485 /// with an extractvalue instruction with the specified parameters.
2486 ///
2487 /// Null is returned if the indices are invalid for the specified type.
2488 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2489
2490 using idx_iterator = const unsigned*;
2491
2492 inline idx_iterator idx_begin() const { return Indices.begin(); }
2493 inline idx_iterator idx_end() const { return Indices.end(); }
2494 inline iterator_range<idx_iterator> indices() const {
2495 return make_range(idx_begin(), idx_end());
2496 }
2497
2498 Value *getAggregateOperand() {
2499 return getOperand(0);
2500 }
2501 const Value *getAggregateOperand() const {
2502 return getOperand(0);
2503 }
2504 static unsigned getAggregateOperandIndex() {
2505 return 0U; // get index for modifying correct operand
2506 }
2507
2508 ArrayRef<unsigned> getIndices() const {
2509 return Indices;
2510 }
2511
2512 unsigned getNumIndices() const {
2513 return (unsigned)Indices.size();
2514 }
2515
2516 bool hasIndices() const {
2517 return true;
2518 }
2519
2520 // Methods for support type inquiry through isa, cast, and dyn_cast:
2521 static bool classof(const Instruction *I) {
2522 return I->getOpcode() == Instruction::ExtractValue;
2523 }
2524 static bool classof(const Value *V) {
2525 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2526 }
2527};
2528
2529ExtractValueInst::ExtractValueInst(Value *Agg,
2530 ArrayRef<unsigned> Idxs,
2531 const Twine &NameStr,
2532 Instruction *InsertBefore)
2533 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2534 ExtractValue, Agg, InsertBefore) {
2535 init(Idxs, NameStr);
2536}
2537
2538ExtractValueInst::ExtractValueInst(Value *Agg,
2539 ArrayRef<unsigned> Idxs,
2540 const Twine &NameStr,
2541 BasicBlock *InsertAtEnd)
2542 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2543 ExtractValue, Agg, InsertAtEnd) {
2544 init(Idxs, NameStr);
2545}
2546
2547//===----------------------------------------------------------------------===//
2548// InsertValueInst Class
2549//===----------------------------------------------------------------------===//
2550
2551 /// This instruction inserts a struct field or array element
2552/// value into an aggregate value.
2553///
2554class InsertValueInst : public Instruction {
2555 SmallVector<unsigned, 4> Indices;
2556
2557 InsertValueInst(const InsertValueInst &IVI);
2558
2559 /// Constructors - Create an insertvalue instruction with a base aggregate
2560 /// value, a value to insert, and a list of indices. The first ctor can
2561 /// optionally insert before an existing instruction, the second appends
2562 /// the new instruction to the specified BasicBlock.
2563 inline InsertValueInst(Value *Agg, Value *Val,
2564 ArrayRef<unsigned> Idxs,
2565 const Twine &NameStr,
2566 Instruction *InsertBefore);
2567 inline InsertValueInst(Value *Agg, Value *Val,
2568 ArrayRef<unsigned> Idxs,
2569 const Twine &NameStr, BasicBlock *InsertAtEnd);
2570
2571 /// Constructors - These two constructors are convenience methods because one
2572 /// and two index insertvalue instructions are so common.
2573 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2574 const Twine &NameStr = "",
2575 Instruction *InsertBefore = nullptr);
2576 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2577 BasicBlock *InsertAtEnd);
2578
2579 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2580 const Twine &NameStr);
2581
2582protected:
2583 // Note: Instruction needs to be a friend here to call cloneImpl.
2584 friend class Instruction;
2585
2586 InsertValueInst *cloneImpl() const;
2587
2588public:
2589 // allocate space for exactly two operands
2590 void *operator new(size_t S) { return User::operator new(S, 2); }
2591 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2592
2593 static InsertValueInst *Create(Value *Agg, Value *Val,
2594 ArrayRef<unsigned> Idxs,
2595 const Twine &NameStr = "",
2596 Instruction *InsertBefore = nullptr) {
2597 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2598 }
2599
2600 static InsertValueInst *Create(Value *Agg, Value *Val,
2601 ArrayRef<unsigned> Idxs,
2602 const Twine &NameStr,
2603 BasicBlock *InsertAtEnd) {
2604 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2605 }
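// Illustrative usage sketch (not part of Instructions.h); Agg, Val and
// InsertPt are hypothetical: build a new aggregate with Val placed at index 0.
//   Value *NewAgg = InsertValueInst::Create(Agg, Val, {0}, "agg.new", InsertPt);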
2606
2607 /// Transparently provide more efficient getOperand methods.
2608 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2609
2610 using idx_iterator = const unsigned*;
2611
2612 inline idx_iterator idx_begin() const { return Indices.begin(); }
2613 inline idx_iterator idx_end() const { return Indices.end(); }
2614 inline iterator_range<idx_iterator> indices() const {
2615 return make_range(idx_begin(), idx_end());
2616 }
2617
2618 Value *getAggregateOperand() {
2619 return getOperand(0);
2620 }
2621 const Value *getAggregateOperand() const {
2622 return getOperand(0);
2623 }
2624 static unsigned getAggregateOperandIndex() {
2625 return 0U; // get index for modifying correct operand
2626 }
2627
2628 Value *getInsertedValueOperand() {
2629 return getOperand(1);
2630 }
2631 const Value *getInsertedValueOperand() const {
2632 return getOperand(1);
2633 }
2634 static unsigned getInsertedValueOperandIndex() {
2635 return 1U; // get index for modifying correct operand
2636 }
2637
2638 ArrayRef<unsigned> getIndices() const {
2639 return Indices;
2640 }
2641
2642 unsigned getNumIndices() const {
2643 return (unsigned)Indices.size();
2644 }
2645
2646 bool hasIndices() const {
2647 return true;
2648 }
2649
2650 // Methods for support type inquiry through isa, cast, and dyn_cast:
2651 static bool classof(const Instruction *I) {
2652 return I->getOpcode() == Instruction::InsertValue;
2653 }
2654 static bool classof(const Value *V) {
2655 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2656 }
2657};
2658
2659template <>
2660struct OperandTraits<InsertValueInst> :
2661 public FixedNumOperandTraits<InsertValueInst, 2> {
2662};
2663
2664InsertValueInst::InsertValueInst(Value *Agg,
2665 Value *Val,
2666 ArrayRef<unsigned> Idxs,
2667 const Twine &NameStr,
2668 Instruction *InsertBefore)
2669 : Instruction(Agg->getType(), InsertValue,
2670 OperandTraits<InsertValueInst>::op_begin(this),
2671 2, InsertBefore) {
2672 init(Agg, Val, Idxs, NameStr);
2673}
2674
2675InsertValueInst::InsertValueInst(Value *Agg,
2676 Value *Val,
2677 ArrayRef<unsigned> Idxs,
2678 const Twine &NameStr,
2679 BasicBlock *InsertAtEnd)
2680 : Instruction(Agg->getType(), InsertValue,
2681 OperandTraits<InsertValueInst>::op_begin(this),
2682 2, InsertAtEnd) {
2683 init(Agg, Val, Idxs, NameStr);
2684}
2685
2686 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2687
2688//===----------------------------------------------------------------------===//
2689// PHINode Class
2690//===----------------------------------------------------------------------===//
2691
2692// PHINode - The PHINode class is used to represent the magical mystical PHI
2693 // node, which cannot exist in nature, but can be synthesized in a computer
2694// scientist's overactive imagination.
2695//
2696class PHINode : public Instruction {
2697 /// The number of operands actually allocated. NumOperands is
2698 /// the number actually in use.
2699 unsigned ReservedSpace;
2700
2701 PHINode(const PHINode &PN);
2702
2703 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2704 const Twine &NameStr = "",
2705 Instruction *InsertBefore = nullptr)
2706 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2707 ReservedSpace(NumReservedValues) {
2708 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2709 setName(NameStr);
2710 allocHungoffUses(ReservedSpace);
2711 }
2712
2713 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2714 BasicBlock *InsertAtEnd)
2715 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2716 ReservedSpace(NumReservedValues) {
2717 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2718 setName(NameStr);
2719 allocHungoffUses(ReservedSpace);
2720 }
2721
2722protected:
2723 // Note: Instruction needs to be a friend here to call cloneImpl.
2724 friend class Instruction;
2725
2726 PHINode *cloneImpl() const;
2727
2728 // allocHungoffUses - this is more complicated than the generic
2729 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2730 // values and pointers to the incoming blocks, all in one allocation.
2731 void allocHungoffUses(unsigned N) {
2732 User::allocHungoffUses(N, /* IsPhi */ true);
2733 }
2734
2735public:
2736 /// Constructors - NumReservedValues is a hint for the number of incoming
2737 /// edges that this phi node will have (use 0 if you really have no idea).
2738 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2739 const Twine &NameStr = "",
2740 Instruction *InsertBefore = nullptr) {
2741 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2742 }
2743
2744 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2745 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2746 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2747 }
2748
2749 /// Provide fast operand accessors
2750 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2751
2752 // Block iterator interface. This provides access to the list of incoming
2753 // basic blocks, which parallels the list of incoming values.
2754 // Please note that we are not providing non-const iterators for blocks to
2755 // force all updates to go through an interface function.
2756
2757 using block_iterator = BasicBlock **;
2758 using const_block_iterator = BasicBlock * const *;
2759
2760 const_block_iterator block_begin() const {
2761 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2762 }
2763
2764 const_block_iterator block_end() const {
2765 return block_begin() + getNumOperands();
2766 }
2767
2768 iterator_range<const_block_iterator> blocks() const {
2769 return make_range(block_begin(), block_end());
2770 }
2771
2772 op_range incoming_values() { return operands(); }
2773
2774 const_op_range incoming_values() const { return operands(); }
2775
2776 /// Return the number of incoming edges
2777 ///
2778 unsigned getNumIncomingValues() const { return getNumOperands(); }
2779
2780 /// Return incoming value number @p i.
2781 ///
2782 Value *getIncomingValue(unsigned i) const {
2783 return getOperand(i);
2784 }
2785 void setIncomingValue(unsigned i, Value *V) {
2786 assert(V && "PHI node got a null value!");
2787 assert(getType() == V->getType() &&
2788        "All operands to PHI node must be the same type as the PHI node!");
2789 setOperand(i, V);
2790 }
2791
2792 static unsigned getOperandNumForIncomingValue(unsigned i) {
2793 return i;
2794 }
2795
2796 static unsigned getIncomingValueNumForOperand(unsigned i) {
2797 return i;
2798 }
2799
2800 /// Return incoming basic block number @p i.
2801 ///
2802 BasicBlock *getIncomingBlock(unsigned i) const {
2803 return block_begin()[i];
2804 }
2805
2806 /// Return incoming basic block corresponding
2807 /// to an operand of the PHI.
2808 ///
2809 BasicBlock *getIncomingBlock(const Use &U) const {
2810 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2811 return getIncomingBlock(unsigned(&U - op_begin()));
2812 }
2813
2814 /// Return incoming basic block corresponding
2815 /// to value use iterator.
2816 ///
2817 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2818 return getIncomingBlock(I.getUse());
2819 }
2820
2821 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2822 const_cast<block_iterator>(block_begin())[i] = BB;
2823 }
2824
2825 /// Copies the basic blocks from \p BBRange to the incoming basic block list
2826 /// of this PHINode, starting at \p ToIdx.
2827 void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange,
2828 uint32_t ToIdx = 0) {
2829 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
2830 }
2831
2832 /// Replace every incoming basic block \p Old to basic block \p New.
2833 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2834 assert(New && Old && "PHI node got a null basic block!");
2835 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2836 if (getIncomingBlock(Op) == Old)
2837 setIncomingBlock(Op, New);
2838 }
2839
2840 /// Add an incoming value to the end of the PHI list
2841 ///
2842 void addIncoming(Value *V, BasicBlock *BB) {
2843 if (getNumOperands() == ReservedSpace)
2844 growOperands(); // Get more space!
2845 // Initialize some new operands.
2846 setNumHungOffUseOperands(getNumOperands() + 1);
2847 setIncomingValue(getNumOperands() - 1, V);
2848 setIncomingBlock(getNumOperands() - 1, BB);
2849 }
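// Illustrative usage sketch (not part of Instructions.h); Ty, ValA/ValB,
// BlockA/BlockB and InsertBefore are hypothetical: reserve two incoming
// edges up front, then fill them in.
//   PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
//                                 InsertBefore);
//   PN->addIncoming(ValA, BlockA);
//   PN->addIncoming(ValB, BlockB);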
2850
2851 /// Remove an incoming value. This is useful if a
2852 /// predecessor basic block is deleted. The value removed is returned.
2853 ///
2854 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2855 /// is true), the PHI node is destroyed and any uses of it are replaced with
2856 /// dummy values. The only time there should be zero incoming values to a PHI
2857 /// node is when the block is dead, so this strategy is sound.
2858 ///
2859 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2860
2861 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2862 int Idx = getBasicBlockIndex(BB);
2863 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2864 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2865 }
2866
2867 /// Return the first index of the specified basic
2868 /// block in the value list for this PHI. Returns -1 if no instance.
2869 ///
2870 int getBasicBlockIndex(const BasicBlock *BB) const {
2871 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2872 if (block_begin()[i] == BB)
2873 return i;
2874 return -1;
2875 }
2876
2877 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2878 int Idx = getBasicBlockIndex(BB);
2879 assert(Idx >= 0 && "Invalid basic block argument!");
2880 return getIncomingValue(Idx);
2881 }
2882
2883 /// Set every incoming value for block \p BB to \p V.
2884 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2885 assert(BB && "PHI node got a null basic block!");
2886 bool Found = false;
2887 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2888 if (getIncomingBlock(Op) == BB) {
2889 Found = true;
2890 setIncomingValue(Op, V);
2891 }
2892 (void)Found;
2893 assert(Found && "Invalid basic block argument to set!");
2894 }
2895
2896 /// If the specified PHI node always merges together the
2897 /// same value, return the value, otherwise return null.
2898 Value *hasConstantValue() const;
2899
2900 /// Whether the specified PHI node always merges
2901 /// together the same value, assuming undefs are equal to a unique
2902 /// non-undef value.
2903 bool hasConstantOrUndefValue() const;
2904
2905 /// Return true if the PHI node is complete, i.e. all of its parent's
2906 /// predecessors have an incoming value in this PHI; otherwise return false.
2907 bool isComplete() const {
2908 return llvm::all_of(predecessors(getParent()),
2909 [this](const BasicBlock *Pred) {
2910 return getBasicBlockIndex(Pred) >= 0;
2911 });
2912 }
2913
2914 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2915 static bool classof(const Instruction *I) {
2916 return I->getOpcode() == Instruction::PHI;
2917 }
2918 static bool classof(const Value *V) {
2919 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2920 }
2921
2922private:
2923 void growOperands();
2924};
2925
2926template <>
2927struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2928};
2929
2930 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
2931
2932//===----------------------------------------------------------------------===//
2933// LandingPadInst Class
2934//===----------------------------------------------------------------------===//
2935
2936//===---------------------------------------------------------------------------
2937/// The landingpad instruction holds all of the information
2938/// necessary to generate correct exception handling. The landingpad instruction
2939/// cannot be moved from the top of a landing pad block, which itself is
2940/// accessible only from the 'unwind' edge of an invoke. This uses the
2941/// SubclassData field in Value to store whether or not the landingpad is a
2942/// cleanup.
2943///
2944class LandingPadInst : public Instruction {
2945 using CleanupField = BoolBitfieldElementT<0>;
2946
2947 /// The number of operands actually allocated. NumOperands is
2948 /// the number actually in use.
2949 unsigned ReservedSpace;
2950
2951 LandingPadInst(const LandingPadInst &LP);
2952
2953public:
2954 enum ClauseType { Catch, Filter };
2955
2956private:
2957 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2958 const Twine &NameStr, Instruction *InsertBefore);
2959 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2960 const Twine &NameStr, BasicBlock *InsertAtEnd);
2961
2962 // Allocate space for exactly zero operands.
2963 void *operator new(size_t S) { return User::operator new(S); }
2964
2965 void growOperands(unsigned Size);
2966 void init(unsigned NumReservedValues, const Twine &NameStr);
2967
2968protected:
2969 // Note: Instruction needs to be a friend here to call cloneImpl.
2970 friend class Instruction;
2971
2972 LandingPadInst *cloneImpl() const;
2973
2974public:
2975 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2976
2977 /// Constructors - NumReservedClauses is a hint for the number of incoming
2978 /// clauses that this landingpad will have (use 0 if you really have no idea).
2979 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2980 const Twine &NameStr = "",
2981 Instruction *InsertBefore = nullptr);
2982 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2983 const Twine &NameStr, BasicBlock *InsertAtEnd);
2984
2985 /// Provide fast operand accessors
2986 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2987
2988 /// Return 'true' if this landingpad instruction is a
2989 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2990 /// doesn't catch the exception.
2991 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2992
2993 /// Indicate that this landingpad instruction is a cleanup.
2994 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2995
2996 /// Add a catch or filter clause to the landing pad.
2997 void addClause(Constant *ClauseVal);
2998
2999 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3000 /// determine what type of clause this is.
3001 Constant *getClause(unsigned Idx) const {
3002 return cast<Constant>(getOperandList()[Idx]);
3003 }
3004
3005 /// Return 'true' if the clause at index Idx is a catch clause.
3006 bool isCatch(unsigned Idx) const {
3007 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3008 }
3009
3011 /// Return 'true' if the clause at index Idx is a filter clause.
3011 bool isFilter(unsigned Idx) const {
3012 return isa<ArrayType>(getOperandList()[Idx]->getType());
3013 }
3014
3015 /// Get the number of clauses for this landing pad.
3016 unsigned getNumClauses() const { return getNumOperands(); }
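// Illustrative usage sketch (not part of Instructions.h); LP and both
// visit* helpers are hypothetical: walk the clauses and dispatch on kind.
//   for (unsigned I = 0, E = LP->getNumClauses(); I != E; ++I)
//     if (LP->isCatch(I))
//       visitCatchClause(LP->getClause(I));   // hypothetical helper
//     else
//       visitFilterClause(LP->getClause(I));  // hypothetical helper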
3017
3018 /// Grow the size of the operand list to accommodate the new
3019 /// number of clauses.
3020 void reserveClauses(unsigned Size) { growOperands(Size); }
3021
3022 // Methods for support type inquiry through isa, cast, and dyn_cast:
3023 static bool classof(const Instruction *I) {
3024 return I->getOpcode() == Instruction::LandingPad;
3025 }
3026 static bool classof(const Value *V) {
3027 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3028 }
3029};
3030
3031template <>
3032struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
3033};
3034
3035 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
3036
3037//===----------------------------------------------------------------------===//
3038// ReturnInst Class
3039//===----------------------------------------------------------------------===//
3040
3041//===---------------------------------------------------------------------------
3042/// Return a value (possibly void), from a function. Execution
3043/// does not continue in this function any longer.
3044///
3045class ReturnInst : public Instruction {
3046 ReturnInst(const ReturnInst &RI);
3047
3048private:
3049 // ReturnInst constructors:
3050 // ReturnInst() - 'ret void' instruction
3051 // ReturnInst( null) - 'ret void' instruction
3052 // ReturnInst(Value* X) - 'ret X' instruction
3053 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3054 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3055 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3056 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3057 //
3058 // NOTE: If the Value* passed is of type void then the constructor behaves as
3059 // if it was passed NULL.
3060 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3061 Instruction *InsertBefore = nullptr);
3062 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3063 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3064
3065protected:
3066 // Note: Instruction needs to be a friend here to call cloneImpl.
3067 friend class Instruction;
3068
3069 ReturnInst *cloneImpl() const;
3070
3071public:
3072 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3073 Instruction *InsertBefore = nullptr) {
3074 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3075 }
3076
3077 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3078 BasicBlock *InsertAtEnd) {
3079 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3080 }
3081
3082 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3083 return new(0) ReturnInst(C, InsertAtEnd);
3084 }
3085
3086 /// Provide fast operand accessors
3087 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3088
3089 /// Convenience accessor. Returns null if there is no return value.
3090 Value *getReturnValue() const {
3091 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3092 }
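// Illustrative usage sketch (not part of Instructions.h); Ctx, RetVal and BB
// are hypothetical:
//   ReturnInst::Create(Ctx, RetVal, BB); // 'ret <ty> RetVal' at end of BB
//   ReturnInst::Create(Ctx, BB);         // 'ret void' at end of BB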
3093
3094 unsigned getNumSuccessors() const { return 0; }
3095
3096 // Methods for support type inquiry through isa, cast, and dyn_cast:
3097 static bool classof(const Instruction *I) {
3098 return (I->getOpcode() == Instruction::Ret);
3099 }
3100 static bool classof(const Value *V) {
3101 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3102 }
3103
3104private:
3105 BasicBlock *getSuccessor(unsigned idx) const {
3106 llvm_unreachable("ReturnInst has no successors!");
3107 }
3108
3109 void setSuccessor(unsigned idx, BasicBlock *B) {
3110 llvm_unreachable("ReturnInst has no successors!");
3111 }
3112};
3113
3114template <>
3115struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3116};
3117
3118 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3119
3120//===----------------------------------------------------------------------===//
3121// BranchInst Class
3122//===----------------------------------------------------------------------===//
3123
3124//===---------------------------------------------------------------------------
3125/// Conditional or Unconditional Branch instruction.
3126///
3127class BranchInst : public Instruction {
3128 /// Ops list - Branches are strange. The operands are ordered:
3129 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3130 /// they don't have to check for cond/uncond branchness. These are mostly
3131 /// accessed relative to op_end().
3132 BranchInst(const BranchInst &BI);
3133 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3134 // BranchInst(BB *B) - 'br B'
3135 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3136 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3137 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3138 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3139 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3140 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3141 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3142 Instruction *InsertBefore = nullptr);
3143 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3144 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3145 BasicBlock *InsertAtEnd);
3146
3147 void AssertOK();
3148
3149protected:
3150 // Note: Instruction needs to be a friend here to call cloneImpl.
3151 friend class Instruction;
3152
3153 BranchInst *cloneImpl() const;
3154
3155public:
3156 /// Iterator type that casts an operand to a basic block.
3157 ///
3158 /// This only makes sense because the successors are stored as adjacent
3159 /// operands for branch instructions.
3160 struct succ_op_iterator
3161 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3162 std::random_access_iterator_tag, BasicBlock *,
3163 ptrdiff_t, BasicBlock *, BasicBlock *> {
3164 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3165
3166 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3167 BasicBlock *operator->() const { return operator*(); }
3168 };
3169
3170 /// The const version of `succ_op_iterator`.
3171 struct const_succ_op_iterator
3172 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3173 std::random_access_iterator_tag,
3174 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3175 const BasicBlock *> {
3176 explicit const_succ_op_iterator(const_value_op_iterator I)
3177 : iterator_adaptor_base(I) {}
3178
3179 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3180 const BasicBlock *operator->() const { return operator*(); }
3181 };
3182
3183 static BranchInst *Create(BasicBlock *IfTrue,
3184 Instruction *InsertBefore = nullptr) {
3185 return new(1) BranchInst(IfTrue, InsertBefore);
3186 }
3187
3188 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3189 Value *Cond, Instruction *InsertBefore = nullptr) {
3190 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3191 }
3192
3193 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3194 return new(1) BranchInst(IfTrue, InsertAtEnd);
3195 }
3196
3197 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3198 Value *Cond, BasicBlock *InsertAtEnd) {
3199 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3200 }
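// Illustrative usage sketch (not part of Instructions.h); the blocks and
// Cond are hypothetical:
//   BranchInst::Create(Dest, BB);           // 'br label %Dest' at end of BB
//   BranchInst::Create(TBB, FBB, Cond, BB); // 'br i1 %Cond, %TBB, %FBB'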
3201
3202 /// Transparently provide more efficient getOperand methods.
3203 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3204
3205 bool isUnconditional() const { return getNumOperands() == 1; }
3206 bool isConditional() const { return getNumOperands() == 3; }
3207
3208 Value *getCondition() const {
3209 assert(isConditional() && "Cannot get condition of an uncond branch!");
3210 return Op<-3>();
3211 }
3212
3213 void setCondition(Value *V) {
3214 assert(isConditional() && "Cannot set condition of unconditional branch!");
3215 Op<-3>() = V;
3216 }
3217
3218 unsigned getNumSuccessors() const { return 1+isConditional(); }
3219
3220 BasicBlock *getSuccessor(unsigned i) const {
3221 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3222 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3223 }
3224
3225 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3226 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3227 *(&Op<-1>() - idx) = NewSucc;
3228 }
3229
3230 /// Swap the successors of this branch instruction.
3231 ///
3232 /// Swaps the successors of the branch instruction. This also swaps any
3233 /// branch weight metadata associated with the instruction so that it
3234 /// continues to map correctly to each operand.
3235 void swapSuccessors();
3236
3237 iterator_range<succ_op_iterator> successors() {
3238 return make_range(
3239 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3240 succ_op_iterator(value_op_end()));
3241 }
3242
3243 iterator_range<const_succ_op_iterator> successors() const {
3244 return make_range(const_succ_op_iterator(
3245 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3246 const_succ_op_iterator(value_op_end()));
3247 }
3248
3249 // Methods for support type inquiry through isa, cast, and dyn_cast:
3250 static bool classof(const Instruction *I) {
3251 return (I->getOpcode() == Instruction::Br);
3252 }
3253 static bool classof(const Value *V) {
3254 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3255 }
3256};
3257
3258template <>
3259struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3260};
3261
3262 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3263
3264//===----------------------------------------------------------------------===//
3265// SwitchInst Class
3266//===----------------------------------------------------------------------===//
3267
3268//===---------------------------------------------------------------------------
3269/// Multiway switch
3270///
3271class SwitchInst : public Instruction {
3272 unsigned ReservedSpace;
3273
3274 // Operand[0] = Value to switch on
3275 // Operand[1] = Default basic block destination
3276 // Operand[2n ] = Value to match
3277 // Operand[2n+1] = BasicBlock to go to on match
3278 SwitchInst(const SwitchInst &SI);
3279
3280 /// Create a new switch instruction, specifying a value to switch on and a
3281 /// default destination. The number of additional cases can be specified here
3282 /// to make memory allocation more efficient. This constructor can also
3283 /// auto-insert before another instruction.
3284 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3285 Instruction *InsertBefore);
3286
3287 /// Create a new switch instruction, specifying a value to switch on and a
3288 /// default destination. The number of additional cases can be specified here
3289 /// to make memory allocation more efficient. This constructor also
3290 /// auto-inserts at the end of the specified BasicBlock.
3291 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3292 BasicBlock *InsertAtEnd);
3293
3294 // allocate space for exactly zero operands
3295 void *operator new(size_t S) { return User::operator new(S); }
3296
3297 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3298 void growOperands();
3299
3300protected:
3301 // Note: Instruction needs to be a friend here to call cloneImpl.
3302 friend class Instruction;
3303
3304 SwitchInst *cloneImpl() const;
3305
3306public:
3307 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3308
3309 // -2: the pseudo-index reserved for the default case.
3310 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3311
3312 template <typename CaseHandleT> class CaseIteratorImpl;
3313
3314 /// A handle to a particular switch case. It exposes a convenient interface
3315 /// to both the case value and the successor block.
3316 ///
3317 /// We define this as a template and instantiate it to form both a const and
3318 /// non-const handle.
3319 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3320 class CaseHandleImpl {
3321 // Directly befriend both const and non-const iterators.
3322 friend class SwitchInst::CaseIteratorImpl<
3323 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3324
3325 protected:
3326 // Expose the switch type we're parameterized with to the iterator.
3327 using SwitchInstType = SwitchInstT;
3328
3329 SwitchInstT *SI;
3330 ptrdiff_t Index;
3331
3332 CaseHandleImpl() = default;
3333 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3334
3335 public:
3336 /// Resolves case value for current case.
3337 ConstantIntT *getCaseValue() const {
3338 assert((unsigned)Index < SI->getNumCases() &&
3339        "Index out the number of cases.");
3340 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3341 }
3342
3343 /// Resolves successor for current case.
3344 BasicBlockT *getCaseSuccessor() const {
3345 assert(((unsigned)Index < SI->getNumCases() ||
3346         (unsigned)Index == DefaultPseudoIndex) &&
3347        "Index out the number of cases.");
3348 return SI->getSuccessor(getSuccessorIndex());
3349 }
3350
3351 /// Returns the index of the current case.
3352 unsigned getCaseIndex() const { return Index; }
3353
3354 /// Returns successor index for current case successor.
3355 unsigned getSuccessorIndex() const {
3356 assert(((unsigned)Index == DefaultPseudoIndex ||
3357         (unsigned)Index < SI->getNumCases()) &&
3358        "Index out the number of cases.");
3359 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3360 }
3361
3362 bool operator==(const CaseHandleImpl &RHS) const {
3363 assert(SI == RHS.SI && "Incompatible operators.");
3364 return Index == RHS.Index;
3365 }
3366 };
3367
3368 using ConstCaseHandle =
3369 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3370
3371 class CaseHandle
3372 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3373 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3374
3375 public:
3376 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3377
3378 /// Sets the new value for current case.
3379 void setValue(ConstantInt *V) const {
3380 assert((unsigned)Index < SI->getNumCases() &&
3381        "Index out the number of cases.");
3382 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3383 }
3384
3385 /// Sets the new successor for current case.
3386 void setSuccessor(BasicBlock *S) const {
3387 SI->setSuccessor(getSuccessorIndex(), S);
3388 }
3389 };
3390
3391 template <typename CaseHandleT>
3392 class CaseIteratorImpl
3393 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3394 std::random_access_iterator_tag,
3395 const CaseHandleT> {
3396 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3397
3398 CaseHandleT Case;
3399
3400 public:
3401 /// Default constructed iterator is in an invalid state until assigned to
3402 /// a case for a particular switch.
3403 CaseIteratorImpl() = default;
3404
3405 /// Initializes case iterator for given SwitchInst and for given
3406 /// case number.
3407 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3408
3409 /// Initializes case iterator for given SwitchInst and for given
3410 /// successor index.
3411 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3412 unsigned SuccessorIndex) {
3413 assert(SuccessorIndex < SI->getNumSuccessors() &&
3414        "Successor index # out of range!");
3415 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3416 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3417 }
3418
3419 /// Support converting to the const variant. This will be a no-op for const
3420 /// variant.
3421 operator CaseIteratorImpl<ConstCaseHandle>() const {
3422 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3423 }
3424
3425 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3426 // Check index correctness after addition.
3427 // Note: Index == getNumCases() means end().
3428 assert(Case.Index + N >= 0 &&
3429        (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3430        "Case.Index out the number of cases.");
3431 Case.Index += N;
3432 return *this;
3433 }
3434 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3435 // Check index correctness after subtraction.
3436 // Note: Case.Index == getNumCases() means end().
3437 assert(Case.Index - N >= 0 &&
3438        (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3439        "Case.Index out the number of cases.");
3440 Case.Index -= N;
3441 return *this;
3442 }
3443 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3444 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3445 return Case.Index - RHS.Case.Index;
3446 }
3447 bool operator==(const CaseIteratorImpl &RHS) const {
3448 return Case == RHS.Case;
3449 }
3450 bool operator<(const CaseIteratorImpl &RHS) const {
3451 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3452 return Case.Index < RHS.Case.Index;
3453 }
3454 const CaseHandleT &operator*() const { return Case; }
3455 };
3456
3457 using CaseIt = CaseIteratorImpl<CaseHandle>;
3458 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3459
3460 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3461 unsigned NumCases,
3462 Instruction *InsertBefore = nullptr) {
3463 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3464 }
3465
3466 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3467 unsigned NumCases, BasicBlock *InsertAtEnd) {
3468 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3469 }
3470
3471 /// Provide fast operand accessors
3472 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3473
3474 // Accessor Methods for Switch stmt
3475 Value *getCondition() const { return getOperand(0); }
3476 void setCondition(Value *V) { setOperand(0, V); }
3477
3478 BasicBlock *getDefaultDest() const {
3479 return cast<BasicBlock>(getOperand(1));
3480 }
3481
3482 void setDefaultDest(BasicBlock *DefaultCase) {
3483 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3484 }
3485
3486 /// Return the number of 'cases' in this switch instruction, excluding the
3487 /// default case.
3488 unsigned getNumCases() const {
3489 return getNumOperands()/2 - 1;
3490 }
3491
3492 /// Returns a read/write iterator that points to the first case in the
3493 /// SwitchInst.
3494 CaseIt case_begin() {
3495 return CaseIt(this, 0);
3496 }
3497
3498 /// Returns a read-only iterator that points to the first case in the
3499 /// SwitchInst.
3500 ConstCaseIt case_begin() const {
3501 return ConstCaseIt(this, 0);
3502 }
3503
3504 /// Returns a read/write iterator that points one past the last case in
3505 /// the SwitchInst.
3506 CaseIt case_end() {
3507 return CaseIt(this, getNumCases());
3508 }
3509
3510 /// Returns a read-only iterator that points one past the last case in
3511 /// the SwitchInst.
3512 ConstCaseIt case_end() const {
3513 return ConstCaseIt(this, getNumCases());
3514 }
3515
3516 /// Iteration adapter for range-for loops.
3517 iterator_range<CaseIt> cases() {
3518 return make_range(case_begin(), case_end());
3519 }
3520
3521 /// Constant iteration adapter for range-for loops.
3522 iterator_range<ConstCaseIt> cases() const {
3523 return make_range(case_begin(), case_end());
3524 }
3525
3526 /// Returns an iterator that points to the default case.
3527 /// Note: this iterator can only resolve the successor; attempting to
3528 /// resolve the case value causes an assertion.
3529 /// Also note that incrementing or decrementing the iterator causes an
3530 /// assertion and invalidates it.
3531 CaseIt case_default() {
3532 return CaseIt(this, DefaultPseudoIndex);
3533 }
3534 ConstCaseIt case_default() const {
3535 return ConstCaseIt(this, DefaultPseudoIndex);
3536 }
3537
3538 /// Search all of the case values for the specified constant. If it is
3539 /// explicitly handled, return its case iterator; otherwise return the
3540 /// default case iterator to indicate that it is handled by the default
3541 /// handler.
3542 CaseIt findCaseValue(const ConstantInt *C) {
3543 return CaseIt(
3544 this,
3545 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3546 }
3547 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3548 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3549 return Case.getCaseValue() == C;
3550 });
3551 if (I != case_end())
3552 return I;
3553
3554 return case_default();
3555 }
3556
3557 /// Finds the unique case value for a given successor. Returns null if the
3558 /// successor is not found, not unique, or is the default case.
3559 ConstantInt *findCaseDest(BasicBlock *BB) {
3560 if (BB == getDefaultDest())
3561 return nullptr;
3562
3563 ConstantInt *CI = nullptr;
3564 for (auto Case : cases()) {
3565 if (Case.getCaseSuccessor() != BB)
3566 continue;
3567
3568 if (CI)
3569 return nullptr; // Multiple cases lead to BB.
3570
3571 CI = Case.getCaseValue();
3572 }
3573
3574 return CI;
3575 }
3576
3577 /// Add an entry to the switch instruction.
3578 /// Note:
3579 /// This action invalidates case_end(); the old case_end() iterator will
3580 /// point to the added case.
3581 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3582
3583 /// This method removes the specified case and its successor from the switch
3584 /// instruction. Note that this operation may reorder the remaining cases at
3585 /// index idx and above.
3586 /// Note:
3587 /// This action invalidates iterators for all cases following the one removed,
3588 /// including the case_end() iterator. It returns an iterator for the next
3589 /// case.
3590 CaseIt removeCase(CaseIt I);
3591
3592 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3593 BasicBlock *getSuccessor(unsigned idx) const {
3594 assert(idx < getNumSuccessors() && "Successor idx out of range for switch!");
3595 return cast<BasicBlock>(getOperand(idx*2+1));
3596 }
3597 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3598 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3599 setOperand(idx * 2 + 1, NewSucc);
3600 }
3601
3602 // Methods for support type inquiry through isa, cast, and dyn_cast:
3603 static bool classof(const Instruction *I) {
3604 return I->getOpcode() == Instruction::Switch;
3605 }
3606 static bool classof(const Value *V) {
3607 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3608 }
3609};
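// A minimal usage sketch (a hedged illustration, not part of the listed
// header), assuming an existing SwitchInst *SI and ConstantInt *C: the
// cases() adapter visits every non-default case, and findCaseValue() falls
// back to case_default() when C is not explicitly handled.
//
//   for (const auto &Case : SI->cases()) {
//     ConstantInt *Val = Case.getCaseValue();
//     BasicBlock *Dest = Case.getCaseSuccessor();
//   }
//   SwitchInst::CaseIt It = SI->findCaseValue(C);
//   BasicBlock *Target = (*It).getCaseSuccessor(); // default dest if unmatched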
3610
3611/// A wrapper class to simplify modification of SwitchInst cases along with
3612/// their prof branch_weights metadata.
3613class SwitchInstProfUpdateWrapper {
3614 SwitchInst &SI;
3615 std::optional<SmallVector<uint32_t, 8>> Weights;
3616 bool Changed = false;
3617
3618protected:
3619 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3620
3621 MDNode *buildProfBranchWeightsMD();
3622
3623 void init();
3624
3625public:
3626 using CaseWeightOpt = std::optional<uint32_t>;
3627 SwitchInst *operator->() { return &SI; }
3628 SwitchInst &operator*() { return SI; }
3629 operator SwitchInst *() { return &SI; }
3630
3631 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3632
3633 ~SwitchInstProfUpdateWrapper() {
3634 if (Changed)
3635 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3636 }
3637
3638 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3639 /// correspondent branch weight.
3640 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3641
3642 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3643 /// specified branch weight for the added case.
3644 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3645
3646 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3647 /// this object to not touch the underlying SwitchInst in destructor.
3648 SymbolTableList<Instruction>::iterator eraseFromParent();
3649
3650 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3651 CaseWeightOpt getSuccessorWeight(unsigned idx);
3652
3653 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3654};
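// A minimal usage sketch (a hedged illustration, not part of the listed
// header): the wrapper forwards to the wrapped SwitchInst and keeps its
// branch_weights profile metadata consistent. OnVal, Dest, and NewCond are
// assumed values.
//
//   SwitchInstProfUpdateWrapper SIW(*SI);
//   SIW.addCase(OnVal, Dest, /*W=*/std::nullopt); // case plus optional weight
//   SIW->setCondition(NewCond);                   // operator-> reaches the SwitchInst
//   // ~SwitchInstProfUpdateWrapper() rewrites the !prof metadata if changed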
3655
3656template <>
3657struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3658};
3659
3660 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3661
3662//===----------------------------------------------------------------------===//
3663// IndirectBrInst Class
3664//===----------------------------------------------------------------------===//
3665
3666//===---------------------------------------------------------------------------
3667/// Indirect Branch Instruction.
3668///
3669class IndirectBrInst : public Instruction {
3670 unsigned ReservedSpace;
3671
3672 // Operand[0] = Address to jump to
3673 // Operand[n+1] = n-th destination
3674 IndirectBrInst(const IndirectBrInst &IBI);
3675
3676 /// Create a new indirectbr instruction, specifying an
3677 /// Address to jump to. The number of expected destinations can be specified
3678 /// here to make memory allocation more efficient. This constructor can also
3679 /// autoinsert before another instruction.
3680 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3681
3682 /// Create a new indirectbr instruction, specifying an
3683 /// Address to jump to. The number of expected destinations can be specified
3684 /// here to make memory allocation more efficient. This constructor also
3685 /// autoinserts at the end of the specified BasicBlock.
3686 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3687
3688 // allocate space for exactly zero operands
3689 void *operator new(size_t S) { return User::operator new(S); }
3690
3691 void init(Value *Address, unsigned NumDests);
3692 void growOperands();
3693
3694protected:
3695 // Note: Instruction needs to be a friend here to call cloneImpl.
3696 friend class Instruction;
3697
3698 IndirectBrInst *cloneImpl() const;
3699
3700public:
3701 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3702
3703 /// Iterator type that casts an operand to a basic block.
3704 ///
3705 /// This only makes sense because the successors are stored as adjacent
3706 /// operands for indirectbr instructions.
3707 struct succ_op_iterator
3708 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3709 std::random_access_iterator_tag, BasicBlock *,
3710 ptrdiff_t, BasicBlock *, BasicBlock *> {
3711 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3712
3713 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3714 BasicBlock *operator->() const { return operator*(); }
3715 };
3716
3717 /// The const version of `succ_op_iterator`.
3718 struct const_succ_op_iterator
3719 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3720 std::random_access_iterator_tag,
3721 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3722 const BasicBlock *> {
3723 explicit const_succ_op_iterator(const_value_op_iterator I)
3724 : iterator_adaptor_base(I) {}
3725
3726 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3727 const BasicBlock *operator->() const { return operator*(); }
3728 };
3729
3730 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3731 Instruction *InsertBefore = nullptr) {
3732 return new IndirectBrInst(Address, NumDests, InsertBefore);
3733 }
3734
3735 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3736 BasicBlock *InsertAtEnd) {
3737 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3738 }
3739
3740 /// Provide fast operand accessors.
3741 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3742
3743 // Accessor Methods for IndirectBrInst instruction.
3744 Value *getAddress() { return getOperand(0); }
3745 const Value *getAddress() const { return getOperand(0); }
3746 void setAddress(Value *V) { setOperand(0, V); }
3747
3748 /// Return the number of possible destinations in this
3749 /// indirectbr instruction.
3750 unsigned getNumDestinations() const { return getNumOperands()-1; }
3751
3752 /// Return the specified destination.
3753 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3754 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3755
3756 /// Add a destination.
3757 ///
3758 void addDestination(BasicBlock *Dest);
3759
3760 /// This method removes the specified successor from the
3761 /// indirectbr instruction.
3762 void removeDestination(unsigned i);
3763
3764 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3765 BasicBlock *getSuccessor(unsigned i) const {
3766 return cast<BasicBlock>(getOperand(i+1));
3767 }
3768 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3769 setOperand(i + 1, NewSucc);
3770 }
3771
3772 iterator_range<succ_op_iterator> successors() {
3773 return make_range(succ_op_iterator(std::next(value_op_begin())),
3774 succ_op_iterator(value_op_end()));
3775 }
3776
3777 iterator_range<const_succ_op_iterator> successors() const {
3778 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3779 const_succ_op_iterator(value_op_end()));
3780 }
3781
3782 // Methods for support type inquiry through isa, cast, and dyn_cast:
3783 static bool classof(const Instruction *I) {
3784 return I->getOpcode() == Instruction::IndirectBr;
3785 }
3786 static bool classof(const Value *V) {
3787 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3788 }
3789};
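// A minimal usage sketch (a hedged illustration, not part of the listed
// header), assuming Value *Addr (typically a blockaddress) and BasicBlock
// *BB1, *BB2: destinations occupy operands 1..N, so successors() skips
// operand 0, the jump address.
//
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/2);
//   IBI->addDestination(BB1);
//   IBI->addDestination(BB2);
//   for (BasicBlock *Dest : IBI->successors())
//     ;                                           // BB1, then BB2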
3790
3791template <>
3792struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3793};
3794
3795 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3796
3797//===----------------------------------------------------------------------===//
3798// InvokeInst Class
3799//===----------------------------------------------------------------------===//
3800
3801/// Invoke instruction. The SubclassData field is used to hold the
3802/// calling convention of the call.
3803///
3804class InvokeInst : public CallBase {
3805 /// The number of operands for this call beyond the called function,
3806 /// arguments, and operand bundles.
3807 static constexpr int NumExtraOperands = 2;
3808
3809 /// The index from the end of the operand array to the normal destination.
3810 static constexpr int NormalDestOpEndIdx = -3;
3811
3812 /// The index from the end of the operand array to the unwind destination.
3813 static constexpr int UnwindDestOpEndIdx = -2;
3814
3815 InvokeInst(const InvokeInst &BI);
3816
3817 /// Construct an InvokeInst given a range of arguments.
3818 ///
3819 /// Construct an InvokeInst from a range of arguments
3820 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3821 BasicBlock *IfException, ArrayRef<Value *> Args,
3822 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3823 const Twine &NameStr, Instruction *InsertBefore);
3824
3825 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3826 BasicBlock *IfException, ArrayRef<Value *> Args,
3827 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3828 const Twine &NameStr, BasicBlock *InsertAtEnd);
3829
3830 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3831 BasicBlock *IfException, ArrayRef<Value *> Args,
3832 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3833
3834 /// Compute the number of operands to allocate.
3835 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3836 // We need one operand for the called function, plus our extra operands and
3837 // the input operand counts provided.
3838 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3839 }
3840
3841protected:
3842 // Note: Instruction needs to be a friend here to call cloneImpl.
3843 friend class Instruction;
3844
3845 InvokeInst *cloneImpl() const;
3846
3847public:
3848 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3849 BasicBlock *IfException, ArrayRef<Value *> Args,
3850 const Twine &NameStr,
3851 Instruction *InsertBefore = nullptr) {
3852 int NumOperands = ComputeNumOperands(Args.size());
3853 return new (NumOperands)
3854 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3855 NumOperands, NameStr, InsertBefore);
3856 }
3857
3858 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3859 BasicBlock *IfException, ArrayRef<Value *> Args,
3860 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3861 const Twine &NameStr = "",
3862 Instruction *InsertBefore = nullptr) {
3863 int NumOperands =
3864 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3865 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3866
3867 return new (NumOperands, DescriptorBytes)
3868 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3869 NameStr, InsertBefore);
3870 }
3871
3872 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3873 BasicBlock *IfException, ArrayRef<Value *> Args,
3874 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3875 int NumOperands = ComputeNumOperands(Args.size());
3876 return new (NumOperands)
3877 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3878 NumOperands, NameStr, InsertAtEnd);
3879 }
3880
3881 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3882 BasicBlock *IfException, ArrayRef<Value *> Args,
3883 ArrayRef<OperandBundleDef> Bundles,
3884 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3885 int NumOperands =
3886 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3887 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3888
3889 return new (NumOperands, DescriptorBytes)
3890 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3891 NameStr, InsertAtEnd);
3892 }
3893
3894 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3895 BasicBlock *IfException, ArrayRef<Value *> Args,
3896 const Twine &NameStr,
3897 Instruction *InsertBefore = nullptr) {
3898 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3899 IfException, Args, std::nullopt, NameStr, InsertBefore);
3900 }
3901
3902 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3903 BasicBlock *IfException, ArrayRef<Value *> Args,
3904 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3905 const Twine &NameStr = "",
3906 Instruction *InsertBefore = nullptr) {
3907 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3908 IfException, Args, Bundles, NameStr, InsertBefore);
3909 }
3910
3911 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3912 BasicBlock *IfException, ArrayRef<Value *> Args,
3913 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3914 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3915 IfException, Args, NameStr, InsertAtEnd);
3916 }
3917
3918 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3919 BasicBlock *IfException, ArrayRef<Value *> Args,
3920 ArrayRef<OperandBundleDef> Bundles,
3921 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3922 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3923 IfException, Args, Bundles, NameStr, InsertAtEnd);
3924 }
3925
3926 /// Create a clone of \p II with a different set of operand bundles and
3927 /// insert it before \p InsertPt.
3928 ///
3929 /// The returned invoke instruction is identical to \p II in every way except
3930 /// that the operand bundles for the new instruction are set to the operand
3931 /// bundles in \p Bundles.
3932 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3933 Instruction *InsertPt = nullptr);
3934
3935 // get*Dest - Return the destination basic blocks...
3936 BasicBlock *getNormalDest() const {
3937 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3938 }
3939 BasicBlock *getUnwindDest() const {
3940 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3941 }
3942 void setNormalDest(BasicBlock *B) {
3943 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3944 }
3945 void setUnwindDest(BasicBlock *B) {
3946 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3947 }
3948
3949 /// Get the landingpad instruction from the landing pad
3950 /// block (the unwind destination).
3951 LandingPadInst *getLandingPadInst() const;
3952
3953 BasicBlock *getSuccessor(unsigned i) const {
3954 assert(i < 2 && "Successor # out of range for invoke!");
3955 return i == 0 ? getNormalDest() : getUnwindDest();
3956 }
3957
3958 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3959 assert(i < 2 && "Successor # out of range for invoke!");
3960 if (i == 0)
3961 setNormalDest(NewSucc);
3962 else
3963 setUnwindDest(NewSucc);
3964 }
3965
3966 unsigned getNumSuccessors() const { return 2; }
3967
3968 // Methods for support type inquiry through isa, cast, and dyn_cast:
3969 static bool classof(const Instruction *I) {
3970 return (I->getOpcode() == Instruction::Invoke);
3971 }
3972 static bool classof(const Value *V) {
3973 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3974 }
3975
3976private:
3977 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3978 // method so that subclasses cannot accidentally use it.
3979 template <typename Bitfield>
3980 void setSubclassData(typename Bitfield::Type Value) {
3981 Instruction::setSubclassData<Bitfield>(Value);
3982 }
3983};
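// A minimal usage sketch (a hedged illustration, not part of the listed
// header), assuming FunctionCallee Callee, a Value *Arg, and blocks
// NormalBB/UnwindBB: the two destinations sit at fixed offsets from the end
// of the operand array (NormalDestOpEndIdx / UnwindDestOpEndIdx).
//
//   InvokeInst *II =
//       InvokeInst::Create(Callee, NormalBB, UnwindBB, {Arg}, "call");
//   BasicBlock *Cont = II->getNormalDest();  // == II->getSuccessor(0)
//   BasicBlock *Pad  = II->getUnwindDest();  // == II->getSuccessor(1)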
3984
3985InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3986 BasicBlock *IfException, ArrayRef<Value *> Args,
3987 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3988 const Twine &NameStr, Instruction *InsertBefore)
3989 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3990 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3991 InsertBefore) {
3992 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3993}
3994
3995InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3996 BasicBlock *IfException, ArrayRef<Value *> Args,
3997 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3998 const Twine &NameStr, BasicBlock *InsertAtEnd)
3999 : CallBase(Ty->getReturnType(), Instruction::Invoke,
4000 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4001 InsertAtEnd) {
4002 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4003}
4004
4005//===----------------------------------------------------------------------===//
4006// CallBrInst Class
4007//===----------------------------------------------------------------------===//
4008
4009/// CallBr instruction, tracking function calls that may not return control but
4010/// instead transfer it to a third location. The SubclassData field is used to
4011/// hold the calling convention of the call.
4012///
4013class CallBrInst : public CallBase {
4014
4015 unsigned NumIndirectDests;
4016
4017 CallBrInst(const CallBrInst &BI);
4018
4019 /// Construct a CallBrInst given a range of arguments.
4020 ///
4021 /// Construct a CallBrInst from a range of arguments
4022 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4023 ArrayRef<BasicBlock *> IndirectDests,
4024 ArrayRef<Value *> Args,
4025 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4026 const Twine &NameStr, Instruction *InsertBefore);
4027
4028 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4029 ArrayRef<BasicBlock *> IndirectDests,
4030 ArrayRef<Value *> Args,
4031 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4032 const Twine &NameStr, BasicBlock *InsertAtEnd);
4033
4034 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4035 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4036 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4037
4038 /// Compute the number of operands to allocate.
4039 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4040 int NumBundleInputs = 0) {
4041 // We need one operand for the called function and one for the default
4042 // destination, plus the indirect destinations and input operand counts provided.
4043 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4044 }
4045
4046protected:
4047 // Note: Instruction needs to be a friend here to call cloneImpl.
4048 friend class Instruction;
4049
4050 CallBrInst *cloneImpl() const;
4051
4052public:
4053 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4054 BasicBlock *DefaultDest,
4055 ArrayRef<BasicBlock *> IndirectDests,
4056 ArrayRef<Value *> Args, const Twine &NameStr,
4057 Instruction *InsertBefore = nullptr) {
4058 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4059 return new (NumOperands)
4060 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4061 NumOperands, NameStr, InsertBefore);
4062 }
4063
4064 static CallBrInst *
4065 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4066 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4067 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4068 const Twine &NameStr = "", Instruction *InsertBefore = nullptr) {
4069 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4070 CountBundleInputs(Bundles));
4071 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4072
4073 return new (NumOperands, DescriptorBytes)
4074 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4075 NumOperands, NameStr, InsertBefore);
4076 }
4077
4078 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4079 BasicBlock *DefaultDest,
4080 ArrayRef<BasicBlock *> IndirectDests,
4081 ArrayRef<Value *> Args, const Twine &NameStr,
4082 BasicBlock *InsertAtEnd) {
4083 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4084 return new (NumOperands)
4085 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4086 NumOperands, NameStr, InsertAtEnd);
4087 }
4088
4089 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4090 BasicBlock *DefaultDest,
4091 ArrayRef<BasicBlock *> IndirectDests,
4092 ArrayRef<Value *> Args,
4093 ArrayRef<OperandBundleDef> Bundles,
4094 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4095 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4096 CountBundleInputs(Bundles));
4097 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4098
4099 return new (NumOperands, DescriptorBytes)
4100 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4101 NumOperands, NameStr, InsertAtEnd);
4102 }
4103
4104 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4105 ArrayRef<BasicBlock *> IndirectDests,
4106 ArrayRef<Value *> Args, const Twine &NameStr,
4107 Instruction *InsertBefore = nullptr) {
4108 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4109 IndirectDests, Args, NameStr, InsertBefore);
4110 }
4111
4112 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4113 ArrayRef<BasicBlock *> IndirectDests,
4114 ArrayRef<Value *> Args,
4115 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4116 const Twine &NameStr = "",
4117 Instruction *InsertBefore = nullptr) {
4118 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4119 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4120 }
4121
4122 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4123 ArrayRef<BasicBlock *> IndirectDests,
4124 ArrayRef<Value *> Args, const Twine &NameStr,
4125 BasicBlock *InsertAtEnd) {
4126 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4127 IndirectDests, Args, NameStr, InsertAtEnd);
4128 }
4129
4130 static CallBrInst *Create(FunctionCallee Func,
4131 BasicBlock *DefaultDest,
4132 ArrayRef<BasicBlock *> IndirectDests,
4133 ArrayRef<Value *> Args,
4134 ArrayRef<OperandBundleDef> Bundles,
4135 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4136 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4137 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4138 }
4139
4140 /// Create a clone of \p CBI with a different set of operand bundles and
4141 /// insert it before \p InsertPt.
4142 ///
4143 /// The returned callbr instruction is identical to \p CBI in every way
4144 /// except that the operand bundles for the new instruction are set to the
4145 /// operand bundles in \p Bundles.
4146 static CallBrInst *Create(CallBrInst *CBI,
4147 ArrayRef<OperandBundleDef> Bundles,
4148 Instruction *InsertPt = nullptr);
4149
4150 /// Return the number of callbr indirect dest labels.
4151 ///
4152 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4153
4154 /// getIndirectDestLabel - Return the i-th indirect dest label.
4155 ///
4156 Value *getIndirectDestLabel(unsigned i) const {
4157 assert(i < getNumIndirectDests() && "Out of bounds!");
4158 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4159 }
4160
4161 Value *getIndirectDestLabelUse(unsigned i) const {
4162 assert(i < getNumIndirectDests() && "Out of bounds!");
4163 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4164 }
4165
4166 // Return the destination basic blocks...
4167 BasicBlock *getDefaultDest() const {
4168 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4169 }
4170 BasicBlock *getIndirectDest(unsigned i) const {
4171 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4172 }
4173 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4174 SmallVector<BasicBlock *, 16> IndirectDests;
4175 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4176 IndirectDests.push_back(getIndirectDest(i));
4177 return IndirectDests;
4178 }
4179 void setDefaultDest(BasicBlock *B) {
4180 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4181 }
4182 void setIndirectDest(unsigned i, BasicBlock *B) {
4183 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4184 }
4185
4186 BasicBlock *getSuccessor(unsigned i) const {
4187 assert(i < getNumSuccessors() + 1 &&
4188        "Successor # out of range for callbr!");
4189 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4190 }
4191
4192 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4193 assert(i < getNumIndirectDests() + 1 &&
4194        "Successor # out of range for callbr!");
4195 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4196 }
4197
4198 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4199
4200 // Methods for support type inquiry through isa, cast, and dyn_cast:
4201 static bool classof(const Instruction *I) {
4202 return (I->getOpcode() == Instruction::CallBr);
4203 }
4204 static bool classof(const Value *V) {
4205 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4206 }
4207
4208private:
4209 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4210 // method so that subclasses cannot accidentally use it.
4211 template <typename Bitfield>
4212 void setSubclassData(typename Bitfield::Type Value) {
4213 Instruction::setSubclassData<Bitfield>(Value);
4214 }
4215};
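// A minimal usage sketch (a hedged illustration, not part of the listed
// header), assuming FunctionCallee Asm (e.g. inline asm using goto labels)
// and blocks Fallthrough/IndirectBB: successor 0 is the default destination
// and successors 1..N are the indirect ones.
//
//   CallBrInst *CBI = CallBrInst::Create(Asm, Fallthrough, {IndirectBB},
//                                        /*Args=*/{}, "res");
//   BasicBlock *Dflt = CBI->getSuccessor(0);  // == CBI->getDefaultDest()
//   BasicBlock *Ind  = CBI->getIndirectDest(0);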
4216
4217CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4218 ArrayRef<BasicBlock *> IndirectDests,
4219 ArrayRef<Value *> Args,
4220 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4221 const Twine &NameStr, Instruction *InsertBefore)
4222 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4223 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4224 InsertBefore) {
4225 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4226}
4227
4228CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4229 ArrayRef<BasicBlock *> IndirectDests,
4230 ArrayRef<Value *> Args,
4231 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4232 const Twine &NameStr, BasicBlock *InsertAtEnd)
4233 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4234 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4235 InsertAtEnd) {
4236 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4237}
4238
4239//===----------------------------------------------------------------------===//
4240// ResumeInst Class
4241//===----------------------------------------------------------------------===//
4242
4243//===---------------------------------------------------------------------------
4244/// Resume the propagation of an exception.
4245///
4246class ResumeInst : public Instruction {
4247 ResumeInst(const ResumeInst &RI);
4248
4249 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4250 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4251
4252protected:
4253 // Note: Instruction needs to be a friend here to call cloneImpl.
4254 friend class Instruction;
4255
4256 ResumeInst *cloneImpl() const;
4257
4258public:
4259 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4260 return new(1) ResumeInst(Exn, InsertBefore);
4261 }
4262
4263 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4264 return new(1) ResumeInst(Exn, InsertAtEnd);
4265 }
4266
4267 /// Provide fast operand accessors
4268 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4269
4270 /// Convenience accessor.
4271 Value *getValue() const { return Op<0>(); }
4272
4273 unsigned getNumSuccessors() const { return 0; }
4274
4275 // Methods for support type inquiry through isa, cast, and dyn_cast:
4276 static bool classof(const Instruction *I) {
4277 return I->getOpcode() == Instruction::Resume;
4278 }
4279 static bool classof(const Value *V) {
4280 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4281 }
4282
4283private:
4284 BasicBlock *getSuccessor(unsigned idx) const {
4285 llvm_unreachable("ResumeInst has no successors!");
4286 }
4287
4288 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4289 llvm_unreachable("ResumeInst has no successors!");
4290 }
4291};
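// A minimal usage sketch (a hedged illustration, not part of the listed
// header), assuming Value *Exn is the aggregate produced by a landingpad:
// resume re-raises the in-flight exception and terminates the block with no
// successors of its own.
//
//   ResumeInst *RI = ResumeInst::Create(Exn);
//   Value *Payload = RI->getValue();  // == Exn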
4292
4293template <>
4294struct OperandTraits<ResumeInst> :
4295 public FixedNumOperandTraits<ResumeInst, 1> {
4296};
4297
4298 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4299
4300//===----------------------------------------------------------------------===//
4301// CatchSwitchInst Class
4302//===----------------------------------------------------------------------===//
4303class CatchSwitchInst : public Instruction {
4304 using UnwindDestField = BoolBitfieldElementT<0>;
4305
4306 /// The number of operands actually allocated. NumOperands is
4307 /// the number actually in use.
4308 unsigned ReservedSpace;
4309
4310 // Operand[0] = Outer scope
4311 // Operand[1] = Unwind block destination
4312 // Operand[n] = BasicBlock to go to on match
4313 CatchSwitchInst(const CatchSwitchInst &CSI);
4314
4315 /// Create a new switch instruction, specifying a
4316 /// default destination. The number of additional handlers can be specified
4317 /// here to make memory allocation more efficient.
4318 /// This constructor can also autoinsert before another instruction.
4319 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4320 unsigned NumHandlers, const Twine &NameStr,
4321 Instruction *InsertBefore);
4322
4323 /// Create a new switch instruction, specifying a
4324 /// default destination. The number of additional handlers can be specified
4325 /// here to make memory allocation more efficient.
4326 /// This constructor also autoinserts at the end of the specified BasicBlock.
4327 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4328 unsigned NumHandlers, const Twine &NameStr,
4329 BasicBlock *InsertAtEnd);
4330
4331 // allocate space for exactly zero operands
4332 void *operator new(size_t S) { return User::operator new(S); }
4333
4334 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4335 void growOperands(unsigned Size);
4336
4337protected:
4338 // Note: Instruction needs to be a friend here to call cloneImpl.
4339 friend class Instruction;
4340
4341 CatchSwitchInst *cloneImpl() const;
4342
4343public:
4344 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4345
4346 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4347 unsigned NumHandlers,
4348 const Twine &NameStr = "",
4349 Instruction *InsertBefore = nullptr) {
4350 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4351 InsertBefore);
4352 }
4353
4354 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4355 unsigned NumHandlers, const Twine &NameStr,
4356 BasicBlock *InsertAtEnd) {
4357 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4358 InsertAtEnd);
4359 }
4360
4361 /// Provide fast operand accessors
4362 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4363
4364 // Accessor Methods for CatchSwitch stmt
4365 Value *getParentPad() const { return getOperand(0); }
4366 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4367
4368 // Accessor Methods for CatchSwitch stmt
4369 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4370 bool unwindsToCaller() const { return !hasUnwindDest(); }
4371 BasicBlock *getUnwindDest() const {
4372 if (hasUnwindDest())
4373 return cast<BasicBlock>(getOperand(1));
4374 return nullptr;
4375 }
4376 void setUnwindDest(BasicBlock *UnwindDest) {
4377 assert(UnwindDest);
4378 assert(hasUnwindDest());
4379 setOperand(1, UnwindDest);
4380 }
4381
4382 /// Return the number of 'handlers' in this catchswitch
4383 /// instruction, excluding the parent pad and any unwind destination.
4384 unsigned getNumHandlers() const {
4385 if (hasUnwindDest())
4386 return getNumOperands() - 2;
4387 return getNumOperands() - 1;
4388 }
4389
4390private:
4391 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4392 static const BasicBlock *handler_helper(const Value *V) {
4393 return cast<BasicBlock>(V);
4394 }
4395
4396public:
4397 using DerefFnTy = BasicBlock *(*)(Value *);
4398 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4399 using handler_range = iterator_range<handler_iterator>;
4400 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4401 using const_handler_iterator =
4402 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4403 using const_handler_range = iterator_range<const_handler_iterator>;
4404
4405 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4406 handler_iterator handler_begin() {
4407 op_iterator It = op_begin() + 1;
4408 if (hasUnwindDest())
4409 ++It;
4410 return handler_iterator(It, DerefFnTy(handler_helper));
4411 }
4412
4413 /// Returns an iterator that points to the first handler in the
4414 /// CatchSwitchInst.
4415 const_handler_iterator handler_begin() const {
4416 const_op_iterator It = op_begin() + 1;
4417 if (hasUnwindDest())
4418 ++It;
4419 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4420 }
4421
4422 /// Returns an iterator that points one past the last
4423 /// handler in the CatchSwitchInst.
4424 handler_iterator handler_end() {
4425 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4426 }
4427
4428 /// Returns an iterator that points one past the last handler in the
4429 /// CatchSwitchInst.
4430 const_handler_iterator handler_end() const {
4431 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4432 }
4433
4434 /// iteration adapter for range-for loops.
4435 handler_range handlers() {
4436 return make_range(handler_begin(), handler_end());
4437 }
4438
4439 /// iteration adapter for range-for loops.
4440 const_handler_range handlers() const {
4441 return make_range(handler_begin(), handler_end());
4442 }
4443
4444 /// Add an entry to the catchswitch instruction.
4445 /// Note:
4446 /// This action invalidates handler_end(). The old handler_end() iterator
4447 /// will point to the added handler.
4448 void addHandler(BasicBlock *Dest);
4449
4450 void removeHandler(handler_iterator HI);
4451
4452 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4453 BasicBlock *getSuccessor(unsigned Idx) const {
4454 assert(Idx < getNumSuccessors() &&
4455 "Successor # out of range for catchswitch!");
4456 return cast<BasicBlock>(getOperand(Idx + 1));
4457 }
4458 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4459 assert(Idx < getNumSuccessors() &&
4460 "Successor # out of range for catchswitch!");
4461 setOperand(Idx + 1, NewSucc);
4462 }
4463
4464 // Methods for support type inquiry through isa, cast, and dyn_cast:
4465 static bool classof(const Instruction *I) {
4466 return I->getOpcode() == Instruction::CatchSwitch;
4467 }
4468 static bool classof(const Value *V) {
4469 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4470 }
4471};
4472
4473template <>
4474struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4475
4476DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4477
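For illustration, a minimal sketch of driving the catchswitch API through IRBuilder; the function, block, and value names below are invented, and IRBuilder::CreateCatchSwitch simply wraps CatchSwitchInst::Create.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

void sketchCatchSwitch(BasicBlock *Dispatch, BasicBlock *HandlerBB,
                       Value *ParentPad) {
  IRBuilder<> Builder(Dispatch);
  // Reserve space for one handler; a null unwind block means the
  // catchswitch unwinds to the caller (unwindsToCaller() == true).
  CatchSwitchInst *CS = Builder.CreateCatchSwitch(
      ParentPad, /*UnwindBB=*/nullptr, /*NumHandlers=*/1, "dispatch");
  CS->addHandler(HandlerBB); // invalidates any saved handler_end()
  for (BasicBlock *H : CS->handlers())
    (void)H; // each handler block must begin with a catchpad
}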
4478//===----------------------------------------------------------------------===//
4479// CleanupPadInst Class
4480//===----------------------------------------------------------------------===//
4481class CleanupPadInst : public FuncletPadInst {
4482private:
4483 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4484 unsigned Values, const Twine &NameStr,
4485 Instruction *InsertBefore)
4486 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4487 NameStr, InsertBefore) {}
4488 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4489 unsigned Values, const Twine &NameStr,
4490 BasicBlock *InsertAtEnd)
4491 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4492 NameStr, InsertAtEnd) {}
4493
4494public:
4495 static CleanupPadInst *Create(Value *ParentPad,
4496 ArrayRef<Value *> Args = std::nullopt,
4497 const Twine &NameStr = "",
4498 Instruction *InsertBefore = nullptr) {
4499 unsigned Values = 1 + Args.size();
4500 return new (Values)
4501 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4502 }
4503
4504 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4505 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4506 unsigned Values = 1 + Args.size();
4507 return new (Values)
4508 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4509 }
4510
4511 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4512 static bool classof(const Instruction *I) {
4513 return I->getOpcode() == Instruction::CleanupPad;
4514 }
4515 static bool classof(const Value *V) {
4516 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4517 }
4518};
4519
4520//===----------------------------------------------------------------------===//
4521// CatchPadInst Class
4522//===----------------------------------------------------------------------===//
4523class CatchPadInst : public FuncletPadInst {
4524private:
4525 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4526 unsigned Values, const Twine &NameStr,
4527 Instruction *InsertBefore)
4528 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4529 NameStr, InsertBefore) {}
4530 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4531 unsigned Values, const Twine &NameStr,
4532 BasicBlock *InsertAtEnd)
4533 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4534 NameStr, InsertAtEnd) {}
4535
4536public:
4537 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4538 const Twine &NameStr = "",
4539 Instruction *InsertBefore = nullptr) {
4540 unsigned Values = 1 + Args.size();
4541 return new (Values)
4542 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4543 }
4544
4545 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4546 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4547 unsigned Values = 1 + Args.size();
4548 return new (Values)
4549 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4550 }
4551
4552 /// Convenience accessors
4553 CatchSwitchInst *getCatchSwitch() const {
4554 return cast<CatchSwitchInst>(Op<-1>());
4555 }
4556 void setCatchSwitch(Value *CatchSwitch) {
4557 assert(CatchSwitch);
4558 Op<-1>() = CatchSwitch;
4559 }
4560
4561 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4562 static bool classof(const Instruction *I) {
4563 return I->getOpcode() == Instruction::CatchPad;
4564 }
4565 static bool classof(const Value *V) {
4566 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4567 }
4568};
4569
4570//===----------------------------------------------------------------------===//
4571// CatchReturnInst Class
4572//===----------------------------------------------------------------------===//
4573
4574class CatchReturnInst : public Instruction {
4575 CatchReturnInst(const CatchReturnInst &RI);
4576 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4577 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4578
4579 void init(Value *CatchPad, BasicBlock *BB);
4580
4581protected:
4582 // Note: Instruction needs to be a friend here to call cloneImpl.
4583 friend class Instruction;
4584
4585 CatchReturnInst *cloneImpl() const;
4586
4587public:
4588 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4589 Instruction *InsertBefore = nullptr) {
4590 assert(CatchPad);
4591 assert(BB);
4592 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4593 }
4594
4595 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4596 BasicBlock *InsertAtEnd) {
4597 assert(CatchPad);
4598 assert(BB);
4599 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4600 }
4601
4602 /// Provide fast operand accessors
4603 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4604
4605 /// Convenience accessors.
4606 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4607 void setCatchPad(CatchPadInst *CatchPad) {
4608 assert(CatchPad);
4609 Op<0>() = CatchPad;
4610 }
4611
4612 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4613 void setSuccessor(BasicBlock *NewSucc) {
4614 assert(NewSucc);
4615 Op<1>() = NewSucc;
4616 }
4617 unsigned getNumSuccessors() const { return 1; }
4618
4619 /// Get the parentPad of this catchret's catchpad's catchswitch.
4620 /// The successor block is implicitly a member of this funclet.
4621 Value *getCatchSwitchParentPad() const {
4622 return getCatchPad()->getCatchSwitch()->getParentPad();
4623 }
4624
4625 // Methods for support type inquiry through isa, cast, and dyn_cast:
4626 static bool classof(const Instruction *I) {
4627 return (I->getOpcode() == Instruction::CatchRet);
4628 }
4629 static bool classof(const Value *V) {
4630 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4631 }
4632
4633private:
4634 BasicBlock *getSuccessor(unsigned Idx) const {
4635 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4636 return getSuccessor();
4637 }
4638
4639 void setSuccessor(unsigned Idx, BasicBlock *B) {
4640 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4641 setSuccessor(B);
4642 }
4643};
4644
4645template <>
4646struct OperandTraits<CatchReturnInst>
4647 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4648
4649DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
4650
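For illustration, a sketch tying catchpad and catchret together; the names are invented, and the catchswitch is assumed to exist already (e.g. built as in the earlier sketch).

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

void sketchCatchRet(CatchSwitchInst *CS, BasicBlock *PadBB,
                    BasicBlock *Continue) {
  IRBuilder<> Builder(PadBB);
  // The owning catchswitch is stored as the catchpad's last operand,
  // which is what getCatchSwitch() reads back via Op<-1>().
  CatchPadInst *CP = Builder.CreateCatchPad(CS, /*Args=*/{}, "pad");
  CatchReturnInst *CR = Builder.CreateCatchRet(CP, Continue);
  // Equivalent to CR->getCatchSwitchParentPad():
  Value *ParentPad = CR->getCatchPad()->getCatchSwitch()->getParentPad();
  (void)ParentPad;
}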
4651//===----------------------------------------------------------------------===//
4652// CleanupReturnInst Class
4653//===----------------------------------------------------------------------===//
4654
4655class CleanupReturnInst : public Instruction {
4656 using UnwindDestField = BoolBitfieldElementT<0>;
4657
4658private:
4659 CleanupReturnInst(const CleanupReturnInst &RI);
4660 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4661 Instruction *InsertBefore = nullptr);
4662 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4663 BasicBlock *InsertAtEnd);
4664
4665 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4666
4667protected:
4668 // Note: Instruction needs to be a friend here to call cloneImpl.
4669 friend class Instruction;
4670
4671 CleanupReturnInst *cloneImpl() const;
4672
4673public:
4674 static CleanupReturnInst *Create(Value *CleanupPad,
4675 BasicBlock *UnwindBB = nullptr,
4676 Instruction *InsertBefore = nullptr) {
4677 assert(CleanupPad);
4678 unsigned Values = 1;
4679 if (UnwindBB)
4680 ++Values;
4681 return new (Values)
4682 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4683 }
4684
4685 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4686 BasicBlock *InsertAtEnd) {
4687 assert(CleanupPad);
4688 unsigned Values = 1;
4689 if (UnwindBB)
4690 ++Values;
4691 return new (Values)
4692 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4693 }
4694
4695 /// Provide fast operand accessors
4696 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4697
4698 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4699 bool unwindsToCaller() const { return !hasUnwindDest(); }
4700
4701 /// Convenience accessor.
4702 CleanupPadInst *getCleanupPad() const {
4703 return cast<CleanupPadInst>(Op<0>());
4704 }
4705 void setCleanupPad(CleanupPadInst *CleanupPad) {
4706 assert(CleanupPad);
4707 Op<0>() = CleanupPad;
4708 }
4709
4710 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4711
4712 BasicBlock *getUnwindDest() const {
4713 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4714 }
4715 void setUnwindDest(BasicBlock *NewDest) {
4716 assert(NewDest);
4717 assert(hasUnwindDest());
4718 Op<1>() = NewDest;
4719 }
4720
4721 // Methods for support type inquiry through isa, cast, and dyn_cast:
4722 static bool classof(const Instruction *I) {
4723 return (I->getOpcode() == Instruction::CleanupRet);
4724 }
4725 static bool classof(const Value *V) {
4726 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4727 }
4728
4729private:
4730 BasicBlock *getSuccessor(unsigned Idx) const {
4731 assert(Idx == 0);
4732 return getUnwindDest();
4733 }
4734
4735 void setSuccessor(unsigned Idx, BasicBlock *B) {
4736 assert(Idx == 0);
4737 setUnwindDest(B);
4738 }
4739
4740 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4741 // method so that subclasses cannot accidentally use it.
4742 template <typename Bitfield>
4743 void setSubclassData(typename Bitfield::Type Value) {
4744 Instruction::setSubclassData<Bitfield>(Value);
4745 }
4746};
4747
4748template <>
4749struct OperandTraits<CleanupReturnInst>
4750 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4751
4752DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
4753
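For illustration, a sketch of the cleanuppad/cleanupret pairing (names invented); note how the operand count chosen in Create mirrors what hasUnwindDest() later reports.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

void sketchCleanupRet(BasicBlock *CleanupBB, Value *ParentPad,
                      BasicBlock *UnwindBB) {
  IRBuilder<> Builder(CleanupBB);
  CleanupPadInst *CP = Builder.CreateCleanupPad(ParentPad, {}, "cleanup");
  // With a non-null UnwindBB the instruction carries two operands
  // (pad + block) and one successor; with nullptr it has a single
  // operand and unwinds to the caller.
  CleanupReturnInst *CR = Builder.CreateCleanupRet(CP, UnwindBB);
  assert(CR->hasUnwindDest() == (UnwindBB != nullptr));
}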
4754//===----------------------------------------------------------------------===//
4755// UnreachableInst Class
4756//===----------------------------------------------------------------------===//
4757
4758//===---------------------------------------------------------------------------
4759/// This instruction has undefined behavior: its presence indicates some
4760/// higher-level knowledge that the end of the block can never be reached.
4762///
4763class UnreachableInst : public Instruction {
4764protected:
4765 // Note: Instruction needs to be a friend here to call cloneImpl.
4766 friend class Instruction;
4767
4768 UnreachableInst *cloneImpl() const;
4769
4770public:
4771 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4772 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4773
4774 // allocate space for exactly zero operands
4775 void *operator new(size_t S) { return User::operator new(S, 0); }
4776 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4777
4778 unsigned getNumSuccessors() const { return 0; }
4779
4780 // Methods for support type inquiry through isa, cast, and dyn_cast:
4781 static bool classof(const Instruction *I) {
4782 return I->getOpcode() == Instruction::Unreachable;
4783 }
4784 static bool classof(const Value *V) {
4785 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4786 }
4787
4788private:
4789 BasicBlock *getSuccessor(unsigned idx) const {
4790 llvm_unreachable("UnreachableInst has no successors!");
4791 }
4792
4793 void setSuccessor(unsigned idx, BasicBlock *B) {
4794 llvm_unreachable("UnreachableInst has no successors!");
4795 }
4796};
4797
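For illustration, the usual way this terminator appears; a hedged sketch in which the noreturn callee is hypothetical.

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

void sketchUnreachable(IRBuilder<> &Builder, FunctionCallee NoReturnFn) {
  CallInst *CI = Builder.CreateCall(NoReturnFn); // e.g. a trap/abort callee
  CI->setDoesNotReturn();
  Builder.CreateUnreachable(); // zero operands, zero successors
}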
4798//===----------------------------------------------------------------------===//
4799// TruncInst Class
4800//===----------------------------------------------------------------------===//
4801
4802/// This class represents a truncation of integer types.
4803class TruncInst : public CastInst {
4804protected:
4805 // Note: Instruction needs to be a friend here to call cloneImpl.
4806 friend class Instruction;
4807
4808 /// Clone an identical TruncInst
4809 TruncInst *cloneImpl() const;
4810
4811public:
4812 /// Constructor with insert-before-instruction semantics
4813 TruncInst(
4814 Value *S, ///< The value to be truncated
4815 Type *Ty, ///< The (smaller) type to truncate to
4816 const Twine &NameStr = "", ///< A name for the new instruction
4817 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4818 );
4819
4820 /// Constructor with insert-at-end-of-block semantics
4821 TruncInst(
4822 Value *S, ///< The value to be truncated
4823 Type *Ty, ///< The (smaller) type to truncate to
4824 const Twine &NameStr, ///< A name for the new instruction
4825 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4826 );
4827
4828 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4829 static bool classof(const Instruction *I) {
4830 return I->getOpcode() == Trunc;
4831 }
4832 static bool classof(const Value *V) {
4833 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4834 }
4835};
4836
4837//===----------------------------------------------------------------------===//
4838// ZExtInst Class
4839//===----------------------------------------------------------------------===//
4840
4841/// This class represents zero extension of integer types.
4842class ZExtInst : public CastInst {
4843protected:
4844 // Note: Instruction needs to be a friend here to call cloneImpl.
4845 friend class Instruction;
4846
4847 /// Clone an identical ZExtInst
4848 ZExtInst *cloneImpl() const;
4849
4850public:
4851 /// Constructor with insert-before-instruction semantics
4852 ZExtInst(
4853 Value *S, ///< The value to be zero extended
4854 Type *Ty, ///< The type to zero extend to
4855 const Twine &NameStr = "", ///< A name for the new instruction
4856 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4857 );
4858
4859 /// Constructor with insert-at-end semantics.
4860 ZExtInst(
4861 Value *S, ///< The value to be zero extended
4862 Type *Ty, ///< The type to zero extend to
4863 const Twine &NameStr, ///< A name for the new instruction
4864 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4865 );
4866
4867 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4868 static bool classof(const Instruction *I) {
4869 return I->getOpcode() == ZExt;
4870 }
4871 static bool classof(const Value *V) {
4872 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4873 }
4874};
4875
4876//===----------------------------------------------------------------------===//
4877// SExtInst Class
4878//===----------------------------------------------------------------------===//
4879
4880/// This class represents a sign extension of integer types.
4881class SExtInst : public CastInst {
4882protected:
4883 // Note: Instruction needs to be a friend here to call cloneImpl.
4884 friend class Instruction;
4885
4886 /// Clone an identical SExtInst
4887 SExtInst *cloneImpl() const;
4888
4889public:
4890 /// Constructor with insert-before-instruction semantics
4891 SExtInst(
4892 Value *S, ///< The value to be sign extended
4893 Type *Ty, ///< The type to sign extend to
4894 const Twine &NameStr = "", ///< A name for the new instruction
4895 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4896 );
4897
4898 /// Constructor with insert-at-end-of-block semantics
4899 SExtInst(
4900 Value *S, ///< The value to be sign extended
4901 Type *Ty, ///< The type to sign extend to
4902 const Twine &NameStr, ///< A name for the new instruction
4903 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4904 );
4905
4906 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4907 static bool classof(const Instruction *I) {
4908 return I->getOpcode() == SExt;
4909 }
4910 static bool classof(const Value *V) {
4911 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4912 }
4913};
4914
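For illustration, a sketch contrasting the three integer casts above; names are invented, V32 is assumed to have type i32, and IRBuilder may constant-fold, so the results are not guaranteed to be cast instructions.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

void sketchIntCasts(IRBuilder<> &Builder, Value *V32) {
  Value *Lo = Builder.CreateTrunc(V32, Builder.getInt16Ty(), "lo");
  Value *Z  = Builder.CreateZExt(Lo, Builder.getInt64Ty(), "zext");
  Value *S  = Builder.CreateSExt(Lo, Builder.getInt64Ty(), "sext");
  // The classof() hooks above are what make these isa<> queries work.
  if (isa<ZExtInst>(Z) && isa<SExtInst>(S)) {
    // zext and sext agree on the low 16 bits and differ in the high
    // bits exactly when "lo" has its sign bit set.
  }
}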
4915//===----------------------------------------------------------------------===//
4916// FPTruncInst Class
4917//===----------------------------------------------------------------------===//
4918
4919/// This class represents a truncation of floating point types.
4920class FPTruncInst : public CastInst {
4921protected:
4922 // Note: Instruction needs to be a friend here to call cloneImpl.
4923 friend class Instruction;
4924
4925 /// Clone an identical FPTruncInst
4926 FPTruncInst *cloneImpl() const;
4927
4928public:
4929 /// Constructor with insert-before-instruction semantics
4930 FPTruncInst(
4931 Value *S, ///< The value to be truncated
4932 Type *Ty, ///< The type to truncate to
4933 const Twine &NameStr = "", ///< A name for the new instruction
4934 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4935 );
4936
4937 /// Constructor with insert-at-end-of-block semantics
4938 FPTruncInst(
4939 Value *S, ///< The value to be truncated
4940 Type *Ty, ///< The type to truncate to
4941 const Twine &NameStr, ///< A name for the new instruction
4942 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4943 );
4944
4945 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4946 static bool classof(const Instruction *I) {
4947 return I->getOpcode() == FPTrunc;
4948 }
4949 static bool classof(const Value *V) {
4950 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4951 }
4952};
4953
4954//===----------------------------------------------------------------------===//
4955// FPExtInst Class
4956//===----------------------------------------------------------------------===//
4957
4958/// This class represents an extension of floating point types.
4959class FPExtInst : public CastInst {
4960protected:
4961 // Note: Instruction needs to be a friend here to call cloneImpl.
4962 friend class Instruction;
4963
4964 /// Clone an identical FPExtInst
4965 FPExtInst *cloneImpl() const;
4966
4967public:
4968 /// Constructor with insert-before-instruction semantics
4969 FPExtInst(
4970 Value *S, ///< The value to be extended
4971 Type *Ty, ///< The type to extend to
4972 const Twine &NameStr = "", ///< A name for the new instruction
4973 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4974 );
4975
4976 /// Constructor with insert-at-end-of-block semantics
4977 FPExtInst(
4978 Value *S, ///< The value to be extended
4979 Type *Ty, ///< The type to extend to
4980 const Twine &NameStr, ///< A name for the new instruction
4981 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4982 );
4983
4984 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4985 static bool classof(const Instruction *I) {
4986 return I->getOpcode() == FPExt;
4987 }
4988 static bool classof(const Value *V) {
4989 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4990 }
4991};
4992
4993//===----------------------------------------------------------------------===//
4994// UIToFPInst Class
4995//===----------------------------------------------------------------------===//
4996
4997/// This class represents a cast from an unsigned integer to floating point.
4998class UIToFPInst : public CastInst {
4999protected:
5000 // Note: Instruction needs to be a friend here to call cloneImpl.
5001 friend class Instruction;
5002
5003 /// Clone an identical UIToFPInst
5004 UIToFPInst *cloneImpl() const;
5005
5006public:
5007 /// Constructor with insert-before-instruction semantics
5008 UIToFPInst(
5009 Value *S, ///< The value to be converted
5010 Type *Ty, ///< The type to convert to
5011 const Twine &NameStr = "", ///< A name for the new instruction
5012 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5013 );
5014
5015 /// Constructor with insert-at-end-of-block semantics
5016 UIToFPInst(
5017 Value *S, ///< The value to be converted
5018 Type *Ty, ///< The type to convert to
5019 const Twine &NameStr, ///< A name for the new instruction
5020 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5021 );
5022
5023 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5024 static bool classof(const Instruction *I) {
5025 return I->getOpcode() == UIToFP;
5026 }
5027 static bool classof(const Value *V) {
5028 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5029 }
5030};
5031
5032//===----------------------------------------------------------------------===//
5033// SIToFPInst Class
5034//===----------------------------------------------------------------------===//
5035
5036/// This class represents a cast from signed integer to floating point.
5037class SIToFPInst : public CastInst {
5038protected:
5039 // Note: Instruction needs to be a friend here to call cloneImpl.
5040 friend class Instruction;
5041
5042 /// Clone an identical SIToFPInst
5043 SIToFPInst *cloneImpl() const;
5044
5045public:
5046 /// Constructor with insert-before-instruction semantics
5047 SIToFPInst(
5048 Value *S, ///< The value to be converted
5049 Type *Ty, ///< The type to convert to
5050 const Twine &NameStr = "", ///< A name for the new instruction
5051 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5052 );
5053
5054 /// Constructor with insert-at-end-of-block semantics
5055 SIToFPInst(
5056 Value *S, ///< The value to be converted
5057 Type *Ty, ///< The type to convert to
5058 const Twine &NameStr, ///< A name for the new instruction
5059 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5060 );
5061
5062 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5063 static bool classof(const Instruction *I) {
5064 return I->getOpcode() == SIToFP;
5065 }
5066 static bool classof(const Value *V) {
5067 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5068 }
5069};
5070
5071//===----------------------------------------------------------------------===//
5072// FPToUIInst Class
5073//===----------------------------------------------------------------------===//
5074
5075/// This class represents a cast from floating point to unsigned integer.
5076class FPToUIInst : public CastInst {
5077protected:
5078 // Note: Instruction needs to be a friend here to call cloneImpl.
5079 friend class Instruction;
5080
5081 /// Clone an identical FPToUIInst
5082 FPToUIInst *cloneImpl() const;
5083
5084public:
5085 /// Constructor with insert-before-instruction semantics
5086 FPToUIInst(
5087 Value *S, ///< The value to be converted
5088 Type *Ty, ///< The type to convert to
5089 const Twine &NameStr = "", ///< A name for the new instruction
5090 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5091 );
5092
5093 /// Constructor with insert-at-end-of-block semantics
5094 FPToUIInst(
5095 Value *S, ///< The value to be converted
5096 Type *Ty, ///< The type to convert to
5097 const Twine &NameStr, ///< A name for the new instruction
5098 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5099 );
5100
5101 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5102 static bool classof(const Instruction *I) {
5103 return I->getOpcode() == FPToUI;
5104 }
5105 static bool classof(const Value *V) {
5106 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5107 }
5108};
5109
5110//===----------------------------------------------------------------------===//
5111// FPToSIInst Class
5112//===----------------------------------------------------------------------===//
5113
5114/// This class represents a cast from floating point to signed integer.
5115class FPToSIInst : public CastInst {
5116protected:
5117 // Note: Instruction needs to be a friend here to call cloneImpl.
5118 friend class Instruction;
5119
5120 /// Clone an identical FPToSIInst
5121 FPToSIInst *cloneImpl() const;
5122
5123public:
5124 /// Constructor with insert-before-instruction semantics
5125 FPToSIInst(
5126 Value *S, ///< The value to be converted
5127 Type *Ty, ///< The type to convert to
5128 const Twine &NameStr = "", ///< A name for the new instruction
5129 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5130 );
5131
5132 /// Constructor with insert-at-end-of-block semantics
5133 FPToSIInst(
5134 Value *S, ///< The value to be converted
5135 Type *Ty, ///< The type to convert to
5136 const Twine &NameStr, ///< A name for the new instruction
5137 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5138 );
5139
5140 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5141 static bool classof(const Instruction *I) {
5142 return I->getOpcode() == FPToSI;
5143 }
5144 static bool classof(const Value *V) {
5145 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5146 }
5147};
5148
5149//===----------------------------------------------------------------------===//
5150// IntToPtrInst Class
5151//===----------------------------------------------------------------------===//
5152
5153/// This class represents a cast from an integer to a pointer.
5154class IntToPtrInst : public CastInst {
5155public:
5156 // Note: Instruction needs to be a friend here to call cloneImpl.
5157 friend class Instruction;
5158
5159 /// Constructor with insert-before-instruction semantics
5160 IntToPtrInst(
5161 Value *S, ///< The value to be converted
5162 Type *Ty, ///< The type to convert to
5163 const Twine &NameStr = "", ///< A name for the new instruction
5164 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5165 );
5166
5167 /// Constructor with insert-at-end-of-block semantics
5168 IntToPtrInst(
5169 Value *S, ///< The value to be converted
5170 Type *Ty, ///< The type to convert to
5171 const Twine &NameStr, ///< A name for the new instruction
5172 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5173 );
5174
5175 /// Clone an identical IntToPtrInst.
5176 IntToPtrInst *cloneImpl() const;
5177
5178 /// Returns the address space of this instruction's pointer type.
5179 unsigned getAddressSpace() const {
5180 return getType()->getPointerAddressSpace();
5181 }
5182
5183 // Methods for support type inquiry through isa, cast, and dyn_cast:
5184 static bool classof(const Instruction *I) {
5185 return I->getOpcode() == IntToPtr;
5186 }
5187 static bool classof(const Value *V) {
5188 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5189 }
5190};
5191
5192//===----------------------------------------------------------------------===//
5193// PtrToIntInst Class
5194//===----------------------------------------------------------------------===//
5195
5196/// This class represents a cast from a pointer to an integer.
5197class PtrToIntInst : public CastInst {
5198protected:
5199 // Note: Instruction needs to be a friend here to call cloneImpl.
5200 friend class Instruction;
5201
5202 /// Clone an identical PtrToIntInst.
5203 PtrToIntInst *cloneImpl() const;
5204
5205public:
5206 /// Constructor with insert-before-instruction semantics
5207 PtrToIntInst(
5208 Value *S, ///< The value to be converted
5209 Type *Ty, ///< The type to convert to
5210 const Twine &NameStr = "", ///< A name for the new instruction
5211 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5212 );
5213
5214 /// Constructor with insert-at-end-of-block semantics
5215 PtrToIntInst(
5216 Value *S, ///< The value to be converted
5217 Type *Ty, ///< The type to convert to
5218 const Twine &NameStr, ///< A name for the new instruction
5219 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5220 );
5221
5222 /// Gets the pointer operand.
5223 Value *getPointerOperand() { return getOperand(0); }
5224 /// Gets the pointer operand.
5225 const Value *getPointerOperand() const { return getOperand(0); }
5226 /// Gets the operand index of the pointer operand.
5227 static unsigned getPointerOperandIndex() { return 0U; }
5228
5229 /// Returns the address space of the pointer operand.
5230 unsigned getPointerAddressSpace() const {
5231 return getPointerOperand()->getType()->getPointerAddressSpace();
5232 }
5233
5234 // Methods for support type inquiry through isa, cast, and dyn_cast:
5235 static bool classof(const Instruction *I) {
5236 return I->getOpcode() == PtrToInt;
5237 }
5238 static bool classof(const Value *V) {
5239 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5240 }
5241};
5242
5243//===----------------------------------------------------------------------===//
5244// BitCastInst Class
5245//===----------------------------------------------------------------------===//
5246
5247/// This class represents a no-op cast from one type to another.
5248class BitCastInst : public CastInst {
5249protected:
5250 // Note: Instruction needs to be a friend here to call cloneImpl.
5251 friend class Instruction;
5252
5253 /// Clone an identical BitCastInst.
5254 BitCastInst *cloneImpl() const;
5255
5256public:
5257 /// Constructor with insert-before-instruction semantics
5258 BitCastInst(
5259 Value *S, ///< The value to be cast
5260 Type *Ty, ///< The type to cast to
5261 const Twine &NameStr = "", ///< A name for the new instruction
5262 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5263 );
5264
5265 /// Constructor with insert-at-end-of-block semantics
5266 BitCastInst(
5267 Value *S, ///< The value to be cast
5268 Type *Ty, ///< The type to cast to
5269 const Twine &NameStr, ///< A name for the new instruction
5270 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5271 );
5272
5273 // Methods for support type inquiry through isa, cast, and dyn_cast:
5274 static bool classof(const Instruction *I) {
5275 return I->getOpcode() == BitCast;
5276 }
5277 static bool classof(const Value *V) {
5278 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5279 }
5280};
5281
5282//===----------------------------------------------------------------------===//
5283// AddrSpaceCastInst Class
5284//===----------------------------------------------------------------------===//
5285
5286/// This class represents a conversion between pointers from one address space
5287/// to another.
5288class AddrSpaceCastInst : public CastInst {
5289protected:
5290 // Note: Instruction needs to be a friend here to call cloneImpl.
5291 friend class Instruction;
5292
5293 /// Clone an identical AddrSpaceCastInst.
5294 AddrSpaceCastInst *cloneImpl() const;
5295
5296public:
5297 /// Constructor with insert-before-instruction semantics
5298 AddrSpaceCastInst(
5299 Value *S, ///< The value to be cast
5300 Type *Ty, ///< The type to cast to
5301 const Twine &NameStr = "", ///< A name for the new instruction
5302 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5303 );
5304
5305 /// Constructor with insert-at-end-of-block semantics
5306 AddrSpaceCastInst(
5307 Value *S, ///< The value to be cast
5308 Type *Ty, ///< The type to cast to
5309 const Twine &NameStr, ///< A name for the new instruction
5310 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5311 );
5312
5313 // Methods for support type inquiry through isa, cast, and dyn_cast:
5314 static bool classof(const Instruction *I) {
5315 return I->getOpcode() == AddrSpaceCast;
5316 }
5317 static bool classof(const Value *V) {
5318 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5319 }
5320
5321 /// Gets the pointer operand.
5322 Value *getPointerOperand() {
5323 return getOperand(0);
5324 }
5325
5326 /// Gets the pointer operand.
5327 const Value *getPointerOperand() const {
5328 return getOperand(0);
5329 }
5330
5331 /// Gets the operand index of the pointer operand.
5332 static unsigned getPointerOperandIndex() {
5333 return 0U;
5334 }
5335
5336 /// Returns the address space of the pointer operand.
5337 unsigned getSrcAddressSpace() const {
5338 return getPointerOperand()->getType()->getPointerAddressSpace();
5339 }
5340
5341 /// Returns the address space of the result.
5342 unsigned getDestAddressSpace() const {
5343 return getType()->getPointerAddressSpace();
5344 }
5345};
5346
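For illustration, a sketch of querying both sides of an addrspacecast, the kind of inspection an address-space promotion pass performs; the names are invented.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

void sketchASCast(IRBuilder<> &Builder, Value *Ptr, Type *FlatPtrTy) {
  Value *Flat = Builder.CreateAddrSpaceCast(Ptr, FlatPtrTy, "flat");
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(Flat)) {
    unsigned SrcAS = ASC->getSrcAddressSpace();
    unsigned DstAS = ASC->getDestAddressSpace();
    (void)SrcAS; (void)DstAS; // the two spaces necessarily differ
  }
}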
5347//===----------------------------------------------------------------------===//
5348// Helper functions
5349//===----------------------------------------------------------------------===//
5350
5351/// A helper function that returns the pointer operand of a load or store
5352/// instruction. Returns nullptr if V is neither a load nor a store.
5353inline const Value *getLoadStorePointerOperand(const Value *V) {
5354 if (auto *Load = dyn_cast<LoadInst>(V))
22: Assuming 'Load' is null
23: Taking false branch
5355 return Load->getPointerOperand();
5356 if (auto *Store = dyn_cast<StoreInst>(V))
24: Assuming 'Store' is null
25: Taking false branch
5357 return Store->getPointerOperand();
5358 return nullptr;
26: Returning null pointer, which participates in a condition later
5359}
5360inline Value *getLoadStorePointerOperand(Value *V) {
5361 return const_cast<Value *>(
28: Returning null pointer, which participates in a condition later
5362 getLoadStorePointerOperand(static_cast<const Value *>(V)));
21: Calling 'getLoadStorePointerOperand'
27: Returning from 'getLoadStorePointerOperand'
5363}
5364
5365/// A helper function that returns the pointer operand of a load, store
5366/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5367inline const Value *getPointerOperand(const Value *V) {
5368 if (auto *Ptr = getLoadStorePointerOperand(V))
5369 return Ptr;
5370 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5371 return Gep->getPointerOperand();
5372 return nullptr;
5373}
5374inline Value *getPointerOperand(Value *V) {
5375 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5376}
5377
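Both overloads return nullptr for anything that is not a load or store; that nullptr is exactly the value the analyzer tracks through steps 21-28 above, so callers must re-check the result before dereferencing it. A minimal sketch under that assumption (the helper name is invented):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Returns the accessed pointer's address space, or -1 when I is not a
// load or store.
int sketchPointerSpace(Instruction *I) {
  if (Value *Ptr = getLoadStorePointerOperand(I)) // may be nullptr
    return Ptr->getType()->getPointerAddressSpace();
  return -1;
}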
5378/// A helper function that returns the alignment of load or store instruction.
5379inline Align getLoadStoreAlignment(Value *I) {
5380 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5381 "Expected Load or Store instruction");
5382 if (auto *LI = dyn_cast<LoadInst>(I))
5383 return LI->getAlign();
5384 return cast<StoreInst>(I)->getAlign();
5385}
5386
5387/// A helper function that returns the address space of the pointer operand of
5388/// load or store instruction.
5389inline unsigned getLoadStoreAddressSpace(Value *I) {
5390 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5391 "Expected Load or Store instruction");
5392 if (auto *LI = dyn_cast<LoadInst>(I))
5393 return LI->getPointerAddressSpace();
5394 return cast<StoreInst>(I)->getPointerAddressSpace();
5395}
5396
5397/// A helper function that returns the type of a load or store instruction.
5398inline Type *getLoadStoreType(Value *I) {
5399 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5400 "Expected Load or Store instruction");
5401 if (auto *LI = dyn_cast<LoadInst>(I))
5402 return LI->getType();
5403 return cast<StoreInst>(I)->getValueOperand()->getType();
5404}
5405
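For illustration, the three helpers used together; unlike the pointer-operand helper above they assert rather than return a sentinel, so the caller pre-checks. A sketch with invented names:

#include "llvm/IR/Instructions.h"
using namespace llvm;

void sketchAccessInfo(Instruction *I) {
  if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
    return; // the helpers below would assert on anything else
  Type *AccessTy = getLoadStoreType(I);       // loaded/stored value type
  Align A = getLoadStoreAlignment(I);         // access alignment
  unsigned AS = getLoadStoreAddressSpace(I);  // pointer operand's space
  (void)AccessTy; (void)A; (void)AS;
}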
5406/// A helper function that returns an atomic operation's sync scope; returns
5407/// std::nullopt if it is not an atomic operation.
5408inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5409 if (!I->isAtomic())
5410 return std::nullopt;
5411 if (auto *AI = dyn_cast<LoadInst>(I))
5412 return AI->getSyncScopeID();
5413 if (auto *AI = dyn_cast<StoreInst>(I))
5414 return AI->getSyncScopeID();
5415 if (auto *AI = dyn_cast<FenceInst>(I))
5416 return AI->getSyncScopeID();
5417 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5418 return AI->getSyncScopeID();
5419 if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5420 return AI->getSyncScopeID();
5421 llvm_unreachable("unhandled atomic operation");
5422}
5423
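For illustration, a sketch of the optional-returning pattern (the function name is invented): std::nullopt distinguishes "not atomic at all" from any real scope ID.

#include "llvm/IR/Instructions.h"
#include <optional>
using namespace llvm;

bool sketchIsSystemScoped(const Instruction *I) {
  if (std::optional<SyncScope::ID> SSID = getAtomicSyncScopeID(I))
    return *SSID == SyncScope::System; // synchronizes with all threads
  return false; // not an atomic operation
}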
5424//===----------------------------------------------------------------------===//
5425// FreezeInst Class
5426//===----------------------------------------------------------------------===//
5427
5428/// This class represents a freeze instruction, which returns an arbitrary
5429/// but fixed concrete value if its operand is a poison or undef value.
5430class FreezeInst : public UnaryInstruction {
5431protected:
5432 // Note: Instruction needs to be a friend here to call cloneImpl.
5433 friend class Instruction;
5434
5435 /// Clone an identical FreezeInst
5436 FreezeInst *cloneImpl() const;
5437
5438public:
5439 explicit FreezeInst(Value *S,
5440 const Twine &NameStr = "",
5441 Instruction *InsertBefore = nullptr);
5442 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5443
5444 // Methods for support type inquiry through isa, cast, and dyn_cast:
5445 static inline bool classof(const Instruction *I) {
5446 return I->getOpcode() == Freeze;
5447 }
5448 static inline bool classof(const Value *V) {
5449 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5450 }
5451};
5452
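For illustration, the canonical use of freeze: branching on a possibly-poison value is undefined behavior, while branching on its frozen copy is merely nondeterministic. A sketch with invented names:

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

void sketchFreeze(IRBuilder<> &Builder, Value *MaybePoison,
                  BasicBlock *T, BasicBlock *F) {
  Value *Frozen = Builder.CreateFreeze(MaybePoison, "frozen");
  Value *Cond = Builder.CreateICmpNE(
      Frozen, Constant::getNullValue(Frozen->getType()));
  Builder.CreateCondBr(Cond, T, F); // safe: Frozen is a concrete value
}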
5453} // end namespace llvm
5454
5455#endif // LLVM_IR_INSTRUCTIONS_H