Bug Summary

File: lib/Transforms/Scalar/LoopIdiomRecognize.cpp
Warning: line 1424, column 46: Called C++ object pointer is null

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LoopIdiomRecognize.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Transforms/Scalar -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Scalar/LoopIdiomRecognize.cpp

/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Scalar/LoopIdiomRecognize.cpp

1//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass implements an idiom recognizer that transforms simple loops into a
11// non-loop form. In cases that this kicks in, it can be a significant
12// performance win.
13//
14// If compiling for code size we avoid idiom recognition if the resulting
15// code could be larger than the code for the original loop. One way this could
16// happen is if the loop is not removable after idiom recognition due to the
17// presence of non-idiom instructions. The initial implementation of the
18// heuristics applies to idioms in multi-block loops.
19//
20//===----------------------------------------------------------------------===//
21//
22// TODO List:
23//
24// Future loop memory idioms to recognize:
25// memcmp, memmove, strlen, etc.
26// Future floating point idioms to recognize in -ffast-math mode:
27// fpowi
28// Future integer operation idioms to recognize:
29// ctpop, ctlz, cttz
30//
31// Beware that isel's default lowering for ctpop is highly inefficient for
32// i64 and larger types when i64 is legal and the value has few bits set. It
33// would be good to enhance isel to emit a loop for ctpop in this case.
34//
35// This could recognize common matrix multiplies and dot product idioms and
36// replace them with calls to BLAS (if linked in??).
37//
38//===----------------------------------------------------------------------===//
39
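As a quick orientation before the listing: the store idioms the header comment describes look, at the source level, roughly like the loops below. This is an illustrative sketch added for context, not code from this file; the function and variable names are hypothetical.

// Illustrative only: loops of this shape are what the pass rewrites into a
// single library call emitted in the loop preheader.
void zeroFill(unsigned char *A, unsigned N) {
  for (unsigned I = 0; I != N; ++I)
    A[I] = 0;            // splat store of a loop-invariant value -> memset
}

void copyArray(int *Dst, const int *Src, unsigned N) {
  for (unsigned I = 0; I != N; ++I)
    Dst[I] = Src[I];     // store fed by a same-stride load -> memcpy
}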
40#include "llvm/ADT/APInt.h"
41#include "llvm/ADT/ArrayRef.h"
42#include "llvm/ADT/DenseMap.h"
43#include "llvm/ADT/MapVector.h"
44#include "llvm/ADT/SetVector.h"
45#include "llvm/ADT/SmallPtrSet.h"
46#include "llvm/ADT/SmallVector.h"
47#include "llvm/ADT/Statistic.h"
48#include "llvm/ADT/StringRef.h"
49#include "llvm/Analysis/AliasAnalysis.h"
50#include "llvm/Analysis/LoopAccessAnalysis.h"
51#include "llvm/Analysis/LoopInfo.h"
52#include "llvm/Analysis/LoopPass.h"
53#include "llvm/Analysis/MemoryLocation.h"
54#include "llvm/Analysis/ScalarEvolution.h"
55#include "llvm/Analysis/ScalarEvolutionExpander.h"
56#include "llvm/Analysis/ScalarEvolutionExpressions.h"
57#include "llvm/Analysis/TargetLibraryInfo.h"
58#include "llvm/Analysis/TargetTransformInfo.h"
59#include "llvm/Analysis/Utils/Local.h"
60#include "llvm/Analysis/ValueTracking.h"
61#include "llvm/IR/Attributes.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/Constant.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DataLayout.h"
66#include "llvm/IR/DebugLoc.h"
67#include "llvm/IR/DerivedTypes.h"
68#include "llvm/IR/Dominators.h"
69#include "llvm/IR/GlobalValue.h"
70#include "llvm/IR/GlobalVariable.h"
71#include "llvm/IR/IRBuilder.h"
72#include "llvm/IR/InstrTypes.h"
73#include "llvm/IR/Instruction.h"
74#include "llvm/IR/Instructions.h"
75#include "llvm/IR/IntrinsicInst.h"
76#include "llvm/IR/Intrinsics.h"
77#include "llvm/IR/LLVMContext.h"
78#include "llvm/IR/Module.h"
79#include "llvm/IR/PassManager.h"
80#include "llvm/IR/Type.h"
81#include "llvm/IR/User.h"
82#include "llvm/IR/Value.h"
83#include "llvm/IR/ValueHandle.h"
84#include "llvm/Pass.h"
85#include "llvm/Support/Casting.h"
86#include "llvm/Support/CommandLine.h"
87#include "llvm/Support/Debug.h"
88#include "llvm/Support/raw_ostream.h"
89#include "llvm/Transforms/Scalar.h"
90#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
91#include "llvm/Transforms/Utils/BuildLibCalls.h"
92#include "llvm/Transforms/Utils/LoopUtils.h"
93#include <algorithm>
94#include <cassert>
95#include <cstdint>
96#include <utility>
97#include <vector>
98
99using namespace llvm;
100
101#define DEBUG_TYPE "loop-idiom"
102
103STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
104STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
105
106static cl::opt<bool> UseLIRCodeSizeHeurs(
107 "use-lir-code-size-heurs",
108 cl::desc("Use loop idiom recognition code size heuristics when compiling"
109 "with -Os/-Oz"),
110 cl::init(true), cl::Hidden);
111
112namespace {
113
114class LoopIdiomRecognize {
115 Loop *CurLoop = nullptr;
116 AliasAnalysis *AA;
117 DominatorTree *DT;
118 LoopInfo *LI;
119 ScalarEvolution *SE;
120 TargetLibraryInfo *TLI;
121 const TargetTransformInfo *TTI;
122 const DataLayout *DL;
123 bool ApplyCodeSizeHeuristics;
124
125public:
126 explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
127 LoopInfo *LI, ScalarEvolution *SE,
128 TargetLibraryInfo *TLI,
129 const TargetTransformInfo *TTI,
130 const DataLayout *DL)
131 : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL) {}
132
133 bool runOnLoop(Loop *L);
134
135private:
136 using StoreList = SmallVector<StoreInst *, 8>;
137 using StoreListMap = MapVector<Value *, StoreList>;
138
139 StoreListMap StoreRefsForMemset;
140 StoreListMap StoreRefsForMemsetPattern;
141 StoreList StoreRefsForMemcpy;
142 bool HasMemset;
143 bool HasMemsetPattern;
144 bool HasMemcpy;
145
146 /// Return code for isLegalStore()
147 enum LegalStoreKind {
148 None = 0,
149 Memset,
150 MemsetPattern,
151 Memcpy,
152 UnorderedAtomicMemcpy,
153 DontUse // Dummy retval never to be used. Allows catching errors in retval
154 // handling.
155 };
156
157 /// \name Countable Loop Idiom Handling
158 /// @{
159
160 bool runOnCountableLoop();
161 bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
162 SmallVectorImpl<BasicBlock *> &ExitBlocks);
163
164 void collectStores(BasicBlock *BB);
165 LegalStoreKind isLegalStore(StoreInst *SI);
166 bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
167 bool ForMemset);
168 bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
169
170 bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
171 unsigned StoreAlignment, Value *StoredVal,
172 Instruction *TheStore,
173 SmallPtrSetImpl<Instruction *> &Stores,
174 const SCEVAddRecExpr *Ev, const SCEV *BECount,
175 bool NegStride, bool IsLoopMemset = false);
176 bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
177 bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
178 bool IsLoopMemset = false);
179
180 /// @}
181 /// \name Noncountable Loop Idiom Handling
182 /// @{
183
184 bool runOnNoncountableLoop();
185
186 bool recognizePopcount();
187 void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
188 PHINode *CntPhi, Value *Var);
189 bool recognizeAndInsertCTLZ();
190 void transformLoopToCountable(BasicBlock *PreCondBB, Instruction *CntInst,
191 PHINode *CntPhi, Value *Var, const DebugLoc DL,
192 bool ZeroCheck, bool IsCntPhiUsedOutsideLoop);
193
194 /// @}
195};
196
197class LoopIdiomRecognizeLegacyPass : public LoopPass {
198public:
199 static char ID;
200
201 explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
202 initializeLoopIdiomRecognizeLegacyPassPass(
203 *PassRegistry::getPassRegistry());
204 }
205
206 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
207 if (skipLoop(L))
208 return false;
209
210 AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
211 DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
212 LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
213 ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
214 TargetLibraryInfo *TLI =
215 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
216 const TargetTransformInfo *TTI =
217 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
218 *L->getHeader()->getParent());
219 const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
220
221 LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
222 return LIR.runOnLoop(L);
223 }
224
225 /// This transformation requires natural loop information & requires that
226 /// loop preheaders be inserted into the CFG.
227 void getAnalysisUsage(AnalysisUsage &AU) const override {
228 AU.addRequired<TargetLibraryInfoWrapperPass>();
229 AU.addRequired<TargetTransformInfoWrapperPass>();
230 getLoopAnalysisUsage(AU);
231 }
232};
233
234} // end anonymous namespace
235
236char LoopIdiomRecognizeLegacyPass::ID = 0;
237
238PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
239 LoopStandardAnalysisResults &AR,
240 LPMUpdater &) {
241 const auto *DL = &L.getHeader()->getModule()->getDataLayout();
242
243 LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL);
244 if (!LIR.runOnLoop(&L))
1. Calling 'LoopIdiomRecognize::runOnLoop'
245 return PreservedAnalyses::all();
246
247 return getLoopPassPreservedAnalyses();
248}
249
250INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
251 "Recognize loop idioms", false, false)
252INITIALIZE_PASS_DEPENDENCY(LoopPass)
253INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
254INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
255INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
256 "Recognize loop idioms", false, false)
257
258Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }
259
260static void deleteDeadInstruction(Instruction *I) {
261 I->replaceAllUsesWith(UndefValue::get(I->getType()));
262 I->eraseFromParent();
263}
264
265//===----------------------------------------------------------------------===//
266//
267// Implementation of LoopIdiomRecognize
268//
269//===----------------------------------------------------------------------===//
270
271bool LoopIdiomRecognize::runOnLoop(Loop *L) {
272 CurLoop = L;
273 // If the loop could not be converted to canonical form, it must have an
274 // indirectbr in it, just give up.
275 if (!L->getLoopPreheader())
2. Assuming the condition is false
3. Taking false branch
276 return false;
277
278 // Disable loop idiom recognition if the function's name is a common idiom.
279 StringRef Name = L->getHeader()->getParent()->getName();
280 if (Name == "memset" || Name == "memcpy")
4. Assuming the condition is false
5. Assuming the condition is false
6. Taking false branch
281 return false;
282
283 // Determine if code size heuristics need to be applied.
284 ApplyCodeSizeHeuristics =
285 L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;
7. Assuming the condition is false
286
287 HasMemset = TLI->has(LibFunc_memset);
288 HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
289 HasMemcpy = TLI->has(LibFunc_memcpy);
290
291 if (HasMemset || HasMemsetPattern || HasMemcpy)
8. Taking false branch
292 if (SE->hasLoopInvariantBackedgeTakenCount(L))
293 return runOnCountableLoop();
294
295 return runOnNoncountableLoop();
9. Calling 'LoopIdiomRecognize::runOnNoncountableLoop'
296}
297
298bool LoopIdiomRecognize::runOnCountableLoop() {
299 const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
300 assert(!isa<SCEVCouldNotCompute>(BECount) &&
301 "runOnCountableLoop() called on a loop without a predictable"
302 "backedge-taken count");
303
304 // If this loop executes exactly one time, then it should be peeled, not
305 // optimized by this pass.
306 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
307 if (BECst->getAPInt() == 0)
308 return false;
309
310 SmallVector<BasicBlock *, 8> ExitBlocks;
311 CurLoop->getUniqueExitBlocks(ExitBlocks);
312
313 DEBUG(dbgs() << "loop-idiom Scanning: F["
314 << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
315 << CurLoop->getHeader()->getName() << "\n");
316
317 bool MadeChange = false;
318
319 // The following transforms hoist stores/memsets into the loop pre-header.
320 // Give up if the loop has instructions that may throw.
321 LoopSafetyInfo SafetyInfo;
322 computeLoopSafetyInfo(&SafetyInfo, CurLoop);
323 if (SafetyInfo.MayThrow)
324 return MadeChange;
325
326 // Scan all the blocks in the loop that are not in subloops.
327 for (auto *BB : CurLoop->getBlocks()) {
328 // Ignore blocks in subloops.
329 if (LI->getLoopFor(BB) != CurLoop)
330 continue;
331
332 MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
333 }
334 return MadeChange;
335}
336
337static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
338 const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
339 return ConstStride->getAPInt();
340}
341
342/// getMemSetPatternValue - If a strided store of the specified value is safe to
343/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
344/// be passed in. Otherwise, return null.
345///
346/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
347/// just replicate their input array and then pass on to memset_pattern16.
348static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
349 // If the value isn't a constant, we can't promote it to being in a constant
350 // array. We could theoretically do a store to an alloca or something, but
351 // that doesn't seem worthwhile.
352 Constant *C = dyn_cast<Constant>(V);
353 if (!C)
354 return nullptr;
355
356 // Only handle simple values that are a power of two bytes in size.
357 uint64_t Size = DL->getTypeSizeInBits(V->getType());
358 if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
359 return nullptr;
360
361 // Don't care enough about darwin/ppc to implement this.
362 if (DL->isBigEndian())
363 return nullptr;
364
365 // Convert to size in bytes.
366 Size /= 8;
367
368 // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
369 // if the top and bottom are the same (e.g. for vectors and large integers).
370 if (Size > 16)
371 return nullptr;
372
373 // If the constant is exactly 16 bytes, just use it.
374 if (Size == 16)
375 return C;
376
377 // Otherwise, we'll use an array of the constants.
378 unsigned ArraySize = 16 / Size;
379 ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
380 return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
381}
382
383LoopIdiomRecognize::LegalStoreKind
384LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
385 // Don't touch volatile stores.
386 if (SI->isVolatile())
387 return LegalStoreKind::None;
388 // We only want simple or unordered-atomic stores.
389 if (!SI->isUnordered())
390 return LegalStoreKind::None;
391
392 // Don't convert stores of non-integral pointer types to memsets (which stores
393 // integers).
394 if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
395 return LegalStoreKind::None;
396
397 // Avoid merging nontemporal stores.
398 if (SI->getMetadata(LLVMContext::MD_nontemporal))
399 return LegalStoreKind::None;
400
401 Value *StoredVal = SI->getValueOperand();
402 Value *StorePtr = SI->getPointerOperand();
403
404 // Reject stores that are so large that they overflow an unsigned.
405 uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
406 if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
407 return LegalStoreKind::None;
408
409 // See if the pointer expression is an AddRec like {base,+,1} on the current
410 // loop, which indicates a strided store. If we have something else, it's a
411 // random store we can't handle.
412 const SCEVAddRecExpr *StoreEv =
413 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
414 if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
415 return LegalStoreKind::None;
416
417 // Check to see if we have a constant stride.
418 if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
419 return LegalStoreKind::None;
420
421 // See if the store can be turned into a memset.
422
423 // If the stored value is a byte-wise value (like i32 -1), then it may be
424 // turned into a memset of i8 -1, assuming that all the consecutive bytes
425 // are stored. A store of i32 0x01020304 can never be turned into a memset,
426 // but it can be turned into memset_pattern if the target supports it.
427 Value *SplatValue = isBytewiseValue(StoredVal);
428 Constant *PatternValue = nullptr;
429
430 // Note: memset and memset_pattern on unordered-atomic is not yet supported
431 bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
432
433 // If we're allowed to form a memset, and the stored value would be
434 // acceptable for memset, use it.
435 if (!UnorderedAtomic && HasMemset && SplatValue &&
436 // Verify that the stored value is loop invariant. If not, we can't
437 // promote the memset.
438 CurLoop->isLoopInvariant(SplatValue)) {
439 // It looks like we can use SplatValue.
440 return LegalStoreKind::Memset;
441 } else if (!UnorderedAtomic && HasMemsetPattern &&
442 // Don't create memset_pattern16s with address spaces.
443 StorePtr->getType()->getPointerAddressSpace() == 0 &&
444 (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
445 // It looks like we can use PatternValue!
446 return LegalStoreKind::MemsetPattern;
447 }
448
449 // Otherwise, see if the store can be turned into a memcpy.
450 if (HasMemcpy) {
451 // Check to see if the stride matches the size of the store. If so, then we
452 // know that every byte is touched in the loop.
453 APInt Stride = getStoreStride(StoreEv);
454 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
455 if (StoreSize != Stride && StoreSize != -Stride)
456 return LegalStoreKind::None;
457
458 // The store must be feeding a non-volatile load.
459 LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
460
461 // Only allow non-volatile loads
462 if (!LI || LI->isVolatile())
463 return LegalStoreKind::None;
464 // Only allow simple or unordered-atomic loads
465 if (!LI->isUnordered())
466 return LegalStoreKind::None;
467
468 // See if the pointer expression is an AddRec like {base,+,1} on the current
469 // loop, which indicates a strided load. If we have something else, it's a
470 // random load we can't handle.
471 const SCEVAddRecExpr *LoadEv =
472 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
473 if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
474 return LegalStoreKind::None;
475
476 // The store and load must share the same stride.
477 if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
478 return LegalStoreKind::None;
479
480 // Success. This store can be converted into a memcpy.
481 UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
482 return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
483 : LegalStoreKind::Memcpy;
484 }
485 // This store can't be transformed into a memset/memcpy.
486 return LegalStoreKind::None;
487}
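To make the splat-versus-pattern distinction used above concrete, a hypothetical sketch (not from this file):

// i32 -1 is 0xFFFFFFFF: every byte is equal, so isBytewiseValue succeeds and
// the store can become a plain memset. 0x01020304 has differing bytes, so it
// is only legal as memset_pattern16 when the target library provides one.
static void splatVsPattern(int *A, unsigned N) {
  for (unsigned I = 0; I != N; ++I)
    A[I] = -1;           // LegalStoreKind::Memset
  for (unsigned I = 0; I != N; ++I)
    A[I] = 0x01020304;   // LegalStoreKind::MemsetPattern (if HasMemsetPattern)
}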
488
489void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
490 StoreRefsForMemset.clear();
491 StoreRefsForMemsetPattern.clear();
492 StoreRefsForMemcpy.clear();
493 for (Instruction &I : *BB) {
494 StoreInst *SI = dyn_cast<StoreInst>(&I);
495 if (!SI)
496 continue;
497
498 // Make sure this is a strided store with a constant stride.
499 switch (isLegalStore(SI)) {
500 case LegalStoreKind::None:
501 // Nothing to do
502 break;
503 case LegalStoreKind::Memset: {
504 // Find the base pointer.
505 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
506 StoreRefsForMemset[Ptr].push_back(SI);
507 } break;
508 case LegalStoreKind::MemsetPattern: {
509 // Find the base pointer.
510 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
511 StoreRefsForMemsetPattern[Ptr].push_back(SI);
512 } break;
513 case LegalStoreKind::Memcpy:
514 case LegalStoreKind::UnorderedAtomicMemcpy:
515 StoreRefsForMemcpy.push_back(SI);
516 break;
517 default:
518 assert(false && "unhandled return value");
519 break;
520 }
521 }
522}
523
524/// runOnLoopBlock - Process the specified block, which lives in a counted loop
525/// with the specified backedge count. This block is known to be in the current
526/// loop and not in any subloops.
527bool LoopIdiomRecognize::runOnLoopBlock(
528 BasicBlock *BB, const SCEV *BECount,
529 SmallVectorImpl<BasicBlock *> &ExitBlocks) {
530 // We can only promote stores in this block if they are unconditionally
531 // executed in the loop. For a block to be unconditionally executed, it has
532 // to dominate all the exit blocks of the loop. Verify this now.
533 for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
534 if (!DT->dominates(BB, ExitBlocks[i]))
535 return false;
536
537 bool MadeChange = false;
538 // Look for store instructions, which may be optimized to memset/memcpy.
539 collectStores(BB);
540
541 // Look for a single store or sets of stores with a common base, which can be
542 // optimized into a memset (memset_pattern). The latter most commonly happens
543 // with structs and hand-unrolled loops.
544 for (auto &SL : StoreRefsForMemset)
545 MadeChange |= processLoopStores(SL.second, BECount, true);
546
547 for (auto &SL : StoreRefsForMemsetPattern)
548 MadeChange |= processLoopStores(SL.second, BECount, false);
549
550 // Optimize the store into a memcpy, if it feeds a similarly strided load.
551 for (auto &SI : StoreRefsForMemcpy)
552 MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);
553
554 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
555 Instruction *Inst = &*I++;
556 // Look for memset instructions, which may be optimized to a larger memset.
557 if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
558 WeakTrackingVH InstPtr(&*I);
559 if (!processLoopMemSet(MSI, BECount))
560 continue;
561 MadeChange = true;
562
563 // If processing the memset invalidated our iterator, start over from the
564 // top of the block.
565 if (!InstPtr)
566 I = BB->begin();
567 continue;
568 }
569 }
570
571 return MadeChange;
572}
573
574/// processLoopStores - See if this store(s) can be promoted to a memset.
575bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
576 const SCEV *BECount,
577 bool ForMemset) {
578 // Try to find consecutive stores that can be transformed into memsets.
579 SetVector<StoreInst *> Heads, Tails;
580 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
581
582 // Do a quadratic search on all of the given stores and find
583 // all of the pairs of stores that follow each other.
584 SmallVector<unsigned, 16> IndexQueue;
585 for (unsigned i = 0, e = SL.size(); i < e; ++i) {
586 assert(SL[i]->isSimple() && "Expected only non-volatile stores.");
587
588 Value *FirstStoredVal = SL[i]->getValueOperand();
589 Value *FirstStorePtr = SL[i]->getPointerOperand();
590 const SCEVAddRecExpr *FirstStoreEv =
591 cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
592 APInt FirstStride = getStoreStride(FirstStoreEv);
593 unsigned FirstStoreSize = DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());
594
595 // See if we can optimize just this store in isolation.
596 if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
597 Heads.insert(SL[i]);
598 continue;
599 }
600
601 Value *FirstSplatValue = nullptr;
602 Constant *FirstPatternValue = nullptr;
603
604 if (ForMemset)
605 FirstSplatValue = isBytewiseValue(FirstStoredVal);
606 else
607 FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);
608
609 assert((FirstSplatValue || FirstPatternValue) &&
610 "Expected either splat value or pattern value.");
611
612 IndexQueue.clear();
613 // If a store has multiple consecutive store candidates, search Stores
614 // array according to the sequence: from i+1 to e, then from i-1 to 0.
615 // This is because usually pairing with immediate succeeding or preceding
616 // candidate create the best chance to find memset opportunity.
617 unsigned j = 0;
618 for (j = i + 1; j < e; ++j)
619 IndexQueue.push_back(j);
620 for (j = i; j > 0; --j)
621 IndexQueue.push_back(j - 1);
622
623 for (auto &k : IndexQueue) {
624 assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
625 Value *SecondStorePtr = SL[k]->getPointerOperand();
626 const SCEVAddRecExpr *SecondStoreEv =
627 cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
628 APInt SecondStride = getStoreStride(SecondStoreEv);
629
630 if (FirstStride != SecondStride)
631 continue;
632
633 Value *SecondStoredVal = SL[k]->getValueOperand();
634 Value *SecondSplatValue = nullptr;
635 Constant *SecondPatternValue = nullptr;
636
637 if (ForMemset)
638 SecondSplatValue = isBytewiseValue(SecondStoredVal);
639 else
640 SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);
641
642 assert((SecondSplatValue || SecondPatternValue) &&
643 "Expected either splat value or pattern value.");
644
645 if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
646 if (ForMemset) {
647 if (FirstSplatValue != SecondSplatValue)
648 continue;
649 } else {
650 if (FirstPatternValue != SecondPatternValue)
651 continue;
652 }
653 Tails.insert(SL[k]);
654 Heads.insert(SL[i]);
655 ConsecutiveChain[SL[i]] = SL[k];
656 break;
657 }
658 }
659 }
660
661 // We may run into multiple chains that merge into a single chain. We mark the
662 // stores that we transformed so that we don't visit the same store twice.
663 SmallPtrSet<Value *, 16> TransformedStores;
664 bool Changed = false;
665
666 // For stores that start but don't end a link in the chain:
667 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
668 it != e; ++it) {
669 if (Tails.count(*it))
670 continue;
671
672 // We found a store instr that starts a chain. Now follow the chain and try
673 // to transform it.
674 SmallPtrSet<Instruction *, 8> AdjacentStores;
675 StoreInst *I = *it;
676
677 StoreInst *HeadStore = I;
678 unsigned StoreSize = 0;
679
680 // Collect the chain into a list.
681 while (Tails.count(I) || Heads.count(I)) {
682 if (TransformedStores.count(I))
683 break;
684 AdjacentStores.insert(I);
685
686 StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
687 // Move to the next value in the chain.
688 I = ConsecutiveChain[I];
689 }
690
691 Value *StoredVal = HeadStore->getValueOperand();
692 Value *StorePtr = HeadStore->getPointerOperand();
693 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
694 APInt Stride = getStoreStride(StoreEv);
695
696 // Check to see if the stride matches the size of the stores. If so, then
697 // we know that every byte is touched in the loop.
698 if (StoreSize != Stride && StoreSize != -Stride)
699 continue;
700
701 bool NegStride = StoreSize == -Stride;
702
703 if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
704 StoredVal, HeadStore, AdjacentStores, StoreEv,
705 BECount, NegStride)) {
706 TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
707 Changed = true;
708 }
709 }
710
711 return Changed;
712}
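The head/tail chaining above targets stores that individually do not cover their stride but together do; a hypothetical hand-unrolled loop of the kind mentioned in runOnLoopBlock's comment:

// Each store writes 4 bytes while the pointer advances 8 bytes per iteration,
// so neither store qualifies on its own; as a consecutive pair with the same
// stride and splat value they are chained and emitted as one memset covering
// every byte of the range.
static void handUnrolledZero(int *A, unsigned N) { // N assumed even
  for (unsigned I = 0; I != N; I += 2) {
    A[I] = 0;
    A[I + 1] = 0;
  }
}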
713
714/// processLoopMemSet - See if this memset can be promoted to a large memset.
715bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
716 const SCEV *BECount) {
717 // We can only handle non-volatile memsets with a constant size.
718 if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
719 return false;
720
721 // If we're not allowed to hack on memset, we fail.
722 if (!HasMemset)
723 return false;
724
725 Value *Pointer = MSI->getDest();
726
727 // See if the pointer expression is an AddRec like {base,+,1} on the current
728 // loop, which indicates a strided store. If we have something else, it's a
729 // random store we can't handle.
730 const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
731 if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
732 return false;
733
734 // Reject memsets that are so large that they overflow an unsigned.
735 uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
736 if ((SizeInBytes >> 32) != 0)
737 return false;
738
739 // Check to see if the stride matches the size of the memset. If so, then we
740 // know that every byte is touched in the loop.
741 const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
742 if (!ConstStride)
743 return false;
744
745 APInt Stride = ConstStride->getAPInt();
746 if (SizeInBytes != Stride && SizeInBytes != -Stride)
747 return false;
748
749 // Verify that the memset value is loop invariant. If not, we can't promote
750 // the memset.
751 Value *SplatValue = MSI->getValue();
752 if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
753 return false;
754
755 SmallPtrSet<Instruction *, 1> MSIs;
756 MSIs.insert(MSI);
757 bool NegStride = SizeInBytes == -Stride;
758 return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
759 MSI->getDestAlignment(), SplatValue, MSI, MSIs,
760 Ev, BECount, NegStride, /*IsLoopMemset=*/true);
761}
762
763/// mayLoopAccessLocation - Return true if the specified loop might access the
764/// specified pointer location, which is a loop-strided access. The 'Access'
765/// argument specifies what the verboten forms of access are (read or write).
766static bool
767mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
768 const SCEV *BECount, unsigned StoreSize,
769 AliasAnalysis &AA,
770 SmallPtrSetImpl<Instruction *> &IgnoredStores) {
771 // Get the location that may be stored across the loop. Since the access is
772 // strided positively through memory, we say that the modified location starts
773 // at the pointer and has infinite size.
774 uint64_t AccessSize = MemoryLocation::UnknownSize;
775
776 // If the loop iterates a fixed number of times, we can refine the access size
777 // to be exactly the size of the memset, which is (BECount+1)*StoreSize
778 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
779 AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
780
781 // TODO: For this to be really effective, we have to dive into the pointer
782 // operand in the store. Store to &A[i] of 100 will always return may alias
783 // with store of &A[100], we need to StoreLoc to be "A" with size of 100,
784 // which will then no-alias a store to &A[100].
785 MemoryLocation StoreLoc(Ptr, AccessSize);
786
787 for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
788 ++BI)
789 for (Instruction &I : **BI)
790 if (IgnoredStores.count(&I) == 0 &&
791 isModOrRefSet(
792 intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
793 return true;
794
795 return false;
796}
797
798// If we have a negative stride, Start refers to the end of the memory location
799// we're trying to memset. Therefore, we need to recompute the base pointer,
800// which is just Start - BECount*Size.
801static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
802 Type *IntPtr, unsigned StoreSize,
803 ScalarEvolution *SE) {
804 const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
805 if (StoreSize != 1)
806 Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
807 SCEV::FlagNUW);
808 return SE->getMinusSCEV(Start, Index);
809}
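A sketch of the down-counting loop this helper serves (hypothetical, assuming sizeof(int) == 4): the addrec's Start is the address of the last element written, so the usable base is Start - BECount * StoreSize.

static void zeroBackwards(int *A, unsigned N) {
  for (unsigned I = N; I != 0; --I)
    A[I - 1] = 0; // Start = &A[N-1], BECount = N-1, StoreSize = 4:
                  // memset base = Start - (N-1)*4 = &A[0]
}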
810
811/// Compute the number of bytes as a SCEV from the backedge taken count.
812///
813/// This also maps the SCEV into the provided type and tries to handle the
814/// computation in a way that will fold cleanly.
815static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
816 unsigned StoreSize, Loop *CurLoop,
817 const DataLayout *DL, ScalarEvolution *SE) {
818 const SCEV *NumBytesS;
819 // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
820 // pointer size if it isn't already.
821 //
822 // If we're going to need to zero extend the BE count, check if we can add
823 // one to it prior to zero extending without overflow. Provided this is safe,
824 // it allows better simplification of the +1.
825 if (DL->getTypeSizeInBits(BECount->getType()) <
826 DL->getTypeSizeInBits(IntPtr) &&
827 SE->isLoopEntryGuardedByCond(
828 CurLoop, ICmpInst::ICMP_NE, BECount,
829 SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
830 NumBytesS = SE->getZeroExtendExpr(
831 SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
832 IntPtr);
833 } else {
834 NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
835 SE->getOne(IntPtr), SCEV::FlagNUW);
836 }
837
838 // And scale it based on the store size.
839 if (StoreSize != 1) {
840 NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
841 SCEV::FlagNUW);
842 }
843 return NumBytesS;
844}
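As a concrete instance of the formula above: with a constant backedge-taken count of 99 and a StoreSize of 4, the expression folds to (99 + 1) * 4 = 400 bytes, exactly the extent written by the loop's 100 four-byte stores.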
845
846/// processLoopStridedStore - We see a strided store of some value. If we can
847/// transform this into a memset or memset_pattern in the loop preheader, do so.
848bool LoopIdiomRecognize::processLoopStridedStore(
849 Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
850 Value *StoredVal, Instruction *TheStore,
851 SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
852 const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
853 Value *SplatValue = isBytewiseValue(StoredVal);
854 Constant *PatternValue = nullptr;
855
856 if (!SplatValue)
857 PatternValue = getMemSetPatternValue(StoredVal, DL);
858
859 assert((SplatValue || PatternValue) &&
860 "Expected either splat value or pattern value.");
861
862 // The trip count of the loop and the base pointer of the addrec SCEV is
863 // guaranteed to be loop invariant, which means that it should dominate the
864 // header. This allows us to insert code for it in the preheader.
865 unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
866 BasicBlock *Preheader = CurLoop->getLoopPreheader();
867 IRBuilder<> Builder(Preheader->getTerminator());
868 SCEVExpander Expander(*SE, *DL, "loop-idiom");
869
870 Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
871 Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
872
873 const SCEV *Start = Ev->getStart();
874 // Handle negative strided loops.
875 if (NegStride)
876 Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
877
878 // TODO: ideally we should still be able to generate memset if SCEV expander
879 // is taught to generate the dependencies at the latest point.
880 if (!isSafeToExpand(Start, *SE))
881 return false;
882
883 // Okay, we have a strided store "p[i]" of a splattable value. We can turn
884 // this into a memset in the loop preheader now if we want. However, this
885 // would be unsafe to do if there is anything else in the loop that may read
886 // or write to the aliased location. Check for any overlap by generating the
887 // base pointer and checking the region.
888 Value *BasePtr =
889 Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
890 if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
891 StoreSize, *AA, Stores)) {
892 Expander.clear();
893 // If we generated new code for the base pointer, clean up.
894 RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
895 return false;
896 }
897
898 if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
899 return false;
900
901 // Okay, everything looks good, insert the memset.
902
903 const SCEV *NumBytesS =
904 getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);
905
906 // TODO: ideally we should still be able to generate memset if SCEV expander
907 // is taught to generate the dependencies at the latest point.
908 if (!isSafeToExpand(NumBytesS, *SE))
909 return false;
910
911 Value *NumBytes =
912 Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
913
914 CallInst *NewCall;
915 if (SplatValue) {
916 NewCall =
917 Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
918 } else {
919 // Everything is emitted in default address space
920 Type *Int8PtrTy = DestInt8PtrTy;
921
922 Module *M = TheStore->getModule();
923 Value *MSP =
924 M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
925 Int8PtrTy, Int8PtrTy, IntPtr);
926 inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);
927
928 // Otherwise we should form a memset_pattern16. PatternValue is known to be
929 // a constant array of 16 bytes. Plop the value into a mergeable global.
930 GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
931 GlobalValue::PrivateLinkage,
932 PatternValue, ".memset_pattern");
933 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
934 GV->setAlignment(16);
935 Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
936 NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
937 }
938
939 DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
940 << " from store to: " << *Ev << " at: " << *TheStore << "\n");
941 NewCall->setDebugLoc(TheStore->getDebugLoc());
942
943 // Okay, the memset has been formed. Zap the original store and anything that
944 // feeds into it.
945 for (auto *I : Stores)
946 deleteDeadInstruction(I);
947 ++NumMemSet;
948 return true;
949}
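For the non-splat branch above, the emitted code amounts to a private 16-byte constant holding PatternValue plus a call to memset_pattern16 with the (void, i8*, i8*, intptr) prototype created by getOrInsertFunction. A rough source-level sketch, assuming a 4-byte pattern and the Darwin libc routine:

#include <cstddef>

// Prototype matching the one created above; provided by Darwin's libc.
extern "C" void memset_pattern16(void *Dst, const void *Pattern16, size_t Len);

alignas(16) static const int Pattern[4] = {0x01020304, 0x01020304,
                                           0x01020304, 0x01020304};

// Hypothetical preheader-level equivalent: the per-element store loop is gone
// and a single call covers NumBytes = (BECount + 1) * StoreSize bytes.
static void afterPatternStore(int *P, unsigned N) {
  memset_pattern16(P, Pattern, N * sizeof(int));
}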
950
951/// If the stored value is a strided load in the same loop with the same stride
952/// this may be transformable into a memcpy. This kicks in for stuff like
953/// for (i) A[i] = B[i];
954bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
955 const SCEV *BECount) {
956 assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");
957
958 Value *StorePtr = SI->getPointerOperand();
959 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
960 APInt Stride = getStoreStride(StoreEv);
961 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
962 bool NegStride = StoreSize == -Stride;
963
964 // The store must be feeding a non-volatile load.
965 LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
966 assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");
967
968 // See if the pointer expression is an AddRec like {base,+,1} on the current
969 // loop, which indicates a strided load. If we have something else, it's a
970 // random load we can't handle.
971 const SCEVAddRecExpr *LoadEv =
972 cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
973
974 // The trip count of the loop and the base pointer of the addrec SCEV is
975 // guaranteed to be loop invariant, which means that it should dominate the
976 // header. This allows us to insert code for it in the preheader.
977 BasicBlock *Preheader = CurLoop->getLoopPreheader();
978 IRBuilder<> Builder(Preheader->getTerminator());
979 SCEVExpander Expander(*SE, *DL, "loop-idiom");
980
981 const SCEV *StrStart = StoreEv->getStart();
982 unsigned StrAS = SI->getPointerAddressSpace();
983 Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
984
985 // Handle negative strided loops.
986 if (NegStride)
987 StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
988
989 // Okay, we have a strided store "p[i]" of a loaded value. We can turn
990 // this into a memcpy in the loop preheader now if we want. However, this
991 // would be unsafe to do if there is anything else in the loop that may read
992 // or write the memory region we're storing to. This includes the load that
993 // feeds the stores. Check for an alias by generating the base address and
994 // checking everything.
995 Value *StoreBasePtr = Expander.expandCodeFor(
996 StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());
997
998 SmallPtrSet<Instruction *, 1> Stores;
999 Stores.insert(SI);
1000 if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
1001 StoreSize, *AA, Stores)) {
1002 Expander.clear();
1003 // If we generated new code for the base pointer, clean up.
1004 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1005 return false;
1006 }
1007
1008 const SCEV *LdStart = LoadEv->getStart();
1009 unsigned LdAS = LI->getPointerAddressSpace();
1010
1011 // Handle negative strided loops.
1012 if (NegStride)
1013 LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
1014
1015 // For a memcpy, we have to make sure that the input array is not being
1016 // mutated by the loop.
1017 Value *LoadBasePtr = Expander.expandCodeFor(
1018 LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
1019
1020 if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
1021 StoreSize, *AA, Stores)) {
1022 Expander.clear();
1023 // If we generated new code for the base pointer, clean up.
1024 RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
1025 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1026 return false;
1027 }
1028
1029 if (avoidLIRForMultiBlockLoop())
1030 return false;
1031
1032 // Okay, everything is safe, we can transform this!
1033
1034 const SCEV *NumBytesS =
1035 getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);
1036
1037 Value *NumBytes =
1038 Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
1039
1040 CallInst *NewCall = nullptr;
1041 // Check whether to generate an unordered atomic memcpy:
1042 // If the load or store are atomic, then they must necessarily be unordered
1043 // by previous checks.
1044 if (!SI->isAtomic() && !LI->isAtomic())
1045 NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
1046 LoadBasePtr, LI->getAlignment(), NumBytes);
1047 else {
1048 // We cannot allow unaligned ops for unordered load/store, so reject
1049 // anything where the alignment isn't at least the element size.
1050 unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
1051 if (Align < StoreSize)
1052 return false;
1053
1054 // If the element.atomic memcpy is not lowered into explicit
1055 // loads/stores later, then it will be lowered into an element-size
1056 // specific lib call. If the lib call doesn't exist for our store size, then
1057 // we shouldn't generate the memcpy.
1058 if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
1059 return false;
1060
1061 // Create the call.
1062 // Note that unordered atomic loads/stores are *required* by the spec to
1063 // have an alignment but non-atomic loads/stores may not.
1064 NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
1065 StoreBasePtr, SI->getAlignment(), LoadBasePtr, LI->getAlignment(),
1066 NumBytes, StoreSize);
1067 }
1068 NewCall->setDebugLoc(SI->getDebugLoc());
1069
1070 DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
1071 << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
1072 << " from store ptr=" << *StoreEv << " at: " << *SI << "\n");
1073
1074 // Okay, the memcpy has been formed. Zap the original store and anything that
1075 // feeds into it.
1076 deleteDeadInstruction(SI);
1077 ++NumMemCpy;
1078 return true;
1079}
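A source-level sketch of the rewrite this function performs (hypothetical names; simple, non-atomic case). When either access is unordered-atomic, an element-wise unordered atomic memcpy is emitted instead, and only if the common alignment is at least the element size and that size is supported by the target's lib calls, as checked above.

#include <cstring>

static void beforeCopy(int *A, const int *B, unsigned N) {
  for (unsigned I = 0; I != N; ++I)
    A[I] = B[I];                      // strided store fed by a same-stride load
}

// Equivalent after the pass: one call in the preheader, original store deleted.
static void afterCopy(int *A, const int *B, unsigned N) {
  std::memcpy(A, B, N * sizeof(int));
}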
1080
1081// When compiling for codesize we avoid idiom recognition for a multi-block loop
1082// unless it is a loop_memset idiom or a memset/memcpy idiom in a nested loop.
1083//
1084bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
1085 bool IsLoopMemset) {
1086 if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
1087 if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
1088 DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()
1089 << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
1090 << " avoided: multi-block top-level loop\n");
1091 return true;
1092 }
1093 }
1094
1095 return false;
1096}
1097
1098bool LoopIdiomRecognize::runOnNoncountableLoop() {
1099 return recognizePopcount() || recognizeAndInsertCTLZ();
10. Calling 'LoopIdiomRecognize::recognizeAndInsertCTLZ'
1100}
1101
1102/// Check if the given conditional branch is based on the comparison between
1103/// a variable and zero, and if the variable is non-zero, the control yields to
1104/// the loop entry. If the branch matches the behavior, the variable involved
1105/// in the comparison is returned. This function will be called to see if the
1106/// precondition and postcondition of the loop are in desirable form.
1107static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
1108 if (!BI || !BI->isConditional())
1109 return nullptr;
1110
1111 ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
1112 if (!Cond)
1113 return nullptr;
1114
1115 ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
1116 if (!CmpZero || !CmpZero->isZero())
1117 return nullptr;
1118
1119 ICmpInst::Predicate Pred = Cond->getPredicate();
1120 if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
1121 (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
1122 return Cond->getOperand(0);
1123
1124 return nullptr;
1125}
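The branch shape matchCondition accepts, shown as a hypothetical source loop: the latch compares a value against zero and branches back into the loop on the non-zero edge (the same helper is also applied to the precondition branch).

static unsigned popcountShape(unsigned X) {
  unsigned Cnt = 0;
  if (X != 0) {        // precondition branch: matchCondition returns X
    do {
      X = X & (X - 1); // clear the lowest set bit
      ++Cnt;
    } while (X != 0);  // loop-back branch: matchCondition returns X
  }
  return Cnt;
}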
1126
1127// Check if the recurrence variable `VarX` is in the right form to create
1128// the idiom. Returns the value coerced to a PHINode if so.
1129static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
1130 BasicBlock *LoopEntry) {
1131 auto *PhiX = dyn_cast<PHINode>(VarX);
1132 if (PhiX && PhiX->getParent() == LoopEntry &&
1133 (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
1134 return PhiX;
1135 return nullptr;
1136}
1137
1138/// Return true iff the idiom is detected in the loop.
1139///
1140/// Additionally:
1141/// 1) \p CntInst is set to the instruction counting the population bits.
1142/// 2) \p CntPhi is set to the corresponding phi node.
1143/// 3) \p Var is set to the value whose population bits are being counted.
1144///
1145/// The core idiom we are trying to detect is:
1146/// \code
1147/// if (x0 != 0)
1148/// goto loop-exit // the precondition of the loop
1149/// cnt0 = init-val;
1150/// do {
1151/// x1 = phi (x0, x2);
1152/// cnt1 = phi(cnt0, cnt2);
1153///
1154/// cnt2 = cnt1 + 1;
1155/// ...
1156/// x2 = x1 & (x1 - 1);
1157/// ...
1158/// } while(x2 != 0);
1159///
1160/// loop-exit:
1161/// \endcode
1162static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
1163 Instruction *&CntInst, PHINode *&CntPhi,
1164 Value *&Var) {
1165 // step 1: Check to see if the loop-back branch matches this pattern:
1166 // "if (a!=0) goto loop-entry".
1167 BasicBlock *LoopEntry;
1168 Instruction *DefX2, *CountInst;
1169 Value *VarX1, *VarX0;
1170 PHINode *PhiX, *CountPhi;
1171
1172 DefX2 = CountInst = nullptr;
1173 VarX1 = VarX0 = nullptr;
1174 PhiX = CountPhi = nullptr;
1175 LoopEntry = *(CurLoop->block_begin());
1176
1177 // step 1: Check if the loop-back branch is in desirable form.
1178 {
1179 if (Value *T = matchCondition(
1180 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1181 DefX2 = dyn_cast<Instruction>(T);
1182 else
1183 return false;
1184 }
1185
1186 // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
1187 {
1188 if (!DefX2 || DefX2->getOpcode() != Instruction::And)
1189 return false;
1190
1191 BinaryOperator *SubOneOp;
1192
1193 if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
1194 VarX1 = DefX2->getOperand(1);
1195 else {
1196 VarX1 = DefX2->getOperand(0);
1197 SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
1198 }
1199 if (!SubOneOp)
1200 return false;
1201
1202 Instruction *SubInst = cast<Instruction>(SubOneOp);
1203 ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
1204 if (!Dec ||
1205 !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
1206 (SubInst->getOpcode() == Instruction::Add &&
1207 Dec->isMinusOne()))) {
1208 return false;
1209 }
1210 }
1211
1212 // step 3: Check the recurrence of variable X
1213 PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
1214 if (!PhiX)
1215 return false;
1216
1217 // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
1218 {
1219 CountInst = nullptr;
1220 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1221 IterE = LoopEntry->end();
1222 Iter != IterE; Iter++) {
1223 Instruction *Inst = &*Iter;
1224 if (Inst->getOpcode() != Instruction::Add)
1225 continue;
1226
1227 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1228 if (!Inc || !Inc->isOne())
1229 continue;
1230
1231 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1232 if (!Phi)
1233 continue;
1234
1235 // Check if the result of the instruction is live out of the loop.
1236 bool LiveOutLoop = false;
1237 for (User *U : Inst->users()) {
1238 if ((cast<Instruction>(U))->getParent() != LoopEntry) {
1239 LiveOutLoop = true;
1240 break;
1241 }
1242 }
1243
1244 if (LiveOutLoop) {
1245 CountInst = Inst;
1246 CountPhi = Phi;
1247 break;
1248 }
1249 }
1250
1251 if (!CountInst)
1252 return false;
1253 }
1254
1255 // step 5: check if the precondition is in this form:
1256 // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
1257 {
1258 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1259 Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
1260 if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
1261 return false;
1262
1263 CntInst = CountInst;
1264 CntPhi = CountPhi;
1265 Var = T;
1266 }
1267
1268 return true;
1269}
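
For reference, a C-level loop of the shape this routine detects looks like the sketch below (illustrative only; the function and variable names are invented, and the pass matches the IR form shown in the \code block above, not C source):

  static int popcount_via_loop(unsigned x0) {
    int cnt = 0;                       // cnt0 = init-val
    if (x0 != 0) {                     // the precondition branch
      unsigned x = x0;
      do {
        cnt = cnt + 1;                 // cnt2 = cnt1 + 1    (CountInst)
        x = x & (x - 1);               // x2 = x1 & (x1 - 1) (DefX2)
      } while (x != 0);                // loop-back compare against zero
    }
    return cnt;                        // live-out use of the counter
  }

Each iteration clears the lowest set bit, so the counter ends up equal to the population count of x0; that equivalence is what transformLoopToPopcount exploits below.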
1270
1271/// Return true if the idiom is detected in the loop.
1272///
1273/// Additionally:
1274/// 1) \p CntInst is set to the instruction counting leading zeros (CTLZ),
1275/// or nullptr if there is no such instruction.
1276/// 2) \p CntPhi is set to the corresponding phi node,
1277/// or nullptr if there is no such node.
1278/// 3) \p PhiX is set to the phi node of the value whose CTLZ could be used.
1279/// 4) \p DefX is set to the instruction calculating the loop exit condition.
1280///
1281/// The core idiom we are trying to detect is:
1282/// \code
1283/// if (x0 == 0)
1284/// goto loop-exit // the precondition of the loop
1285/// cnt0 = init-val;
1286/// do {
1287/// x = phi (x0, x.next); //PhiX
1288/// cnt = phi(cnt0, cnt.next);
1289///
1290/// cnt.next = cnt + 1;
1291/// ...
1292/// x.next = x >> 1; // DefX
1293/// ...
1294/// } while(x.next != 0);
1295///
1296/// loop-exit:
1297/// \endcode
1298static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
1299 Instruction *&CntInst, PHINode *&CntPhi,
1300 Instruction *&DefX) {
1301 BasicBlock *LoopEntry;
1302 Value *VarX = nullptr;
1303
1304 DefX = nullptr;
1305 PhiX = nullptr;
1306 CntInst = nullptr;
1307 CntPhi = nullptr;
1308 LoopEntry = *(CurLoop->block_begin());
1309
1310 // step 1: Check if the loop-back branch is in desirable form.
1311 if (Value *T = matchCondition(
1312 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1313 DefX = dyn_cast<Instruction>(T);
1314 else
1315 return false;
1316
1317 // step 2: detect instructions corresponding to "x.next = x >> 1"
1318 if (!DefX || DefX->getOpcode() != Instruction::AShr)
1319 return false;
1320 ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
1321 if (!Shft || !Shft->isOne())
1322 return false;
1323 VarX = DefX->getOperand(0);
1324
1325 // step 3: Check the recurrence of variable X
1326 PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
1327 if (!PhiX)
1328 return false;
1329
1330 // step 4: Find the instruction which counts the CTLZ: cnt.next = cnt + 1
1331 // TODO: We can skip this step. If the loop trip count is known (CTLZ),
1332 // then all uses of "cnt.next" could be optimized to the trip count
1333 // plus "cnt0". Currently it is not optimized.
1334 // This step could be used to detect POPCNT instruction:
1335 // cnt.next = cnt + (x.next & 1)
1336 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1337 IterE = LoopEntry->end();
1338 Iter != IterE; Iter++) {
1339 Instruction *Inst = &*Iter;
1340 if (Inst->getOpcode() != Instruction::Add)
1341 continue;
1342
1343 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1344 if (!Inc || !Inc->isOne())
1345 continue;
1346
1347 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1348 if (!Phi)
1349 continue;
1350
1351 CntInst = Inst;
1352 CntPhi = Phi;
1353 break;
1354 }
1355 if (!CntInst)
1356 return false;
1357
1358 return true;
1359}
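
Similarly, an illustrative C-level loop matching this shape (names invented; a signed operand is used so the shift lowers to the AShr instruction required by step 2):

  static int shift_count_loop(int x0) {
    int cnt = 0;                       // cnt0 = init-val
    if (x0 == 0)
      return cnt;                      // the precondition of the loop
    int x = x0;
    do {
      cnt = cnt + 1;                   // cnt.next = cnt + 1 (CntInst)
      x = x >> 1;                      // x.next = x >> 1    (DefX, AShr)
    } while (x != 0);
    return cnt;
  }

For a positive x0 this loop runs BitWidth - CTLZ(x0) times, which is exactly the trip count recognizeAndInsertCTLZ materializes in the preheader.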
1360
1361/// Recognize CTLZ idiom in a non-countable loop and convert the loop
1362/// to countable (with CTLZ trip count).
1363/// If CTLZ is inserted as the new trip count, returns true; otherwise, returns false.
1364bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
1365 // Give up if the loop has multiple blocks or multiple backedges.
1366 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
11
Assuming the condition is false
12
Assuming the condition is false
13
Taking false branch
1367 return false;
1368
1369 Instruction *CntInst, *DefX;
1370 PHINode *CntPhi, *PhiX;
1371 if (!detectCTLZIdiom(CurLoop, PhiX, CntInst, CntPhi, DefX))
14
Taking false branch
1372 return false;
1373
1374 bool IsCntPhiUsedOutsideLoop = false;
1375 for (User *U : CntPhi->users())
1376 if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
1377 IsCntPhiUsedOutsideLoop = true;
1378 break;
1379 }
1380 bool IsCntInstUsedOutsideLoop = false;
1381 for (User *U : CntInst->users())
1382 if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
1383 IsCntInstUsedOutsideLoop = true;
1384 break;
1385 }
1386 // If both CntInst and CntPhi are used outside the loop the profitability
1387 // is questionable.
1388 if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
1389 return false;
1390
1391 // For some CPUs the result of the CTLZ(X) intrinsic is undefined
1392 // when X is 0. If we cannot guarantee X != 0, we need to emit this
1393 // check when expanding the intrinsic.
1394 bool ZeroCheck = false;
1395 // It is safe to assume the Preheader exists, as it was checked in the
1396 // parent function RunOnLoop.
1397 BasicBlock *PH = CurLoop->getLoopPreheader();
1398 Value *InitX = PhiX->getIncomingValueForBlock(PH);
15
Calling 'PHINode::getIncomingValueForBlock'
19
Returning from 'PHINode::getIncomingValueForBlock'
20
'InitX' initialized here
1399 // If we check X != 0 before entering the loop we don't need a zero
1400 // check in CTLZ intrinsic, but only if Cnt Phi is not used outside of the
1401 // loop (if it is used we count CTLZ(X >> 1)).
1402 if (!IsCntPhiUsedOutsideLoop)
21
Taking true branch
1403 if (BasicBlock *PreCondBB = PH->getSinglePredecessor())
22
Assuming 'PreCondBB' is non-null
23
Taking true branch
1404 if (BranchInst *PreCondBr =
24
Assuming 'PreCondBr' is non-null
25
Taking true branch
1405 dyn_cast<BranchInst>(PreCondBB->getTerminator())) {
1406 if (matchCondition(PreCondBr, PH) == InitX)
26
Assuming the condition is true
27
Assuming pointer value is null
28
Taking true branch
1407 ZeroCheck = true;
1408 }
1409
1410 // Check if CTLZ intrinsic is profitable. Assume it is always profitable
1411 // if we delete the loop (the loop has only 6 instructions):
1412 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
1413 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
1414 // %shr = ashr %n.addr.0, 1
1415 // %tobool = icmp eq %shr, 0
1416 // %inc = add nsw %i.0, 1
1417 // br i1 %tobool
1418
1419 IRBuilder<> Builder(PH->getTerminator());
1420 SmallVector<const Value *, 2> Ops =
1421 {InitX, ZeroCheck ? Builder.getTrue() : Builder.getFalse()};
29
'?' condition is true
1422 ArrayRef<const Value *> Args(Ops);
1423 if (CurLoop->getHeader()->size() != 6 &&
30
Assuming the condition is true
1424 TTI->getIntrinsicCost(Intrinsic::ctlz, InitX->getType(), Args) >
31
Called C++ object pointer is null
1425 TargetTransformInfo::TCC_Basic)
1426 return false;
1427
1428 const DebugLoc DL = DefX->getDebugLoc();
1429 transformLoopToCountable(PH, CntInst, CntPhi, InitX, DL, ZeroCheck,
1430 IsCntPhiUsedOutsideLoop);
1431 return true;
1432}
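
The warning reported at line 1424 fires on the InitX->getType() call above: along the path shown, the analyzer assumes the value returned by matchCondition compares equal to InitX and that this pointer value is null, so the later dereference of InitX is flagged. A minimal sketch of a guard under that assumption (hypothetical illustration, not the pass's actual code):

  Value *InitX = PhiX->getIncomingValueForBlock(PH);
  if (!InitX)   // hypothetical guard matching the analyzer's null assumption
    return false;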
1433
1434/// Recognizes a population count idiom in a non-countable loop.
1435///
1436/// If detected, transforms the relevant code to issue the popcount intrinsic
1437/// function call, and returns true; otherwise, returns false.
1438bool LoopIdiomRecognize::recognizePopcount() {
1439 if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
1440 return false;
1441
1442 // Population counting is usually done with a few arithmetic instructions.
1443 // Such instructions can be easily "absorbed" by vacant slots in a
1444 // non-compact loop. Therefore, recognizing the popcount idiom only makes
1445 // sense in a compact loop.
1446
1447 // Give up if the loop has multiple blocks or multiple backedges.
1448 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
1449 return false;
1450
1451 BasicBlock *LoopBody = *(CurLoop->block_begin());
1452 if (LoopBody->size() >= 20) {
1453 // The loop is too big, bail out.
1454 return false;
1455 }
1456
1457 // It should have a preheader containing nothing but an unconditional branch.
1458 BasicBlock *PH = CurLoop->getLoopPreheader();
1459 if (!PH || &PH->front() != PH->getTerminator())
1460 return false;
1461 auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
1462 if (!EntryBI || EntryBI->isConditional())
1463 return false;
1464
1465 // It should have a precondition block where the generated popcount intrinsic
1466 // function can be inserted.
1467 auto *PreCondBB = PH->getSinglePredecessor();
1468 if (!PreCondBB)
1469 return false;
1470 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1471 if (!PreCondBI || PreCondBI->isUnconditional())
1472 return false;
1473
1474 Instruction *CntInst;
1475 PHINode *CntPhi;
1476 Value *Val;
1477 if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
1478 return false;
1479
1480 transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
1481 return true;
1482}
1483
1484static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1485 const DebugLoc &DL) {
1486 Value *Ops[] = {Val};
1487 Type *Tys[] = {Val->getType()};
1488
1489 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1490 Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
1491 CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1492 CI->setDebugLoc(DL);
1493
1494 return CI;
1495}
1496
1497static CallInst *createCTLZIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1498 const DebugLoc &DL, bool ZeroCheck) {
1499 Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
1500 Type *Tys[] = {Val->getType()};
1501
1502 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1503 Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
1504 CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1505 CI->setDebugLoc(DL);
1506
1507 return CI;
1508}
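
Both helpers simply build one intrinsic call at the current insertion point. A usage sketch, assuming an IRBuilder<> B, a 32-bit Value *V and a DebugLoc DL are in scope (illustrative only):

  CallInst *Pop  = createPopcntIntrinsic(B, V, DL);                   // call i32 @llvm.ctpop.i32(i32 %V)
  CallInst *Ctlz = createCTLZIntrinsic(B, V, DL, /*ZeroCheck=*/true); // call i32 @llvm.ctlz.i32(i32 %V, i1 true)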
1509
1510/// Transform the following loop:
1511/// loop:
1512/// CntPhi = PHI [Cnt0, CntInst]
1513/// PhiX = PHI [InitX, DefX]
1514/// CntInst = CntPhi + 1
1515/// DefX = PhiX >> 1
1516/// LOOP_BODY
1517/// Br: loop if (DefX != 0)
1518/// Use(CntPhi) or Use(CntInst)
1519///
1520/// Into:
1521/// If CntPhi used outside the loop:
1522/// CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
1523/// Count = CountPrev + 1
1524/// else
1525/// Count = BitWidth(InitX) - CTLZ(InitX)
1526/// loop:
1527/// CntPhi = PHI [Cnt0, CntInst]
1528/// PhiX = PHI [InitX, DefX]
1529/// PhiCount = PHI [Count, Dec]
1530/// CntInst = CntPhi + 1
1531/// DefX = PhiX >> 1
1532/// Dec = PhiCount - 1
1533/// LOOP_BODY
1534/// Br: loop if (Dec != 0)
1535/// Use(CountPrev + Cnt0) // Use(CntPhi)
1536/// or
1537/// Use(Count + Cnt0) // Use(CntInst)
1538///
1539/// If LOOP_BODY is empty the loop will be deleted.
1540/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
1541void LoopIdiomRecognize::transformLoopToCountable(
1542 BasicBlock *Preheader, Instruction *CntInst, PHINode *CntPhi, Value *InitX,
1543 const DebugLoc DL, bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
1544 BranchInst *PreheaderBr = dyn_cast<BranchInst>(Preheader->getTerminator());
1545
1546 // Step 1: Insert the CTLZ instruction at the end of the preheader block
1547 // Count = BitWidth - CTLZ(InitX);
1548 // If there are uses of CntPhi create:
1549 // CountPrev = BitWidth - CTLZ(InitX >> 1);
1550 IRBuilder<> Builder(PreheaderBr);
1551 Builder.SetCurrentDebugLocation(DL);
1552 Value *CTLZ, *Count, *CountPrev, *NewCount, *InitXNext;
1553
1554 if (IsCntPhiUsedOutsideLoop)
1555 InitXNext = Builder.CreateAShr(InitX,
1556 ConstantInt::get(InitX->getType(), 1));
1557 else
1558 InitXNext = InitX;
1559 CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
1560 Count = Builder.CreateSub(
1561 ConstantInt::get(CTLZ->getType(),
1562 CTLZ->getType()->getIntegerBitWidth()),
1563 CTLZ);
1564 if (IsCntPhiUsedOutsideLoop) {
1565 CountPrev = Count;
1566 Count = Builder.CreateAdd(
1567 CountPrev,
1568 ConstantInt::get(CountPrev->getType(), 1));
1569 }
1570 if (IsCntPhiUsedOutsideLoop)
1571 NewCount = Builder.CreateZExtOrTrunc(CountPrev,
1572 cast<IntegerType>(CntInst->getType()));
1573 else
1574 NewCount = Builder.CreateZExtOrTrunc(Count,
1575 cast<IntegerType>(CntInst->getType()));
1576
1577 // If the CTLZ counter's initial value is not zero, insert Add Inst.
1578 Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
1579 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
1580 if (!InitConst || !InitConst->isZero())
1581 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
1582
1583 // Step 2: Insert new IV and loop condition:
1584 // loop:
1585 // ...
1586 // PhiCount = PHI [Count, Dec]
1587 // ...
1588 // Dec = PhiCount - 1
1589 // ...
1590 // Br: loop if (Dec != 0)
1591 BasicBlock *Body = *(CurLoop->block_begin());
1592 auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
1593 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
1594 Type *Ty = Count->getType();
1595
1596 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
1597
1598 Builder.SetInsertPoint(LbCond);
1599 Instruction *TcDec = cast<Instruction>(
1600 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
1601 "tcdec", false, true));
1602
1603 TcPhi->addIncoming(Count, Preheader);
1604 TcPhi->addIncoming(TcDec, Body);
1605
1606 CmpInst::Predicate Pred =
1607 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
1608 LbCond->setPredicate(Pred);
1609 LbCond->setOperand(0, TcDec);
1610 LbCond->setOperand(1, ConstantInt::get(Ty, 0));
1611
1612 // Step 3: All the references to the original counter outside
1613 // the loop are replaced with NewCount -- the count computed in the
1614 // preheader from __builtin_ctlz(x).
1615 if (IsCntPhiUsedOutsideLoop)
1616 CntPhi->replaceUsesOutsideBlock(NewCount, Body);
1617 else
1618 CntInst->replaceUsesOutsideBlock(NewCount, Body);
1619
1620 // step 4: Forget the "non-computable" trip-count SCEV associated with the
1621 // loop. The loop would otherwise not be deleted even if it becomes empty.
1622 SE->forgetLoop(CurLoop);
1623}
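
The preheader computation above can be summarized as scalar arithmetic. A rough model for a 32-bit InitX (illustrative only; it uses the GCC/Clang __builtin_clz builtin, guards the zero case by hand where the real code relies on the ZeroCheck flag of the intrinsic, and uses a logical shift where the pass emits an arithmetic one):

  unsigned modelNewTripCount(unsigned InitX, bool IsCntPhiUsedOutsideLoop) {
    unsigned V = IsCntPhiUsedOutsideLoop ? (InitX >> 1) : InitX;   // InitXNext
    unsigned CTLZ = (V == 0) ? 32u : (unsigned)__builtin_clz(V);
    unsigned Count = 32u - CTLZ;               // BitWidth - CTLZ(InitXNext)
    if (IsCntPhiUsedOutsideLoop)
      Count = Count + 1;                       // Count = CountPrev + 1
    return Count;                              // seeds the new counting-down IV
  }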
1624
1625void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
1626 Instruction *CntInst,
1627 PHINode *CntPhi, Value *Var) {
1628 BasicBlock *PreHead = CurLoop->getLoopPreheader();
1629 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1630 const DebugLoc DL = CntInst->getDebugLoc();
1631
1632 // Assume that before the transformation, the loop looks like the following:
1633 // if (x) // the precondition
1634 // do { cnt++; x &= x - 1; } while(x);
1635
1636 // Step 1: Insert the ctpop instruction at the end of the precondition block
1637 IRBuilder<> Builder(PreCondBr);
1638 Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
1639 {
1640 PopCnt = createPopcntIntrinsic(Builder, Var, DL);
1641 NewCount = PopCntZext =
1642 Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
1643
1644 if (NewCount != PopCnt)
1645 (cast<Instruction>(NewCount))->setDebugLoc(DL);
1646
1647 // TripCnt is exactly the number of iterations the loop has
1648 TripCnt = NewCount;
1649
1650 // If the population counter's initial value is not zero, insert Add Inst.
1651 Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
1652 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
1653 if (!InitConst || !InitConst->isZero()) {
1654 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
1655 (cast<Instruction>(NewCount))->setDebugLoc(DL);
1656 }
1657 }
1658
1659 // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
1660 // "if (NewCount == 0) loop-exit". Without this change, the intrinsic
1661 // function would be partially dead code, and downstream passes would drag
1662 // it back from the precondition block to the preheader.
1663 {
1664 ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
1665
1666 Value *Opnd0 = PopCntZext;
1667 Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
1668 if (PreCond->getOperand(0) != Var)
1669 std::swap(Opnd0, Opnd1);
1670
1671 ICmpInst *NewPreCond = cast<ICmpInst>(
1672 Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
1673 PreCondBr->setCondition(NewPreCond);
1674
1675 RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
1676 }
1677
1678 // Step 3: Note that the population count is exactly the trip count of the
1679 // loop in question, which enables us to convert the loop from a
1680 // noncountable loop into a countable one. The benefit is twofold:
1681 //
1682 // - If the loop only counts population, the entire loop becomes dead after
1683 // the transformation. It is a lot easier to prove a countable loop dead
1684 // than to prove a noncountable one. (In some C dialects, an infinite loop
1685 // isn't dead even if it computes nothing useful. In general, DCE needs
1686 // to prove a noncountable loop finite before safely deleting it.)
1687 //
1688 // - If the loop also performs something else, it remains alive.
1689 // Since it is transformed to countable form, it can be aggressively
1690 // optimized by some optimizations which are in general not applicable
1691 // to a noncountable loop.
1692 //
1693 // After this step, this loop (conceptually) would look like the following:
1694 // newcnt = __builtin_ctpop(x);
1695 // t = newcnt;
1696 // if (x)
1697 // do { cnt++; x &= x-1; t--; } while (t > 0);
1698 BasicBlock *Body = *(CurLoop->block_begin());
1699 {
1700 auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
1701 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
1702 Type *Ty = TripCnt->getType();
1703
1704 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
1705
1706 Builder.SetInsertPoint(LbCond);
1707 Instruction *TcDec = cast<Instruction>(
1708 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
1709 "tcdec", false, true));
1710
1711 TcPhi->addIncoming(TripCnt, PreHead);
1712 TcPhi->addIncoming(TcDec, Body);
1713
1714 CmpInst::Predicate Pred =
1715 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
1716 LbCond->setPredicate(Pred);
1717 LbCond->setOperand(0, TcDec);
1718 LbCond->setOperand(1, ConstantInt::get(Ty, 0));
1719 }
1720
1721 // Step 4: All the references to the original population counter outside
1722 // the loop are replaced with the NewCount -- the value returned from
1723 // __builtin_ctpop().
1724 CntInst->replaceUsesOutsideBlock(NewCount, Body);
1725
1726 // step 5: Forget the "non-computable" trip-count SCEV associated with the
1727 // loop. The loop would otherwise not be deleted even if it becomes empty.
1728 SE->forgetLoop(CurLoop);
1729}
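
In scalar terms the rewrite computes the counter once in the precondition block. A sketch for a 32-bit value (illustrative only; __builtin_popcount stands in for the inserted llvm.ctpop call):

  unsigned modelPopcountNewCount(unsigned x, unsigned cntInit) {
    unsigned TripCnt  = __builtin_popcount(x); // value of the ctpop call
    unsigned NewCount = TripCnt + cntInit;     // Add emitted only when cntInit != 0
    // The precondition is rewritten to test the ctpop result instead of x,
    // and the loop body now counts TripCnt down to zero.
    return NewCount;
  }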

/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes the class definitions of all of the subclasses of the
11// Instruction class. This is meant to be an easy way to get access to all
12// instruction subclasses.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_IR_INSTRUCTIONS_H
17#define LLVM_IR_INSTRUCTIONS_H
18
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/OperandTraits.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Use.h"
38#include "llvm/IR/User.h"
39#include "llvm/IR/Value.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/ErrorHandling.h"
43#include <cassert>
44#include <cstddef>
45#include <cstdint>
46#include <iterator>
47
48namespace llvm {
49
50class APInt;
51class ConstantInt;
52class DataLayout;
53class LLVMContext;
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59/// an instruction to allocate memory on the stack
60class AllocaInst : public UnaryInstruction {
61 Type *AllocatedType;
62
63protected:
64 // Note: Instruction needs to be a friend here to call cloneImpl.
65 friend class Instruction;
66
67 AllocaInst *cloneImpl() const;
68
69public:
70 explicit AllocaInst(Type *Ty, unsigned AddrSpace,
71 Value *ArraySize = nullptr,
72 const Twine &Name = "",
73 Instruction *InsertBefore = nullptr);
74 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
75 const Twine &Name, BasicBlock *InsertAtEnd);
76
77 AllocaInst(Type *Ty, unsigned AddrSpace,
78 const Twine &Name, Instruction *InsertBefore = nullptr);
79 AllocaInst(Type *Ty, unsigned AddrSpace,
80 const Twine &Name, BasicBlock *InsertAtEnd);
81
82 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
83 const Twine &Name = "", Instruction *InsertBefore = nullptr);
84 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
85 const Twine &Name, BasicBlock *InsertAtEnd);
86
87 /// Return true if there is an allocation size parameter to the allocation
88 /// instruction that is not 1.
89 bool isArrayAllocation() const;
90
91 /// Get the number of elements allocated. For a simple allocation of a single
92 /// element, this will return a constant 1 value.
93 const Value *getArraySize() const { return getOperand(0); }
94 Value *getArraySize() { return getOperand(0); }
95
96 /// Overload to return most specific pointer type.
97 PointerType *getType() const {
98 return cast<PointerType>(Instruction::getType());
99 }
100
101 /// Return the type that is being allocated by the instruction.
102 Type *getAllocatedType() const { return AllocatedType; }
103 /// for use only in special circumstances that need to generically
104 /// transform a whole instruction (eg: IR linking and vectorization).
105 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
106
107 /// Return the alignment of the memory that is being allocated by the
108 /// instruction.
109 unsigned getAlignment() const {
110 return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
111 }
112 void setAlignment(unsigned Align);
113
114 /// Return true if this alloca is in the entry block of the function and is a
115 /// constant size. If so, the code generator will fold it into the
116 /// prolog/epilog code, so it is basically free.
117 bool isStaticAlloca() const;
118
119 /// Return true if this alloca is used as an inalloca argument to a call. Such
120 /// allocas are never considered static even if they are in the entry block.
121 bool isUsedWithInAlloca() const {
122 return getSubclassDataFromInstruction() & 32;
123 }
124
125 /// Specify whether this alloca is used to represent the arguments to a call.
126 void setUsedWithInAlloca(bool V) {
127 setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
128 (V ? 32 : 0));
129 }
130
131 /// Return true if this alloca is used as a swifterror argument to a call.
132 bool isSwiftError() const {
133 return getSubclassDataFromInstruction() & 64;
134 }
135
136 /// Specify whether this alloca is used to represent a swifterror.
137 void setSwiftError(bool V) {
138 setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
139 (V ? 64 : 0));
140 }
141
142 // Methods for support type inquiry through isa, cast, and dyn_cast:
143 static bool classof(const Instruction *I) {
144 return (I->getOpcode() == Instruction::Alloca);
145 }
146 static bool classof(const Value *V) {
147 return isa<Instruction>(V) && classof(cast<Instruction>(V));
148 }
149
150private:
151 // Shadow Instruction::setInstructionSubclassData with a private forwarding
152 // method so that subclasses cannot accidentally use it.
153 void setInstructionSubclassData(unsigned short D) {
154 Instruction::setInstructionSubclassData(D);
155 }
156};
157
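
As a quick orientation for the class above, a small usage sketch (illustrative; it assumes an IRBuilder<> B positioned in a function's entry block, which is not part of this header):

  AllocaInst *Slot = B.CreateAlloca(B.getInt32Ty(), nullptr, "slot");
  Slot->setAlignment(4);                  // alignment is a plain unsigned here
  bool Foldable = Slot->isStaticAlloca(); // entry block + constant size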
158//===----------------------------------------------------------------------===//
159// LoadInst Class
160//===----------------------------------------------------------------------===//
161
162/// An instruction for reading from memory. This uses the SubclassData field in
163/// Value to store whether or not the load is volatile.
164class LoadInst : public UnaryInstruction {
165 void AssertOK();
166
167protected:
168 // Note: Instruction needs to be a friend here to call cloneImpl.
169 friend class Instruction;
170
171 LoadInst *cloneImpl() const;
172
173public:
174 LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
175 LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
176 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
177 Instruction *InsertBefore = nullptr);
178 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
179 Instruction *InsertBefore = nullptr)
180 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
181 NameStr, isVolatile, InsertBefore) {}
182 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
183 BasicBlock *InsertAtEnd);
184 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
185 Instruction *InsertBefore = nullptr)
186 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
187 NameStr, isVolatile, Align, InsertBefore) {}
188 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
189 unsigned Align, Instruction *InsertBefore = nullptr);
190 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
191 unsigned Align, BasicBlock *InsertAtEnd);
192 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
193 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
194 Instruction *InsertBefore = nullptr)
195 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
196 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 unsigned Align, AtomicOrdering Order,
199 SyncScope::ID SSID = SyncScope::System,
200 Instruction *InsertBefore = nullptr);
201 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
202 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
203 BasicBlock *InsertAtEnd);
204 LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
205 LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
206 LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
207 bool isVolatile = false, Instruction *InsertBefore = nullptr);
208 explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
209 bool isVolatile = false,
210 Instruction *InsertBefore = nullptr)
211 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
212 NameStr, isVolatile, InsertBefore) {}
213 LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
214 BasicBlock *InsertAtEnd);
215
216 /// Return true if this is a load from a volatile memory location.
217 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
218
219 /// Specify whether this is a volatile load or not.
220 void setVolatile(bool V) {
221 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
222 (V ? 1 : 0));
223 }
224
225 /// Return the alignment of the access that is being performed.
226 unsigned getAlignment() const {
227 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
228 }
229
230 void setAlignment(unsigned Align);
231
232 /// Returns the ordering constraint of this load instruction.
233 AtomicOrdering getOrdering() const {
234 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
235 }
236
237 /// Sets the ordering constraint of this load instruction. May not be Release
238 /// or AcquireRelease.
239 void setOrdering(AtomicOrdering Ordering) {
240 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
241 ((unsigned)Ordering << 7));
242 }
243
244 /// Returns the synchronization scope ID of this load instruction.
245 SyncScope::ID getSyncScopeID() const {
246 return SSID;
247 }
248
249 /// Sets the synchronization scope ID of this load instruction.
250 void setSyncScopeID(SyncScope::ID SSID) {
251 this->SSID = SSID;
252 }
253
254 /// Sets the ordering constraint and the synchronization scope ID of this load
255 /// instruction.
256 void setAtomic(AtomicOrdering Ordering,
257 SyncScope::ID SSID = SyncScope::System) {
258 setOrdering(Ordering);
259 setSyncScopeID(SSID);
260 }
261
262 bool isSimple() const { return !isAtomic() && !isVolatile(); }
263
264 bool isUnordered() const {
265 return (getOrdering() == AtomicOrdering::NotAtomic ||
266 getOrdering() == AtomicOrdering::Unordered) &&
267 !isVolatile();
268 }
269
270 Value *getPointerOperand() { return getOperand(0); }
271 const Value *getPointerOperand() const { return getOperand(0); }
272 static unsigned getPointerOperandIndex() { return 0U; }
273 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
274
275 /// Returns the address space of the pointer operand.
276 unsigned getPointerAddressSpace() const {
277 return getPointerOperandType()->getPointerAddressSpace();
278 }
279
280 // Methods for support type inquiry through isa, cast, and dyn_cast:
281 static bool classof(const Instruction *I) {
282 return I->getOpcode() == Instruction::Load;
283 }
284 static bool classof(const Value *V) {
285 return isa<Instruction>(V) && classof(cast<Instruction>(V));
286 }
287
288private:
289 // Shadow Instruction::setInstructionSubclassData with a private forwarding
290 // method so that subclasses cannot accidentally use it.
291 void setInstructionSubclassData(unsigned short D) {
292 Instruction::setInstructionSubclassData(D);
293 }
294
295 /// The synchronization scope ID of this load instruction. Not quite enough
296 /// room in SubClassData for everything, so synchronization scope ID gets its
297 /// own field.
298 SyncScope::ID SSID;
299};
300
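
An analogous LoadInst sketch (again assuming the IRBuilder<> B and the Slot pointer from the previous sketch; illustrative only):

  LoadInst *LD = B.CreateLoad(Slot, "val"); // pointee type taken from Slot
  LD->setAlignment(4);
  bool Simple = LD->isSimple();             // neither atomic nor volatile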
301//===----------------------------------------------------------------------===//
302// StoreInst Class
303//===----------------------------------------------------------------------===//
304
305/// An instruction for storing to memory.
306class StoreInst : public Instruction {
307 void AssertOK();
308
309protected:
310 // Note: Instruction needs to be a friend here to call cloneImpl.
311 friend class Instruction;
312
313 StoreInst *cloneImpl() const;
314
315public:
316 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
317 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
318 StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
319 Instruction *InsertBefore = nullptr);
320 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
321 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
322 unsigned Align, Instruction *InsertBefore = nullptr);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
324 unsigned Align, BasicBlock *InsertAtEnd);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
326 unsigned Align, AtomicOrdering Order,
327 SyncScope::ID SSID = SyncScope::System,
328 Instruction *InsertBefore = nullptr);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
330 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
331 BasicBlock *InsertAtEnd);
332
333 // allocate space for exactly two operands
334 void *operator new(size_t s) {
335 return User::operator new(s, 2);
336 }
337
338 /// Return true if this is a store to a volatile memory location.
339 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
340
341 /// Specify whether this is a volatile store or not.
342 void setVolatile(bool V) {
343 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
344 (V ? 1 : 0));
345 }
346
347 /// Transparently provide more efficient getOperand methods.
348 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
349
350 /// Return the alignment of the access that is being performed
351 unsigned getAlignment() const {
352 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
353 }
354
355 void setAlignment(unsigned Align);
356
357 /// Returns the ordering constraint of this store instruction.
358 AtomicOrdering getOrdering() const {
359 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
360 }
361
362 /// Sets the ordering constraint of this store instruction. May not be
363 /// Acquire or AcquireRelease.
364 void setOrdering(AtomicOrdering Ordering) {
365 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
366 ((unsigned)Ordering << 7));
367 }
368
369 /// Returns the synchronization scope ID of this store instruction.
370 SyncScope::ID getSyncScopeID() const {
371 return SSID;
372 }
373
374 /// Sets the synchronization scope ID of this store instruction.
375 void setSyncScopeID(SyncScope::ID SSID) {
376 this->SSID = SSID;
377 }
378
379 /// Sets the ordering constraint and the synchronization scope ID of this
380 /// store instruction.
381 void setAtomic(AtomicOrdering Ordering,
382 SyncScope::ID SSID = SyncScope::System) {
383 setOrdering(Ordering);
384 setSyncScopeID(SSID);
385 }
386
387 bool isSimple() const { return !isAtomic() && !isVolatile(); }
388
389 bool isUnordered() const {
390 return (getOrdering() == AtomicOrdering::NotAtomic ||
391 getOrdering() == AtomicOrdering::Unordered) &&
392 !isVolatile();
393 }
394
395 Value *getValueOperand() { return getOperand(0); }
396 const Value *getValueOperand() const { return getOperand(0); }
397
398 Value *getPointerOperand() { return getOperand(1); }
399 const Value *getPointerOperand() const { return getOperand(1); }
400 static unsigned getPointerOperandIndex() { return 1U; }
401 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
402
403 /// Returns the address space of the pointer operand.
404 unsigned getPointerAddressSpace() const {
405 return getPointerOperandType()->getPointerAddressSpace();
406 }
407
408 // Methods for support type inquiry through isa, cast, and dyn_cast:
409 static bool classof(const Instruction *I) {
410 return I->getOpcode() == Instruction::Store;
411 }
412 static bool classof(const Value *V) {
413 return isa<Instruction>(V) && classof(cast<Instruction>(V));
414 }
415
416private:
417 // Shadow Instruction::setInstructionSubclassData with a private forwarding
418 // method so that subclasses cannot accidentally use it.
419 void setInstructionSubclassData(unsigned short D) {
420 Instruction::setInstructionSubclassData(D);
421 }
422
423 /// The synchronization scope ID of this store instruction. Not quite enough
424 /// room in SubClassData for everything, so synchronization scope ID gets its
425 /// own field.
426 SyncScope::ID SSID;
427};
428
429template <>
430struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
431};
432
433DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<StoreInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 433, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<StoreInst>::op_begin(const_cast
<StoreInst*>(this))[i_nocapture].get()); } void StoreInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<StoreInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 433, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
StoreInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned StoreInst::getNumOperands() const { return OperandTraits
<StoreInst>::operands(this); } template <int Idx_nocapture
> Use &StoreInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
StoreInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
434
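
And a matching StoreInst sketch (same assumed Builder and Slot; illustrative only):

  StoreInst *ST = B.CreateStore(B.getInt32(42), Slot);
  ST->setAlignment(4);
  Value *Ptr = ST->getPointerOperand();     // operand 1, unlike LoadInst's operand 0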
435//===----------------------------------------------------------------------===//
436// FenceInst Class
437//===----------------------------------------------------------------------===//
438
439/// An instruction for ordering other memory operations.
440class FenceInst : public Instruction {
441 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
442
443protected:
444 // Note: Instruction needs to be a friend here to call cloneImpl.
445 friend class Instruction;
446
447 FenceInst *cloneImpl() const;
448
449public:
450 // Ordering may only be Acquire, Release, AcquireRelease, or
451 // SequentiallyConsistent.
452 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
453 SyncScope::ID SSID = SyncScope::System,
454 Instruction *InsertBefore = nullptr);
455 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
456 BasicBlock *InsertAtEnd);
457
458 // allocate space for exactly zero operands
459 void *operator new(size_t s) {
460 return User::operator new(s, 0);
461 }
462
463 /// Returns the ordering constraint of this fence instruction.
464 AtomicOrdering getOrdering() const {
465 return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
466 }
467
468 /// Sets the ordering constraint of this fence instruction. May only be
469 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
470 void setOrdering(AtomicOrdering Ordering) {
471 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
472 ((unsigned)Ordering << 1));
473 }
474
475 /// Returns the synchronization scope ID of this fence instruction.
476 SyncScope::ID getSyncScopeID() const {
477 return SSID;
478 }
479
480 /// Sets the synchronization scope ID of this fence instruction.
481 void setSyncScopeID(SyncScope::ID SSID) {
482 this->SSID = SSID;
483 }
484
485 // Methods for support type inquiry through isa, cast, and dyn_cast:
486 static bool classof(const Instruction *I) {
487 return I->getOpcode() == Instruction::Fence;
488 }
489 static bool classof(const Value *V) {
490 return isa<Instruction>(V) && classof(cast<Instruction>(V));
491 }
492
493private:
494 // Shadow Instruction::setInstructionSubclassData with a private forwarding
495 // method so that subclasses cannot accidentally use it.
496 void setInstructionSubclassData(unsigned short D) {
497 Instruction::setInstructionSubclassData(D);
498 }
499
500 /// The synchronization scope ID of this fence instruction. Not quite enough
501 /// room in SubClassData for everything, so synchronization scope ID gets its
502 /// own field.
503 SyncScope::ID SSID;
504};
505
506//===----------------------------------------------------------------------===//
507// AtomicCmpXchgInst Class
508//===----------------------------------------------------------------------===//
509
510/// an instruction that atomically checks whether a
511/// specified value is in a memory location, and, if it is, stores a new value
512/// there. Returns the value that was loaded.
513///
514class AtomicCmpXchgInst : public Instruction {
515 void Init(Value *Ptr, Value *Cmp, Value *NewVal,
516 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
517 SyncScope::ID SSID);
518
519protected:
520 // Note: Instruction needs to be a friend here to call cloneImpl.
521 friend class Instruction;
522
523 AtomicCmpXchgInst *cloneImpl() const;
524
525public:
526 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
527 AtomicOrdering SuccessOrdering,
528 AtomicOrdering FailureOrdering,
529 SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering,
533 SyncScope::ID SSID, BasicBlock *InsertAtEnd);
534
535 // allocate space for exactly three operands
536 void *operator new(size_t s) {
537 return User::operator new(s, 3);
538 }
539
540 /// Return true if this is a cmpxchg from a volatile memory
541 /// location.
542 ///
543 bool isVolatile() const {
544 return getSubclassDataFromInstruction() & 1;
545 }
546
547 /// Specify whether this is a volatile cmpxchg.
548 ///
549 void setVolatile(bool V) {
550 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
551 (unsigned)V);
552 }
553
554 /// Return true if this cmpxchg may spuriously fail.
555 bool isWeak() const {
556 return getSubclassDataFromInstruction() & 0x100;
557 }
558
559 void setWeak(bool IsWeak) {
560 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
561 (IsWeak << 8));
562 }
563
564 /// Transparently provide more efficient getOperand methods.
565 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
566
567 /// Returns the success ordering constraint of this cmpxchg instruction.
568 AtomicOrdering getSuccessOrdering() const {
569 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
570 }
571
572 /// Sets the success ordering constraint of this cmpxchg instruction.
573 void setSuccessOrdering(AtomicOrdering Ordering) {
574 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 575, __extension__ __PRETTY_FUNCTION__))
575 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 575, __extension__ __PRETTY_FUNCTION__))
;
576 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
577 ((unsigned)Ordering << 2));
578 }
579
580 /// Returns the failure ordering constraint of this cmpxchg instruction.
581 AtomicOrdering getFailureOrdering() const {
582 return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
583 }
584
585 /// Sets the failure ordering constraint of this cmpxchg instruction.
586 void setFailureOrdering(AtomicOrdering Ordering) {
587 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 588, __extension__ __PRETTY_FUNCTION__))
588 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 588, __extension__ __PRETTY_FUNCTION__))
;
589 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
590 ((unsigned)Ordering << 5));
591 }
592
593 /// Returns the synchronization scope ID of this cmpxchg instruction.
594 SyncScope::ID getSyncScopeID() const {
595 return SSID;
596 }
597
598 /// Sets the synchronization scope ID of this cmpxchg instruction.
599 void setSyncScopeID(SyncScope::ID SSID) {
600 this->SSID = SSID;
601 }
602
603 Value *getPointerOperand() { return getOperand(0); }
604 const Value *getPointerOperand() const { return getOperand(0); }
605 static unsigned getPointerOperandIndex() { return 0U; }
606
607 Value *getCompareOperand() { return getOperand(1); }
608 const Value *getCompareOperand() const { return getOperand(1); }
609
610 Value *getNewValOperand() { return getOperand(2); }
611 const Value *getNewValOperand() const { return getOperand(2); }
612
613 /// Returns the address space of the pointer operand.
614 unsigned getPointerAddressSpace() const {
615 return getPointerOperand()->getType()->getPointerAddressSpace();
616 }
617
618 /// Returns the strongest permitted ordering on failure, given the
619 /// desired ordering on success.
620 ///
621 /// If the comparison in a cmpxchg operation fails, there is no atomic store
622 /// so release semantics cannot be provided. So this function drops explicit
623 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
624 /// operation would remain SequentiallyConsistent.
625 static AtomicOrdering
626 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
627 switch (SuccessOrdering) {
628 default:
629 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 629)
;
630 case AtomicOrdering::Release:
631 case AtomicOrdering::Monotonic:
632 return AtomicOrdering::Monotonic;
633 case AtomicOrdering::AcquireRelease:
634 case AtomicOrdering::Acquire:
635 return AtomicOrdering::Acquire;
636 case AtomicOrdering::SequentiallyConsistent:
637 return AtomicOrdering::SequentiallyConsistent;
638 }
639 }
640
641 // Methods for support type inquiry through isa, cast, and dyn_cast:
642 static bool classof(const Instruction *I) {
643 return I->getOpcode() == Instruction::AtomicCmpXchg;
644 }
645 static bool classof(const Value *V) {
646 return isa<Instruction>(V) && classof(cast<Instruction>(V));
647 }
648
649private:
650 // Shadow Instruction::setInstructionSubclassData with a private forwarding
651 // method so that subclasses cannot accidentally use it.
652 void setInstructionSubclassData(unsigned short D) {
653 Instruction::setInstructionSubclassData(D);
654 }
655
656 /// The synchronization scope ID of this cmpxchg instruction. Not quite
657 /// enough room in SubClassData for everything, so synchronization scope ID
658 /// gets its own field.
659 SyncScope::ID SSID;
660};
661
662template <>
663struct OperandTraits<AtomicCmpXchgInst> :
664 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
665};
666
667DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 667, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicCmpXchgInst>::op_begin
(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture].get
()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 667, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicCmpXchgInst::getNumOperands() const { return
OperandTraits<AtomicCmpXchgInst>::operands(this); } template
<int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &AtomicCmpXchgInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
668
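
For the failure-ordering mapping documented in getStrongestFailureOrdering above, a tiny illustrative use of the static helper (not code from this header):

  AtomicOrdering FO =
      AtomicCmpXchgInst::getStrongestFailureOrdering(AtomicOrdering::AcquireRelease);
  // FO == AtomicOrdering::Acquire: the release half is dropped because a
  // failed cmpxchg performs no store.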
669//===----------------------------------------------------------------------===//
670// AtomicRMWInst Class
671//===----------------------------------------------------------------------===//
672
673/// an instruction that atomically reads a memory location,
674/// combines it with another value, and then stores the result back. Returns
675/// the old value.
676///
677class AtomicRMWInst : public Instruction {
678protected:
679 // Note: Instruction needs to be a friend here to call cloneImpl.
680 friend class Instruction;
681
682 AtomicRMWInst *cloneImpl() const;
683
684public:
685 /// This enumeration lists the possible modifications atomicrmw can make. In
686 /// the descriptions, 'p' is the pointer to the instruction's memory location,
687 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
688 /// instruction. These instructions always return 'old'.
689 enum BinOp {
690 /// *p = v
691 Xchg,
692 /// *p = old + v
693 Add,
694 /// *p = old - v
695 Sub,
696 /// *p = old & v
697 And,
698 /// *p = ~(old & v)
699 Nand,
700 /// *p = old | v
701 Or,
702 /// *p = old ^ v
703 Xor,
704 /// *p = old >signed v ? old : v
705 Max,
706 /// *p = old <signed v ? old : v
707 Min,
708 /// *p = old >unsigned v ? old : v
709 UMax,
710 /// *p = old <unsigned v ? old : v
711 UMin,
712
713 FIRST_BINOP = Xchg,
714 LAST_BINOP = UMin,
715 BAD_BINOP
716 };
717
718 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
719 AtomicOrdering Ordering, SyncScope::ID SSID,
720 Instruction *InsertBefore = nullptr);
721 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
722 AtomicOrdering Ordering, SyncScope::ID SSID,
723 BasicBlock *InsertAtEnd);
724
725 // allocate space for exactly two operands
726 void *operator new(size_t s) {
727 return User::operator new(s, 2);
728 }
729
730 BinOp getOperation() const {
731 return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
732 }
733
734 void setOperation(BinOp Operation) {
735 unsigned short SubclassData = getSubclassDataFromInstruction();
736 setInstructionSubclassData((SubclassData & 31) |
737 (Operation << 5));
738 }
739
740 /// Return true if this is a RMW on a volatile memory location.
741 ///
742 bool isVolatile() const {
743 return getSubclassDataFromInstruction() & 1;
744 }
745
746 /// Specify whether this is a volatile RMW or not.
747 ///
748 void setVolatile(bool V) {
749 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
750 (unsigned)V);
751 }
752
753 /// Transparently provide more efficient getOperand methods.
754 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
755
756 /// Returns the ordering constraint of this rmw instruction.
757 AtomicOrdering getOrdering() const {
758 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
759 }
760
761 /// Sets the ordering constraint of this rmw instruction.
762 void setOrdering(AtomicOrdering Ordering) {
763 assert(Ordering != AtomicOrdering::NotAtomic &&
764 "atomicrmw instructions can only be atomic.");
765 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
766 ((unsigned)Ordering << 2));
767 }
768
769 /// Returns the synchronization scope ID of this rmw instruction.
770 SyncScope::ID getSyncScopeID() const {
771 return SSID;
772 }
773
774 /// Sets the synchronization scope ID of this rmw instruction.
775 void setSyncScopeID(SyncScope::ID SSID) {
776 this->SSID = SSID;
777 }
778
779 Value *getPointerOperand() { return getOperand(0); }
780 const Value *getPointerOperand() const { return getOperand(0); }
781 static unsigned getPointerOperandIndex() { return 0U; }
782
783 Value *getValOperand() { return getOperand(1); }
784 const Value *getValOperand() const { return getOperand(1); }
785
786 /// Returns the address space of the pointer operand.
787 unsigned getPointerAddressSpace() const {
788 return getPointerOperand()->getType()->getPointerAddressSpace();
789 }
790
791 // Methods for support type inquiry through isa, cast, and dyn_cast:
792 static bool classof(const Instruction *I) {
793 return I->getOpcode() == Instruction::AtomicRMW;
794 }
795 static bool classof(const Value *V) {
796 return isa<Instruction>(V) && classof(cast<Instruction>(V));
797 }
798
799private:
800 void Init(BinOp Operation, Value *Ptr, Value *Val,
801 AtomicOrdering Ordering, SyncScope::ID SSID);
802
803 // Shadow Instruction::setInstructionSubclassData with a private forwarding
804 // method so that subclasses cannot accidentally use it.
805 void setInstructionSubclassData(unsigned short D) {
806 Instruction::setInstructionSubclassData(D);
807 }
808
809 /// The synchronization scope ID of this rmw instruction. Not quite enough
810 /// room in SubClassData for everything, so synchronization scope ID gets its
811 /// own field.
812 SyncScope::ID SSID;
813};
814
815template <>
816struct OperandTraits<AtomicRMWInst>
817 : public FixedNumOperandTraits<AtomicRMWInst,2> {
818};
819
820DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
821
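As a hedged aside (not part of the analyzed header), the constructor and accessors declared above compose roughly as in the following sketch; `Ptr`, `Val`, and `InsertPt` are assumptions standing in for an existing pointer-typed Value, an integer Value of the pointee type, and an insertion-point Instruction:

    // Illustrative sketch only; Ptr, Val and InsertPt are assumed to exist.
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    AtomicRMWInst *RMW = new AtomicRMWInst(
        AtomicRMWInst::Add, Ptr, Val,            // *p = old + v; returns old
        AtomicOrdering::SequentiallyConsistent,  // ordering constraint
        SyncScope::System,                       // synchronization scope ID
        InsertPt);                               // insert before this instruction
    RMW->setVolatile(false);                     // ordinary, non-volatile RMW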
822//===----------------------------------------------------------------------===//
823// GetElementPtrInst Class
824//===----------------------------------------------------------------------===//
825
826// checkGEPType - Simple wrapper function to give a better assertion failure
827// message on bad indexes for a gep instruction.
828//
829inline Type *checkGEPType(Type *Ty) {
830 assert(Ty && "Invalid GetElementPtrInst indices for type!");
831 return Ty;
832}
833
834/// an instruction for type-safe pointer arithmetic to
835/// access elements of arrays and structs
836///
837class GetElementPtrInst : public Instruction {
838 Type *SourceElementType;
839 Type *ResultElementType;
840
841 GetElementPtrInst(const GetElementPtrInst &GEPI);
842
843 /// Constructors - Create a getelementptr instruction with a base pointer and a
844 /// list of indices. The first ctor can optionally insert before an existing
845 /// instruction, the second appends the new instruction to the specified
846 /// BasicBlock.
847 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
848 ArrayRef<Value *> IdxList, unsigned Values,
849 const Twine &NameStr, Instruction *InsertBefore);
850 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
851 ArrayRef<Value *> IdxList, unsigned Values,
852 const Twine &NameStr, BasicBlock *InsertAtEnd);
853
854 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
855
856protected:
857 // Note: Instruction needs to be a friend here to call cloneImpl.
858 friend class Instruction;
859
860 GetElementPtrInst *cloneImpl() const;
861
862public:
863 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
864 ArrayRef<Value *> IdxList,
865 const Twine &NameStr = "",
866 Instruction *InsertBefore = nullptr) {
867 unsigned Values = 1 + unsigned(IdxList.size());
868 if (!PointeeType)
869 PointeeType =
870 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
871 else
872 assert(
873 PointeeType ==
874 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
875 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
876 NameStr, InsertBefore);
877 }
878
879 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
880 ArrayRef<Value *> IdxList,
881 const Twine &NameStr,
882 BasicBlock *InsertAtEnd) {
883 unsigned Values = 1 + unsigned(IdxList.size());
884 if (!PointeeType)
885 PointeeType =
886 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
887 else
888 assert(
889 PointeeType ==
890 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
891 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
892 NameStr, InsertAtEnd);
893 }
894
895 /// Create an "inbounds" getelementptr. See the documentation for the
896 /// "inbounds" flag in LangRef.html for details.
897 static GetElementPtrInst *CreateInBounds(Value *Ptr,
898 ArrayRef<Value *> IdxList,
899 const Twine &NameStr = "",
900 Instruction *InsertBefore = nullptr){
901 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
902 }
903
904 static GetElementPtrInst *
905 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
906 const Twine &NameStr = "",
907 Instruction *InsertBefore = nullptr) {
908 GetElementPtrInst *GEP =
909 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
910 GEP->setIsInBounds(true);
911 return GEP;
912 }
913
914 static GetElementPtrInst *CreateInBounds(Value *Ptr,
915 ArrayRef<Value *> IdxList,
916 const Twine &NameStr,
917 BasicBlock *InsertAtEnd) {
918 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
919 }
920
921 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
922 ArrayRef<Value *> IdxList,
923 const Twine &NameStr,
924 BasicBlock *InsertAtEnd) {
925 GetElementPtrInst *GEP =
926 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
927 GEP->setIsInBounds(true);
928 return GEP;
929 }
930
931 /// Transparently provide more efficient getOperand methods.
932 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
933
934 Type *getSourceElementType() const { return SourceElementType; }
935
936 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
937 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
938
939 Type *getResultElementType() const {
940 assert(ResultElementType ==
941 cast<PointerType>(getType()->getScalarType())->getElementType());
942 return ResultElementType;
943 }
944
945 /// Returns the address space of this instruction's pointer type.
946 unsigned getAddressSpace() const {
947 // Note that this is always the same as the pointer operand's address space
948 // and that is cheaper to compute, so cheat here.
949 return getPointerAddressSpace();
950 }
951
952 /// Returns the type of the element that would be loaded with
953 /// a load instruction with the specified parameters.
954 ///
955 /// Null is returned if the indices are invalid for the specified
956 /// pointer type.
957 ///
958 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
959 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
960 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
961
962 inline op_iterator idx_begin() { return op_begin()+1; }
963 inline const_op_iterator idx_begin() const { return op_begin()+1; }
964 inline op_iterator idx_end() { return op_end(); }
965 inline const_op_iterator idx_end() const { return op_end(); }
966
967 inline iterator_range<op_iterator> indices() {
968 return make_range(idx_begin(), idx_end());
969 }
970
971 inline iterator_range<const_op_iterator> indices() const {
972 return make_range(idx_begin(), idx_end());
973 }
974
975 Value *getPointerOperand() {
976 return getOperand(0);
977 }
978 const Value *getPointerOperand() const {
979 return getOperand(0);
980 }
981 static unsigned getPointerOperandIndex() {
982 return 0U; // get index for modifying correct operand.
983 }
984
985 /// Method to return the pointer operand as a
986 /// PointerType.
987 Type *getPointerOperandType() const {
988 return getPointerOperand()->getType();
989 }
990
991 /// Returns the address space of the pointer operand.
992 unsigned getPointerAddressSpace() const {
993 return getPointerOperandType()->getPointerAddressSpace();
994 }
995
996 /// Returns the pointer type returned by the GEP
997 /// instruction, which may be a vector of pointers.
998 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
999 return getGEPReturnType(
1000 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
1001 Ptr, IdxList);
1002 }
1003 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1004 ArrayRef<Value *> IdxList) {
1005 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1006 Ptr->getType()->getPointerAddressSpace());
1007 // Vector GEP
1008 if (Ptr->getType()->isVectorTy()) {
1009 unsigned NumElem = Ptr->getType()->getVectorNumElements();
1010 return VectorType::get(PtrTy, NumElem);
1011 }
1012 for (Value *Index : IdxList)
1013 if (Index->getType()->isVectorTy()) {
1014 unsigned NumElem = Index->getType()->getVectorNumElements();
1015 return VectorType::get(PtrTy, NumElem);
1016 }
1017 // Scalar GEP
1018 return PtrTy;
1019 }
1020
1021 unsigned getNumIndices() const { // Note: always non-negative
1022 return getNumOperands() - 1;
1023 }
1024
1025 bool hasIndices() const {
1026 return getNumOperands() > 1;
1027 }
1028
1029 /// Return true if all of the indices of this GEP are
1030 /// zeros. If so, the result pointer and the first operand have the same
1031 /// value, just potentially different types.
1032 bool hasAllZeroIndices() const;
1033
1034 /// Return true if all of the indices of this GEP are
1035 /// constant integers. If so, the result pointer and the first operand have
1036 /// a constant offset between them.
1037 bool hasAllConstantIndices() const;
1038
1039 /// Set or clear the inbounds flag on this GEP instruction.
1040 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1041 void setIsInBounds(bool b = true);
1042
1043 /// Determine whether the GEP has the inbounds flag.
1044 bool isInBounds() const;
1045
1046 /// Accumulate the constant address offset of this GEP if possible.
1047 ///
1048 /// This routine accepts an APInt into which it will accumulate the constant
1049 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1050 /// all-constant, it returns false and the value of the offset APInt is
1051 /// undefined (it is *not* preserved!). The APInt passed into this routine
1052 /// must be at least as wide as the IntPtr type for the address space of
1053 /// the base GEP pointer.
1054 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1055
1056 // Methods for support type inquiry through isa, cast, and dyn_cast:
1057 static bool classof(const Instruction *I) {
1058 return (I->getOpcode() == Instruction::GetElementPtr);
1059 }
1060 static bool classof(const Value *V) {
1061 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1062 }
1063};
1064
1065template <>
1066struct OperandTraits<GetElementPtrInst> :
1067 public VariadicOperandTraits<GetElementPtrInst, 1> {
1068};
1069
1070GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1071 ArrayRef<Value *> IdxList, unsigned Values,
1072 const Twine &NameStr,
1073 Instruction *InsertBefore)
1074 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1075 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1076 Values, InsertBefore),
1077 SourceElementType(PointeeType),
1078 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1079 assert(ResultElementType ==
1080 cast<PointerType>(getType()->getScalarType())->getElementType());
1081 init(Ptr, IdxList, NameStr);
1082}
1083
1084GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1085 ArrayRef<Value *> IdxList, unsigned Values,
1086 const Twine &NameStr,
1087 BasicBlock *InsertAtEnd)
1088 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1089 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1090 Values, InsertAtEnd),
1091 SourceElementType(PointeeType),
1092 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1093 assert(ResultElementType ==
1094 cast<PointerType>(getType()->getScalarType())->getElementType());
1095 init(Ptr, IdxList, NameStr);
1096}
1097
1098DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1099
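As another hedged aside that is not part of the analyzed header, the GEP creation and constant-offset helpers above can be used as sketched below; `BasePtr` (a pointer-typed Value), `I64Ty` (an i64 Type), `DL` (the module's DataLayout), and `InsertPt` are assumptions:

    // Illustrative sketch only; BasePtr, I64Ty, DL and InsertPt are assumed to exist.
    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    Value *Idx[] = {ConstantInt::get(I64Ty, 0), ConstantInt::get(I64Ty, 3)};
    GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
        BasePtr, Idx, "field.addr", InsertPt);   // pointee type derived from BasePtr

    APInt Offset(DL.getPointerSizeInBits(), 0);  // must be at least as wide as IntPtr
    if (GEP->accumulateConstantOffset(DL, Offset)) {
      // All indices were constant integers; Offset now holds the byte offset
      // of the result pointer relative to BasePtr.
    }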
1100//===----------------------------------------------------------------------===//
1101// ICmpInst Class
1102//===----------------------------------------------------------------------===//
1103
1104/// This instruction compares its operands according to the predicate given
1105/// to the constructor. It only operates on integers or pointers. The operands
1106/// must be identical types.
1107/// Represent an integer comparison operator.
1108class ICmpInst: public CmpInst {
1109 void AssertOK() {
1110 assert(isIntPredicate() &&
1111 "Invalid ICmp predicate value");
1112 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1113 "Both operands to ICmp instruction are not of the same type!");
1114 // Check that the operands are the right type
1115 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1116 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1117 "Invalid operand types for ICmp instruction");
1118 }
1119
1120protected:
1121 // Note: Instruction needs to be a friend here to call cloneImpl.
1122 friend class Instruction;
1123
1124 /// Clone an identical ICmpInst
1125 ICmpInst *cloneImpl() const;
1126
1127public:
1128 /// Constructor with insert-before-instruction semantics.
1129 ICmpInst(
1130 Instruction *InsertBefore, ///< Where to insert
1131 Predicate pred, ///< The predicate to use for the comparison
1132 Value *LHS, ///< The left-hand-side of the expression
1133 Value *RHS, ///< The right-hand-side of the expression
1134 const Twine &NameStr = "" ///< Name of the instruction
1135 ) : CmpInst(makeCmpResultType(LHS->getType()),
1136 Instruction::ICmp, pred, LHS, RHS, NameStr,
1137 InsertBefore) {
1138#ifndef NDEBUG
1139 AssertOK();
1140#endif
1141 }
1142
1143 /// Constructor with insert-at-end semantics.
1144 ICmpInst(
1145 BasicBlock &InsertAtEnd, ///< Block to insert into.
1146 Predicate pred, ///< The predicate to use for the comparison
1147 Value *LHS, ///< The left-hand-side of the expression
1148 Value *RHS, ///< The right-hand-side of the expression
1149 const Twine &NameStr = "" ///< Name of the instruction
1150 ) : CmpInst(makeCmpResultType(LHS->getType()),
1151 Instruction::ICmp, pred, LHS, RHS, NameStr,
1152 &InsertAtEnd) {
1153#ifndef NDEBUG
1154 AssertOK();
1155#endif
1156 }
1157
1158 /// Constructor with no-insertion semantics
1159 ICmpInst(
1160 Predicate pred, ///< The predicate to use for the comparison
1161 Value *LHS, ///< The left-hand-side of the expression
1162 Value *RHS, ///< The right-hand-side of the expression
1163 const Twine &NameStr = "" ///< Name of the instruction
1164 ) : CmpInst(makeCmpResultType(LHS->getType()),
1165 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1166#ifndef NDEBUG
1167 AssertOK();
1168#endif
1169 }
1170
1171 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1172 /// @returns the predicate that would be the result if the operand were
1173 /// regarded as signed.
1174 /// Return the signed version of the predicate
1175 Predicate getSignedPredicate() const {
1176 return getSignedPredicate(getPredicate());
1177 }
1178
1179 /// This is a static version that you can use without an instruction.
1180 /// Return the signed version of the predicate.
1181 static Predicate getSignedPredicate(Predicate pred);
1182
1183 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1184 /// @returns the predicate that would be the result if the operand were
1185 /// regarded as unsigned.
1186 /// Return the unsigned version of the predicate
1187 Predicate getUnsignedPredicate() const {
1188 return getUnsignedPredicate(getPredicate());
1189 }
1190
1191 /// This is a static version that you can use without an instruction.
1192 /// Return the unsigned version of the predicate.
1193 static Predicate getUnsignedPredicate(Predicate pred);
1194
1195 /// Return true if this predicate is either EQ or NE. This also
1196 /// tests for commutativity.
1197 static bool isEquality(Predicate P) {
1198 return P == ICMP_EQ || P == ICMP_NE;
1199 }
1200
1201 /// Return true if this predicate is either EQ or NE. This also
1202 /// tests for commutativity.
1203 bool isEquality() const {
1204 return isEquality(getPredicate());
1205 }
1206
1207 /// @returns true if the predicate of this ICmpInst is commutative
1208 /// Determine if this relation is commutative.
1209 bool isCommutative() const { return isEquality(); }
1210
1211 /// Return true if the predicate is relational (not EQ or NE).
1212 ///
1213 bool isRelational() const {
1214 return !isEquality();
1215 }
1216
1217 /// Return true if the predicate is relational (not EQ or NE).
1218 ///
1219 static bool isRelational(Predicate P) {
1220 return !isEquality(P);
1221 }
1222
1223 /// Exchange the two operands to this instruction in such a way that it does
1224 /// not modify the semantics of the instruction. The predicate value may be
1225 /// changed to retain the same result if the predicate is order dependent
1226 /// (e.g. ult).
1227 /// Swap operands and adjust predicate.
1228 void swapOperands() {
1229 setPredicate(getSwappedPredicate());
1230 Op<0>().swap(Op<1>());
1231 }
1232
1233 // Methods for support type inquiry through isa, cast, and dyn_cast:
1234 static bool classof(const Instruction *I) {
1235 return I->getOpcode() == Instruction::ICmp;
1236 }
1237 static bool classof(const Value *V) {
1238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1239 }
1240};
1241
1242//===----------------------------------------------------------------------===//
1243// FCmpInst Class
1244//===----------------------------------------------------------------------===//
1245
1246/// This instruction compares its operands according to the predicate given
1247/// to the constructor. It only operates on floating point values or packed
1248/// vectors of floating point values. The operands must be identical types.
1249/// Represents a floating point comparison operator.
1250class FCmpInst: public CmpInst {
1251 void AssertOK() {
1252 assert(isFPPredicate() && "Invalid FCmp predicate value");
1253 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1254 "Both operands to FCmp instruction are not of the same type!");
1255 // Check that the operands are the right type
1256 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1257 "Invalid operand types for FCmp instruction");
1258 }
1259
1260protected:
1261 // Note: Instruction needs to be a friend here to call cloneImpl.
1262 friend class Instruction;
1263
1264 /// Clone an identical FCmpInst
1265 FCmpInst *cloneImpl() const;
1266
1267public:
1268 /// Constructor with insert-before-instruction semantics.
1269 FCmpInst(
1270 Instruction *InsertBefore, ///< Where to insert
1271 Predicate pred, ///< The predicate to use for the comparison
1272 Value *LHS, ///< The left-hand-side of the expression
1273 Value *RHS, ///< The right-hand-side of the expression
1274 const Twine &NameStr = "" ///< Name of the instruction
1275 ) : CmpInst(makeCmpResultType(LHS->getType()),
1276 Instruction::FCmp, pred, LHS, RHS, NameStr,
1277 InsertBefore) {
1278 AssertOK();
1279 }
1280
1281 /// Constructor with insert-at-end semantics.
1282 FCmpInst(
1283 BasicBlock &InsertAtEnd, ///< Block to insert into.
1284 Predicate pred, ///< The predicate to use for the comparison
1285 Value *LHS, ///< The left-hand-side of the expression
1286 Value *RHS, ///< The right-hand-side of the expression
1287 const Twine &NameStr = "" ///< Name of the instruction
1288 ) : CmpInst(makeCmpResultType(LHS->getType()),
1289 Instruction::FCmp, pred, LHS, RHS, NameStr,
1290 &InsertAtEnd) {
1291 AssertOK();
1292 }
1293
1294 /// Constructor with no-insertion semantics
1295 FCmpInst(
1296 Predicate pred, ///< The predicate to use for the comparison
1297 Value *LHS, ///< The left-hand-side of the expression
1298 Value *RHS, ///< The right-hand-side of the expression
1299 const Twine &NameStr = "" ///< Name of the instruction
1300 ) : CmpInst(makeCmpResultType(LHS->getType()),
1301 Instruction::FCmp, pred, LHS, RHS, NameStr) {
1302 AssertOK();
1303 }
1304
1305 /// @returns true if the predicate of this instruction is EQ or NE.
1306 /// Determine if this is an equality predicate.
1307 static bool isEquality(Predicate Pred) {
1308 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1309 Pred == FCMP_UNE;
1310 }
1311
1312 /// @returns true if the predicate of this instruction is EQ or NE.
1313 /// Determine if this is an equality predicate.
1314 bool isEquality() const { return isEquality(getPredicate()); }
1315
1316 /// @returns true if the predicate of this instruction is commutative.
1317 /// Determine if this is a commutative predicate.
1318 bool isCommutative() const {
1319 return isEquality() ||
1320 getPredicate() == FCMP_FALSE ||
1321 getPredicate() == FCMP_TRUE ||
1322 getPredicate() == FCMP_ORD ||
1323 getPredicate() == FCMP_UNO;
1324 }
1325
1326 /// @returns true if the predicate is relational (not EQ or NE).
1327 /// Determine if this is a relational predicate.
1328 bool isRelational() const { return !isEquality(); }
1329
1330 /// Exchange the two operands to this instruction in such a way that it does
1331 /// not modify the semantics of the instruction. The predicate value may be
1332 /// changed to retain the same result if the predicate is order dependent
1333 /// (e.g. ult).
1334 /// Swap operands and adjust predicate.
1335 void swapOperands() {
1336 setPredicate(getSwappedPredicate());
1337 Op<0>().swap(Op<1>());
1338 }
1339
1340 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1341 static bool classof(const Instruction *I) {
1342 return I->getOpcode() == Instruction::FCmp;
1343 }
1344 static bool classof(const Value *V) {
1345 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1346 }
1347};
1348
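Similarly, a hedged sketch of the FCmpInst helpers above, assuming `X` and `Y` are existing floating point Values of the same type:

    // Illustrative sketch only; X and Y are assumed to be existing float Values.
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    FCmpInst *FC = new FCmpInst(FCmpInst::FCMP_OLT, X, Y, "fcmp"); // ordered less-than
    bool Comm = FC->isCommutative(); // false: only EQ/NE/FALSE/TRUE/ORD/UNO commute
    FC->swapOperands();              // predicate becomes FCMP_OGT, operands exchanged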
1349class CallInst;
1350class InvokeInst;
1351
1352template <class T> struct CallBaseParent { using type = Instruction; };
1353
1354template <> struct CallBaseParent<InvokeInst> { using type = TerminatorInst; };
1355
1356//===----------------------------------------------------------------------===//
1357/// Base class for all callable instructions (InvokeInst and CallInst)
1358/// Holds everything related to calling a function, abstracting from the base
1359/// type @p BaseInstTy and the concrete instruction @p InstTy
1360///
1361template <class InstTy>
1362class CallBase : public CallBaseParent<InstTy>::type,
1363 public OperandBundleUser<InstTy, User::op_iterator> {
1364protected:
1365 AttributeList Attrs; ///< parameter attributes for callable
1366 FunctionType *FTy;
1367 using BaseInstTy = typename CallBaseParent<InstTy>::type;
1368
1369 template <class... ArgsTy>
1370 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1371 : BaseInstTy(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1372 bool hasDescriptor() const { return Value::HasDescriptor; }
1373
1374 using BaseInstTy::BaseInstTy;
1375
1376 using OperandBundleUser<InstTy,
1377 User::op_iterator>::isFnAttrDisallowedByOpBundle;
1378 using OperandBundleUser<InstTy, User::op_iterator>::getNumTotalBundleOperands;
1379 using OperandBundleUser<InstTy, User::op_iterator>::bundleOperandHasAttr;
1380 using Instruction::getSubclassDataFromInstruction;
1381 using Instruction::setInstructionSubclassData;
1382
1383public:
1384 using Instruction::getContext;
1385 using OperandBundleUser<InstTy, User::op_iterator>::hasOperandBundles;
1386 using OperandBundleUser<InstTy,
1387 User::op_iterator>::getBundleOperandsStartIndex;
1388
1389 static bool classof(const Instruction *I) {
1390 llvm_unreachable(
1391 "CallBase is not meant to be used as part of the classof hierarchy");
1392 }
1393
1394public:
1395 /// Return the parameter attributes for this call.
1396 ///
1397 AttributeList getAttributes() const { return Attrs; }
1398
1399 /// Set the parameter attributes for this call.
1400 ///
1401 void setAttributes(AttributeList A) { Attrs = A; }
1402
1403 FunctionType *getFunctionType() const { return FTy; }
1404
1405 void mutateFunctionType(FunctionType *FTy) {
1406 Value::mutateType(FTy->getReturnType());
1407 this->FTy = FTy;
1408 }
1409
1410 /// Return the number of call arguments.
1411 ///
1412 unsigned getNumArgOperands() const {
1413 return getNumOperands() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1414 }
1415
1416 /// getArgOperand/setArgOperand - Return/set the i-th call argument.
1417 ///
1418 Value *getArgOperand(unsigned i) const {
1419 assert(i < getNumArgOperands() && "Out of bounds!");
1420 return getOperand(i);
1421 }
1422 void setArgOperand(unsigned i, Value *v) {
1423 assert(i < getNumArgOperands() && "Out of bounds!");
1424 setOperand(i, v);
1425 }
1426
1427 /// Return the iterator pointing to the beginning of the argument list.
1428 User::op_iterator arg_begin() { return op_begin(); }
1429
1430 /// Return the iterator pointing to the end of the argument list.
1431 User::op_iterator arg_end() {
1432 // [ call args ], [ operand bundles ], callee
1433 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1434 }
1435
1436 /// Iteration adapter for range-for loops.
1437 iterator_range<User::op_iterator> arg_operands() {
1438 return make_range(arg_begin(), arg_end());
1439 }
1440
1441 /// Return the iterator pointing to the beginning of the argument list.
1442 User::const_op_iterator arg_begin() const { return op_begin(); }
1443
1444 /// Return the iterator pointing to the end of the argument list.
1445 User::const_op_iterator arg_end() const {
1446 // [ call args ], [ operand bundles ], callee
1447 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1448 }
1449
1450 /// Iteration adapter for range-for loops.
1451 iterator_range<User::const_op_iterator> arg_operands() const {
1452 return make_range(arg_begin(), arg_end());
1453 }
1454
1455 /// Wrappers for getting the \c Use of a call argument.
1456 const Use &getArgOperandUse(unsigned i) const {
1457 assert(i < getNumArgOperands() && "Out of bounds!");
1458 return User::getOperandUse(i);
1459 }
1460 Use &getArgOperandUse(unsigned i) {
1461 assert(i < getNumArgOperands() && "Out of bounds!");
1462 return User::getOperandUse(i);
1463 }
1464
1465 /// If one of the arguments has the 'returned' attribute, return its
1466 /// operand value. Otherwise, return nullptr.
1467 Value *getReturnedArgOperand() const {
1468 unsigned Index;
1469
1470 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
1471 return getArgOperand(Index - AttributeList::FirstArgIndex);
1472 if (const Function *F = getCalledFunction())
1473 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
1474 Index)
1475 return getArgOperand(Index - AttributeList::FirstArgIndex);
1476
1477 return nullptr;
1478 }
1479
1480 User::op_iterator op_begin() {
1481 return OperandTraits<CallBase>::op_begin(this);
1482 }
1483
1484 User::const_op_iterator op_begin() const {
1485 return OperandTraits<CallBase>::op_begin(const_cast<CallBase *>(this));
1486 }
1487
1488 User::op_iterator op_end() { return OperandTraits<CallBase>::op_end(this); }
1489
1490 User::const_op_iterator op_end() const {
1491 return OperandTraits<CallBase>::op_end(const_cast<CallBase *>(this));
1492 }
1493
1494 Value *getOperand(unsigned i_nocapture) const {
1495 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&
1496 "getOperand() out of range!");
1497 return cast_or_null<Value>(OperandTraits<CallBase>::op_begin(
1498 const_cast<CallBase *>(this))[i_nocapture]
1499 .get());
1500 }
1501
1502 void setOperand(unsigned i_nocapture, Value *Val_nocapture) {
1503 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&
1504 "setOperand() out of range!");
1505 OperandTraits<CallBase>::op_begin(this)[i_nocapture] = Val_nocapture;
1506 }
1507
1508 unsigned getNumOperands() const {
1509 return OperandTraits<CallBase>::operands(this);
1510 }
1511 template <int Idx_nocapture> Use &Op() {
1512 return User::OpFrom<Idx_nocapture>(this);
1513 }
1514 template <int Idx_nocapture> const Use &Op() const {
1515 return User::OpFrom<Idx_nocapture>(this);
1516 }
1517
1518 /// Return the function called, or null if this is an
1519 /// indirect function invocation.
1520 ///
1521 Function *getCalledFunction() const {
1522 return dyn_cast<Function>(Op<-InstTy::ArgOffset>());
1523 }
1524
1525 /// Determine whether this call has the given attribute.
1526 bool hasFnAttr(Attribute::AttrKind Kind) const {
1527 assert(Kind != Attribute::NoBuiltin &&
1528 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
1529 return hasFnAttrImpl(Kind);
1530 }
1531
1532 /// Determine whether this call has the given attribute.
1533 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1534
1535 /// getCallingConv/setCallingConv - Get or set the calling convention of this
1536 /// function call.
1537 CallingConv::ID getCallingConv() const {
1538 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
1539 }
1540 void setCallingConv(CallingConv::ID CC) {
1541 auto ID = static_cast<unsigned>(CC);
1542 assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
1543 setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
1544 (ID << 2));
1545 }
1546
1547
1548 /// adds the attribute to the list of attributes.
1549 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1550 AttributeList PAL = getAttributes();
1551 PAL = PAL.addAttribute(getContext(), i, Kind);
1552 setAttributes(PAL);
1553 }
1554
1555 /// adds the attribute to the list of attributes.
1556 void addAttribute(unsigned i, Attribute Attr) {
1557 AttributeList PAL = getAttributes();
1558 PAL = PAL.addAttribute(getContext(), i, Attr);
1559 setAttributes(PAL);
1560 }
1561
1562 /// Adds the attribute to the indicated argument
1563 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1564 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1565 AttributeList PAL = getAttributes();
1566 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1567 setAttributes(PAL);
1568 }
1569
1570 /// Adds the attribute to the indicated argument
1571 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1572 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1573 AttributeList PAL = getAttributes();
1574 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1575 setAttributes(PAL);
1576 }
1577
1578 /// removes the attribute from the list of attributes.
1579 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1580 AttributeList PAL = getAttributes();
1581 PAL = PAL.removeAttribute(getContext(), i, Kind);
1582 setAttributes(PAL);
1583 }
1584
1585 /// removes the attribute from the list of attributes.
1586 void removeAttribute(unsigned i, StringRef Kind) {
1587 AttributeList PAL = getAttributes();
1588 PAL = PAL.removeAttribute(getContext(), i, Kind);
1589 setAttributes(PAL);
1590 }
1591
1592 /// Removes the attribute from the given argument
1593 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1594 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1595 AttributeList PAL = getAttributes();
1596 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1597 setAttributes(PAL);
1598 }
1599
1600 /// Removes the attribute from the given argument
1601 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1602 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1603 AttributeList PAL = getAttributes();
1604 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1605 setAttributes(PAL);
1606 }
1607
1608 /// adds the dereferenceable attribute to the list of attributes.
1609 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1610 AttributeList PAL = getAttributes();
1611 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1612 setAttributes(PAL);
1613 }
1614
1615 /// adds the dereferenceable_or_null attribute to the list of
1616 /// attributes.
1617 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1618 AttributeList PAL = getAttributes();
1619 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1620 setAttributes(PAL);
1621 }
1622
1623 /// Determine whether the return value has the given attribute.
1624 bool hasRetAttr(Attribute::AttrKind Kind) const {
1625 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
1626 return true;
1627
1628 // Look at the callee, if available.
1629 if (const Function *F = getCalledFunction())
1630 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
1631 return false;
1632 }
1633
1634 /// Determine whether the argument or parameter has the given attribute.
1635 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1636 assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");
1637
1638 if (Attrs.hasParamAttribute(ArgNo, Kind))
1639 return true;
1640 if (const Function *F = getCalledFunction())
1641 return F->getAttributes().hasParamAttribute(ArgNo, Kind);
1642 return false;
1643 }
1644
1645 /// Get the attribute of a given kind at a position.
1646 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1647 return getAttributes().getAttribute(i, Kind);
1648 }
1649
1650 /// Get the attribute of a given kind at a position.
1651 Attribute getAttribute(unsigned i, StringRef Kind) const {
1652 return getAttributes().getAttribute(i, Kind);
1653 }
1654
1655 /// Get the attribute of a given kind from a given arg
1656 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1657 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1658 return getAttributes().getParamAttr(ArgNo, Kind);
1659 }
1660
1661 /// Get the attribute of a given kind from a given arg
1662 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1663 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1664 return getAttributes().getParamAttr(ArgNo, Kind);
1665 }
1666 /// Return true if the data operand at index \p i has the attribute \p
1667 /// A.
1668 ///
1669 /// Data operands include call arguments and values used in operand bundles,
1670 /// but does not include the callee operand. This routine dispatches to the
1671 /// underlying AttributeList or the OperandBundleUser as appropriate.
1672 ///
1673 /// The index \p i is interpreted as
1674 ///
1675 /// \p i == Attribute::ReturnIndex -> the return value
1676 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1677 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1678 /// (\p i - 1) in the operand list.
1679 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1680 // There are getNumOperands() - (InstTy::ArgOffset - 1) data operands.
1681 // The last operand is the callee.
1682 assert(i < (getNumOperands() - InstTy::ArgOffset + 1) &&
1683 "Data operand index out of bounds!");
1684
1685 // The attribute A can either be directly specified, if the operand in
1686 // question is a call argument; or be indirectly implied by the kind of its
1687 // containing operand bundle, if the operand is a bundle operand.
1688
1689 if (i == AttributeList::ReturnIndex)
1690 return hasRetAttr(Kind);
1691
1692 // FIXME: Avoid these i - 1 calculations and update the API to use
1693 // zero-based indices.
1694 if (i < (getNumArgOperands() + 1))
1695 return paramHasAttr(i - 1, Kind);
1696
1697 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
1698 "Must be either a call argument or an operand bundle!");
1699 return bundleOperandHasAttr(i - 1, Kind);
1700 }
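 // Worked example (illustrative only, not part of the analyzed header): for a
 // hypothetical call %r = call i8* @f(i8* %a, i32 %b) that carries one operand
 // bundle operand, the index mapping documented above resolves as
 //   i == 0 (AttributeList::ReturnIndex) -> hasRetAttr(Kind) for %r
 //   i == 1                              -> paramHasAttr(0, Kind) for %a
 //   i == 2                              -> paramHasAttr(1, Kind) for %b
 //   i == 3                              -> bundleOperandHasAttr(2, Kind) for the
 //                                          bundle operand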
1701
1702 /// Extract the alignment of the return value.
1703 unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
1704
1705 /// Extract the alignment for a call or parameter (0=unknown).
1706 unsigned getParamAlignment(unsigned ArgNo) const {
1707 return Attrs.getParamAlignment(ArgNo);
1708 }
1709
1710 /// Extract the number of dereferenceable bytes for a call or
1711 /// parameter (0=unknown).
1712 uint64_t getDereferenceableBytes(unsigned i) const {
1713 return Attrs.getDereferenceableBytes(i);
1714 }
1715
1716 /// Extract the number of dereferenceable_or_null bytes for a call or
1717 /// parameter (0=unknown).
1718 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1719 return Attrs.getDereferenceableOrNullBytes(i);
1720 }
1721
1722 /// @brief Determine if the return value is marked with NoAlias attribute.
1723 bool returnDoesNotAlias() const {
1724 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1725 }
1726
1727 /// Return true if the call should not be treated as a call to a
1728 /// builtin.
1729 bool isNoBuiltin() const {
1730 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1731 !hasFnAttrImpl(Attribute::Builtin);
1732 }
1733
1734 /// Determine if the call requires strict floating point semantics.
1735 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1736
1737 /// Return true if the call should not be inlined.
1738 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1739 void setIsNoInline() {
1740 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1741 }
1742 /// Determine if the call does not access memory.
1743 bool doesNotAccessMemory() const {
1744 return hasFnAttr(Attribute::ReadNone);
1745 }
1746 void setDoesNotAccessMemory() {
1747 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1748 }
1749
1750 /// Determine if the call does not access or only reads memory.
1751 bool onlyReadsMemory() const {
1752 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1753 }
1754 void setOnlyReadsMemory() {
1755 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1756 }
1757
1758 /// Determine if the call does not access or only writes memory.
1759 bool doesNotReadMemory() const {
1760 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1761 }
1762 void setDoesNotReadMemory() {
1763 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1764 }
1765
1766 /// @brief Determine if the call can access memory only using pointers based
1767 /// on its arguments.
1768 bool onlyAccessesArgMemory() const {
1769 return hasFnAttr(Attribute::ArgMemOnly);
1770 }
1771 void setOnlyAccessesArgMemory() {
1772 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1773 }
1774
1775 /// @brief Determine if the function may only access memory that is
1776 /// inaccessible from the IR.
1777 bool onlyAccessesInaccessibleMemory() const {
1778 return hasFnAttr(Attribute::InaccessibleMemOnly);
1779 }
1780 void setOnlyAccessesInaccessibleMemory() {
1781 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1782 }
1783
1784 /// @brief Determine if the function may only access memory that is
1785 /// either inaccessible from the IR or pointed to by its arguments.
1786 bool onlyAccessesInaccessibleMemOrArgMem() const {
1787 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1788 }
1789 void setOnlyAccessesInaccessibleMemOrArgMem() {
1790 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOrArgMemOnly);
1791 }
1792 /// Determine if the call cannot return.
1793 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1794 void setDoesNotReturn() {
1795 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1796 }
1797
1798 /// Determine if the call should not perform indirect branch tracking.
1799 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1800
1801 /// Determine if the call cannot unwind.
1802 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1803 void setDoesNotThrow() {
1804 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1805 }
1806
1807 /// Determine if the invoke cannot be duplicated.
1808 bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
1809 void setCannotDuplicate() {
1810 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1811 }
1812
1813 /// Determine if the invoke is convergent
1814 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1815 void setConvergent() {
1816 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1817 }
1818 void setNotConvergent() {
1819 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1820 }
1821
1822 /// Determine if the call returns a structure through the first
1823 /// pointer argument.
1824 bool hasStructRetAttr() const {
1825 if (getNumArgOperands() == 0)
1826 return false;
1827
1828 // Be friendly and also check the callee.
1829 return paramHasAttr(0, Attribute::StructRet);
1830 }
1831
1832 /// Determine if any call argument is an aggregate passed by value.
1833 bool hasByValArgument() const {
1834 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1835 }
1836 /// Get a pointer to the function that is invoked by this
1837 /// instruction.
1838 const Value *getCalledValue() const { return Op<-InstTy::ArgOffset>(); }
1839 Value *getCalledValue() { return Op<-InstTy::ArgOffset>(); }
1840
1841 /// Set the function called.
1842 void setCalledFunction(Value* Fn) {
1843 setCalledFunction(
1844 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
1845 Fn);
1846 }
1847 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1848 this->FTy = FTy;
1849 assert(FTy == cast<FunctionType>(
1850 cast<PointerType>(Fn->getType())->getElementType()));
1851 Op<-InstTy::ArgOffset>() = Fn;
1852 }
1853
1854protected:
1855 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
1856 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
1857 return true;
1858
1859 // Operand bundles override attributes on the called function, but don't
1860 // override attributes directly present on the call instruction.
1861 if (isFnAttrDisallowedByOpBundle(Kind))
1862 return false;
1863
1864 if (const Function *F = getCalledFunction())
1865 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
1866 Kind);
1867 return false;
1868 }
1869};
1870
1871//===----------------------------------------------------------------------===//
1872/// This class represents a function call, abstracting a target
1873/// machine's calling convention. This class uses the low bit of the SubClassData
1874/// field to indicate whether or not this is a tail call. The rest of the bits
1875/// hold the calling convention of the call.
1876///
1877class CallInst : public CallBase<CallInst> {
1878 friend class OperandBundleUser<CallInst, User::op_iterator>;
1879
1880 CallInst(const CallInst &CI);
1881
1882 /// Construct a CallInst given a range of arguments.
1883 /// Construct a CallInst from a range of arguments
1884 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1885 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1886 Instruction *InsertBefore);
1887
1888 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1889 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1890 Instruction *InsertBefore)
1891 : CallInst(cast<FunctionType>(
1892 cast<PointerType>(Func->getType())->getElementType()),
1893 Func, Args, Bundles, NameStr, InsertBefore) {}
1894
1895 inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
1896 Instruction *InsertBefore)
1897 : CallInst(Func, Args, None, NameStr, InsertBefore) {}
1898
1899 /// Construct a CallInst given a range of arguments.
1900 /// Construct a CallInst from a range of arguments
1901 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1902 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1903 BasicBlock *InsertAtEnd);
1904
1905 explicit CallInst(Value *F, const Twine &NameStr, Instruction *InsertBefore);
1906
1907 CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
1908
1909 void init(Value *Func, ArrayRef<Value *> Args,
1910 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
1911 init(cast<FunctionType>(
1912 cast<PointerType>(Func->getType())->getElementType()),
1913 Func, Args, Bundles, NameStr);
1914 }
1915 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1916 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1917 void init(Value *Func, const Twine &NameStr);
1918
1919protected:
1920 // Note: Instruction needs to be a friend here to call cloneImpl.
1921 friend class Instruction;
1922
1923 CallInst *cloneImpl() const;
1924
1925public:
1926 static constexpr int ArgOffset = 1;
1927
1928 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1929 ArrayRef<OperandBundleDef> Bundles = None,
1930 const Twine &NameStr = "",
1931 Instruction *InsertBefore = nullptr) {
1932 return Create(cast<FunctionType>(
1933 cast<PointerType>(Func->getType())->getElementType()),
1934 Func, Args, Bundles, NameStr, InsertBefore);
1935 }
1936
1937 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1938 const Twine &NameStr,
1939 Instruction *InsertBefore = nullptr) {
1940 return Create(cast<FunctionType>(
1941 cast<PointerType>(Func->getType())->getElementType()),
1942 Func, Args, None, NameStr, InsertBefore);
1943 }
1944
1945 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1946 const Twine &NameStr,
1947 Instruction *InsertBefore = nullptr) {
1948 return new (unsigned(Args.size() + 1))
1949 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1950 }
1951
1952 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1953 ArrayRef<OperandBundleDef> Bundles = None,
1954 const Twine &NameStr = "",
1955 Instruction *InsertBefore = nullptr) {
1956 const unsigned TotalOps =
1957 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1958 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1959
1960 return new (TotalOps, DescriptorBytes)
1961 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1962 }
1963
1964 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1965 ArrayRef<OperandBundleDef> Bundles,
1966 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1967 const unsigned TotalOps =
1968 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1969 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1970
1971 return new (TotalOps, DescriptorBytes)
1972 CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
1973 }
1974
1975 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1976 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1977 return new (unsigned(Args.size() + 1))
1978 CallInst(Func, Args, None, NameStr, InsertAtEnd);
1979 }
1980
1981 static CallInst *Create(Value *F, const Twine &NameStr = "",
1982 Instruction *InsertBefore = nullptr) {
1983 return new (1) CallInst(F, NameStr, InsertBefore);
1984 }
1985
1986 static CallInst *Create(Value *F, const Twine &NameStr,
1987 BasicBlock *InsertAtEnd) {
1988 return new (1) CallInst(F, NameStr, InsertAtEnd);
1989 }
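A hedged usage sketch of the simplest Create overloads above (Callee, ArgA, ArgB, and InsertPt are placeholder names, not from the report): the Value-only overload recovers the FunctionType from the callee's pointer element type, exactly as the constructor chain shows.

// Illustrative only.
static CallInst *emitSimpleCall(Value *Callee, Value *ArgA, Value *ArgB,
                                Instruction *InsertPt) {
  // The FunctionType is derived from Callee's pointer type by the overload.
  return CallInst::Create(Callee, {ArgA, ArgB}, "call.tmp", InsertPt);
}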
1990
1991 /// Create a clone of \p CI with a different set of operand bundles and
1992 /// insert it before \p InsertPt.
1993 ///
1994 /// The returned call instruction is identical to \p CI in every way except that
1995 /// the operand bundles for the new instruction are set to the operand bundles
1996 /// in \p Bundles.
1997 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1998 Instruction *InsertPt = nullptr);
1999
2000 /// Generate the IR for a call to malloc:
2001 /// 1. Compute the malloc call's argument as the specified type's size,
2002 /// possibly multiplied by the array size if the array size is not
2003 /// constant 1.
2004 /// 2. Call malloc with that argument.
2005 /// 3. Bitcast the result of the malloc call to the specified type.
2006 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2007 Type *AllocTy, Value *AllocSize,
2008 Value *ArraySize = nullptr,
2009 Function *MallocF = nullptr,
2010 const Twine &Name = "");
2011 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2012 Type *AllocTy, Value *AllocSize,
2013 Value *ArraySize = nullptr,
2014 Function *MallocF = nullptr,
2015 const Twine &Name = "");
2016 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2017 Type *AllocTy, Value *AllocSize,
2018 Value *ArraySize = nullptr,
2019 ArrayRef<OperandBundleDef> Bundles = None,
2020 Function *MallocF = nullptr,
2021 const Twine &Name = "");
2022 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2023 Type *AllocTy, Value *AllocSize,
2024 Value *ArraySize = nullptr,
2025 ArrayRef<OperandBundleDef> Bundles = None,
2026 Function *MallocF = nullptr,
2027 const Twine &Name = "");
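A minimal sketch of the three-step CreateMalloc recipe described above, assuming an existing LLVMContext and insertion point (all names here are illustrative): the allocation size is computed from the allocated type, malloc is called, and the raw result is bitcast back to a pointer to AllocTy.

// Illustrative only.
static Instruction *emitHeapDouble(LLVMContext &Ctx, Instruction *InsertPt) {
  Type *IntPtrTy = Type::getInt64Ty(Ctx);   // assumes a 64-bit target
  Type *AllocTy  = Type::getDoubleTy(Ctx);
  Value *AllocSize = ConstantExpr::getSizeOf(AllocTy);
  return CallInst::CreateMalloc(InsertPt, IntPtrTy, AllocTy, AllocSize,
                                /*ArraySize=*/nullptr, /*MallocF=*/nullptr,
                                "heap.tmp");
}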
2028 /// Generate the IR for a call to the builtin free function.
2029 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
2030 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
2031 static Instruction *CreateFree(Value *Source,
2032 ArrayRef<OperandBundleDef> Bundles,
2033 Instruction *InsertBefore);
2034 static Instruction *CreateFree(Value *Source,
2035 ArrayRef<OperandBundleDef> Bundles,
2036 BasicBlock *InsertAtEnd);
2037
2038 // Note that 'musttail' implies 'tail'.
2039 enum TailCallKind {
2040 TCK_None = 0,
2041 TCK_Tail = 1,
2042 TCK_MustTail = 2,
2043 TCK_NoTail = 3
2044 };
2045 TailCallKind getTailCallKind() const {
2046 return TailCallKind(getSubclassDataFromInstruction() & 3);
2047 }
2048
2049 bool isTailCall() const {
2050 unsigned Kind = getSubclassDataFromInstruction() & 3;
2051 return Kind == TCK_Tail || Kind == TCK_MustTail;
2052 }
2053
2054 bool isMustTailCall() const {
2055 return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
2056 }
2057
2058 bool isNoTailCall() const {
2059 return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
2060 }
2061
2062 void setTailCall(bool isTC = true) {
2063 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2064 unsigned(isTC ? TCK_Tail : TCK_None));
2065 }
2066
2067 void setTailCallKind(TailCallKind TCK) {
2068 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2069 unsigned(TCK));
2070 }
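Since the kind is packed into the low two bits of the instruction's subclass data, setting and reading it round-trips; a short illustrative check (CI is an assumed CallInst*, not from the report):

// Illustrative only.
CI->setTailCallKind(CallInst::TCK_MustTail);
assert(CI->getTailCallKind() == CallInst::TCK_MustTail);
assert(CI->isTailCall() && CI->isMustTailCall()); // 'musttail' implies 'tail'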
2071
2072 /// Return true if the call can return twice
2073 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
2074 void setCanReturnTwice() {
2075 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
2076 }
2077
2078 /// Check if this call is an inline asm statement.
2079 bool isInlineAsm() const { return isa<InlineAsm>(Op<-1>()); }
2080
2081 // Methods for support type inquiry through isa, cast, and dyn_cast:
2082 static bool classof(const Instruction *I) {
2083 return I->getOpcode() == Instruction::Call;
2084 }
2085 static bool classof(const Value *V) {
2086 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2087 }
2088
2089private:
2090 // Shadow Instruction::setInstructionSubclassData with a private forwarding
2091 // method so that subclasses cannot accidentally use it.
2092 void setInstructionSubclassData(unsigned short D) {
2093 Instruction::setInstructionSubclassData(D);
2094 }
2095};
2096
2097template <>
2098struct OperandTraits<CallBase<CallInst>>
2099 : public VariadicOperandTraits<CallBase<CallInst>, 1> {};
2100
2101CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
2102 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2103 BasicBlock *InsertAtEnd)
2104 : CallBase<CallInst>(
2105 cast<FunctionType>(
2106 cast<PointerType>(Func->getType())->getElementType())
2107 ->getReturnType(),
2108 Instruction::Call,
2109 OperandTraits<CallBase<CallInst>>::op_end(this) -
2110 (Args.size() + CountBundleInputs(Bundles) + 1),
2111 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
2112 init(Func, Args, Bundles, NameStr);
2113}
2114
2115CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
2116 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2117 Instruction *InsertBefore)
2118 : CallBase<CallInst>(Ty->getReturnType(), Instruction::Call,
2119 OperandTraits<CallBase<CallInst>>::op_end(this) -
2120 (Args.size() + CountBundleInputs(Bundles) + 1),
2121 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
2122 InsertBefore) {
2123 init(Ty, Func, Args, Bundles, NameStr);
2124}
2125
2126//===----------------------------------------------------------------------===//
2127// SelectInst Class
2128//===----------------------------------------------------------------------===//
2129
2130/// This class represents the LLVM 'select' instruction.
2131///
2132class SelectInst : public Instruction {
2133 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2134 Instruction *InsertBefore)
2135 : Instruction(S1->getType(), Instruction::Select,
2136 &Op<0>(), 3, InsertBefore) {
2137 init(C, S1, S2);
2138 setName(NameStr);
2139 }
2140
2141 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2142 BasicBlock *InsertAtEnd)
2143 : Instruction(S1->getType(), Instruction::Select,
2144 &Op<0>(), 3, InsertAtEnd) {
2145 init(C, S1, S2);
2146 setName(NameStr);
2147 }
2148
2149 void init(Value *C, Value *S1, Value *S2) {
2150 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
2151 Op<0>() = C;
2152 Op<1>() = S1;
2153 Op<2>() = S2;
2154 }
2155
2156protected:
2157 // Note: Instruction needs to be a friend here to call cloneImpl.
2158 friend class Instruction;
2159
2160 SelectInst *cloneImpl() const;
2161
2162public:
2163 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2164 const Twine &NameStr = "",
2165 Instruction *InsertBefore = nullptr,
2166 Instruction *MDFrom = nullptr) {
2167 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
2168 if (MDFrom)
2169 Sel->copyMetadata(*MDFrom);
2170 return Sel;
2171 }
2172
2173 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2174 const Twine &NameStr,
2175 BasicBlock *InsertAtEnd) {
2176 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
2177 }
2178
2179 const Value *getCondition() const { return Op<0>(); }
2180 const Value *getTrueValue() const { return Op<1>(); }
2181 const Value *getFalseValue() const { return Op<2>(); }
2182 Value *getCondition() { return Op<0>(); }
2183 Value *getTrueValue() { return Op<1>(); }
2184 Value *getFalseValue() { return Op<2>(); }
2185
2186 void setCondition(Value *V) { Op<0>() = V; }
2187 void setTrueValue(Value *V) { Op<1>() = V; }
2188 void setFalseValue(Value *V) { Op<2>() = V; }
2189
2190 /// Return a string if the specified operands are invalid
2191 /// for a select operation, otherwise return null.
2192 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
2193
2194 /// Transparently provide more efficient getOperand methods.
2195 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2196
2197 OtherOps getOpcode() const {
2198 return static_cast<OtherOps>(Instruction::getOpcode());
2199 }
2200
2201 // Methods for support type inquiry through isa, cast, and dyn_cast:
2202 static bool classof(const Instruction *I) {
2203 return I->getOpcode() == Instruction::Select;
2204 }
2205 static bool classof(const Value *V) {
2206 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2207 }
2208};
2209
2210template <>
2211struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
2212};
2213
2214DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
2215
2216//===----------------------------------------------------------------------===//
2217// VAArgInst Class
2218//===----------------------------------------------------------------------===//
2219
2220/// This class represents the va_arg llvm instruction, which returns
2221/// an argument of the specified type given a va_list and increments that list
2222///
2223class VAArgInst : public UnaryInstruction {
2224protected:
2225 // Note: Instruction needs to be a friend here to call cloneImpl.
2226 friend class Instruction;
2227
2228 VAArgInst *cloneImpl() const;
2229
2230public:
2231 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
2232 Instruction *InsertBefore = nullptr)
2233 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
2234 setName(NameStr);
2235 }
2236
2237 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
2238 BasicBlock *InsertAtEnd)
2239 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
2240 setName(NameStr);
2241 }
2242
2243 Value *getPointerOperand() { return getOperand(0); }
2244 const Value *getPointerOperand() const { return getOperand(0); }
2245 static unsigned getPointerOperandIndex() { return 0U; }
2246
2247 // Methods for support type inquiry through isa, cast, and dyn_cast:
2248 static bool classof(const Instruction *I) {
2249 return I->getOpcode() == VAArg;
2250 }
2251 static bool classof(const Value *V) {
2252 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2253 }
2254};
2255
2256//===----------------------------------------------------------------------===//
2257// ExtractElementInst Class
2258//===----------------------------------------------------------------------===//
2259
2260/// This instruction extracts a single (scalar)
2261/// element from a VectorType value
2262///
2263class ExtractElementInst : public Instruction {
2264 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
2265 Instruction *InsertBefore = nullptr);
2266 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
2267 BasicBlock *InsertAtEnd);
2268
2269protected:
2270 // Note: Instruction needs to be a friend here to call cloneImpl.
2271 friend class Instruction;
2272
2273 ExtractElementInst *cloneImpl() const;
2274
2275public:
2276 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2277 const Twine &NameStr = "",
2278 Instruction *InsertBefore = nullptr) {
2279 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
2280 }
2281
2282 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2283 const Twine &NameStr,
2284 BasicBlock *InsertAtEnd) {
2285 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
2286 }
2287
2288 /// Return true if an extractelement instruction can be
2289 /// formed with the specified operands.
2290 static bool isValidOperands(const Value *Vec, const Value *Idx);
2291
2292 Value *getVectorOperand() { return Op<0>(); }
2293 Value *getIndexOperand() { return Op<1>(); }
2294 const Value *getVectorOperand() const { return Op<0>(); }
2295 const Value *getIndexOperand() const { return Op<1>(); }
2296
2297 VectorType *getVectorOperandType() const {
2298 return cast<VectorType>(getVectorOperand()->getType());
2299 }
2300
2301 /// Transparently provide more efficient getOperand methods.
2302 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2303
2304 // Methods for support type inquiry through isa, cast, and dyn_cast:
2305 static bool classof(const Instruction *I) {
2306 return I->getOpcode() == Instruction::ExtractElement;
2307 }
2308 static bool classof(const Value *V) {
2309 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2310 }
2311};
2312
2313template <>
2314struct OperandTraits<ExtractElementInst> :
2315 public FixedNumOperandTraits<ExtractElementInst, 2> {
2316};
2317
2318DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
2319
2320//===----------------------------------------------------------------------===//
2321// InsertElementInst Class
2322//===----------------------------------------------------------------------===//
2323
2324/// This instruction inserts a single (scalar)
2325/// element into a VectorType value
2326///
2327class InsertElementInst : public Instruction {
2328 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
2329 const Twine &NameStr = "",
2330 Instruction *InsertBefore = nullptr);
2331 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
2332 BasicBlock *InsertAtEnd);
2333
2334protected:
2335 // Note: Instruction needs to be a friend here to call cloneImpl.
2336 friend class Instruction;
2337
2338 InsertElementInst *cloneImpl() const;
2339
2340public:
2341 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2342 const Twine &NameStr = "",
2343 Instruction *InsertBefore = nullptr) {
2344 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
2345 }
2346
2347 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2348 const Twine &NameStr,
2349 BasicBlock *InsertAtEnd) {
2350 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
2351 }
2352
2353 /// Return true if an insertelement instruction can be
2354 /// formed with the specified operands.
2355 static bool isValidOperands(const Value *Vec, const Value *NewElt,
2356 const Value *Idx);
2357
2358 /// Overload to return most specific vector type.
2359 ///
2360 VectorType *getType() const {
2361 return cast<VectorType>(Instruction::getType());
2362 }
2363
2364 /// Transparently provide more efficient getOperand methods.
2365 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2366
2367 // Methods for support type inquiry through isa, cast, and dyn_cast:
2368 static bool classof(const Instruction *I) {
2369 return I->getOpcode() == Instruction::InsertElement;
2370 }
2371 static bool classof(const Value *V) {
2372 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2373 }
2374};
2375
2376template <>
2377struct OperandTraits<InsertElementInst> :
2378 public FixedNumOperandTraits<InsertElementInst, 3> {
2379};
2380
2381DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
2382
2383//===----------------------------------------------------------------------===//
2384// ShuffleVectorInst Class
2385//===----------------------------------------------------------------------===//
2386
2387/// This instruction constructs a fixed permutation of two
2388/// input vectors.
2389///
2390class ShuffleVectorInst : public Instruction {
2391protected:
2392 // Note: Instruction needs to be a friend here to call cloneImpl.
2393 friend class Instruction;
2394
2395 ShuffleVectorInst *cloneImpl() const;
2396
2397public:
2398 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2399 const Twine &NameStr = "",
2400 Instruction *InsertBefor = nullptr);
2401 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2402 const Twine &NameStr, BasicBlock *InsertAtEnd);
2403
2404 // allocate space for exactly three operands
2405 void *operator new(size_t s) {
2406 return User::operator new(s, 3);
2407 }
2408
2409 /// Return true if a shufflevector instruction can be
2410 /// formed with the specified operands.
2411 static bool isValidOperands(const Value *V1, const Value *V2,
2412 const Value *Mask);
2413
2414 /// Overload to return most specific vector type.
2415 ///
2416 VectorType *getType() const {
2417 return cast<VectorType>(Instruction::getType());
2418 }
2419
2420 /// Transparently provide more efficient getOperand methods.
2421 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2422
2423 Constant *getMask() const {
2424 return cast<Constant>(getOperand(2));
2425 }
2426
2427 /// Return the shuffle mask value for the specified element of the mask.
2428 /// Return -1 if the element is undef.
2429 static int getMaskValue(Constant *Mask, unsigned Elt);
2430
2431 /// Return the shuffle mask value of this instruction for the given element
2432 /// index. Return -1 if the element is undef.
2433 int getMaskValue(unsigned Elt) const {
2434 return getMaskValue(getMask(), Elt);
2435 }
2436
2437 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2438 /// elements of the mask are returned as -1.
2439 static void getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result);
2440
2441 /// Return the mask for this instruction as a vector of integers. Undefined
2442 /// elements of the mask are returned as -1.
2443 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2444 return getShuffleMask(getMask(), Result);
2445 }
2446
2447 SmallVector<int, 16> getShuffleMask() const {
2448 SmallVector<int, 16> Mask;
2449 getShuffleMask(Mask);
2450 return Mask;
2451 }
2452
2453 /// Change values in a shuffle permute mask assuming the two vector operands
2454 /// of length InVecNumElts have swapped position.
2455 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2456 unsigned InVecNumElts) {
2457 for (int &Idx : Mask) {
2458 if (Idx == -1)
2459 continue;
2460 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2461 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2462 "shufflevector mask index out of range");
2463 }
2464 }
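A worked example of the commutation above (illustrative values, not from the report): with InVecNumElts == 4, indices into the first vector (0..3) are shifted into the second vector's range (4..7) and vice versa, while undef (-1) entries are left untouched.

// Illustrative only.
SmallVector<int, 4> Mask = {0, 5, -1, 3};
ShuffleVectorInst::commuteShuffleMask(Mask, /*InVecNumElts=*/4);
// Mask is now {4, 1, -1, 7}.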
2465
2466 // Methods for support type inquiry through isa, cast, and dyn_cast:
2467 static bool classof(const Instruction *I) {
2468 return I->getOpcode() == Instruction::ShuffleVector;
2469 }
2470 static bool classof(const Value *V) {
2471 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2472 }
2473};
2474
2475template <>
2476struct OperandTraits<ShuffleVectorInst> :
2477 public FixedNumOperandTraits<ShuffleVectorInst, 3> {
2478};
2479
2480DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2481
2482//===----------------------------------------------------------------------===//
2483// ExtractValueInst Class
2484//===----------------------------------------------------------------------===//
2485
2486/// This instruction extracts a struct member or array
2487/// element value from an aggregate value.
2488///
2489class ExtractValueInst : public UnaryInstruction {
2490 SmallVector<unsigned, 4> Indices;
2491
2492 ExtractValueInst(const ExtractValueInst &EVI);
2493
2494 /// Constructors - Create an extractvalue instruction with a base aggregate
2495 /// value and a list of indices. The first ctor can optionally insert before
2496 /// an existing instruction, the second appends the new instruction to the
2497 /// specified BasicBlock.
2498 inline ExtractValueInst(Value *Agg,
2499 ArrayRef<unsigned> Idxs,
2500 const Twine &NameStr,
2501 Instruction *InsertBefore);
2502 inline ExtractValueInst(Value *Agg,
2503 ArrayRef<unsigned> Idxs,
2504 const Twine &NameStr, BasicBlock *InsertAtEnd);
2505
2506 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2507
2508protected:
2509 // Note: Instruction needs to be a friend here to call cloneImpl.
2510 friend class Instruction;
2511
2512 ExtractValueInst *cloneImpl() const;
2513
2514public:
2515 static ExtractValueInst *Create(Value *Agg,
2516 ArrayRef<unsigned> Idxs,
2517 const Twine &NameStr = "",
2518 Instruction *InsertBefore = nullptr) {
2519 return new
2520 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2521 }
2522
2523 static ExtractValueInst *Create(Value *Agg,
2524 ArrayRef<unsigned> Idxs,
2525 const Twine &NameStr,
2526 BasicBlock *InsertAtEnd) {
2527 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2528 }
2529
2530 /// Returns the type of the element that would be extracted
2531 /// with an extractvalue instruction with the specified parameters.
2532 ///
2533 /// Null is returned if the indices are invalid for the specified type.
2534 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2535
2536 using idx_iterator = const unsigned*;
2537
2538 inline idx_iterator idx_begin() const { return Indices.begin(); }
2539 inline idx_iterator idx_end() const { return Indices.end(); }
2540 inline iterator_range<idx_iterator> indices() const {
2541 return make_range(idx_begin(), idx_end());
2542 }
2543
2544 Value *getAggregateOperand() {
2545 return getOperand(0);
2546 }
2547 const Value *getAggregateOperand() const {
2548 return getOperand(0);
2549 }
2550 static unsigned getAggregateOperandIndex() {
2551 return 0U; // get index for modifying correct operand
2552 }
2553
2554 ArrayRef<unsigned> getIndices() const {
2555 return Indices;
2556 }
2557
2558 unsigned getNumIndices() const {
2559 return (unsigned)Indices.size();
2560 }
2561
2562 bool hasIndices() const {
2563 return true;
2564 }
2565
2566 // Methods for support type inquiry through isa, cast, and dyn_cast:
2567 static bool classof(const Instruction *I) {
2568 return I->getOpcode() == Instruction::ExtractValue;
2569 }
2570 static bool classof(const Value *V) {
2571 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2572 }
2573};
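A brief usage sketch (Agg and InsertPt are assumed placeholders, not from the analyzed code): extracting the second member of an aggregate such as {i32, float}. getIndexedType would return null for an invalid index path, which the constructor's checkGEPType call asserts against.

// Illustrative only.
static Value *extractSecondField(Value *Agg, Instruction *InsertPt) {
  return ExtractValueInst::Create(Agg, {1u}, "agg.field1", InsertPt);
}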
2574
2575ExtractValueInst::ExtractValueInst(Value *Agg,
2576 ArrayRef<unsigned> Idxs,
2577 const Twine &NameStr,
2578 Instruction *InsertBefore)
2579 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2580 ExtractValue, Agg, InsertBefore) {
2581 init(Idxs, NameStr);
2582}
2583
2584ExtractValueInst::ExtractValueInst(Value *Agg,
2585 ArrayRef<unsigned> Idxs,
2586 const Twine &NameStr,
2587 BasicBlock *InsertAtEnd)
2588 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2589 ExtractValue, Agg, InsertAtEnd) {
2590 init(Idxs, NameStr);
2591}
2592
2593//===----------------------------------------------------------------------===//
2594// InsertValueInst Class
2595//===----------------------------------------------------------------------===//
2596
2597/// This instruction inserts a struct field or array element
2598/// value into an aggregate value.
2599///
2600class InsertValueInst : public Instruction {
2601 SmallVector<unsigned, 4> Indices;
2602
2603 InsertValueInst(const InsertValueInst &IVI);
2604
2605 /// Constructors - Create an insertvalue instruction with a base aggregate
2606 /// value, a value to insert, and a list of indices. The first ctor can
2607 /// optionally insert before an existing instruction, the second appends
2608 /// the new instruction to the specified BasicBlock.
2609 inline InsertValueInst(Value *Agg, Value *Val,
2610 ArrayRef<unsigned> Idxs,
2611 const Twine &NameStr,
2612 Instruction *InsertBefore);
2613 inline InsertValueInst(Value *Agg, Value *Val,
2614 ArrayRef<unsigned> Idxs,
2615 const Twine &NameStr, BasicBlock *InsertAtEnd);
2616
2617 /// Constructors - These two constructors are convenience methods because one
2618 /// and two index insertvalue instructions are so common.
2619 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2620 const Twine &NameStr = "",
2621 Instruction *InsertBefore = nullptr);
2622 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2623 BasicBlock *InsertAtEnd);
2624
2625 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2626 const Twine &NameStr);
2627
2628protected:
2629 // Note: Instruction needs to be a friend here to call cloneImpl.
2630 friend class Instruction;
2631
2632 InsertValueInst *cloneImpl() const;
2633
2634public:
2635 // allocate space for exactly two operands
2636 void *operator new(size_t s) {
2637 return User::operator new(s, 2);
2638 }
2639
2640 static InsertValueInst *Create(Value *Agg, Value *Val,
2641 ArrayRef<unsigned> Idxs,
2642 const Twine &NameStr = "",
2643 Instruction *InsertBefore = nullptr) {
2644 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2645 }
2646
2647 static InsertValueInst *Create(Value *Agg, Value *Val,
2648 ArrayRef<unsigned> Idxs,
2649 const Twine &NameStr,
2650 BasicBlock *InsertAtEnd) {
2651 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2652 }
2653
2654 /// Transparently provide more efficient getOperand methods.
2655 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2656
2657 using idx_iterator = const unsigned*;
2658
2659 inline idx_iterator idx_begin() const { return Indices.begin(); }
2660 inline idx_iterator idx_end() const { return Indices.end(); }
2661 inline iterator_range<idx_iterator> indices() const {
2662 return make_range(idx_begin(), idx_end());
2663 }
2664
2665 Value *getAggregateOperand() {
2666 return getOperand(0);
2667 }
2668 const Value *getAggregateOperand() const {
2669 return getOperand(0);
2670 }
2671 static unsigned getAggregateOperandIndex() {
2672 return 0U; // get index for modifying correct operand
2673 }
2674
2675 Value *getInsertedValueOperand() {
2676 return getOperand(1);
2677 }
2678 const Value *getInsertedValueOperand() const {
2679 return getOperand(1);
2680 }
2681 static unsigned getInsertedValueOperandIndex() {
2682 return 1U; // get index for modifying correct operand
2683 }
2684
2685 ArrayRef<unsigned> getIndices() const {
2686 return Indices;
2687 }
2688
2689 unsigned getNumIndices() const {
2690 return (unsigned)Indices.size();
2691 }
2692
2693 bool hasIndices() const {
2694 return true;
2695 }
2696
2697 // Methods for support type inquiry through isa, cast, and dyn_cast:
2698 static bool classof(const Instruction *I) {
2699 return I->getOpcode() == Instruction::InsertValue;
2700 }
2701 static bool classof(const Value *V) {
2702 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2703 }
2704};
2705
2706template <>
2707struct OperandTraits<InsertValueInst> :
2708 public FixedNumOperandTraits<InsertValueInst, 2> {
2709};
2710
2711InsertValueInst::InsertValueInst(Value *Agg,
2712 Value *Val,
2713 ArrayRef<unsigned> Idxs,
2714 const Twine &NameStr,
2715 Instruction *InsertBefore)
2716 : Instruction(Agg->getType(), InsertValue,
2717 OperandTraits<InsertValueInst>::op_begin(this),
2718 2, InsertBefore) {
2719 init(Agg, Val, Idxs, NameStr);
2720}
2721
2722InsertValueInst::InsertValueInst(Value *Agg,
2723 Value *Val,
2724 ArrayRef<unsigned> Idxs,
2725 const Twine &NameStr,
2726 BasicBlock *InsertAtEnd)
2727 : Instruction(Agg->getType(), InsertValue,
2728 OperandTraits<InsertValueInst>::op_begin(this),
2729 2, InsertAtEnd) {
2730 init(Agg, Val, Idxs, NameStr);
2731}
2732
2733DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2734
2735//===----------------------------------------------------------------------===//
2736// PHINode Class
2737//===----------------------------------------------------------------------===//
2738
2739// PHINode - The PHINode class is used to represent the magical mystical PHI
2740// node, that can not exist in nature, but can be synthesized in a computer
2741// scientist's overactive imagination.
2742//
2743class PHINode : public Instruction {
2744 /// The number of operands actually allocated. NumOperands is
2745 /// the number actually in use.
2746 unsigned ReservedSpace;
2747
2748 PHINode(const PHINode &PN);
2749
2750 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2751 const Twine &NameStr = "",
2752 Instruction *InsertBefore = nullptr)
2753 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2754 ReservedSpace(NumReservedValues) {
2755 setName(NameStr);
2756 allocHungoffUses(ReservedSpace);
2757 }
2758
2759 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2760 BasicBlock *InsertAtEnd)
2761 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2762 ReservedSpace(NumReservedValues) {
2763 setName(NameStr);
2764 allocHungoffUses(ReservedSpace);
2765 }
2766
2767protected:
2768 // Note: Instruction needs to be a friend here to call cloneImpl.
2769 friend class Instruction;
2770
2771 PHINode *cloneImpl() const;
2772
2773 // allocHungoffUses - this is more complicated than the generic
2774 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2775 // values and pointers to the incoming blocks, all in one allocation.
2776 void allocHungoffUses(unsigned N) {
2777 User::allocHungoffUses(N, /* IsPhi */ true);
2778 }
2779
2780public:
2781 /// Constructors - NumReservedValues is a hint for the number of incoming
2782 /// edges that this phi node will have (use 0 if you really have no idea).
2783 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2784 const Twine &NameStr = "",
2785 Instruction *InsertBefore = nullptr) {
2786 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2787 }
2788
2789 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2790 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2791 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2792 }
2793
2794 /// Provide fast operand accessors
2795 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2796
2797 // Block iterator interface. This provides access to the list of incoming
2798 // basic blocks, which parallels the list of incoming values.
2799
2800 using block_iterator = BasicBlock **;
2801 using const_block_iterator = BasicBlock * const *;
2802
2803 block_iterator block_begin() {
2804 Use::UserRef *ref =
2805 reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
2806 return reinterpret_cast<block_iterator>(ref + 1);
2807 }
2808
2809 const_block_iterator block_begin() const {
2810 const Use::UserRef *ref =
2811 reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
2812 return reinterpret_cast<const_block_iterator>(ref + 1);
2813 }
2814
2815 block_iterator block_end() {
2816 return block_begin() + getNumOperands();
2817 }
2818
2819 const_block_iterator block_end() const {
2820 return block_begin() + getNumOperands();
2821 }
2822
2823 iterator_range<block_iterator> blocks() {
2824 return make_range(block_begin(), block_end());
2825 }
2826
2827 iterator_range<const_block_iterator> blocks() const {
2828 return make_range(block_begin(), block_end());
2829 }
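Because the incoming-block list parallels the incoming-value list, the two can be walked in lockstep; a small illustrative dump routine follows (PN is an assumed PHINode*, and llvm/Support/raw_ostream.h is assumed available for errs()).

// Illustrative only.
static void dumpIncoming(const PHINode *PN) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    errs() << PN->getIncomingBlock(i)->getName() << " -> "
           << *PN->getIncomingValue(i) << "\n";
}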
2830
2831 op_range incoming_values() { return operands(); }
2832
2833 const_op_range incoming_values() const { return operands(); }
2834
2835 /// Return the number of incoming edges
2836 ///
2837 unsigned getNumIncomingValues() const { return getNumOperands(); }
2838
2839 /// Return incoming value number x
2840 ///
2841 Value *getIncomingValue(unsigned i) const {
2842 return getOperand(i);
2843 }
2844 void setIncomingValue(unsigned i, Value *V) {
2845 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 2845, __extension__ __PRETTY_FUNCTION__))
;
2846 assert(getType() == V->getType() &&
2847 "All operands to PHI node must be the same type as the PHI node!");
2848 setOperand(i, V);
2849 }
2850
2851 static unsigned getOperandNumForIncomingValue(unsigned i) {
2852 return i;
2853 }
2854
2855 static unsigned getIncomingValueNumForOperand(unsigned i) {
2856 return i;
2857 }
2858
2859 /// Return incoming basic block number @p i.
2860 ///
2861 BasicBlock *getIncomingBlock(unsigned i) const {
2862 return block_begin()[i];
2863 }
2864
2865 /// Return incoming basic block corresponding
2866 /// to an operand of the PHI.
2867 ///
2868 BasicBlock *getIncomingBlock(const Use &U) const {
2869 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2870 return getIncomingBlock(unsigned(&U - op_begin()));
2871 }
2872
2873 /// Return incoming basic block corresponding
2874 /// to value use iterator.
2875 ///
2876 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2877 return getIncomingBlock(I.getUse());
2878 }
2879
2880 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2881 assert(BB && "PHI node got a null basic block!");
2882 block_begin()[i] = BB;
2883 }
2884
2885 /// Add an incoming value to the end of the PHI list
2886 ///
2887 void addIncoming(Value *V, BasicBlock *BB) {
2888 if (getNumOperands() == ReservedSpace)
2889 growOperands(); // Get more space!
2890 // Initialize some new operands.
2891 setNumHungOffUseOperands(getNumOperands() + 1);
2892 setIncomingValue(getNumOperands() - 1, V);
2893 setIncomingBlock(getNumOperands() - 1, BB);
2894 }
2895
2896 /// Remove an incoming value. This is useful if a
2897 /// predecessor basic block is deleted. The value removed is returned.
2898 ///
2899 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2900 /// is true), the PHI node is destroyed and any uses of it are replaced with
2901 /// dummy values. The only time there should be zero incoming values to a PHI
2902 /// node is when the block is dead, so this strategy is sound.
2903 ///
2904 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2905
2906 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2907 int Idx = getBasicBlockIndex(BB);
2908 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2909 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2910 }
2911
2912 /// Return the first index of the specified basic
2913 /// block in the value list for this PHI. Returns -1 if no instance.
2914 ///
2915 int getBasicBlockIndex(const BasicBlock *BB) const {
2916 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2917 if (block_begin()[i] == BB)
2918 return i;
2919 return -1;
2920 }
2921
2922 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2923 int Idx = getBasicBlockIndex(BB);
2924 assert(Idx >= 0 && "Invalid basic block argument!");
16. Within the expansion of the macro 'assert':
    a. Assuming 'Idx' is >= 0
2925 return getIncomingValue(Idx);
17. Calling 'PHINode::getIncomingValue'
18. Returning from 'PHINode::getIncomingValue'
2926 }
2927
2928 /// If the specified PHI node always merges together the
2929 /// same value, return the value, otherwise return null.
2930 Value *hasConstantValue() const;
2931
2932 /// Whether the specified PHI node always merges
2933 /// together the same value, assuming undefs are equal to a unique
2934 /// non-undef value.
2935 bool hasConstantOrUndefValue() const;
2936
2937 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2938 static bool classof(const Instruction *I) {
2939 return I->getOpcode() == Instruction::PHI;
2940 }
2941 static bool classof(const Value *V) {
2942 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2943 }
2944
2945private:
2946 void growOperands();
2947};
2948
2949template <>
2950struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2951};
2952
2953 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
2954
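As a reference point for the PHI accessors traced in the path above, here is a minimal sketch of how this interface is typically driven when building IR by hand. The helper name and the block/value parameters are illustrative assumptions, not code from this report; only PHINode members shown in the listing are relied on.

#include <cassert>
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: merge two values flowing into MergeBB from ThenBB/ElseBB.
// MergeBB is assumed to already have a terminator, so getFirstNonPHI() is valid.
static PHINode *mergeValues(BasicBlock *MergeBB, BasicBlock *ThenBB, Value *ThenV,
                            BasicBlock *ElseBB, Value *ElseV) {
  // Reserve both incoming slots up front; otherwise addIncoming grows the
  // hung-off operand storage on demand.
  PHINode *Phi = PHINode::Create(ThenV->getType(), /*NumReservedValues=*/2,
                                 "merged", MergeBB->getFirstNonPHI());
  Phi->addIncoming(ThenV, ThenBB);
  Phi->addIncoming(ElseV, ElseBB);
  // getIncomingValueForBlock asserts Idx >= 0, exactly as in the path above.
  assert(Phi->getIncomingValueForBlock(ThenBB) == ThenV);
  return Phi;
}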
2955//===----------------------------------------------------------------------===//
2956// LandingPadInst Class
2957//===----------------------------------------------------------------------===//
2958
2959//===---------------------------------------------------------------------------
2960/// The landingpad instruction holds all of the information
2961/// necessary to generate correct exception handling. The landingpad instruction
2962/// cannot be moved from the top of a landing pad block, which itself is
2963/// accessible only from the 'unwind' edge of an invoke. This uses the
2964/// SubclassData field in Value to store whether or not the landingpad is a
2965/// cleanup.
2966///
2967class LandingPadInst : public Instruction {
2968 /// The number of operands actually allocated. NumOperands is
2969 /// the number actually in use.
2970 unsigned ReservedSpace;
2971
2972 LandingPadInst(const LandingPadInst &LP);
2973
2974public:
2975 enum ClauseType { Catch, Filter };
2976
2977private:
2978 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2979 const Twine &NameStr, Instruction *InsertBefore);
2980 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2981 const Twine &NameStr, BasicBlock *InsertAtEnd);
2982
2983 // Allocate space for exactly zero operands.
2984 void *operator new(size_t s) {
2985 return User::operator new(s);
2986 }
2987
2988 void growOperands(unsigned Size);
2989 void init(unsigned NumReservedValues, const Twine &NameStr);
2990
2991protected:
2992 // Note: Instruction needs to be a friend here to call cloneImpl.
2993 friend class Instruction;
2994
2995 LandingPadInst *cloneImpl() const;
2996
2997public:
2998 /// Constructors - NumReservedClauses is a hint for the number of incoming
2999 /// clauses that this landingpad will have (use 0 if you really have no idea).
3000 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3001 const Twine &NameStr = "",
3002 Instruction *InsertBefore = nullptr);
3003 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3004 const Twine &NameStr, BasicBlock *InsertAtEnd);
3005
3006 /// Provide fast operand accessors
3007 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3008
3009 /// Return 'true' if this landingpad instruction is a
3010 /// cleanup. I.e., it should be run when unwinding even if its landing pad
3011 /// doesn't catch the exception.
3012 bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
3013
3014 /// Indicate that this landingpad instruction is a cleanup.
3015 void setCleanup(bool V) {
3016 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
3017 (V ? 1 : 0));
3018 }
3019
3020 /// Add a catch or filter clause to the landing pad.
3021 void addClause(Constant *ClauseVal);
3022
3023 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3024 /// determine what type of clause this is.
3025 Constant *getClause(unsigned Idx) const {
3026 return cast<Constant>(getOperandList()[Idx]);
3027 }
3028
3029 /// Return 'true' if the clause and index Idx is a catch clause.
3030 bool isCatch(unsigned Idx) const {
3031 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3032 }
3033
3034 /// Return 'true' if the clause and index Idx is a filter clause.
3035 bool isFilter(unsigned Idx) const {
3036 return isa<ArrayType>(getOperandList()[Idx]->getType());
3037 }
3038
3039 /// Get the number of clauses for this landing pad.
3040 unsigned getNumClauses() const { return getNumOperands(); }
3041
3042 /// Grow the size of the operand list to accommodate the new
3043 /// number of clauses.
3044 void reserveClauses(unsigned Size) { growOperands(Size); }
3045
3046 // Methods for support type inquiry through isa, cast, and dyn_cast:
3047 static bool classof(const Instruction *I) {
3048 return I->getOpcode() == Instruction::LandingPad;
3049 }
3050 static bool classof(const Value *V) {
3051 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3052 }
3053};
3054
3055template <>
3056struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
3057};
3058
3059 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
3060
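To connect the LandingPadInst accessors above to their usual use, a small hedged sketch follows. The enclosing function is assumed to already carry a personality function, and RetTy, ExnTypeInfo, and LPadBB are placeholder names for this illustration only.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: a cleanup landing pad that also catches one type.
static LandingPadInst *makeLandingPad(Type *RetTy, Constant *ExnTypeInfo,
                                      BasicBlock *LPadBB) {
  LandingPadInst *LP =
      LandingPadInst::Create(RetTy, /*NumReservedClauses=*/1, "lpad", LPadBB);
  LP->setCleanup(true);       // run this pad even when no clause matches
  LP->addClause(ExnTypeInfo); // non-array clause type, so isCatch() reports true
  return LP;
}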
3061//===----------------------------------------------------------------------===//
3062// ReturnInst Class
3063//===----------------------------------------------------------------------===//
3064
3065//===---------------------------------------------------------------------------
3066/// Return a value (possibly void), from a function. Execution
3067/// does not continue in this function any longer.
3068///
3069class ReturnInst : public TerminatorInst {
3070 ReturnInst(const ReturnInst &RI);
3071
3072private:
3073 // ReturnInst constructors:
3074 // ReturnInst() - 'ret void' instruction
3075 // ReturnInst( null) - 'ret void' instruction
3076 // ReturnInst(Value* X) - 'ret X' instruction
3077 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3078 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3079 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3080 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3081 //
3082 // NOTE: If the Value* passed is of type void then the constructor behaves as
3083 // if it was passed NULL.
3084 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3085 Instruction *InsertBefore = nullptr);
3086 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3087 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3088
3089protected:
3090 // Note: Instruction needs to be a friend here to call cloneImpl.
3091 friend class Instruction;
3092
3093 ReturnInst *cloneImpl() const;
3094
3095public:
3096 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3097 Instruction *InsertBefore = nullptr) {
3098 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3099 }
3100
3101 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3102 BasicBlock *InsertAtEnd) {
3103 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3104 }
3105
3106 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3107 return new(0) ReturnInst(C, InsertAtEnd);
3108 }
3109
3110 /// Provide fast operand accessors
3111 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3112
3113 /// Convenience accessor. Returns null if there is no return value.
3114 Value *getReturnValue() const {
3115 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3116 }
3117
3118 unsigned getNumSuccessors() const { return 0; }
3119
3120 // Methods for support type inquiry through isa, cast, and dyn_cast:
3121 static bool classof(const Instruction *I) {
3122 return (I->getOpcode() == Instruction::Ret);
3123 }
3124 static bool classof(const Value *V) {
3125 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3126 }
3127
3128private:
3129 friend TerminatorInst;
3130
3131 BasicBlock *getSuccessor(unsigned idx) const {
3132 llvm_unreachable("ReturnInst has no successors!");
3133 }
3134
3135 void setSuccessor(unsigned idx, BasicBlock *B) {
3136 llvm_unreachable("ReturnInst has no successors!");
3137 }
3138};
3139
3140template <>
3141struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3142};
3143
3144 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3145
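The ReturnInst factories above distinguish 'ret void' from 'ret X' purely by operand count; a brief sketch (parameter names are illustrative, not from the report):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: terminate BB with a return; RetVal may be null for 'ret void'.
static ReturnInst *emitReturn(LLVMContext &Ctx, Value *RetVal, BasicBlock *BB) {
  ReturnInst *RI = ReturnInst::Create(Ctx, RetVal, BB);
  // getReturnValue() is null exactly when the instruction has zero operands.
  bool IsVoidReturn = (RI->getReturnValue() == nullptr);
  (void)IsVoidReturn;
  return RI;
}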
3146//===----------------------------------------------------------------------===//
3147// BranchInst Class
3148//===----------------------------------------------------------------------===//
3149
3150//===---------------------------------------------------------------------------
3151/// Conditional or Unconditional Branch instruction.
3152///
3153class BranchInst : public TerminatorInst {
3154 /// Ops list - Branches are strange. The operands are ordered:
3155 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3156 /// they don't have to check for cond/uncond branchness. These are mostly
3157 /// accessed relative from op_end().
3158 BranchInst(const BranchInst &BI);
3159 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3160 // BranchInst(BB *B) - 'br B'
3161 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3162 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3163 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3164 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3165 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3166 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3167 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3168 Instruction *InsertBefore = nullptr);
3169 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3170 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3171 BasicBlock *InsertAtEnd);
3172
3173 void AssertOK();
3174
3175protected:
3176 // Note: Instruction needs to be a friend here to call cloneImpl.
3177 friend class Instruction;
3178
3179 BranchInst *cloneImpl() const;
3180
3181public:
3182 static BranchInst *Create(BasicBlock *IfTrue,
3183 Instruction *InsertBefore = nullptr) {
3184 return new(1) BranchInst(IfTrue, InsertBefore);
3185 }
3186
3187 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3188 Value *Cond, Instruction *InsertBefore = nullptr) {
3189 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3190 }
3191
3192 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3193 return new(1) BranchInst(IfTrue, InsertAtEnd);
3194 }
3195
3196 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3197 Value *Cond, BasicBlock *InsertAtEnd) {
3198 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3199 }
3200
3201 /// Transparently provide more efficient getOperand methods.
3202 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3203
3204 bool isUnconditional() const { return getNumOperands() == 1; }
3205 bool isConditional() const { return getNumOperands() == 3; }
3206
3207 Value *getCondition() const {
3208 assert(isConditional() && "Cannot get condition of an uncond branch!");
3209 return Op<-3>();
3210 }
3211
3212 void setCondition(Value *V) {
3213 assert(isConditional() && "Cannot set condition of unconditional branch!");
3214 Op<-3>() = V;
3215 }
3216
3217 unsigned getNumSuccessors() const { return 1+isConditional(); }
3218
3219 BasicBlock *getSuccessor(unsigned i) const {
3220 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3221 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3222 }
3223
3224 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3225 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3226 *(&Op<-1>() - idx) = NewSucc;
3227 }
3228
3229 /// Swap the successors of this branch instruction.
3230 ///
3231 /// Swaps the successors of the branch instruction. This also swaps any
3232 /// branch weight metadata associated with the instruction so that it
3233 /// continues to map correctly to each operand.
3234 void swapSuccessors();
3235
3236 // Methods for support type inquiry through isa, cast, and dyn_cast:
3237 static bool classof(const Instruction *I) {
3238 return (I->getOpcode() == Instruction::Br);
3239 }
3240 static bool classof(const Value *V) {
3241 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3242 }
3243};
3244
3245template <>
3246struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3247};
3248
3249 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3250
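Because the operands are laid out as [Cond, FalseDest,] TrueDest, the accessors above index backwards from op_end(); a minimal sketch of both branch forms (names are illustrative assumptions):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: append either 'br T' or 'br Cond, T, F' to From.
static BranchInst *emitBranch(BasicBlock *From, BasicBlock *T, BasicBlock *F,
                              Value *Cond) {
  BranchInst *BI = Cond ? BranchInst::Create(T, F, Cond, From) // three operands
                        : BranchInst::Create(T, From);         // one operand
  // Successor 0 is the taken ('true') destination; a conditional branch has two.
  BasicBlock *Taken = BI->getSuccessor(0);
  (void)Taken;
  return BI;
}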
3251//===----------------------------------------------------------------------===//
3252// SwitchInst Class
3253//===----------------------------------------------------------------------===//
3254
3255//===---------------------------------------------------------------------------
3256/// Multiway switch
3257///
3258class SwitchInst : public TerminatorInst {
3259 unsigned ReservedSpace;
3260
3261 // Operand[0] = Value to switch on
3262 // Operand[1] = Default basic block destination
3263 // Operand[2n ] = Value to match
3264 // Operand[2n+1] = BasicBlock to go to on match
3265 SwitchInst(const SwitchInst &SI);
3266
3267 /// Create a new switch instruction, specifying a value to switch on and a
3268 /// default destination. The number of additional cases can be specified here
3269 /// to make memory allocation more efficient. This constructor can also
3270 /// auto-insert before another instruction.
3271 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3272 Instruction *InsertBefore);
3273
3274 /// Create a new switch instruction, specifying a value to switch on and a
3275 /// default destination. The number of additional cases can be specified here
3276 /// to make memory allocation more efficient. This constructor also
3277 /// auto-inserts at the end of the specified BasicBlock.
3278 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3279 BasicBlock *InsertAtEnd);
3280
3281 // allocate space for exactly zero operands
3282 void *operator new(size_t s) {
3283 return User::operator new(s);
3284 }
3285
3286 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3287 void growOperands();
3288
3289protected:
3290 // Note: Instruction needs to be a friend here to call cloneImpl.
3291 friend class Instruction;
3292
3293 SwitchInst *cloneImpl() const;
3294
3295public:
3296 // -2
3297 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3298
3299 template <typename CaseHandleT> class CaseIteratorImpl;
3300
3301 /// A handle to a particular switch case. It exposes a convenient interface
3302 /// to both the case value and the successor block.
3303 ///
3304 /// We define this as a template and instantiate it to form both a const and
3305 /// non-const handle.
3306 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3307 class CaseHandleImpl {
3308 // Directly befriend both const and non-const iterators.
3309 friend class SwitchInst::CaseIteratorImpl<
3310 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3311
3312 protected:
3313 // Expose the switch type we're parameterized with to the iterator.
3314 using SwitchInstType = SwitchInstT;
3315
3316 SwitchInstT *SI;
3317 ptrdiff_t Index;
3318
3319 CaseHandleImpl() = default;
3320 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3321
3322 public:
3323 /// Resolves case value for current case.
3324 ConstantIntT *getCaseValue() const {
3325 assert((unsigned)Index < SI->getNumCases() &&
3326 "Index out the number of cases.");
3327 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3328 }
3329
3330 /// Resolves successor for current case.
3331 BasicBlockT *getCaseSuccessor() const {
3332 assert(((unsigned)Index < SI->getNumCases() ||
3333 (unsigned)Index == DefaultPseudoIndex) &&
3334 "Index out the number of cases.");
3335 return SI->getSuccessor(getSuccessorIndex());
3336 }
3337
3338 /// Returns number of current case.
3339 unsigned getCaseIndex() const { return Index; }
3340
3341 /// Returns TerminatorInst's successor index for current case successor.
3342 unsigned getSuccessorIndex() const {
3343 assert(((unsigned)Index == DefaultPseudoIndex ||
3344 (unsigned)Index < SI->getNumCases()) &&
3345 "Index out the number of cases.");
3346 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3347 }
3348
3349 bool operator==(const CaseHandleImpl &RHS) const {
3350 assert(SI == RHS.SI && "Incompatible operators.");
3351 return Index == RHS.Index;
3352 }
3353 };
3354
3355 using ConstCaseHandle =
3356 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3357
3358 class CaseHandle
3359 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3360 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3361
3362 public:
3363 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3364
3365 /// Sets the new value for current case.
3366 void setValue(ConstantInt *V) {
3367 assert((unsigned)Index < SI->getNumCases() &&
3368 "Index out the number of cases.");
3369 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3370 }
3371
3372 /// Sets the new successor for current case.
3373 void setSuccessor(BasicBlock *S) {
3374 SI->setSuccessor(getSuccessorIndex(), S);
3375 }
3376 };
3377
3378 template <typename CaseHandleT>
3379 class CaseIteratorImpl
3380 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3381 std::random_access_iterator_tag,
3382 CaseHandleT> {
3383 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3384
3385 CaseHandleT Case;
3386
3387 public:
3388 /// Default constructed iterator is in an invalid state until assigned to
3389 /// a case for a particular switch.
3390 CaseIteratorImpl() = default;
3391
3392 /// Initializes case iterator for given SwitchInst and for given
3393 /// case number.
3394 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3395
3396 /// Initializes case iterator for given SwitchInst and for given
3397 /// TerminatorInst's successor index.
3398 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3399 unsigned SuccessorIndex) {
3400 assert(SuccessorIndex < SI->getNumSuccessors() &&
3401 "Successor index # out of range!");
3402 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3403 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3404 }
3405
3406 /// Support converting to the const variant. This will be a no-op for const
3407 /// variant.
3408 operator CaseIteratorImpl<ConstCaseHandle>() const {
3409 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3410 }
3411
3412 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3413 // Check index correctness after addition.
3414 // Note: Index == getNumCases() means end().
3415 assert(Case.Index + N >= 0 &&
3416 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3417 "Case.Index out the number of cases.");
3418 Case.Index += N;
3419 return *this;
3420 }
3421 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3422 // Check index correctness after subtraction.
3423 // Note: Case.Index == getNumCases() means end().
3424 assert(Case.Index - N >= 0 &&
3425 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3426 "Case.Index out the number of cases.");
3427 Case.Index -= N;
3428 return *this;
3429 }
3430 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3431 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3432 return Case.Index - RHS.Case.Index;
3433 }
3434 bool operator==(const CaseIteratorImpl &RHS) const {
3435 return Case == RHS.Case;
3436 }
3437 bool operator<(const CaseIteratorImpl &RHS) const {
3438 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3439 return Case.Index < RHS.Case.Index;
3440 }
3441 CaseHandleT &operator*() { return Case; }
3442 const CaseHandleT &operator*() const { return Case; }
3443 };
3444
3445 using CaseIt = CaseIteratorImpl<CaseHandle>;
3446 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3447
3448 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3449 unsigned NumCases,
3450 Instruction *InsertBefore = nullptr) {
3451 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3452 }
3453
3454 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3455 unsigned NumCases, BasicBlock *InsertAtEnd) {
3456 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3457 }
3458
3459 /// Provide fast operand accessors
3460 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3461
3462 // Accessor Methods for Switch stmt
3463 Value *getCondition() const { return getOperand(0); }
3464 void setCondition(Value *V) { setOperand(0, V); }
3465
3466 BasicBlock *getDefaultDest() const {
3467 return cast<BasicBlock>(getOperand(1));
3468 }
3469
3470 void setDefaultDest(BasicBlock *DefaultCase) {
3471 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3472 }
3473
3474 /// Return the number of 'cases' in this switch instruction, excluding the
3475 /// default case.
3476 unsigned getNumCases() const {
3477 return getNumOperands()/2 - 1;
3478 }
3479
3480 /// Returns a read/write iterator that points to the first case in the
3481 /// SwitchInst.
3482 CaseIt case_begin() {
3483 return CaseIt(this, 0);
3484 }
3485
3486 /// Returns a read-only iterator that points to the first case in the
3487 /// SwitchInst.
3488 ConstCaseIt case_begin() const {
3489 return ConstCaseIt(this, 0);
3490 }
3491
3492 /// Returns a read/write iterator that points one past the last in the
3493 /// SwitchInst.
3494 CaseIt case_end() {
3495 return CaseIt(this, getNumCases());
3496 }
3497
3498 /// Returns a read-only iterator that points one past the last in the
3499 /// SwitchInst.
3500 ConstCaseIt case_end() const {
3501 return ConstCaseIt(this, getNumCases());
3502 }
3503
3504 /// Iteration adapter for range-for loops.
3505 iterator_range<CaseIt> cases() {
3506 return make_range(case_begin(), case_end());
3507 }
3508
3509 /// Constant iteration adapter for range-for loops.
3510 iterator_range<ConstCaseIt> cases() const {
3511 return make_range(case_begin(), case_end());
3512 }
3513
3514 /// Returns an iterator that points to the default case.
3515 /// Note: this iterator allows to resolve successor only. Attempt
3516 /// to resolve case value causes an assertion.
3517 /// Also note, that increment and decrement also causes an assertion and
3518 /// makes iterator invalid.
3519 CaseIt case_default() {
3520 return CaseIt(this, DefaultPseudoIndex);
3521 }
3522 ConstCaseIt case_default() const {
3523 return ConstCaseIt(this, DefaultPseudoIndex);
3524 }
3525
3526 /// Search all of the case values for the specified constant. If it is
3527 /// explicitly handled, return the case iterator of it, otherwise return
3528 /// default case iterator to indicate that it is handled by the default
3529 /// handler.
3530 CaseIt findCaseValue(const ConstantInt *C) {
3531 CaseIt I = llvm::find_if(
3532 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3533 if (I != case_end())
3534 return I;
3535
3536 return case_default();
3537 }
3538 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3539 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3540 return Case.getCaseValue() == C;
3541 });
3542 if (I != case_end())
3543 return I;
3544
3545 return case_default();
3546 }
3547
3548 /// Finds the unique case value for a given successor. Returns null if the
3549 /// successor is not found, not unique, or is the default case.
3550 ConstantInt *findCaseDest(BasicBlock *BB) {
3551 if (BB == getDefaultDest())
3552 return nullptr;
3553
3554 ConstantInt *CI = nullptr;
3555 for (auto Case : cases()) {
3556 if (Case.getCaseSuccessor() != BB)
3557 continue;
3558
3559 if (CI)
3560 return nullptr; // Multiple cases lead to BB.
3561
3562 CI = Case.getCaseValue();
3563 }
3564
3565 return CI;
3566 }
3567
3568 /// Add an entry to the switch instruction.
3569 /// Note:
3570 /// This action invalidates case_end(). Old case_end() iterator will
3571 /// point to the added case.
3572 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3573
3574 /// This method removes the specified case and its successor from the switch
3575 /// instruction. Note that this operation may reorder the remaining cases at
3576 /// index idx and above.
3577 /// Note:
3578 /// This action invalidates iterators for all cases following the one removed,
3579 /// including the case_end() iterator. It returns an iterator for the next
3580 /// case.
3581 CaseIt removeCase(CaseIt I);
3582
3583 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3584 BasicBlock *getSuccessor(unsigned idx) const {
3585 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3586 return cast<BasicBlock>(getOperand(idx*2+1));
3587 }
3588 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3589 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3590 setOperand(idx * 2 + 1, NewSucc);
3591 }
3592
3593 // Methods for support type inquiry through isa, cast, and dyn_cast:
3594 static bool classof(const Instruction *I) {
3595 return I->getOpcode() == Instruction::Switch;
3596 }
3597 static bool classof(const Value *V) {
3598 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3599 }
3600};
3601
3602template <>
3603struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3604};
3605
3606 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3607
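The case handles and iterators above are most often exercised through addCase(), cases(), and findCaseValue(); a short sketch under the assumption that the names below are placeholders rather than code from this report:

#include <cassert>
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: build 'switch Cond, DefaultBB [C0 -> BB0]' at the end of Pred.
static SwitchInst *emitSwitch(Value *Cond, BasicBlock *DefaultBB,
                              ConstantInt *C0, BasicBlock *BB0,
                              BasicBlock *Pred) {
  SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, Pred);
  SI->addCase(C0, BB0); // note: invalidates the old case_end() iterator
  // findCaseValue returns case_default() when C0 is not explicitly handled;
  // here it was just added, so the handle resolves to BB0.
  SwitchInst::CaseIt It = SI->findCaseValue(C0);
  assert((*It).getCaseSuccessor() == BB0);
  return SI;
}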
3608//===----------------------------------------------------------------------===//
3609// IndirectBrInst Class
3610//===----------------------------------------------------------------------===//
3611
3612//===---------------------------------------------------------------------------
3613/// Indirect Branch Instruction.
3614///
3615class IndirectBrInst : public TerminatorInst {
3616 unsigned ReservedSpace;
3617
3618 // Operand[0] = Address to jump to
3619 // Operand[n+1] = n-th destination
3620 IndirectBrInst(const IndirectBrInst &IBI);
3621
3622 /// Create a new indirectbr instruction, specifying an
3623 /// Address to jump to. The number of expected destinations can be specified
3624 /// here to make memory allocation more efficient. This constructor can also
3625 /// autoinsert before another instruction.
3626 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3627
3628 /// Create a new indirectbr instruction, specifying an
3629 /// Address to jump to. The number of expected destinations can be specified
3630 /// here to make memory allocation more efficient. This constructor also
3631 /// autoinserts at the end of the specified BasicBlock.
3632 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3633
3634 // allocate space for exactly zero operands
3635 void *operator new(size_t s) {
3636 return User::operator new(s);
3637 }
3638
3639 void init(Value *Address, unsigned NumDests);
3640 void growOperands();
3641
3642protected:
3643 // Note: Instruction needs to be a friend here to call cloneImpl.
3644 friend class Instruction;
3645
3646 IndirectBrInst *cloneImpl() const;
3647
3648public:
3649 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3650 Instruction *InsertBefore = nullptr) {
3651 return new IndirectBrInst(Address, NumDests, InsertBefore);
3652 }
3653
3654 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3655 BasicBlock *InsertAtEnd) {
3656 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3657 }
3658
3659 /// Provide fast operand accessors.
3660 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3661
3662 // Accessor Methods for IndirectBrInst instruction.
3663 Value *getAddress() { return getOperand(0); }
3664 const Value *getAddress() const { return getOperand(0); }
3665 void setAddress(Value *V) { setOperand(0, V); }
3666
3667 /// return the number of possible destinations in this
3668 /// indirectbr instruction.
3669 unsigned getNumDestinations() const { return getNumOperands()-1; }
3670
3671 /// Return the specified destination.
3672 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3673 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3674
3675 /// Add a destination.
3676 ///
3677 void addDestination(BasicBlock *Dest);
3678
3679 /// This method removes the specified successor from the
3680 /// indirectbr instruction.
3681 void removeDestination(unsigned i);
3682
3683 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3684 BasicBlock *getSuccessor(unsigned i) const {
3685 return cast<BasicBlock>(getOperand(i+1));
3686 }
3687 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3688 setOperand(i + 1, NewSucc);
3689 }
3690
3691 // Methods for support type inquiry through isa, cast, and dyn_cast:
3692 static bool classof(const Instruction *I) {
3693 return I->getOpcode() == Instruction::IndirectBr;
3694 }
3695 static bool classof(const Value *V) {
3696 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3697 }
3698};
3699
3700template <>
3701struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3702};
3703
3704 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3705
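For completeness, the indirectbr interface above is usually driven as in this sketch; the helper name and the ArrayRef parameter are assumptions made for illustration only.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: 'indirectbr Address, [Dests...]' appended to Pred.
static IndirectBrInst *emitIndirectBr(Value *Address,
                                      ArrayRef<BasicBlock *> Dests,
                                      BasicBlock *Pred) {
  IndirectBrInst *IBI =
      IndirectBrInst::Create(Address, Dests.size(), Pred);
  for (BasicBlock *BB : Dests)
    IBI->addDestination(BB); // Operand[n+1] holds the n-th destination
  return IBI;
}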
3706//===----------------------------------------------------------------------===//
3707// InvokeInst Class
3708//===----------------------------------------------------------------------===//
3709
3710/// Invoke instruction. The SubclassData field is used to hold the
3711/// calling convention of the call.
3712///
3713class InvokeInst : public CallBase<InvokeInst> {
3714 friend class OperandBundleUser<InvokeInst, User::op_iterator>;
3715
3716 InvokeInst(const InvokeInst &BI);
3717
3718 /// Construct an InvokeInst given a range of arguments.
3719 ///
3720 /// Construct an InvokeInst from a range of arguments
3721 inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
3722 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
3723 unsigned Values, const Twine &NameStr,
3724 Instruction *InsertBefore)
3725 : InvokeInst(cast<FunctionType>(
3726 cast<PointerType>(Func->getType())->getElementType()),
3727 Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
3728 InsertBefore) {}
3729
3730 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3731 BasicBlock *IfException, ArrayRef<Value *> Args,
3732 ArrayRef<OperandBundleDef> Bundles, unsigned Values,
3733 const Twine &NameStr, Instruction *InsertBefore);
3734 /// Construct an InvokeInst given a range of arguments.
3735 ///
3736 /// Construct an InvokeInst from a range of arguments
3737 inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
3738 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
3739 unsigned Values, const Twine &NameStr,
3740 BasicBlock *InsertAtEnd);
3741
3742
3743 void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
3744 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
3745 const Twine &NameStr) {
3746 init(cast<FunctionType>(
3747 cast<PointerType>(Func->getType())->getElementType()),
3748 Func, IfNormal, IfException, Args, Bundles, NameStr);
3749 }
3750
3751 void init(FunctionType *FTy, Value *Func, BasicBlock *IfNormal,
3752 BasicBlock *IfException, ArrayRef<Value *> Args,
3753 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3754
3755protected:
3756 // Note: Instruction needs to be a friend here to call cloneImpl.
3757 friend class Instruction;
3758
3759 InvokeInst *cloneImpl() const;
3760
3761public:
3762 static constexpr int ArgOffset = 3;
3763 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3764 BasicBlock *IfException, ArrayRef<Value *> Args,
3765 const Twine &NameStr,
3766 Instruction *InsertBefore = nullptr) {
3767 return Create(cast<FunctionType>(
3768 cast<PointerType>(Func->getType())->getElementType()),
3769 Func, IfNormal, IfException, Args, None, NameStr,
3770 InsertBefore);
3771 }
3772
3773 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3774 BasicBlock *IfException, ArrayRef<Value *> Args,
3775 ArrayRef<OperandBundleDef> Bundles = None,
3776 const Twine &NameStr = "",
3777 Instruction *InsertBefore = nullptr) {
3778 return Create(cast<FunctionType>(
3779 cast<PointerType>(Func->getType())->getElementType()),
3780 Func, IfNormal, IfException, Args, Bundles, NameStr,
3781 InsertBefore);
3782 }
3783
3784 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3785 BasicBlock *IfException, ArrayRef<Value *> Args,
3786 const Twine &NameStr,
3787 Instruction *InsertBefore = nullptr) {
3788 unsigned Values = unsigned(Args.size()) + 3;
3789 return new (Values) InvokeInst(Ty, Func, IfNormal, IfException, Args, None,
3790 Values, NameStr, InsertBefore);
3791 }
3792
3793 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3794 BasicBlock *IfException, ArrayRef<Value *> Args,
3795 ArrayRef<OperandBundleDef> Bundles = None,
3796 const Twine &NameStr = "",
3797 Instruction *InsertBefore = nullptr) {
3798 unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
3799 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3800
3801 return new (Values, DescriptorBytes)
3802 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, Values,
3803 NameStr, InsertBefore);
3804 }
3805
3806 static InvokeInst *Create(Value *Func,
3807 BasicBlock *IfNormal, BasicBlock *IfException,
3808 ArrayRef<Value *> Args, const Twine &NameStr,
3809 BasicBlock *InsertAtEnd) {
3810 unsigned Values = unsigned(Args.size()) + 3;
3811 return new (Values) InvokeInst(Func, IfNormal, IfException, Args, None,
3812 Values, NameStr, InsertAtEnd);
3813 }
3814
3815 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3816 BasicBlock *IfException, ArrayRef<Value *> Args,
3817 ArrayRef<OperandBundleDef> Bundles,
3818 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3819 unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
3820 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3821
3822 return new (Values, DescriptorBytes)
3823 InvokeInst(Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
3824 InsertAtEnd);
3825 }
3826
3827 /// Create a clone of \p II with a different set of operand bundles and
3828 /// insert it before \p InsertPt.
3829 ///
3830 /// The returned invoke instruction is identical to \p II in every way except
3831 /// that the operand bundles for the new instruction are set to the operand
3832 /// bundles in \p Bundles.
3833 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3834 Instruction *InsertPt = nullptr);
3835
3836 /// Determine if the call should not perform indirect branch tracking.
3837 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
3838
3839 /// Determine if the call cannot unwind.
3840 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
3841 void setDoesNotThrow() {
3842 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
3843 }
3844
3845 /// Return the function called, or null if this is an
3846 /// indirect function invocation.
3847 ///
3848 Function *getCalledFunction() const {
3849 return dyn_cast<Function>(Op<-3>());
3850 }
3851
3852 /// Get a pointer to the function that is invoked by this
3853  /// instruction.
3854 const Value *getCalledValue() const { return Op<-3>(); }
3855 Value *getCalledValue() { return Op<-3>(); }
3856
3857 /// Set the function called.
3858 void setCalledFunction(Value* Fn) {
3859 setCalledFunction(
3860 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
3861 Fn);
3862 }
3863 void setCalledFunction(FunctionType *FTy, Value *Fn) {
3864 this->FTy = FTy;
3865    assert(FTy == cast<FunctionType>(
3866               cast<PointerType>(Fn->getType())->getElementType()));
3867 Op<-3>() = Fn;
3868 }
3869
3870 // get*Dest - Return the destination basic blocks...
3871 BasicBlock *getNormalDest() const {
3872 return cast<BasicBlock>(Op<-2>());
3873 }
3874 BasicBlock *getUnwindDest() const {
3875 return cast<BasicBlock>(Op<-1>());
3876 }
3877 void setNormalDest(BasicBlock *B) {
3878 Op<-2>() = reinterpret_cast<Value*>(B);
3879 }
3880 void setUnwindDest(BasicBlock *B) {
3881 Op<-1>() = reinterpret_cast<Value*>(B);
3882 }
3883
3884 /// Get the landingpad instruction from the landing pad
3885 /// block (the unwind destination).
3886 LandingPadInst *getLandingPadInst() const;
3887
3888 BasicBlock *getSuccessor(unsigned i) const {
3889    assert(i < 2 && "Successor # out of range for invoke!");
3890 return i == 0 ? getNormalDest() : getUnwindDest();
3891 }
3892
3893 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3894    assert(idx < 2 && "Successor # out of range for invoke!");
3895 *(&Op<-2>() + idx) = reinterpret_cast<Value*>(NewSucc);
3896 }
3897
3898 unsigned getNumSuccessors() const { return 2; }
3899
3900 // Methods for support type inquiry through isa, cast, and dyn_cast:
3901 static bool classof(const Instruction *I) {
3902 return (I->getOpcode() == Instruction::Invoke);
3903 }
3904 static bool classof(const Value *V) {
3905 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3906 }
3907
3908private:
3909
3910 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3911 // method so that subclasses cannot accidentally use it.
3912 void setInstructionSubclassData(unsigned short D) {
3913 Instruction::setInstructionSubclassData(D);
3914 }
3915};
3916
3917template <>
3918struct OperandTraits<CallBase<InvokeInst>>
3919 : public VariadicOperandTraits<CallBase<InvokeInst>, 3> {};
3920
3921InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3922 BasicBlock *IfException, ArrayRef<Value *> Args,
3923 ArrayRef<OperandBundleDef> Bundles, unsigned Values,
3924 const Twine &NameStr, Instruction *InsertBefore)
3925 : CallBase<InvokeInst>(Ty->getReturnType(), Instruction::Invoke,
3926 OperandTraits<CallBase<InvokeInst>>::op_end(this) -
3927 Values,
3928 Values, InsertBefore) {
3929 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3930}
3931
3932InvokeInst::InvokeInst(Value *Func, BasicBlock *IfNormal,
3933 BasicBlock *IfException, ArrayRef<Value *> Args,
3934 ArrayRef<OperandBundleDef> Bundles, unsigned Values,
3935 const Twine &NameStr, BasicBlock *InsertAtEnd)
3936 : CallBase<InvokeInst>(
3937 cast<FunctionType>(
3938 cast<PointerType>(Func->getType())->getElementType())
3939 ->getReturnType(),
3940 Instruction::Invoke,
3941 OperandTraits<CallBase<InvokeInst>>::op_end(this) - Values, Values,
3942 InsertAtEnd) {
3943 init(Func, IfNormal, IfException, Args, Bundles, NameStr);
3944}
3945
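// A minimal usage sketch, not part of Instructions.h: creating an invoke via
// the Create() overloads declared above and reading its two destinations.
// Callee, Args, NormalBB, UnwindBB, and InsertBB are assumed to be supplied by
// the caller.
#if 0 // illustrative only
static InvokeInst *emitInvokeExample(Value *Callee, ArrayRef<Value *> Args,
                                     BasicBlock *NormalBB, BasicBlock *UnwindBB,
                                     BasicBlock *InsertBB) {
  InvokeInst *Call =
      InvokeInst::Create(Callee, NormalBB, UnwindBB, Args, "call", InsertBB);
  assert(Call->getNormalDest() == NormalBB); // successor 0
  assert(Call->getUnwindDest() == UnwindBB); // successor 1
  return Call;
}
#endif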
3946
3947//===----------------------------------------------------------------------===//
3948// ResumeInst Class
3949//===----------------------------------------------------------------------===//
3950
3951//===---------------------------------------------------------------------------
3952/// Resume the propagation of an exception.
3953///
3954class ResumeInst : public TerminatorInst {
3955 ResumeInst(const ResumeInst &RI);
3956
3957 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
3958 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
3959
3960protected:
3961 // Note: Instruction needs to be a friend here to call cloneImpl.
3962 friend class Instruction;
3963
3964 ResumeInst *cloneImpl() const;
3965
3966public:
3967 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
3968 return new(1) ResumeInst(Exn, InsertBefore);
3969 }
3970
3971 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
3972 return new(1) ResumeInst(Exn, InsertAtEnd);
3973 }
3974
3975 /// Provide fast operand accessors
3976  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3977
3978 /// Convenience accessor.
3979 Value *getValue() const { return Op<0>(); }
3980
3981 unsigned getNumSuccessors() const { return 0; }
3982
3983 // Methods for support type inquiry through isa, cast, and dyn_cast:
3984 static bool classof(const Instruction *I) {
3985 return I->getOpcode() == Instruction::Resume;
3986 }
3987 static bool classof(const Value *V) {
3988 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3989 }
3990
3991private:
3992 friend TerminatorInst;
3993
3994 BasicBlock *getSuccessor(unsigned idx) const {
3995    llvm_unreachable("ResumeInst has no successors!");
3996 }
3997
3998 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3999    llvm_unreachable("ResumeInst has no successors!");
4000 }
4001};
4002
4003template <>
4004struct OperandTraits<ResumeInst> :
4005 public FixedNumOperandTraits<ResumeInst, 1> {
4006};
4007
4008DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
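// A minimal usage sketch, not part of Instructions.h: resuming propagation of
// an exception value. Exn (typically the aggregate produced by a landingpad)
// and CleanupBB are assumed to exist in the caller's context.
#if 0 // illustrative only
static ResumeInst *emitResumeExample(Value *Exn, BasicBlock *CleanupBB) {
  ResumeInst *RI = ResumeInst::Create(Exn, CleanupBB); // insert at end of block
  assert(RI->getValue() == Exn && RI->getNumSuccessors() == 0);
  return RI;
}
#endif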
4009
4010//===----------------------------------------------------------------------===//
4011// CatchSwitchInst Class
4012//===----------------------------------------------------------------------===//
4013class CatchSwitchInst : public TerminatorInst {
4014 /// The number of operands actually allocated. NumOperands is
4015 /// the number actually in use.
4016 unsigned ReservedSpace;
4017
4018 // Operand[0] = Outer scope
4019 // Operand[1] = Unwind block destination
4020 // Operand[n] = BasicBlock to go to on match
4021 CatchSwitchInst(const CatchSwitchInst &CSI);
4022
4023 /// Create a new switch instruction, specifying a
4024 /// default destination. The number of additional handlers can be specified
4025 /// here to make memory allocation more efficient.
4026 /// This constructor can also autoinsert before another instruction.
4027 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4028 unsigned NumHandlers, const Twine &NameStr,
4029 Instruction *InsertBefore);
4030
4031 /// Create a new switch instruction, specifying a
4032 /// default destination. The number of additional handlers can be specified
4033 /// here to make memory allocation more efficient.
4034 /// This constructor also autoinserts at the end of the specified BasicBlock.
4035 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4036 unsigned NumHandlers, const Twine &NameStr,
4037 BasicBlock *InsertAtEnd);
4038
4039 // allocate space for exactly zero operands
4040 void *operator new(size_t s) { return User::operator new(s); }
4041
4042 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4043 void growOperands(unsigned Size);
4044
4045protected:
4046 // Note: Instruction needs to be a friend here to call cloneImpl.
4047 friend class Instruction;
4048
4049 CatchSwitchInst *cloneImpl() const;
4050
4051public:
4052 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4053 unsigned NumHandlers,
4054 const Twine &NameStr = "",
4055 Instruction *InsertBefore = nullptr) {
4056 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4057 InsertBefore);
4058 }
4059
4060 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4061 unsigned NumHandlers, const Twine &NameStr,
4062 BasicBlock *InsertAtEnd) {
4063 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4064 InsertAtEnd);
4065 }
4066
4067 /// Provide fast operand accessors
4068  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4069
4070 // Accessor Methods for CatchSwitch stmt
4071 Value *getParentPad() const { return getOperand(0); }
4072 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4073
4074 // Accessor Methods for CatchSwitch stmt
4075 bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
4076 bool unwindsToCaller() const { return !hasUnwindDest(); }
4077 BasicBlock *getUnwindDest() const {
4078 if (hasUnwindDest())
4079 return cast<BasicBlock>(getOperand(1));
4080 return nullptr;
4081 }
4082 void setUnwindDest(BasicBlock *UnwindDest) {
4083    assert(UnwindDest);
4084    assert(hasUnwindDest());
4085 setOperand(1, UnwindDest);
4086 }
4087
4088 /// return the number of 'handlers' in this catchswitch
4089 /// instruction, except the default handler
4090 unsigned getNumHandlers() const {
4091 if (hasUnwindDest())
4092 return getNumOperands() - 2;
4093 return getNumOperands() - 1;
4094 }
4095
4096private:
4097 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4098 static const BasicBlock *handler_helper(const Value *V) {
4099 return cast<BasicBlock>(V);
4100 }
4101
4102public:
4103 using DerefFnTy = BasicBlock *(*)(Value *);
4104 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4105 using handler_range = iterator_range<handler_iterator>;
4106 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4107 using const_handler_iterator =
4108 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4109 using const_handler_range = iterator_range<const_handler_iterator>;
4110
4111 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4112 handler_iterator handler_begin() {
4113 op_iterator It = op_begin() + 1;
4114 if (hasUnwindDest())
4115 ++It;
4116 return handler_iterator(It, DerefFnTy(handler_helper));
4117 }
4118
4119 /// Returns an iterator that points to the first handler in the
4120 /// CatchSwitchInst.
4121 const_handler_iterator handler_begin() const {
4122 const_op_iterator It = op_begin() + 1;
4123 if (hasUnwindDest())
4124 ++It;
4125 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4126 }
4127
4128 /// Returns a read-only iterator that points one past the last
4129 /// handler in the CatchSwitchInst.
4130 handler_iterator handler_end() {
4131 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4132 }
4133
4134 /// Returns an iterator that points one past the last handler in the
4135 /// CatchSwitchInst.
4136 const_handler_iterator handler_end() const {
4137 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4138 }
4139
4140 /// iteration adapter for range-for loops.
4141 handler_range handlers() {
4142 return make_range(handler_begin(), handler_end());
4143 }
4144
4145 /// iteration adapter for range-for loops.
4146 const_handler_range handlers() const {
4147 return make_range(handler_begin(), handler_end());
4148 }
4149
4150 /// Add an entry to the switch instruction...
4151 /// Note:
4152 /// This action invalidates handler_end(). Old handler_end() iterator will
4153 /// point to the added handler.
4154 void addHandler(BasicBlock *Dest);
4155
4156 void removeHandler(handler_iterator HI);
4157
4158 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4159 BasicBlock *getSuccessor(unsigned Idx) const {
4160    assert(Idx < getNumSuccessors() &&
4161           "Successor # out of range for catchswitch!");
4162 return cast<BasicBlock>(getOperand(Idx + 1));
4163 }
4164 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4165    assert(Idx < getNumSuccessors() &&
4166           "Successor # out of range for catchswitch!");
4167 setOperand(Idx + 1, NewSucc);
4168 }
4169
4170 // Methods for support type inquiry through isa, cast, and dyn_cast:
4171 static bool classof(const Instruction *I) {
4172 return I->getOpcode() == Instruction::CatchSwitch;
4173 }
4174 static bool classof(const Value *V) {
4175 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4176 }
4177};
4178
4179template <>
4180struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4181
4182DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
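// A minimal usage sketch, not part of Instructions.h: building a catchswitch
// and walking its handlers with the range adapter above. ParentPad, UnwindBB,
// DispatchBB, and the handler blocks are assumed to exist already.
#if 0 // illustrative only
static CatchSwitchInst *emitCatchSwitchExample(Value *ParentPad,
                                               BasicBlock *UnwindBB,
                                               BasicBlock *DispatchBB,
                                               BasicBlock *Handler0,
                                               BasicBlock *Handler1) {
  CatchSwitchInst *CS = CatchSwitchInst::Create(ParentPad, UnwindBB,
                                                /*NumHandlers=*/2, "cs",
                                                DispatchBB);
  CS->addHandler(Handler0);
  CS->addHandler(Handler1);
  for (BasicBlock *H : CS->handlers())
    (void)H; // handler blocks only; the parent pad and unwind dest are skipped
  return CS;
}
#endif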
4183
4184//===----------------------------------------------------------------------===//
4185// CleanupPadInst Class
4186//===----------------------------------------------------------------------===//
4187class CleanupPadInst : public FuncletPadInst {
4188private:
4189 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4190 unsigned Values, const Twine &NameStr,
4191 Instruction *InsertBefore)
4192 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4193 NameStr, InsertBefore) {}
4194 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4195 unsigned Values, const Twine &NameStr,
4196 BasicBlock *InsertAtEnd)
4197 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4198 NameStr, InsertAtEnd) {}
4199
4200public:
4201 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4202 const Twine &NameStr = "",
4203 Instruction *InsertBefore = nullptr) {
4204 unsigned Values = 1 + Args.size();
4205 return new (Values)
4206 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4207 }
4208
4209 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4210 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4211 unsigned Values = 1 + Args.size();
4212 return new (Values)
4213 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4214 }
4215
4216 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4217 static bool classof(const Instruction *I) {
4218 return I->getOpcode() == Instruction::CleanupPad;
4219 }
4220 static bool classof(const Value *V) {
4221 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4222 }
4223};
4224
4225//===----------------------------------------------------------------------===//
4226// CatchPadInst Class
4227//===----------------------------------------------------------------------===//
4228class CatchPadInst : public FuncletPadInst {
4229private:
4230 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4231 unsigned Values, const Twine &NameStr,
4232 Instruction *InsertBefore)
4233 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4234 NameStr, InsertBefore) {}
4235 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4236 unsigned Values, const Twine &NameStr,
4237 BasicBlock *InsertAtEnd)
4238 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4239 NameStr, InsertAtEnd) {}
4240
4241public:
4242 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4243 const Twine &NameStr = "",
4244 Instruction *InsertBefore = nullptr) {
4245 unsigned Values = 1 + Args.size();
4246 return new (Values)
4247 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4248 }
4249
4250 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4251 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4252 unsigned Values = 1 + Args.size();
4253 return new (Values)
4254 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4255 }
4256
4257 /// Convenience accessors
4258 CatchSwitchInst *getCatchSwitch() const {
4259 return cast<CatchSwitchInst>(Op<-1>());
4260 }
4261 void setCatchSwitch(Value *CatchSwitch) {
4262    assert(CatchSwitch);
4263 Op<-1>() = CatchSwitch;
4264 }
4265
4266 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4267 static bool classof(const Instruction *I) {
4268 return I->getOpcode() == Instruction::CatchPad;
4269 }
4270 static bool classof(const Value *V) {
4271 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4272 }
4273};
4274
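// A minimal usage sketch, not part of Instructions.h: attaching a catchpad to
// an existing catchswitch. CS, HandlerBB, and TypeInfo are assumed to be
// supplied by the caller (TypeInfo stands in for whatever per-handler
// arguments the personality expects).
#if 0 // illustrative only
static CatchPadInst *emitCatchPadExample(CatchSwitchInst *CS, Value *TypeInfo,
                                         BasicBlock *HandlerBB) {
  CatchPadInst *CP =
      CatchPadInst::Create(CS, {TypeInfo}, "catchpad", HandlerBB);
  assert(CP->getCatchSwitch() == CS); // reads Op<-1>()
  return CP;
}
#endif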
4275//===----------------------------------------------------------------------===//
4276// CatchReturnInst Class
4277//===----------------------------------------------------------------------===//
4278
4279class CatchReturnInst : public TerminatorInst {
4280 CatchReturnInst(const CatchReturnInst &RI);
4281 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4282 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4283
4284 void init(Value *CatchPad, BasicBlock *BB);
4285
4286protected:
4287 // Note: Instruction needs to be a friend here to call cloneImpl.
4288 friend class Instruction;
4289
4290 CatchReturnInst *cloneImpl() const;
4291
4292public:
4293 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4294 Instruction *InsertBefore = nullptr) {
4295    assert(CatchPad);
4296    assert(BB);
4297 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4298 }
4299
4300 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4301 BasicBlock *InsertAtEnd) {
4302    assert(CatchPad);
4303    assert(BB);
4304 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4305 }
4306
4307 /// Provide fast operand accessors
4308  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4309
4310 /// Convenience accessors.
4311 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4312 void setCatchPad(CatchPadInst *CatchPad) {
4313    assert(CatchPad);
4314 Op<0>() = CatchPad;
4315 }
4316
4317 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4318 void setSuccessor(BasicBlock *NewSucc) {
4319    assert(NewSucc);
4320 Op<1>() = NewSucc;
4321 }
4322 unsigned getNumSuccessors() const { return 1; }
4323
4324 /// Get the parentPad of this catchret's catchpad's catchswitch.
4325 /// The successor block is implicitly a member of this funclet.
4326 Value *getCatchSwitchParentPad() const {
4327 return getCatchPad()->getCatchSwitch()->getParentPad();
4328 }
4329
4330 // Methods for support type inquiry through isa, cast, and dyn_cast:
4331 static bool classof(const Instruction *I) {
4332 return (I->getOpcode() == Instruction::CatchRet);
4333 }
4334 static bool classof(const Value *V) {
4335 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4336 }
4337
4338private:
4339 friend TerminatorInst;
4340
4341 BasicBlock *getSuccessor(unsigned Idx) const {
4342    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4343 return getSuccessor();
4344 }
4345
4346 void setSuccessor(unsigned Idx, BasicBlock *B) {
4347    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4348 setSuccessor(B);
4349 }
4350};
4351
4352template <>
4353struct OperandTraits<CatchReturnInst>
4354 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4355
4356DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
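// A minimal usage sketch, not part of Instructions.h: leaving a catch handler.
// CP is the handler's CatchPadInst, ContBB the continuation block, and
// HandlerBB the block being terminated; all are assumed to exist already.
#if 0 // illustrative only
static CatchReturnInst *emitCatchRetExample(CatchPadInst *CP,
                                            BasicBlock *ContBB,
                                            BasicBlock *HandlerBB) {
  CatchReturnInst *CR = CatchReturnInst::Create(CP, ContBB, HandlerBB);
  // Equivalent to CR->getCatchPad()->getCatchSwitch()->getParentPad().
  (void)CR->getCatchSwitchParentPad();
  return CR;
}
#endif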
4357
4358//===----------------------------------------------------------------------===//
4359// CleanupReturnInst Class
4360//===----------------------------------------------------------------------===//
4361
4362class CleanupReturnInst : public TerminatorInst {
4363private:
4364 CleanupReturnInst(const CleanupReturnInst &RI);
4365 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4366 Instruction *InsertBefore = nullptr);
4367 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4368 BasicBlock *InsertAtEnd);
4369
4370 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4371
4372protected:
4373 // Note: Instruction needs to be a friend here to call cloneImpl.
4374 friend class Instruction;
4375
4376 CleanupReturnInst *cloneImpl() const;
4377
4378public:
4379 static CleanupReturnInst *Create(Value *CleanupPad,
4380 BasicBlock *UnwindBB = nullptr,
4381 Instruction *InsertBefore = nullptr) {
4382    assert(CleanupPad);
4383 unsigned Values = 1;
4384 if (UnwindBB)
4385 ++Values;
4386 return new (Values)
4387 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4388 }
4389
4390 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4391 BasicBlock *InsertAtEnd) {
4392    assert(CleanupPad);
4393 unsigned Values = 1;
4394 if (UnwindBB)
4395 ++Values;
4396 return new (Values)
4397 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4398 }
4399
4400 /// Provide fast operand accessors
4401  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4402
4403 bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
4404 bool unwindsToCaller() const { return !hasUnwindDest(); }
4405
4406 /// Convenience accessor.
4407 CleanupPadInst *getCleanupPad() const {
4408 return cast<CleanupPadInst>(Op<0>());
4409 }
4410 void setCleanupPad(CleanupPadInst *CleanupPad) {
4411    assert(CleanupPad);
4412 Op<0>() = CleanupPad;
4413 }
4414
4415 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4416
4417 BasicBlock *getUnwindDest() const {
4418 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4419 }
4420 void setUnwindDest(BasicBlock *NewDest) {
4421    assert(NewDest);
4422    assert(hasUnwindDest());
4423 Op<1>() = NewDest;
4424 }
4425
4426 // Methods for support type inquiry through isa, cast, and dyn_cast:
4427 static bool classof(const Instruction *I) {
4428 return (I->getOpcode() == Instruction::CleanupRet);
4429 }
4430 static bool classof(const Value *V) {
4431 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4432 }
4433
4434private:
4435 friend TerminatorInst;
4436
4437 BasicBlock *getSuccessor(unsigned Idx) const {
4438    assert(Idx == 0);
4439 return getUnwindDest();
4440 }
4441
4442 void setSuccessor(unsigned Idx, BasicBlock *B) {
4443    assert(Idx == 0);
4444 setUnwindDest(B);
4445 }
4446
4447 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4448 // method so that subclasses cannot accidentally use it.
4449 void setInstructionSubclassData(unsigned short D) {
4450 Instruction::setInstructionSubclassData(D);
4451 }
4452};
4453
4454template <>
4455struct OperandTraits<CleanupReturnInst>
4456 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4457
4458DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
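// A minimal usage sketch, not part of Instructions.h: ending a cleanup funclet.
// Passing a null unwind destination makes the cleanupret unwind to the caller;
// CP and CleanupBB are assumed to exist already.
#if 0 // illustrative only
static CleanupReturnInst *emitCleanupRetExample(CleanupPadInst *CP,
                                                BasicBlock *CleanupBB) {
  CleanupReturnInst *CRI =
      CleanupReturnInst::Create(CP, /*UnwindBB=*/nullptr, CleanupBB);
  assert(CRI->unwindsToCaller() && CRI->getNumSuccessors() == 0);
  return CRI;
}
#endif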
4459
4460//===----------------------------------------------------------------------===//
4461// UnreachableInst Class
4462//===----------------------------------------------------------------------===//
4463
4464//===---------------------------------------------------------------------------
4465/// This function has undefined behavior. In particular, the
4466/// presence of this instruction indicates some higher level knowledge that the
4467/// end of the block cannot be reached.
4468///
4469class UnreachableInst : public TerminatorInst {
4470protected:
4471 // Note: Instruction needs to be a friend here to call cloneImpl.
4472 friend class Instruction;
4473
4474 UnreachableInst *cloneImpl() const;
4475
4476public:
4477 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4478 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4479
4480 // allocate space for exactly zero operands
4481 void *operator new(size_t s) {
4482 return User::operator new(s, 0);
4483 }
4484
4485 unsigned getNumSuccessors() const { return 0; }
4486
4487 // Methods for support type inquiry through isa, cast, and dyn_cast:
4488 static bool classof(const Instruction *I) {
4489 return I->getOpcode() == Instruction::Unreachable;
4490 }
4491 static bool classof(const Value *V) {
4492 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4493 }
4494
4495private:
4496 friend TerminatorInst;
4497
4498 BasicBlock *getSuccessor(unsigned idx) const {
4499    llvm_unreachable("UnreachableInst has no successors!");
4500 }
4501
4502 void setSuccessor(unsigned idx, BasicBlock *B) {
4503    llvm_unreachable("UnreachableInst has no successors!");
4504 }
4505};
4506
4507//===----------------------------------------------------------------------===//
4508// TruncInst Class
4509//===----------------------------------------------------------------------===//
4510
4511/// This class represents a truncation of integer types.
4512class TruncInst : public CastInst {
4513protected:
4514 // Note: Instruction needs to be a friend here to call cloneImpl.
4515 friend class Instruction;
4516
4517 /// Clone an identical TruncInst
4518 TruncInst *cloneImpl() const;
4519
4520public:
4521 /// Constructor with insert-before-instruction semantics
4522 TruncInst(
4523 Value *S, ///< The value to be truncated
4524 Type *Ty, ///< The (smaller) type to truncate to
4525 const Twine &NameStr = "", ///< A name for the new instruction
4526 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4527 );
4528
4529 /// Constructor with insert-at-end-of-block semantics
4530 TruncInst(
4531 Value *S, ///< The value to be truncated
4532 Type *Ty, ///< The (smaller) type to truncate to
4533 const Twine &NameStr, ///< A name for the new instruction
4534 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4535 );
4536
4537 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4538 static bool classof(const Instruction *I) {
4539 return I->getOpcode() == Trunc;
4540 }
4541 static bool classof(const Value *V) {
4542 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4543 }
4544};
4545
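// A minimal usage sketch, not part of Instructions.h: the cast instructions in
// this group (TruncInst above, ZExtInst/SExtInst/... below) all share the same
// constructor shape. Ctx, Wide (an i64 value), and BB are assumed to exist.
#if 0 // illustrative only
static Value *emitTruncZExtExample(LLVMContext &Ctx, Value *Wide,
                                   BasicBlock *BB) {
  Value *Narrow = new TruncInst(Wide, Type::getInt32Ty(Ctx), "narrow", BB);
  Value *Again = new ZExtInst(Narrow, Type::getInt64Ty(Ctx), "wide", BB);
  return Again;
}
#endif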
4546//===----------------------------------------------------------------------===//
4547// ZExtInst Class
4548//===----------------------------------------------------------------------===//
4549
4550/// This class represents zero extension of integer types.
4551class ZExtInst : public CastInst {
4552protected:
4553 // Note: Instruction needs to be a friend here to call cloneImpl.
4554 friend class Instruction;
4555
4556 /// Clone an identical ZExtInst
4557 ZExtInst *cloneImpl() const;
4558
4559public:
4560 /// Constructor with insert-before-instruction semantics
4561 ZExtInst(
4562 Value *S, ///< The value to be zero extended
4563 Type *Ty, ///< The type to zero extend to
4564 const Twine &NameStr = "", ///< A name for the new instruction
4565 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4566 );
4567
4568 /// Constructor with insert-at-end semantics.
4569 ZExtInst(
4570 Value *S, ///< The value to be zero extended
4571 Type *Ty, ///< The type to zero extend to
4572 const Twine &NameStr, ///< A name for the new instruction
4573 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4574 );
4575
4576 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4577 static bool classof(const Instruction *I) {
4578 return I->getOpcode() == ZExt;
4579 }
4580 static bool classof(const Value *V) {
4581 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4582 }
4583};
4584
4585//===----------------------------------------------------------------------===//
4586// SExtInst Class
4587//===----------------------------------------------------------------------===//
4588
4589/// This class represents a sign extension of integer types.
4590class SExtInst : public CastInst {
4591protected:
4592 // Note: Instruction needs to be a friend here to call cloneImpl.
4593 friend class Instruction;
4594
4595 /// Clone an identical SExtInst
4596 SExtInst *cloneImpl() const;
4597
4598public:
4599 /// Constructor with insert-before-instruction semantics
4600 SExtInst(
4601 Value *S, ///< The value to be sign extended
4602 Type *Ty, ///< The type to sign extend to
4603 const Twine &NameStr = "", ///< A name for the new instruction
4604 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4605 );
4606
4607 /// Constructor with insert-at-end-of-block semantics
4608 SExtInst(
4609 Value *S, ///< The value to be sign extended
4610 Type *Ty, ///< The type to sign extend to
4611 const Twine &NameStr, ///< A name for the new instruction
4612 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4613 );
4614
4615 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4616 static bool classof(const Instruction *I) {
4617 return I->getOpcode() == SExt;
4618 }
4619 static bool classof(const Value *V) {
4620 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4621 }
4622};
4623
4624//===----------------------------------------------------------------------===//
4625// FPTruncInst Class
4626//===----------------------------------------------------------------------===//
4627
4628/// This class represents a truncation of floating point types.
4629class FPTruncInst : public CastInst {
4630protected:
4631 // Note: Instruction needs to be a friend here to call cloneImpl.
4632 friend class Instruction;
4633
4634 /// Clone an identical FPTruncInst
4635 FPTruncInst *cloneImpl() const;
4636
4637public:
4638 /// Constructor with insert-before-instruction semantics
4639 FPTruncInst(
4640 Value *S, ///< The value to be truncated
4641 Type *Ty, ///< The type to truncate to
4642 const Twine &NameStr = "", ///< A name for the new instruction
4643 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4644 );
4645
4646 /// Constructor with insert-before-instruction semantics
4647 FPTruncInst(
4648 Value *S, ///< The value to be truncated
4649 Type *Ty, ///< The type to truncate to
4650 const Twine &NameStr, ///< A name for the new instruction
4651 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4652 );
4653
4654 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4655 static bool classof(const Instruction *I) {
4656 return I->getOpcode() == FPTrunc;
4657 }
4658 static bool classof(const Value *V) {
4659 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4660 }
4661};
4662
4663//===----------------------------------------------------------------------===//
4664// FPExtInst Class
4665//===----------------------------------------------------------------------===//
4666
4667/// This class represents an extension of floating point types.
4668class FPExtInst : public CastInst {
4669protected:
4670 // Note: Instruction needs to be a friend here to call cloneImpl.
4671 friend class Instruction;
4672
4673 /// Clone an identical FPExtInst
4674 FPExtInst *cloneImpl() const;
4675
4676public:
4677 /// Constructor with insert-before-instruction semantics
4678 FPExtInst(
4679 Value *S, ///< The value to be extended
4680 Type *Ty, ///< The type to extend to
4681 const Twine &NameStr = "", ///< A name for the new instruction
4682 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4683 );
4684
4685 /// Constructor with insert-at-end-of-block semantics
4686 FPExtInst(
4687 Value *S, ///< The value to be extended
4688 Type *Ty, ///< The type to extend to
4689 const Twine &NameStr, ///< A name for the new instruction
4690 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4691 );
4692
4693 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4694 static bool classof(const Instruction *I) {
4695 return I->getOpcode() == FPExt;
4696 }
4697 static bool classof(const Value *V) {
4698 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4699 }
4700};
4701
4702//===----------------------------------------------------------------------===//
4703// UIToFPInst Class
4704//===----------------------------------------------------------------------===//
4705
4706/// This class represents a cast unsigned integer to floating point.
4707class UIToFPInst : public CastInst {
4708protected:
4709 // Note: Instruction needs to be a friend here to call cloneImpl.
4710 friend class Instruction;
4711
4712 /// Clone an identical UIToFPInst
4713 UIToFPInst *cloneImpl() const;
4714
4715public:
4716 /// Constructor with insert-before-instruction semantics
4717 UIToFPInst(
4718 Value *S, ///< The value to be converted
4719 Type *Ty, ///< The type to convert to
4720 const Twine &NameStr = "", ///< A name for the new instruction
4721 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4722 );
4723
4724 /// Constructor with insert-at-end-of-block semantics
4725 UIToFPInst(
4726 Value *S, ///< The value to be converted
4727 Type *Ty, ///< The type to convert to
4728 const Twine &NameStr, ///< A name for the new instruction
4729 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4730 );
4731
4732 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4733 static bool classof(const Instruction *I) {
4734 return I->getOpcode() == UIToFP;
4735 }
4736 static bool classof(const Value *V) {
4737 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4738 }
4739};
4740
4741//===----------------------------------------------------------------------===//
4742// SIToFPInst Class
4743//===----------------------------------------------------------------------===//
4744
4745/// This class represents a cast from signed integer to floating point.
4746class SIToFPInst : public CastInst {
4747protected:
4748 // Note: Instruction needs to be a friend here to call cloneImpl.
4749 friend class Instruction;
4750
4751 /// Clone an identical SIToFPInst
4752 SIToFPInst *cloneImpl() const;
4753
4754public:
4755 /// Constructor with insert-before-instruction semantics
4756 SIToFPInst(
4757 Value *S, ///< The value to be converted
4758 Type *Ty, ///< The type to convert to
4759 const Twine &NameStr = "", ///< A name for the new instruction
4760 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4761 );
4762
4763 /// Constructor with insert-at-end-of-block semantics
4764 SIToFPInst(
4765 Value *S, ///< The value to be converted
4766 Type *Ty, ///< The type to convert to
4767 const Twine &NameStr, ///< A name for the new instruction
4768 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4769 );
4770
4771 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4772 static bool classof(const Instruction *I) {
4773 return I->getOpcode() == SIToFP;
4774 }
4775 static bool classof(const Value *V) {
4776 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4777 }
4778};
4779
4780//===----------------------------------------------------------------------===//
4781// FPToUIInst Class
4782//===----------------------------------------------------------------------===//
4783
4784/// This class represents a cast from floating point to unsigned integer
4785class FPToUIInst : public CastInst {
4786protected:
4787 // Note: Instruction needs to be a friend here to call cloneImpl.
4788 friend class Instruction;
4789
4790 /// Clone an identical FPToUIInst
4791 FPToUIInst *cloneImpl() const;
4792
4793public:
4794 /// Constructor with insert-before-instruction semantics
4795 FPToUIInst(
4796 Value *S, ///< The value to be converted
4797 Type *Ty, ///< The type to convert to
4798 const Twine &NameStr = "", ///< A name for the new instruction
4799 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4800 );
4801
4802 /// Constructor with insert-at-end-of-block semantics
4803 FPToUIInst(
4804 Value *S, ///< The value to be converted
4805 Type *Ty, ///< The type to convert to
4806 const Twine &NameStr, ///< A name for the new instruction
4807 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
4808 );
4809
4810 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4811 static bool classof(const Instruction *I) {
4812 return I->getOpcode() == FPToUI;
4813 }
4814 static bool classof(const Value *V) {
4815 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4816 }
4817};
4818
4819//===----------------------------------------------------------------------===//
4820// FPToSIInst Class
4821//===----------------------------------------------------------------------===//
4822
4823/// This class represents a cast from floating point to signed integer.
4824class FPToSIInst : public CastInst {
4825protected:
4826 // Note: Instruction needs to be a friend here to call cloneImpl.
4827 friend class Instruction;
4828
4829 /// Clone an identical FPToSIInst
4830 FPToSIInst *cloneImpl() const;
4831
4832public:
4833 /// Constructor with insert-before-instruction semantics
4834 FPToSIInst(
4835 Value *S, ///< The value to be converted
4836 Type *Ty, ///< The type to convert to
4837 const Twine &NameStr = "", ///< A name for the new instruction
4838 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4839 );
4840
4841 /// Constructor with insert-at-end-of-block semantics
4842 FPToSIInst(
4843 Value *S, ///< The value to be converted
4844 Type *Ty, ///< The type to convert to
4845 const Twine &NameStr, ///< A name for the new instruction
4846 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4847 );
4848
4849 /// Methods to support type inquiry through isa, cast, and dyn_cast:
4850 static bool classof(const Instruction *I) {
4851 return I->getOpcode() == FPToSI;
4852 }
4853 static bool classof(const Value *V) {
4854 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4855 }
4856};
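
FPToUIInst and FPToSIInst differ only in the opcode they carry, so the choice between them usually comes down to the signedness of the destination integer. A minimal sketch, assuming the caller already knows that signedness; FPVal, IntTy, and InsertPt are hypothetical inputs, and CastInst::Create with an explicit opcode is used here as one way to express the choice.

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: pick FPToSI or FPToUI based on the desired signedness.
Value *emitFPToInt(Value *FPVal, Type *IntTy, bool IsSigned, Instruction *InsertPt) {
  Instruction::CastOps Op = IsSigned ? Instruction::FPToSI : Instruction::FPToUI;
  return CastInst::Create(Op, FPVal, IntTy, "fptoint", InsertPt);
}
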
4857
4858//===----------------------------------------------------------------------===//
4859// IntToPtrInst Class
4860//===----------------------------------------------------------------------===//
4861
4862/// This class represents a cast from an integer to a pointer.
4863class IntToPtrInst : public CastInst {
4864public:
4865 // Note: Instruction needs to be a friend here to call cloneImpl.
4866 friend class Instruction;
4867
4868 /// Constructor with insert-before-instruction semantics
4869 IntToPtrInst(
4870 Value *S, ///< The value to be converted
4871 Type *Ty, ///< The type to convert to
4872 const Twine &NameStr = "", ///< A name for the new instruction
4873 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4874 );
4875
4876 /// Constructor with insert-at-end-of-block semantics
4877 IntToPtrInst(
4878 Value *S, ///< The value to be converted
4879 Type *Ty, ///< The type to convert to
4880 const Twine &NameStr, ///< A name for the new instruction
4881 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4882 );
4883
4884 /// Clone an identical IntToPtrInst.
4885 IntToPtrInst *cloneImpl() const;
4886
4887 /// Returns the address space of this instruction's pointer type.
4888 unsigned getAddressSpace() const {
4889 return getType()->getPointerAddressSpace();
4890 }
4891
4892 // Methods to support type inquiry through isa, cast, and dyn_cast:
4893 static bool classof(const Instruction *I) {
4894 return I->getOpcode() == IntToPtr;
4895 }
4896 static bool classof(const Value *V) {
4897 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4898 }
4899};
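
A small sketch of how getAddressSpace might be used when inspecting an inttoptr. The function name and the sentinel return value are made up for illustration; only the IntToPtrInst API is taken from the declaration above.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: address space of the pointer an inttoptr produces,
// or ~0u if I is not an IntToPtrInst.
unsigned intToPtrAddrSpace(const Instruction *I) {
  if (auto *ITP = dyn_cast<IntToPtrInst>(I))
    return ITP->getAddressSpace(); // address space of the result pointer type
  return ~0u;
}
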
4900
4901//===----------------------------------------------------------------------===//
4902// PtrToIntInst Class
4903//===----------------------------------------------------------------------===//
4904
4905/// This class represents a cast from a pointer to an integer.
4906class PtrToIntInst : public CastInst {
4907protected:
4908 // Note: Instruction needs to be a friend here to call cloneImpl.
4909 friend class Instruction;
4910
4911 /// Clone an identical PtrToIntInst.
4912 PtrToIntInst *cloneImpl() const;
4913
4914public:
4915 /// Constructor with insert-before-instruction semantics
4916 PtrToIntInst(
4917 Value *S, ///< The value to be converted
4918 Type *Ty, ///< The type to convert to
4919 const Twine &NameStr = "", ///< A name for the new instruction
4920 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4921 );
4922
4923 /// Constructor with insert-at-end-of-block semantics
4924 PtrToIntInst(
4925 Value *S, ///< The value to be converted
4926 Type *Ty, ///< The type to convert to
4927 const Twine &NameStr, ///< A name for the new instruction
4928 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4929 );
4930
4931 /// Gets the pointer operand.
4932 Value *getPointerOperand() { return getOperand(0); }
4933 /// Gets the pointer operand.
4934 const Value *getPointerOperand() const { return getOperand(0); }
4935 /// Gets the operand index of the pointer operand.
4936 static unsigned getPointerOperandIndex() { return 0U; }
4937
4938 /// Returns the address space of the pointer operand.
4939 unsigned getPointerAddressSpace() const {
4940 return getPointerOperand()->getType()->getPointerAddressSpace();
4941 }
4942
4943 // Methods to support type inquiry through isa, cast, and dyn_cast:
4944 static bool classof(const Instruction *I) {
4945 return I->getOpcode() == PtrToInt;
4946 }
4947 static bool classof(const Value *V) {
4948 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4949 }
4950};
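
A minimal sketch exercising the pointer-operand accessors on PtrToIntInst; the incoming value V and the function itself are hypothetical.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: inspect which pointer feeds a ptrtoint and its address space.
void describePtrToInt(const Value *V) {
  if (auto *PTI = dyn_cast<PtrToIntInst>(V)) {
    const Value *Ptr = PTI->getPointerOperand(); // operand 0, per getPointerOperandIndex()
    unsigned AS = PTI->getPointerAddressSpace(); // address space of that operand's type
    (void)Ptr;
    (void)AS;
  }
}
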
4951
4952//===----------------------------------------------------------------------===//
4953// BitCastInst Class
4954//===----------------------------------------------------------------------===//
4955
4956/// This class represents a no-op cast from one type to another.
4957class BitCastInst : public CastInst {
4958protected:
4959 // Note: Instruction needs to be a friend here to call cloneImpl.
4960 friend class Instruction;
4961
4962 /// Clone an identical BitCastInst.
4963 BitCastInst *cloneImpl() const;
4964
4965public:
4966 /// Constructor with insert-before-instruction semantics
4967 BitCastInst(
4968 Value *S, ///< The value to be cast
4969 Type *Ty, ///< The type to cast to
4970 const Twine &NameStr = "", ///< A name for the new instruction
4971 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4972 );
4973
4974 /// Constructor with insert-at-end-of-block semantics
4975 BitCastInst(
4976 Value *S, ///< The value to be cast
4977 Type *Ty, ///< The type to cast to
4978 const Twine &NameStr, ///< A name for the new instruction
4979 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4980 );
4981
4982 // Methods to support type inquiry through isa, cast, and dyn_cast:
4983 static bool classof(const Instruction *I) {
4984 return I->getOpcode() == BitCast;
4985 }
4986 static bool classof(const Value *V) {
4987 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4988 }
4989};
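
Because a bitcast is a no-op reinterpretation, a common pattern is to look through it to the underlying value. A minimal sketch of that pattern, not the canonical LLVM helper (utilities such as Value::stripPointerCasts already cover most real uses).

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: peel off a chain of bitcasts and return the underlying value.
Value *lookThroughBitCasts(Value *V) {
  while (auto *BC = dyn_cast<BitCastInst>(V))
    V = BC->getOperand(0); // the cast-from value; the bits are unchanged
  return V;
}
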
4990
4991//===----------------------------------------------------------------------===//
4992// AddrSpaceCastInst Class
4993//===----------------------------------------------------------------------===//
4994
4995/// This class represents a conversion between pointers from one address space
4996/// to another.
4997class AddrSpaceCastInst : public CastInst {
4998protected:
4999 // Note: Instruction needs to be a friend here to call cloneImpl.
5000 friend class Instruction;
5001
5002 /// Clone an identical AddrSpaceCastInst.
5003 AddrSpaceCastInst *cloneImpl() const;
5004
5005public:
5006 /// Constructor with insert-before-instruction semantics
5007 AddrSpaceCastInst(
5008 Value *S, ///< The value to be cast
5009 Type *Ty, ///< The type to cast to
5010 const Twine &NameStr = "", ///< A name for the new instruction
5011 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5012 );
5013
5014 /// Constructor with insert-at-end-of-block semantics
5015 AddrSpaceCastInst(
5016 Value *S, ///< The value to be cast
5017 Type *Ty, ///< The type to cast to
5018 const Twine &NameStr, ///< A name for the new instruction
5019 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5020 );
5021
5022 // Methods to support type inquiry through isa, cast, and dyn_cast:
5023 static bool classof(const Instruction *I) {
5024 return I->getOpcode() == AddrSpaceCast;
5025 }
5026 static bool classof(const Value *V) {
5027 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5028 }
5029
5030 /// Gets the pointer operand.
5031 Value *getPointerOperand() {
5032 return getOperand(0);
5033 }
5034
5035 /// Gets the pointer operand.
5036 const Value *getPointerOperand() const {
5037 return getOperand(0);
5038 }
5039
5040 /// Gets the operand index of the pointer operand.
5041 static unsigned getPointerOperandIndex() {
5042 return 0U;
5043 }
5044
5045 /// Returns the address space of the pointer operand.
5046 unsigned getSrcAddressSpace() const {
5047 return getPointerOperand()->getType()->getPointerAddressSpace();
5048 }
5049
5050 /// Returns the address space of the result.
5051 unsigned getDestAddressSpace() const {
5052 return getType()->getPointerAddressSpace();
5053 }
5054};
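
A small sketch of the source/destination address-space accessors declared above; the function is hypothetical and largely illustrative, since the IR verifier already requires an addrspacecast to change address space.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: true if the cast moves the pointer between address spaces.
bool changesAddressSpace(const AddrSpaceCastInst *ASC) {
  return ASC->getSrcAddressSpace() != ASC->getDestAddressSpace();
}
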
5055
5056/// A helper function that returns the pointer operand of a load or store
5057/// instruction. Returns nullptr if \p V is neither a load nor a store.
5058inline Value *getLoadStorePointerOperand(Value *V) {
5059 if (auto *Load = dyn_cast<LoadInst>(V))
5060 return Load->getPointerOperand();
5061 if (auto *Store = dyn_cast<StoreInst>(V))
5062 return Store->getPointerOperand();
5063 return nullptr;
5064}
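
A minimal usage sketch: the helper lets a caller treat loads and stores uniformly when it only needs the address they access. The function name and parameters are hypothetical.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: does instruction I access memory directly through pointer P?
bool accessesPointer(Instruction *I, const Value *P) {
  if (Value *Ptr = getLoadStorePointerOperand(I))
    return Ptr == P;
  return false; // not a load or store
}
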
5065
5066/// A helper function that returns the pointer operand of a load, store
5067/// or GEP instruction. Returns nullptr if \p V is not a load, store, or GEP.
5068inline Value *getPointerOperand(Value *V) {
5069 if (auto *Ptr = getLoadStorePointerOperand(V))
5070 return Ptr;
5071 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5072 return Gep->getPointerOperand();
5073 return nullptr;
5074}
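
getPointerOperand widens the same idea to GEPs, which is handy when walking address computations. A short sketch assuming an arbitrary value V; the helper name pointerOrSelf is made up for illustration.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: if V is a load, store, or GEP, return the pointer it is based on;
// otherwise return V itself.
Value *pointerOrSelf(Value *V) {
  if (Value *Ptr = getPointerOperand(V))
    return Ptr;
  return V;
}
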
5075
5076} // end namespace llvm
5077
5078#endif // LLVM_IR_INSTRUCTIONS_H