Bug Summary

File: lib/Transforms/Scalar/LoopIdiomRecognize.cpp
Warning: line 1429, column 46
Called C++ object pointer is null

Annotated Source Code

/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp

1//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass implements an idiom recognizer that transforms simple loops into a
11// non-loop form. In cases that this kicks in, it can be a significant
12// performance win.
13//
14// If compiling for code size we avoid idiom recognition if the resulting
15// code could be larger than the code for the original loop. One way this could
16// happen is if the loop is not removable after idiom recognition due to the
17// presence of non-idiom instructions. The initial implementation of the
18// heuristics applies to idioms in multi-block loops.
19//
20//===----------------------------------------------------------------------===//
21//
22// TODO List:
23//
24// Future loop memory idioms to recognize:
25// memcmp, memmove, strlen, etc.
26// Future floating point idioms to recognize in -ffast-math mode:
27// fpowi
28// Future integer operation idioms to recognize:
29// ctpop, ctlz, cttz
30//
31// Beware that isel's default lowering for ctpop is highly inefficient for
32// i64 and larger types when i64 is legal and the value has few bits set. It
33// would be good to enhance isel to emit a loop for ctpop in this case.
34//
35// This could recognize common matrix multiplies and dot product idioms and
36// replace them with calls to BLAS (if linked in??).
37//
38//===----------------------------------------------------------------------===//
39
40#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
41#include "llvm/ADT/APInt.h"
42#include "llvm/ADT/ArrayRef.h"
43#include "llvm/ADT/DenseMap.h"
44#include "llvm/ADT/MapVector.h"
45#include "llvm/ADT/SetVector.h"
46#include "llvm/ADT/SmallPtrSet.h"
47#include "llvm/ADT/SmallVector.h"
48#include "llvm/ADT/Statistic.h"
49#include "llvm/ADT/StringRef.h"
50#include "llvm/Analysis/AliasAnalysis.h"
51#include "llvm/Analysis/LoopAccessAnalysis.h"
52#include "llvm/Analysis/LoopInfo.h"
53#include "llvm/Analysis/LoopPass.h"
54#include "llvm/Analysis/MemoryLocation.h"
55#include "llvm/Analysis/ScalarEvolution.h"
56#include "llvm/Analysis/ScalarEvolutionExpander.h"
57#include "llvm/Analysis/ScalarEvolutionExpressions.h"
58#include "llvm/Analysis/TargetLibraryInfo.h"
59#include "llvm/Analysis/TargetTransformInfo.h"
60#include "llvm/Analysis/ValueTracking.h"
61#include "llvm/IR/Attributes.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/Constant.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DataLayout.h"
66#include "llvm/IR/DebugLoc.h"
67#include "llvm/IR/DerivedTypes.h"
68#include "llvm/IR/Dominators.h"
69#include "llvm/IR/GlobalValue.h"
70#include "llvm/IR/GlobalVariable.h"
71#include "llvm/IR/IRBuilder.h"
72#include "llvm/IR/InstrTypes.h"
73#include "llvm/IR/Instruction.h"
74#include "llvm/IR/Instructions.h"
75#include "llvm/IR/IntrinsicInst.h"
76#include "llvm/IR/Intrinsics.h"
77#include "llvm/IR/LLVMContext.h"
78#include "llvm/IR/Module.h"
79#include "llvm/IR/PassManager.h"
80#include "llvm/IR/Type.h"
81#include "llvm/IR/User.h"
82#include "llvm/IR/Value.h"
83#include "llvm/IR/ValueHandle.h"
84#include "llvm/Pass.h"
85#include "llvm/Support/Casting.h"
86#include "llvm/Support/CommandLine.h"
87#include "llvm/Support/Debug.h"
88#include "llvm/Support/raw_ostream.h"
89#include "llvm/Transforms/Scalar.h"
90#include "llvm/Transforms/Utils/BuildLibCalls.h"
91#include "llvm/Transforms/Utils/Local.h"
92#include "llvm/Transforms/Utils/LoopUtils.h"
93#include <algorithm>
94#include <cassert>
95#include <cstdint>
96#include <utility>
97#include <vector>
98
99using namespace llvm;
100
101#define DEBUG_TYPE"loop-idiom" "loop-idiom"
102
103STATISTIC(NumMemSet, "Number of memset's formed from loop stores")static llvm::Statistic NumMemSet = {"loop-idiom", "NumMemSet"
, "Number of memset's formed from loop stores", {0}, false}
;
104STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores")static llvm::Statistic NumMemCpy = {"loop-idiom", "NumMemCpy"
, "Number of memcpy's formed from loop load+stores", {0}, false
}
;
105
106static cl::opt<bool> UseLIRCodeSizeHeurs(
107 "use-lir-code-size-heurs",
108 cl::desc("Use loop idiom recognition code size heuristics when compiling"
109 "with -Os/-Oz"),
110 cl::init(true), cl::Hidden);
111
112namespace {
113
114class LoopIdiomRecognize {
115 Loop *CurLoop = nullptr;
116 AliasAnalysis *AA;
117 DominatorTree *DT;
118 LoopInfo *LI;
119 ScalarEvolution *SE;
120 TargetLibraryInfo *TLI;
121 const TargetTransformInfo *TTI;
122 const DataLayout *DL;
123 bool ApplyCodeSizeHeuristics;
124
125public:
126 explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
127 LoopInfo *LI, ScalarEvolution *SE,
128 TargetLibraryInfo *TLI,
129 const TargetTransformInfo *TTI,
130 const DataLayout *DL)
131 : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL) {}
132
133 bool runOnLoop(Loop *L);
134
135private:
136 using StoreList = SmallVector<StoreInst *, 8>;
137 using StoreListMap = MapVector<Value *, StoreList>;
138
139 StoreListMap StoreRefsForMemset;
140 StoreListMap StoreRefsForMemsetPattern;
141 StoreList StoreRefsForMemcpy;
142 bool HasMemset;
143 bool HasMemsetPattern;
144 bool HasMemcpy;
145
146 /// Return code for isLegalStore()
147 enum LegalStoreKind {
148 None = 0,
149 Memset,
150 MemsetPattern,
151 Memcpy,
152 UnorderedAtomicMemcpy,
153 DontUse // Dummy retval never to be used. Allows catching errors in retval
154 // handling.
155 };
156
157 /// \name Countable Loop Idiom Handling
158 /// @{
159
160 bool runOnCountableLoop();
161 bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
162 SmallVectorImpl<BasicBlock *> &ExitBlocks);
163
164 void collectStores(BasicBlock *BB);
165 LegalStoreKind isLegalStore(StoreInst *SI);
166 bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
167 bool ForMemset);
168 bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
169
170 bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
171 unsigned StoreAlignment, Value *StoredVal,
172 Instruction *TheStore,
173 SmallPtrSetImpl<Instruction *> &Stores,
174 const SCEVAddRecExpr *Ev, const SCEV *BECount,
175 bool NegStride, bool IsLoopMemset = false);
176 bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
177 bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
178 bool IsLoopMemset = false);
179
180 /// @}
181 /// \name Noncountable Loop Idiom Handling
182 /// @{
183
184 bool runOnNoncountableLoop();
185
186 bool recognizePopcount();
187 void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
188 PHINode *CntPhi, Value *Var);
189 bool recognizeAndInsertCTLZ();
190 void transformLoopToCountable(BasicBlock *PreCondBB, Instruction *CntInst,
191 PHINode *CntPhi, Value *Var, const DebugLoc DL,
192 bool ZeroCheck, bool IsCntPhiUsedOutsideLoop);
193
194 /// @}
195};
196
197class LoopIdiomRecognizeLegacyPass : public LoopPass {
198public:
199 static char ID;
200
201 explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
202 initializeLoopIdiomRecognizeLegacyPassPass(
203 *PassRegistry::getPassRegistry());
204 }
205
206 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
207 if (skipLoop(L))
208 return false;
209
210 AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
211 DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
212 LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
213 ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
214 TargetLibraryInfo *TLI =
215 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
216 const TargetTransformInfo *TTI =
217 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
218 *L->getHeader()->getParent());
219 const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
220
221 LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
222 return LIR.runOnLoop(L);
223 }
224
225 /// This transformation requires natural loop information & requires that
226 /// loop preheaders be inserted into the CFG.
227 void getAnalysisUsage(AnalysisUsage &AU) const override {
228 AU.addRequired<TargetLibraryInfoWrapperPass>();
229 AU.addRequired<TargetTransformInfoWrapperPass>();
230 getLoopAnalysisUsage(AU);
231 }
232};
233
234} // end anonymous namespace
235
236char LoopIdiomRecognizeLegacyPass::ID = 0;
237
238PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
239 LoopStandardAnalysisResults &AR,
240 LPMUpdater &) {
241 const auto *DL = &L.getHeader()->getModule()->getDataLayout();
242
243 LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL);
244 if (!LIR.runOnLoop(&L))
1
Calling 'LoopIdiomRecognize::runOnLoop'
245 return PreservedAnalyses::all();
246
247 return getLoopPassPreservedAnalyses();
248}
249
250INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",static void *initializeLoopIdiomRecognizeLegacyPassPassOnce(PassRegistry
&Registry) {
251 "Recognize loop idioms", false, false)static void *initializeLoopIdiomRecognizeLegacyPassPassOnce(PassRegistry
&Registry) {
252INITIALIZE_PASS_DEPENDENCY(LoopPass)initializeLoopPassPass(Registry);
253INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)initializeTargetLibraryInfoWrapperPassPass(Registry);
254INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)initializeTargetTransformInfoWrapperPassPass(Registry);
255INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",PassInfo *PI = new PassInfo( "Recognize loop idioms", "loop-idiom"
, &LoopIdiomRecognizeLegacyPass::ID, PassInfo::NormalCtor_t
(callDefaultCtor<LoopIdiomRecognizeLegacyPass>), false,
false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeLoopIdiomRecognizeLegacyPassPassFlag
; void llvm::initializeLoopIdiomRecognizeLegacyPassPass(PassRegistry
&Registry) { llvm::call_once(InitializeLoopIdiomRecognizeLegacyPassPassFlag
, initializeLoopIdiomRecognizeLegacyPassPassOnce, std::ref(Registry
)); }
256 "Recognize loop idioms", false, false)PassInfo *PI = new PassInfo( "Recognize loop idioms", "loop-idiom"
, &LoopIdiomRecognizeLegacyPass::ID, PassInfo::NormalCtor_t
(callDefaultCtor<LoopIdiomRecognizeLegacyPass>), false,
false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeLoopIdiomRecognizeLegacyPassPassFlag
; void llvm::initializeLoopIdiomRecognizeLegacyPassPass(PassRegistry
&Registry) { llvm::call_once(InitializeLoopIdiomRecognizeLegacyPassPassFlag
, initializeLoopIdiomRecognizeLegacyPassPassOnce, std::ref(Registry
)); }
257
258Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }
259
260static void deleteDeadInstruction(Instruction *I) {
261 I->replaceAllUsesWith(UndefValue::get(I->getType()));
262 I->eraseFromParent();
263}
264
265//===----------------------------------------------------------------------===//
266//
267// Implementation of LoopIdiomRecognize
268//
269//===----------------------------------------------------------------------===//
270
271bool LoopIdiomRecognize::runOnLoop(Loop *L) {
272 CurLoop = L;
273 // If the loop could not be converted to canonical form, it must have an
274 // indirectbr in it, just give up.
275 if (!L->getLoopPreheader())
2
Assuming the condition is false
3
Taking false branch
276 return false;
277
278 // Disable loop idiom recognition if the function's name is a common idiom.
279 StringRef Name = L->getHeader()->getParent()->getName();
280 if (Name == "memset" || Name == "memcpy")
4
Assuming the condition is false
5
Assuming the condition is false
6
Taking false branch
281 return false;
282
283 // Determine if code size heuristics need to be applied.
284 ApplyCodeSizeHeuristics =
285 L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;
7
Assuming the condition is false
286
287 HasMemset = TLI->has(LibFunc_memset);
288 HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
289 HasMemcpy = TLI->has(LibFunc_memcpy);
290
291 if (HasMemset || HasMemsetPattern || HasMemcpy)
8
Taking false branch
292 if (SE->hasLoopInvariantBackedgeTakenCount(L))
293 return runOnCountableLoop();
294
295 return runOnNoncountableLoop();
9
Calling 'LoopIdiomRecognize::runOnNoncountableLoop'
296}
297
298bool LoopIdiomRecognize::runOnCountableLoop() {
299 const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
300 assert(!isa<SCEVCouldNotCompute>(BECount) &&(static_cast <bool> (!isa<SCEVCouldNotCompute>(BECount
) && "runOnCountableLoop() called on a loop without a predictable"
"backedge-taken count") ? void (0) : __assert_fail ("!isa<SCEVCouldNotCompute>(BECount) && \"runOnCountableLoop() called on a loop without a predictable\" \"backedge-taken count\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 302, __extension__ __PRETTY_FUNCTION__))
301 "runOnCountableLoop() called on a loop without a predictable"(static_cast <bool> (!isa<SCEVCouldNotCompute>(BECount
) && "runOnCountableLoop() called on a loop without a predictable"
"backedge-taken count") ? void (0) : __assert_fail ("!isa<SCEVCouldNotCompute>(BECount) && \"runOnCountableLoop() called on a loop without a predictable\" \"backedge-taken count\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 302, __extension__ __PRETTY_FUNCTION__))
302 "backedge-taken count")(static_cast <bool> (!isa<SCEVCouldNotCompute>(BECount
) && "runOnCountableLoop() called on a loop without a predictable"
"backedge-taken count") ? void (0) : __assert_fail ("!isa<SCEVCouldNotCompute>(BECount) && \"runOnCountableLoop() called on a loop without a predictable\" \"backedge-taken count\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 302, __extension__ __PRETTY_FUNCTION__))
;
303
304 // If this loop executes exactly one time, then it should be peeled, not
305 // optimized by this pass.
306 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
307 if (BECst->getAPInt() == 0)
308 return false;
309
310 SmallVector<BasicBlock *, 8> ExitBlocks;
311 CurLoop->getUniqueExitBlocks(ExitBlocks);
312
313 DEBUG(dbgs() << "loop-idiom Scanning: F["do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << "loop-idiom Scanning: F[" <<
CurLoop->getHeader()->getParent()->getName() <<
"] Loop %" << CurLoop->getHeader()->getName() <<
"\n"; } } while (false)
314 << CurLoop->getHeader()->getParent()->getName() << "] Loop %"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << "loop-idiom Scanning: F[" <<
CurLoop->getHeader()->getParent()->getName() <<
"] Loop %" << CurLoop->getHeader()->getName() <<
"\n"; } } while (false)
315 << CurLoop->getHeader()->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << "loop-idiom Scanning: F[" <<
CurLoop->getHeader()->getParent()->getName() <<
"] Loop %" << CurLoop->getHeader()->getName() <<
"\n"; } } while (false)
;
316
317 bool MadeChange = false;
318
319 // The following transforms hoist stores/memsets into the loop pre-header.
320 // Give up if the loop has instructions may throw.
321 LoopSafetyInfo SafetyInfo;
322 computeLoopSafetyInfo(&SafetyInfo, CurLoop);
323 if (SafetyInfo.MayThrow)
324 return MadeChange;
325
326 // Scan all the blocks in the loop that are not in subloops.
327 for (auto *BB : CurLoop->getBlocks()) {
328 // Ignore blocks in subloops.
329 if (LI->getLoopFor(BB) != CurLoop)
330 continue;
331
332 MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
333 }
334 return MadeChange;
335}
336
337static unsigned getStoreSizeInBytes(StoreInst *SI, const DataLayout *DL) {
338 uint64_t SizeInBits = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
339 assert(((SizeInBits & 7) || (SizeInBits >> 32) == 0) &&(static_cast <bool> (((SizeInBits & 7) || (SizeInBits
>> 32) == 0) && "Don't overflow unsigned.") ? void
(0) : __assert_fail ("((SizeInBits & 7) || (SizeInBits >> 32) == 0) && \"Don't overflow unsigned.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 340, __extension__ __PRETTY_FUNCTION__))
340 "Don't overflow unsigned.")(static_cast <bool> (((SizeInBits & 7) || (SizeInBits
>> 32) == 0) && "Don't overflow unsigned.") ? void
(0) : __assert_fail ("((SizeInBits & 7) || (SizeInBits >> 32) == 0) && \"Don't overflow unsigned.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 340, __extension__ __PRETTY_FUNCTION__))
;
341 return (unsigned)SizeInBits >> 3;
342}
343
344static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
345 const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
346 return ConstStride->getAPInt();
347}
348
349/// getMemSetPatternValue - If a strided store of the specified value is safe to
350/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
351/// be passed in. Otherwise, return null.
352///
353/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
354/// just replicate their input array and then pass on to memset_pattern16.
355static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
356 // If the value isn't a constant, we can't promote it to being in a constant
357 // array. We could theoretically do a store to an alloca or something, but
358 // that doesn't seem worthwhile.
359 Constant *C = dyn_cast<Constant>(V);
360 if (!C)
361 return nullptr;
362
363 // Only handle simple values that are a power of two bytes in size.
364 uint64_t Size = DL->getTypeSizeInBits(V->getType());
365 if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
366 return nullptr;
367
368 // Don't care enough about darwin/ppc to implement this.
369 if (DL->isBigEndian())
370 return nullptr;
371
372 // Convert to size in bytes.
373 Size /= 8;
374
375 // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
376 // if the top and bottom are the same (e.g. for vectors and large integers).
377 if (Size > 16)
378 return nullptr;
379
380 // If the constant is exactly 16 bytes, just use it.
381 if (Size == 16)
382 return C;
383
384 // Otherwise, we'll use an array of the constants.
385 unsigned ArraySize = 16 / Size;
386 ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
387 return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
388}
389
390LoopIdiomRecognize::LegalStoreKind
391LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
392 // Don't touch volatile stores.
393 if (SI->isVolatile())
394 return LegalStoreKind::None;
395 // We only want simple or unordered-atomic stores.
396 if (!SI->isUnordered())
397 return LegalStoreKind::None;
398
399 // Don't convert stores of non-integral pointer types to memsets (which stores
400 // integers).
401 if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
402 return LegalStoreKind::None;
403
404 // Avoid merging nontemporal stores.
405 if (SI->getMetadata(LLVMContext::MD_nontemporal))
406 return LegalStoreKind::None;
407
408 Value *StoredVal = SI->getValueOperand();
409 Value *StorePtr = SI->getPointerOperand();
410
411 // Reject stores that are so large that they overflow an unsigned.
412 uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
413 if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
414 return LegalStoreKind::None;
415
416 // See if the pointer expression is an AddRec like {base,+,1} on the current
417 // loop, which indicates a strided store. If we have something else, it's a
418 // random store we can't handle.
419 const SCEVAddRecExpr *StoreEv =
420 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
421 if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
422 return LegalStoreKind::None;
423
424 // Check to see if we have a constant stride.
425 if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
426 return LegalStoreKind::None;
427
428 // See if the store can be turned into a memset.
429
430 // If the stored value is a byte-wise value (like i32 -1), then it may be
431 // turned into a memset of i8 -1, assuming that all the consecutive bytes
432 // are stored. A store of i32 0x01020304 can never be turned into a memset,
433 // but it can be turned into memset_pattern if the target supports it.
434 Value *SplatValue = isBytewiseValue(StoredVal);
435 Constant *PatternValue = nullptr;
436
437 // Note: memset and memset_pattern on unordered-atomic is yet not supported
438 bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
439
440 // If we're allowed to form a memset, and the stored value would be
441 // acceptable for memset, use it.
442 if (!UnorderedAtomic && HasMemset && SplatValue &&
443 // Verify that the stored value is loop invariant. If not, we can't
444 // promote the memset.
445 CurLoop->isLoopInvariant(SplatValue)) {
446 // It looks like we can use SplatValue.
447 return LegalStoreKind::Memset;
448 } else if (!UnorderedAtomic && HasMemsetPattern &&
449 // Don't create memset_pattern16s with address spaces.
450 StorePtr->getType()->getPointerAddressSpace() == 0 &&
451 (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
452 // It looks like we can use PatternValue!
453 return LegalStoreKind::MemsetPattern;
454 }
455
456 // Otherwise, see if the store can be turned into a memcpy.
457 if (HasMemcpy) {
458 // Check to see if the stride matches the size of the store. If so, then we
459 // know that every byte is touched in the loop.
460 APInt Stride = getStoreStride(StoreEv);
461 unsigned StoreSize = getStoreSizeInBytes(SI, DL);
462 if (StoreSize != Stride && StoreSize != -Stride)
463 return LegalStoreKind::None;
464
465 // The store must be feeding a non-volatile load.
466 LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
467
468 // Only allow non-volatile loads
469 if (!LI || LI->isVolatile())
470 return LegalStoreKind::None;
471 // Only allow simple or unordered-atomic loads
472 if (!LI->isUnordered())
473 return LegalStoreKind::None;
474
475 // See if the pointer expression is an AddRec like {base,+,1} on the current
476 // loop, which indicates a strided load. If we have something else, it's a
477 // random load we can't handle.
478 const SCEVAddRecExpr *LoadEv =
479 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
480 if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
481 return LegalStoreKind::None;
482
483 // The store and load must share the same stride.
484 if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
485 return LegalStoreKind::None;
486
487 // Success. This store can be converted into a memcpy.
488 UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
489 return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
490 : LegalStoreKind::Memcpy;
491 }
492 // This store can't be transformed into a memset/memcpy.
493 return LegalStoreKind::None;
494}
495
496void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
497 StoreRefsForMemset.clear();
498 StoreRefsForMemsetPattern.clear();
499 StoreRefsForMemcpy.clear();
500 for (Instruction &I : *BB) {
501 StoreInst *SI = dyn_cast<StoreInst>(&I);
502 if (!SI)
503 continue;
504
505 // Make sure this is a strided store with a constant stride.
506 switch (isLegalStore(SI)) {
507 case LegalStoreKind::None:
508 // Nothing to do
509 break;
510 case LegalStoreKind::Memset: {
511 // Find the base pointer.
512 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
513 StoreRefsForMemset[Ptr].push_back(SI);
514 } break;
515 case LegalStoreKind::MemsetPattern: {
516 // Find the base pointer.
517 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
518 StoreRefsForMemsetPattern[Ptr].push_back(SI);
519 } break;
520 case LegalStoreKind::Memcpy:
521 case LegalStoreKind::UnorderedAtomicMemcpy:
522 StoreRefsForMemcpy.push_back(SI);
523 break;
524 default:
525 assert(false && "unhandled return value")(static_cast <bool> (false && "unhandled return value"
) ? void (0) : __assert_fail ("false && \"unhandled return value\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 525, __extension__ __PRETTY_FUNCTION__))
;
526 break;
527 }
528 }
529}
530
531/// runOnLoopBlock - Process the specified block, which lives in a counted loop
532/// with the specified backedge count. This block is known to be in the current
533/// loop and not in any subloops.
534bool LoopIdiomRecognize::runOnLoopBlock(
535 BasicBlock *BB, const SCEV *BECount,
536 SmallVectorImpl<BasicBlock *> &ExitBlocks) {
537 // We can only promote stores in this block if they are unconditionally
538 // executed in the loop. For a block to be unconditionally executed, it has
539 // to dominate all the exit blocks of the loop. Verify this now.
540 for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
541 if (!DT->dominates(BB, ExitBlocks[i]))
542 return false;
543
544 bool MadeChange = false;
545 // Look for store instructions, which may be optimized to memset/memcpy.
546 collectStores(BB);
547
548 // Look for a single store or sets of stores with a common base, which can be
549 // optimized into a memset (memset_pattern). The latter most commonly happens
550 // with structs and handunrolled loops.
551 for (auto &SL : StoreRefsForMemset)
552 MadeChange |= processLoopStores(SL.second, BECount, true);
553
554 for (auto &SL : StoreRefsForMemsetPattern)
555 MadeChange |= processLoopStores(SL.second, BECount, false);
556
557 // Optimize the store into a memcpy, if it feeds an similarly strided load.
558 for (auto &SI : StoreRefsForMemcpy)
559 MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);
560
561 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
562 Instruction *Inst = &*I++;
563 // Look for memset instructions, which may be optimized to a larger memset.
564 if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
565 WeakTrackingVH InstPtr(&*I);
566 if (!processLoopMemSet(MSI, BECount))
567 continue;
568 MadeChange = true;
569
570 // If processing the memset invalidated our iterator, start over from the
571 // top of the block.
572 if (!InstPtr)
573 I = BB->begin();
574 continue;
575 }
576 }
577
578 return MadeChange;
579}
580
581/// processLoopStores - See if this store(s) can be promoted to a memset.
582bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
583 const SCEV *BECount,
584 bool ForMemset) {
585 // Try to find consecutive stores that can be transformed into memsets.
586 SetVector<StoreInst *> Heads, Tails;
587 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
588
589 // Do a quadratic search on all of the given stores and find
590 // all of the pairs of stores that follow each other.
591 SmallVector<unsigned, 16> IndexQueue;
592 for (unsigned i = 0, e = SL.size(); i < e; ++i) {
593 assert(SL[i]->isSimple() && "Expected only non-volatile stores.")(static_cast <bool> (SL[i]->isSimple() && "Expected only non-volatile stores."
) ? void (0) : __assert_fail ("SL[i]->isSimple() && \"Expected only non-volatile stores.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 593, __extension__ __PRETTY_FUNCTION__))
;
594
595 Value *FirstStoredVal = SL[i]->getValueOperand();
596 Value *FirstStorePtr = SL[i]->getPointerOperand();
597 const SCEVAddRecExpr *FirstStoreEv =
598 cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
599 APInt FirstStride = getStoreStride(FirstStoreEv);
600 unsigned FirstStoreSize = getStoreSizeInBytes(SL[i], DL);
601
602 // See if we can optimize just this store in isolation.
603 if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
604 Heads.insert(SL[i]);
605 continue;
606 }
607
608 Value *FirstSplatValue = nullptr;
609 Constant *FirstPatternValue = nullptr;
610
611 if (ForMemset)
612 FirstSplatValue = isBytewiseValue(FirstStoredVal);
613 else
614 FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);
615
616 assert((FirstSplatValue || FirstPatternValue) &&(static_cast <bool> ((FirstSplatValue || FirstPatternValue
) && "Expected either splat value or pattern value.")
? void (0) : __assert_fail ("(FirstSplatValue || FirstPatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 617, __extension__ __PRETTY_FUNCTION__))
617 "Expected either splat value or pattern value.")(static_cast <bool> ((FirstSplatValue || FirstPatternValue
) && "Expected either splat value or pattern value.")
? void (0) : __assert_fail ("(FirstSplatValue || FirstPatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 617, __extension__ __PRETTY_FUNCTION__))
;
618
619 IndexQueue.clear();
620 // If a store has multiple consecutive store candidates, search Stores
621 // array according to the sequence: from i+1 to e, then from i-1 to 0.
622 // This is because usually pairing with immediate succeeding or preceding
623 // candidate create the best chance to find memset opportunity.
624 unsigned j = 0;
625 for (j = i + 1; j < e; ++j)
626 IndexQueue.push_back(j);
627 for (j = i; j > 0; --j)
628 IndexQueue.push_back(j - 1);
629
630 for (auto &k : IndexQueue) {
631 assert(SL[k]->isSimple() && "Expected only non-volatile stores.")(static_cast <bool> (SL[k]->isSimple() && "Expected only non-volatile stores."
) ? void (0) : __assert_fail ("SL[k]->isSimple() && \"Expected only non-volatile stores.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 631, __extension__ __PRETTY_FUNCTION__))
;
632 Value *SecondStorePtr = SL[k]->getPointerOperand();
633 const SCEVAddRecExpr *SecondStoreEv =
634 cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
635 APInt SecondStride = getStoreStride(SecondStoreEv);
636
637 if (FirstStride != SecondStride)
638 continue;
639
640 Value *SecondStoredVal = SL[k]->getValueOperand();
641 Value *SecondSplatValue = nullptr;
642 Constant *SecondPatternValue = nullptr;
643
644 if (ForMemset)
645 SecondSplatValue = isBytewiseValue(SecondStoredVal);
646 else
647 SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);
648
649 assert((SecondSplatValue || SecondPatternValue) &&(static_cast <bool> ((SecondSplatValue || SecondPatternValue
) && "Expected either splat value or pattern value.")
? void (0) : __assert_fail ("(SecondSplatValue || SecondPatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 650, __extension__ __PRETTY_FUNCTION__))
650 "Expected either splat value or pattern value.")(static_cast <bool> ((SecondSplatValue || SecondPatternValue
) && "Expected either splat value or pattern value.")
? void (0) : __assert_fail ("(SecondSplatValue || SecondPatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 650, __extension__ __PRETTY_FUNCTION__))
;
651
652 if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
653 if (ForMemset) {
654 if (FirstSplatValue != SecondSplatValue)
655 continue;
656 } else {
657 if (FirstPatternValue != SecondPatternValue)
658 continue;
659 }
660 Tails.insert(SL[k]);
661 Heads.insert(SL[i]);
662 ConsecutiveChain[SL[i]] = SL[k];
663 break;
664 }
665 }
666 }
667
668 // We may run into multiple chains that merge into a single chain. We mark the
669 // stores that we transformed so that we don't visit the same store twice.
670 SmallPtrSet<Value *, 16> TransformedStores;
671 bool Changed = false;
672
673 // For stores that start but don't end a link in the chain:
674 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
675 it != e; ++it) {
676 if (Tails.count(*it))
677 continue;
678
679 // We found a store instr that starts a chain. Now follow the chain and try
680 // to transform it.
681 SmallPtrSet<Instruction *, 8> AdjacentStores;
682 StoreInst *I = *it;
683
684 StoreInst *HeadStore = I;
685 unsigned StoreSize = 0;
686
687 // Collect the chain into a list.
688 while (Tails.count(I) || Heads.count(I)) {
689 if (TransformedStores.count(I))
690 break;
691 AdjacentStores.insert(I);
692
693 StoreSize += getStoreSizeInBytes(I, DL);
694 // Move to the next value in the chain.
695 I = ConsecutiveChain[I];
696 }
697
698 Value *StoredVal = HeadStore->getValueOperand();
699 Value *StorePtr = HeadStore->getPointerOperand();
700 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
701 APInt Stride = getStoreStride(StoreEv);
702
703 // Check to see if the stride matches the size of the stores. If so, then
704 // we know that every byte is touched in the loop.
705 if (StoreSize != Stride && StoreSize != -Stride)
706 continue;
707
708 bool NegStride = StoreSize == -Stride;
709
710 if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
711 StoredVal, HeadStore, AdjacentStores, StoreEv,
712 BECount, NegStride)) {
713 TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
714 Changed = true;
715 }
716 }
717
718 return Changed;
719}
720
721/// processLoopMemSet - See if this memset can be promoted to a large memset.
722bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
723 const SCEV *BECount) {
724 // We can only handle non-volatile memsets with a constant size.
725 if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
726 return false;
727
728 // If we're not allowed to hack on memset, we fail.
729 if (!HasMemset)
730 return false;
731
732 Value *Pointer = MSI->getDest();
733
734 // See if the pointer expression is an AddRec like {base,+,1} on the current
735 // loop, which indicates a strided store. If we have something else, it's a
736 // random store we can't handle.
737 const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
738 if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
739 return false;
740
741 // Reject memsets that are so large that they overflow an unsigned.
742 uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
743 if ((SizeInBytes >> 32) != 0)
744 return false;
745
746 // Check to see if the stride matches the size of the memset. If so, then we
747 // know that every byte is touched in the loop.
748 const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
749 if (!ConstStride)
750 return false;
751
752 APInt Stride = ConstStride->getAPInt();
753 if (SizeInBytes != Stride && SizeInBytes != -Stride)
754 return false;
755
756 // Verify that the memset value is loop invariant. If not, we can't promote
757 // the memset.
758 Value *SplatValue = MSI->getValue();
759 if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
760 return false;
761
762 SmallPtrSet<Instruction *, 1> MSIs;
763 MSIs.insert(MSI);
764 bool NegStride = SizeInBytes == -Stride;
765 return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
766 MSI->getAlignment(), SplatValue, MSI, MSIs, Ev,
767 BECount, NegStride, /*IsLoopMemset=*/true);
768}
769
770/// mayLoopAccessLocation - Return true if the specified loop might access the
771/// specified pointer location, which is a loop-strided access. The 'Access'
772/// argument specifies what the verboten forms of access are (read or write).
773static bool
774mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
775 const SCEV *BECount, unsigned StoreSize,
776 AliasAnalysis &AA,
777 SmallPtrSetImpl<Instruction *> &IgnoredStores) {
778 // Get the location that may be stored across the loop. Since the access is
779 // strided positively through memory, we say that the modified location starts
780 // at the pointer and has infinite size.
781 uint64_t AccessSize = MemoryLocation::UnknownSize;
782
783 // If the loop iterates a fixed number of times, we can refine the access size
784 // to be exactly the size of the memset, which is (BECount+1)*StoreSize
785 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
786 AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
787
788 // TODO: For this to be really effective, we have to dive into the pointer
789 // operand in the store. Store to &A[i] of 100 will always return may alias
790 // with store of &A[100], we need to StoreLoc to be "A" with size of 100,
791 // which will then no-alias a store to &A[100].
792 MemoryLocation StoreLoc(Ptr, AccessSize);
793
794 for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
795 ++BI)
796 for (Instruction &I : **BI)
797 if (IgnoredStores.count(&I) == 0 &&
798 (AA.getModRefInfo(&I, StoreLoc) & Access))
799 return true;
800
801 return false;
802}
803
804// If we have a negative stride, Start refers to the end of the memory location
805// we're trying to memset. Therefore, we need to recompute the base pointer,
806// which is just Start - BECount*Size.
807static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
808 Type *IntPtr, unsigned StoreSize,
809 ScalarEvolution *SE) {
810 const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
811 if (StoreSize != 1)
812 Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
813 SCEV::FlagNUW);
814 return SE->getMinusSCEV(Start, Index);
815}
816
817/// Compute the number of bytes as a SCEV from the backedge taken count.
818///
819/// This also maps the SCEV into the provided type and tries to handle the
820/// computation in a way that will fold cleanly.
821static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
822 unsigned StoreSize, Loop *CurLoop,
823 const DataLayout *DL, ScalarEvolution *SE) {
824 const SCEV *NumBytesS;
825 // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
826 // pointer size if it isn't already.
827 //
828 // If we're going to need to zero extend the BE count, check if we can add
829 // one to it prior to zero extending without overflow. Provided this is safe,
830 // it allows better simplification of the +1.
831 if (DL->getTypeSizeInBits(BECount->getType()) <
832 DL->getTypeSizeInBits(IntPtr) &&
833 SE->isLoopEntryGuardedByCond(
834 CurLoop, ICmpInst::ICMP_NE, BECount,
835 SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
836 NumBytesS = SE->getZeroExtendExpr(
837 SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
838 IntPtr);
839 } else {
840 NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
841 SE->getOne(IntPtr), SCEV::FlagNUW);
842 }
843
844 // And scale it based on the store size.
845 if (StoreSize != 1) {
846 NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
847 SCEV::FlagNUW);
848 }
849 return NumBytesS;
850}
851
852/// processLoopStridedStore - We see a strided store of some value. If we can
853/// transform this into a memset or memset_pattern in the loop preheader, do so.
854bool LoopIdiomRecognize::processLoopStridedStore(
855 Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
856 Value *StoredVal, Instruction *TheStore,
857 SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
858 const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
859 Value *SplatValue = isBytewiseValue(StoredVal);
860 Constant *PatternValue = nullptr;
861
862 if (!SplatValue)
863 PatternValue = getMemSetPatternValue(StoredVal, DL);
864
865 assert((SplatValue || PatternValue) &&(static_cast <bool> ((SplatValue || PatternValue) &&
"Expected either splat value or pattern value.") ? void (0) :
__assert_fail ("(SplatValue || PatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 866, __extension__ __PRETTY_FUNCTION__))
866 "Expected either splat value or pattern value.")(static_cast <bool> ((SplatValue || PatternValue) &&
"Expected either splat value or pattern value.") ? void (0) :
__assert_fail ("(SplatValue || PatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 866, __extension__ __PRETTY_FUNCTION__))
;
867
868 // The trip count of the loop and the base pointer of the addrec SCEV is
869 // guaranteed to be loop invariant, which means that it should dominate the
870 // header. This allows us to insert code for it in the preheader.
871 unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
872 BasicBlock *Preheader = CurLoop->getLoopPreheader();
873 IRBuilder<> Builder(Preheader->getTerminator());
874 SCEVExpander Expander(*SE, *DL, "loop-idiom");
875
876 Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
877 Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
878
879 const SCEV *Start = Ev->getStart();
880 // Handle negative strided loops.
881 if (NegStride)
882 Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
883
884 // TODO: ideally we should still be able to generate memset if SCEV expander
885 // is taught to generate the dependencies at the latest point.
886 if (!isSafeToExpand(Start, *SE))
887 return false;
888
889 // Okay, we have a strided store "p[i]" of a splattable value. We can turn
890 // this into a memset in the loop preheader now if we want. However, this
891 // would be unsafe to do if there is anything else in the loop that may read
892 // or write to the aliased location. Check for any overlap by generating the
893 // base pointer and checking the region.
894 Value *BasePtr =
895 Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
896 if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
897 *AA, Stores)) {
898 Expander.clear();
899 // If we generated new code for the base pointer, clean up.
900 RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
901 return false;
902 }
903
904 if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
905 return false;
906
907 // Okay, everything looks good, insert the memset.
908
909 const SCEV *NumBytesS =
910 getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);
911
912 // TODO: ideally we should still be able to generate memset if SCEV expander
913 // is taught to generate the dependencies at the latest point.
914 if (!isSafeToExpand(NumBytesS, *SE))
915 return false;
916
917 Value *NumBytes =
918 Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
919
920 CallInst *NewCall;
921 if (SplatValue) {
922 NewCall =
923 Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
924 } else {
925 // Everything is emitted in default address space
926 Type *Int8PtrTy = DestInt8PtrTy;
927
928 Module *M = TheStore->getModule();
929 Value *MSP =
930 M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
931 Int8PtrTy, Int8PtrTy, IntPtr);
932 inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);
933
934 // Otherwise we should form a memset_pattern16. PatternValue is known to be
935 // an constant array of 16-bytes. Plop the value into a mergable global.
936 GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
937 GlobalValue::PrivateLinkage,
938 PatternValue, ".memset_pattern");
939 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
940 GV->setAlignment(16);
941 Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
942 NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
943 }
944
945 DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memset: " <<
*NewCall << "\n" << " from store to: " <<
*Ev << " at: " << *TheStore << "\n"; } } while
(false)
946 << " from store to: " << *Ev << " at: " << *TheStore << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memset: " <<
*NewCall << "\n" << " from store to: " <<
*Ev << " at: " << *TheStore << "\n"; } } while
(false)
;
947 NewCall->setDebugLoc(TheStore->getDebugLoc());
948
949 // Okay, the memset has been formed. Zap the original store and anything that
950 // feeds into it.
951 for (auto *I : Stores)
952 deleteDeadInstruction(I);
953 ++NumMemSet;
954 return true;
955}
956
957/// If the stored value is a strided load in the same loop with the same stride
958/// this may be transformable into a memcpy. This kicks in for stuff like
959/// for (i) A[i] = B[i];
960bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
961 const SCEV *BECount) {
962 assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.")(static_cast <bool> (SI->isUnordered() && "Expected only non-volatile non-ordered stores."
) ? void (0) : __assert_fail ("SI->isUnordered() && \"Expected only non-volatile non-ordered stores.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 962, __extension__ __PRETTY_FUNCTION__))
;
963
964 Value *StorePtr = SI->getPointerOperand();
965 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
966 APInt Stride = getStoreStride(StoreEv);
967 unsigned StoreSize = getStoreSizeInBytes(SI, DL);
968 bool NegStride = StoreSize == -Stride;
969
970 // The store must be feeding a non-volatile load.
971 LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
972 assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.")(static_cast <bool> (LI->isUnordered() && "Expected only non-volatile non-ordered loads."
) ? void (0) : __assert_fail ("LI->isUnordered() && \"Expected only non-volatile non-ordered loads.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 972, __extension__ __PRETTY_FUNCTION__))
;
973
974 // See if the pointer expression is an AddRec like {base,+,1} on the current
975 // loop, which indicates a strided load. If we have something else, it's a
976 // random load we can't handle.
977 const SCEVAddRecExpr *LoadEv =
978 cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
979
980 // The trip count of the loop and the base pointer of the addrec SCEV is
981 // guaranteed to be loop invariant, which means that it should dominate the
982 // header. This allows us to insert code for it in the preheader.
983 BasicBlock *Preheader = CurLoop->getLoopPreheader();
984 IRBuilder<> Builder(Preheader->getTerminator());
985 SCEVExpander Expander(*SE, *DL, "loop-idiom");
986
987 const SCEV *StrStart = StoreEv->getStart();
988 unsigned StrAS = SI->getPointerAddressSpace();
989 Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
990
991 // Handle negative strided loops.
992 if (NegStride)
993 StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
994
995 // Okay, we have a strided store "p[i]" of a loaded value. We can turn
996 // this into a memcpy in the loop preheader now if we want. However, this
997 // would be unsafe to do if there is anything else in the loop that may read
998 // or write the memory region we're storing to. This includes the load that
999 // feeds the stores. Check for an alias by generating the base address and
1000 // checking everything.
1001 Value *StoreBasePtr = Expander.expandCodeFor(
1002 StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());
1003
1004 SmallPtrSet<Instruction *, 1> Stores;
1005 Stores.insert(SI);
1006 if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
1007 StoreSize, *AA, Stores)) {
1008 Expander.clear();
1009 // If we generated new code for the base pointer, clean up.
1010 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1011 return false;
1012 }
1013
1014 const SCEV *LdStart = LoadEv->getStart();
1015 unsigned LdAS = LI->getPointerAddressSpace();
1016
1017 // Handle negative strided loops.
1018 if (NegStride)
1019 LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
1020
1021 // For a memcpy, we have to make sure that the input array is not being
1022 // mutated by the loop.
1023 Value *LoadBasePtr = Expander.expandCodeFor(
1024 LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
1025
1026 if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
1027 *AA, Stores)) {
1028 Expander.clear();
1029 // If we generated new code for the base pointer, clean up.
1030 RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
1031 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1032 return false;
1033 }
1034
1035 if (avoidLIRForMultiBlockLoop())
1036 return false;
1037
1038 // Okay, everything is safe, we can transform this!
1039
1040 const SCEV *NumBytesS =
1041 getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);
1042
1043 Value *NumBytes =
1044 Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
1045
1046 unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
1047 CallInst *NewCall = nullptr;
1048 // Check whether to generate an unordered atomic memcpy:
1049 // If the load or store are atomic, then they must neccessarily be unordered
1050 // by previous checks.
1051 if (!SI->isAtomic() && !LI->isAtomic())
1052 NewCall = Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes, Align);
1053 else {
1054 // We cannot allow unaligned ops for unordered load/store, so reject
1055 // anything where the alignment isn't at least the element size.
1056 if (Align < StoreSize)
1057 return false;
1058
1059 // If the element.atomic memcpy is not lowered into explicit
1060 // loads/stores later, then it will be lowered into an element-size
1061 // specific lib call. If the lib call doesn't exist for our store size, then
1062 // we shouldn't generate the memcpy.
1063 if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
1064 return false;
1065
1066 // Create the call.
1067 // Note that unordered atomic loads/stores are *required* by the spec to
1068 // have an alignment but non-atomic loads/stores may not.
1069 NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
1070 StoreBasePtr, SI->getAlignment(), LoadBasePtr, LI->getAlignment(),
1071 NumBytes, StoreSize);
1072 }
1073 NewCall->setDebugLoc(SI->getDebugLoc());
1074
1075 DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memcpy: " <<
*NewCall << "\n" << " from load ptr=" <<
*LoadEv << " at: " << *LI << "\n" <<
" from store ptr=" << *StoreEv << " at: " <<
*SI << "\n"; } } while (false)
1076 << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memcpy: " <<
*NewCall << "\n" << " from load ptr=" <<
*LoadEv << " at: " << *LI << "\n" <<
" from store ptr=" << *StoreEv << " at: " <<
*SI << "\n"; } } while (false)
1077 << " from store ptr=" << *StoreEv << " at: " << *SI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memcpy: " <<
*NewCall << "\n" << " from load ptr=" <<
*LoadEv << " at: " << *LI << "\n" <<
" from store ptr=" << *StoreEv << " at: " <<
*SI << "\n"; } } while (false)
;
1078
1079 // Okay, the memcpy has been formed. Zap the original store and anything that
1080 // feeds into it.
1081 deleteDeadInstruction(SI);
1082 ++NumMemCpy;
1083 return true;
1084}
1085
1086// When compiling for codesize we avoid idiom recognition for a multi-block loop
1087// unless it is a loop_memset idiom or a memset/memcpy idiom in a nested loop.
1088//
1089bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
1090 bool IsLoopMemset) {
1091 if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
1092 if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
1093 DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " " << CurLoop->getHeader
()->getParent()->getName() << " : LIR " << (
IsMemset ? "Memset" : "Memcpy") << " avoided: multi-block top-level loop\n"
; } } while (false)
1094 << " : LIR " << (IsMemset ? "Memset" : "Memcpy")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " " << CurLoop->getHeader
()->getParent()->getName() << " : LIR " << (
IsMemset ? "Memset" : "Memcpy") << " avoided: multi-block top-level loop\n"
; } } while (false)
1095 << " avoided: multi-block top-level loop\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " " << CurLoop->getHeader
()->getParent()->getName() << " : LIR " << (
IsMemset ? "Memset" : "Memcpy") << " avoided: multi-block top-level loop\n"
; } } while (false)
;
1096 return true;
1097 }
1098 }
1099
1100 return false;
1101}
1102
1103bool LoopIdiomRecognize::runOnNoncountableLoop() {
1104 return recognizePopcount() || recognizeAndInsertCTLZ();
10
Calling 'LoopIdiomRecognize::recognizeAndInsertCTLZ'
1105}
1106
1107/// Check if the given conditional branch is based on the comparison between
1108/// a variable and zero, and if the variable is non-zero, the control yields to
1109/// the loop entry. If the branch matches the behavior, the variable involved
1110/// in the comparison is returned. This function will be called to see if the
1111/// precondition and postcondition of the loop are in desirable form.
1112static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
1113 if (!BI || !BI->isConditional())
1114 return nullptr;
1115
1116 ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
1117 if (!Cond)
1118 return nullptr;
1119
1120 ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
1121 if (!CmpZero || !CmpZero->isZero())
1122 return nullptr;
1123
1124 ICmpInst::Predicate Pred = Cond->getPredicate();
1125 if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
1126 (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
1127 return Cond->getOperand(0);
1128
1129 return nullptr;
1130}
1131
1132// Check if the recurrence variable `VarX` is in the right form to create
1133// the idiom. Returns the value coerced to a PHINode if so.
1134static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
1135 BasicBlock *LoopEntry) {
1136 auto *PhiX = dyn_cast<PHINode>(VarX);
1137 if (PhiX && PhiX->getParent() == LoopEntry &&
1138 (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
1139 return PhiX;
1140 return nullptr;
1141}
1142
1143/// Return true iff the idiom is detected in the loop.
1144///
1145/// Additionally:
1146/// 1) \p CntInst is set to the instruction counting the population bit.
1147/// 2) \p CntPhi is set to the corresponding phi node.
1148/// 3) \p Var is set to the value whose population bits are being counted.
1149///
1150/// The core idiom we are trying to detect is:
1151/// \code
1152/// if (x0 != 0)
1153/// goto loop-exit // the precondition of the loop
1154/// cnt0 = init-val;
1155/// do {
1156/// x1 = phi (x0, x2);
1157/// cnt1 = phi(cnt0, cnt2);
1158///
1159/// cnt2 = cnt1 + 1;
1160/// ...
1161/// x2 = x1 & (x1 - 1);
1162/// ...
1163/// } while(x != 0);
1164///
1165/// loop-exit:
1166/// \endcode
1167static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
1168 Instruction *&CntInst, PHINode *&CntPhi,
1169 Value *&Var) {
1170 // step 1: Check to see if the look-back branch match this pattern:
1171 // "if (a!=0) goto loop-entry".
1172 BasicBlock *LoopEntry;
1173 Instruction *DefX2, *CountInst;
1174 Value *VarX1, *VarX0;
1175 PHINode *PhiX, *CountPhi;
1176
1177 DefX2 = CountInst = nullptr;
1178 VarX1 = VarX0 = nullptr;
1179 PhiX = CountPhi = nullptr;
1180 LoopEntry = *(CurLoop->block_begin());
1181
1182 // step 1: Check if the loop-back branch is in desirable form.
1183 {
1184 if (Value *T = matchCondition(
1185 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1186 DefX2 = dyn_cast<Instruction>(T);
1187 else
1188 return false;
1189 }
1190
1191 // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
1192 {
1193 if (!DefX2 || DefX2->getOpcode() != Instruction::And)
1194 return false;
1195
1196 BinaryOperator *SubOneOp;
1197
1198 if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
1199 VarX1 = DefX2->getOperand(1);
1200 else {
1201 VarX1 = DefX2->getOperand(0);
1202 SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
1203 }
1204 if (!SubOneOp)
1205 return false;
1206
1207 Instruction *SubInst = cast<Instruction>(SubOneOp);
1208 ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
1209 if (!Dec ||
1210 !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
1211 (SubInst->getOpcode() == Instruction::Add &&
1212 Dec->isMinusOne()))) {
1213 return false;
1214 }
1215 }
1216
1217 // step 3: Check the recurrence of variable X
1218 PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
1219 if (!PhiX)
1220 return false;
1221
1222 // step 4: Find the instruction which count the population: cnt2 = cnt1 + 1
1223 {
1224 CountInst = nullptr;
1225 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1226 IterE = LoopEntry->end();
1227 Iter != IterE; Iter++) {
1228 Instruction *Inst = &*Iter;
1229 if (Inst->getOpcode() != Instruction::Add)
1230 continue;
1231
1232 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1233 if (!Inc || !Inc->isOne())
1234 continue;
1235
1236 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1237 if (!Phi)
1238 continue;
1239
1240 // Check if the result of the instruction is live of the loop.
1241 bool LiveOutLoop = false;
1242 for (User *U : Inst->users()) {
1243 if ((cast<Instruction>(U))->getParent() != LoopEntry) {
1244 LiveOutLoop = true;
1245 break;
1246 }
1247 }
1248
1249 if (LiveOutLoop) {
1250 CountInst = Inst;
1251 CountPhi = Phi;
1252 break;
1253 }
1254 }
1255
1256 if (!CountInst)
1257 return false;
1258 }
1259
1260 // step 5: check if the precondition is in this form:
1261 // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
1262 {
1263 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1264 Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
1265 if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
1266 return false;
1267
1268 CntInst = CountInst;
1269 CntPhi = CountPhi;
1270 Var = T;
1271 }
1272
1273 return true;
1274}
1275
1276/// Return true if the idiom is detected in the loop.
1277///
1278/// Additionally:
1279/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
1280/// or nullptr if there is no such.
1281/// 2) \p CntPhi is set to the corresponding phi node
1282/// or nullptr if there is no such.
1283/// 3) \p Var is set to the value whose CTLZ could be used.
1284/// 4) \p DefX is set to the instruction calculating Loop exit condition.
1285///
1286/// The core idiom we are trying to detect is:
1287/// \code
1288/// if (x0 == 0)
1289/// goto loop-exit // the precondition of the loop
1290/// cnt0 = init-val;
1291/// do {
1292/// x = phi (x0, x.next); //PhiX
1293/// cnt = phi(cnt0, cnt.next);
1294///
1295/// cnt.next = cnt + 1;
1296/// ...
1297/// x.next = x >> 1; // DefX
1298/// ...
1299/// } while(x.next != 0);
1300///
1301/// loop-exit:
1302/// \endcode
1303static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
1304 Instruction *&CntInst, PHINode *&CntPhi,
1305 Instruction *&DefX) {
1306 BasicBlock *LoopEntry;
1307 Value *VarX = nullptr;
1308
1309 DefX = nullptr;
1310 PhiX = nullptr;
1311 CntInst = nullptr;
1312 CntPhi = nullptr;
1313 LoopEntry = *(CurLoop->block_begin());
1314
1315 // step 1: Check if the loop-back branch is in desirable form.
1316 if (Value *T = matchCondition(
1317 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1318 DefX = dyn_cast<Instruction>(T);
1319 else
1320 return false;
1321
1322 // step 2: detect instructions corresponding to "x.next = x >> 1"
1323 if (!DefX || DefX->getOpcode() != Instruction::AShr)
1324 return false;
1325 ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
1326 if (!Shft || !Shft->isOne())
1327 return false;
1328 VarX = DefX->getOperand(0);
1329
1330 // step 3: Check the recurrence of variable X
1331 PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
1332 if (!PhiX)
1333 return false;
1334
1335 // step 4: Find the instruction which count the CTLZ: cnt.next = cnt + 1
1336 // TODO: We can skip the step. If loop trip count is known (CTLZ),
1337 // then all uses of "cnt.next" could be optimized to the trip count
1338 // plus "cnt0". Currently it is not optimized.
1339 // This step could be used to detect POPCNT instruction:
1340 // cnt.next = cnt + (x.next & 1)
1341 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1342 IterE = LoopEntry->end();
1343 Iter != IterE; Iter++) {
1344 Instruction *Inst = &*Iter;
1345 if (Inst->getOpcode() != Instruction::Add)
1346 continue;
1347
1348 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1349 if (!Inc || !Inc->isOne())
1350 continue;
1351
1352 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1353 if (!Phi)
1354 continue;
1355
1356 CntInst = Inst;
1357 CntPhi = Phi;
1358 break;
1359 }
1360 if (!CntInst)
1361 return false;
1362
1363 return true;
1364}
1365
1366/// Recognize CTLZ idiom in a non-countable loop and convert the loop
1367/// to countable (with CTLZ trip count).
1368/// If CTLZ inserted as a new trip count returns true; otherwise, returns false.
1369bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
1370 // Give up if the loop has multiple blocks or multiple backedges.
1371 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
11
Assuming the condition is false
12
Assuming the condition is false
13
Taking false branch
1372 return false;
1373
1374 Instruction *CntInst, *DefX;
1375 PHINode *CntPhi, *PhiX;
1376 if (!detectCTLZIdiom(CurLoop, PhiX, CntInst, CntPhi, DefX))
14
Assuming the condition is false
15
Taking false branch
1377 return false;
1378
1379 bool IsCntPhiUsedOutsideLoop = false;
1380 for (User *U : CntPhi->users())
1381 if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
1382 IsCntPhiUsedOutsideLoop = true;
1383 break;
1384 }
1385 bool IsCntInstUsedOutsideLoop = false;
1386 for (User *U : CntInst->users())
1387 if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
1388 IsCntInstUsedOutsideLoop = true;
1389 break;
1390 }
1391 // If both CntInst and CntPhi are used outside the loop the profitability
1392 // is questionable.
1393 if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
1394 return false;
1395
1396 // For some CPUs result of CTLZ(X) intrinsic is undefined
1397 // when X is 0. If we can not guarantee X != 0, we need to check this
1398 // when expand.
1399 bool ZeroCheck = false;
1400 // It is safe to assume Preheader exist as it was checked in
1401 // parent function RunOnLoop.
1402 BasicBlock *PH = CurLoop->getLoopPreheader();
1403 Value *InitX = PhiX->getIncomingValueForBlock(PH);
16
Calling 'PHINode::getIncomingValueForBlock'
20
Returning from 'PHINode::getIncomingValueForBlock'
21
'InitX' initialized here
1404 // If we check X != 0 before entering the loop we don't need a zero
1405 // check in CTLZ intrinsic, but only if Cnt Phi is not used outside of the
1406 // loop (if it is used we count CTLZ(X >> 1)).
1407 if (!IsCntPhiUsedOutsideLoop)
22
Taking true branch
1408 if (BasicBlock *PreCondBB = PH->getSinglePredecessor())
23
Assuming 'PreCondBB' is non-null
24
Taking true branch
1409 if (BranchInst *PreCondBr =
25
Assuming 'PreCondBr' is non-null
26
Taking true branch
1410 dyn_cast<BranchInst>(PreCondBB->getTerminator())) {
1411 if (matchCondition(PreCondBr, PH) == InitX)
27
Assuming the condition is true
28
Assuming pointer value is null
29
Taking true branch
1412 ZeroCheck = true;
1413 }
1414
1415 // Check if CTLZ intrinsic is profitable. Assume it is always profitable
1416 // if we delete the loop (the loop has only 6 instructions):
1417 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
1418 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
1419 // %shr = ashr %n.addr.0, 1
1420 // %tobool = icmp eq %shr, 0
1421 // %inc = add nsw %i.0, 1
1422 // br i1 %tobool
1423
1424 IRBuilder<> Builder(PH->getTerminator());
1425 SmallVector<const Value *, 2> Ops =
1426 {InitX, ZeroCheck ? Builder.getTrue() : Builder.getFalse()};
30
'?' condition is true
1427 ArrayRef<const Value *> Args(Ops);
1428 if (CurLoop->getHeader()->size() != 6 &&
31
Assuming the condition is true
1429 TTI->getIntrinsicCost(Intrinsic::ctlz, InitX->getType(), Args) >
32
Called C++ object pointer is null
1430 TargetTransformInfo::TCC_Basic)
1431 return false;
1432
1433 const DebugLoc DL = DefX->getDebugLoc();
1434 transformLoopToCountable(PH, CntInst, CntPhi, InitX, DL, ZeroCheck,
1435 IsCntPhiUsedOutsideLoop);
1436 return true;
1437}
1438
1439/// Recognizes a population count idiom in a non-countable loop.
1440///
1441/// If detected, transforms the relevant code to issue the popcount intrinsic
1442/// function call, and returns true; otherwise, returns false.
1443bool LoopIdiomRecognize::recognizePopcount() {
1444 if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
1445 return false;
1446
1447 // Counting population are usually conducted by few arithmetic instructions.
1448 // Such instructions can be easily "absorbed" by vacant slots in a
1449 // non-compact loop. Therefore, recognizing popcount idiom only makes sense
1450 // in a compact loop.
1451
1452 // Give up if the loop has multiple blocks or multiple backedges.
1453 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
1454 return false;
1455
1456 BasicBlock *LoopBody = *(CurLoop->block_begin());
1457 if (LoopBody->size() >= 20) {
1458 // The loop is too big, bail out.
1459 return false;
1460 }
1461
1462 // It should have a preheader containing nothing but an unconditional branch.
1463 BasicBlock *PH = CurLoop->getLoopPreheader();
1464 if (!PH || &PH->front() != PH->getTerminator())
1465 return false;
1466 auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
1467 if (!EntryBI || EntryBI->isConditional())
1468 return false;
1469
1470 // It should have a precondition block where the generated popcount instrinsic
1471 // function can be inserted.
1472 auto *PreCondBB = PH->getSinglePredecessor();
1473 if (!PreCondBB)
1474 return false;
1475 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1476 if (!PreCondBI || PreCondBI->isUnconditional())
1477 return false;
1478
1479 Instruction *CntInst;
1480 PHINode *CntPhi;
1481 Value *Val;
1482 if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
1483 return false;
1484
1485 transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
1486 return true;
1487}
1488
1489static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1490 const DebugLoc &DL) {
1491 Value *Ops[] = {Val};
1492 Type *Tys[] = {Val->getType()};
1493
1494 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1495 Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
1496 CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1497 CI->setDebugLoc(DL);
1498
1499 return CI;
1500}
1501
1502static CallInst *createCTLZIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1503 const DebugLoc &DL, bool ZeroCheck) {
1504 Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
1505 Type *Tys[] = {Val->getType()};
1506
1507 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1508 Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
1509 CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1510 CI->setDebugLoc(DL);
1511
1512 return CI;
1513}
1514
1515/// Transform the following loop:
1516/// loop:
1517/// CntPhi = PHI [Cnt0, CntInst]
1518/// PhiX = PHI [InitX, DefX]
1519/// CntInst = CntPhi + 1
1520/// DefX = PhiX >> 1
1521/// LOOP_BODY
1522/// Br: loop if (DefX != 0)
1523/// Use(CntPhi) or Use(CntInst)
1524///
1525/// Into:
1526/// If CntPhi used outside the loop:
1527/// CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
1528/// Count = CountPrev + 1
1529/// else
1530/// Count = BitWidth(InitX) - CTLZ(InitX)
1531/// loop:
1532/// CntPhi = PHI [Cnt0, CntInst]
1533/// PhiX = PHI [InitX, DefX]
1534/// PhiCount = PHI [Count, Dec]
1535/// CntInst = CntPhi + 1
1536/// DefX = PhiX >> 1
1537/// Dec = PhiCount - 1
1538/// LOOP_BODY
1539/// Br: loop if (Dec != 0)
1540/// Use(CountPrev + Cnt0) // Use(CntPhi)
1541/// or
1542/// Use(Count + Cnt0) // Use(CntInst)
1543///
1544/// If LOOP_BODY is empty the loop will be deleted.
1545/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
1546void LoopIdiomRecognize::transformLoopToCountable(
1547 BasicBlock *Preheader, Instruction *CntInst, PHINode *CntPhi, Value *InitX,
1548 const DebugLoc DL, bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
1549 BranchInst *PreheaderBr = dyn_cast<BranchInst>(Preheader->getTerminator());
1550
1551 // Step 1: Insert the CTLZ instruction at the end of the preheader block
1552 // Count = BitWidth - CTLZ(InitX);
1553 // If there are uses of CntPhi create:
1554 // CountPrev = BitWidth - CTLZ(InitX >> 1);
1555 IRBuilder<> Builder(PreheaderBr);
1556 Builder.SetCurrentDebugLocation(DL);
1557 Value *CTLZ, *Count, *CountPrev, *NewCount, *InitXNext;
1558
1559 if (IsCntPhiUsedOutsideLoop)
1560 InitXNext = Builder.CreateAShr(InitX,
1561 ConstantInt::get(InitX->getType(), 1));
1562 else
1563 InitXNext = InitX;
1564 CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
1565 Count = Builder.CreateSub(
1566 ConstantInt::get(CTLZ->getType(),
1567 CTLZ->getType()->getIntegerBitWidth()),
1568 CTLZ);
1569 if (IsCntPhiUsedOutsideLoop) {
1570 CountPrev = Count;
1571 Count = Builder.CreateAdd(
1572 CountPrev,
1573 ConstantInt::get(CountPrev->getType(), 1));
1574 }
1575 if (IsCntPhiUsedOutsideLoop)
1576 NewCount = Builder.CreateZExtOrTrunc(CountPrev,
1577 cast<IntegerType>(CntInst->getType()));
1578 else
1579 NewCount = Builder.CreateZExtOrTrunc(Count,
1580 cast<IntegerType>(CntInst->getType()));
1581
1582 // If the CTLZ counter's initial value is not zero, insert Add Inst.
1583 Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
1584 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
1585 if (!InitConst || !InitConst->isZero())
1586 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
1587
1588 // Step 2: Insert new IV and loop condition:
1589 // loop:
1590 // ...
1591 // PhiCount = PHI [Count, Dec]
1592 // ...
1593 // Dec = PhiCount - 1
1594 // ...
1595 // Br: loop if (Dec != 0)
1596 BasicBlock *Body = *(CurLoop->block_begin());
1597 auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
1598 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
1599 Type *Ty = Count->getType();
1600
1601 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
1602
1603 Builder.SetInsertPoint(LbCond);
1604 Instruction *TcDec = cast<Instruction>(
1605 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
1606 "tcdec", false, true));
1607
1608 TcPhi->addIncoming(Count, Preheader);
1609 TcPhi->addIncoming(TcDec, Body);
1610
1611 CmpInst::Predicate Pred =
1612 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
1613 LbCond->setPredicate(Pred);
1614 LbCond->setOperand(0, TcDec);
1615 LbCond->setOperand(1, ConstantInt::get(Ty, 0));
1616
1617 // Step 3: All the references to the original counter outside
1618 // the loop are replaced with the NewCount -- the value returned from
1619 // __builtin_ctlz(x).
1620 if (IsCntPhiUsedOutsideLoop)
1621 CntPhi->replaceUsesOutsideBlock(NewCount, Body);
1622 else
1623 CntInst->replaceUsesOutsideBlock(NewCount, Body);
1624
1625 // step 4: Forget the "non-computable" trip-count SCEV associated with the
1626 // loop. The loop would otherwise not be deleted even if it becomes empty.
1627 SE->forgetLoop(CurLoop);
1628}
1629
1630void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
1631 Instruction *CntInst,
1632 PHINode *CntPhi, Value *Var) {
1633 BasicBlock *PreHead = CurLoop->getLoopPreheader();
1634 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1635 const DebugLoc DL = CntInst->getDebugLoc();
1636
1637 // Assuming before transformation, the loop is following:
1638 // if (x) // the precondition
1639 // do { cnt++; x &= x - 1; } while(x);
1640
1641 // Step 1: Insert the ctpop instruction at the end of the precondition block
1642 IRBuilder<> Builder(PreCondBr);
1643 Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
1644 {
1645 PopCnt = createPopcntIntrinsic(Builder, Var, DL);
1646 NewCount = PopCntZext =
1647 Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
1648
1649 if (NewCount != PopCnt)
1650 (cast<Instruction>(NewCount))->setDebugLoc(DL);
1651
1652 // TripCnt is exactly the number of iterations the loop has
1653 TripCnt = NewCount;
1654
1655 // If the population counter's initial value is not zero, insert Add Inst.
1656 Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
1657 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
1658 if (!InitConst || !InitConst->isZero()) {
1659 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
1660 (cast<Instruction>(NewCount))->setDebugLoc(DL);
1661 }
1662 }
1663
1664 // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
1665 // "if (NewCount == 0) loop-exit". Without this change, the intrinsic
1666 // function would be partial dead code, and downstream passes will drag
1667 // it back from the precondition block to the preheader.
1668 {
1669 ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
1670
1671 Value *Opnd0 = PopCntZext;
1672 Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
1673 if (PreCond->getOperand(0) != Var)
1674 std::swap(Opnd0, Opnd1);
1675
1676 ICmpInst *NewPreCond = cast<ICmpInst>(
1677 Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
1678 PreCondBr->setCondition(NewPreCond);
1679
1680 RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
1681 }
1682
1683 // Step 3: Note that the population count is exactly the trip count of the
1684 // loop in question, which enable us to to convert the loop from noncountable
1685 // loop into a countable one. The benefit is twofold:
1686 //
1687 // - If the loop only counts population, the entire loop becomes dead after
1688 // the transformation. It is a lot easier to prove a countable loop dead
1689 // than to prove a noncountable one. (In some C dialects, an infinite loop
1690 // isn't dead even if it computes nothing useful. In general, DCE needs
1691 // to prove a noncountable loop finite before safely delete it.)
1692 //
1693 // - If the loop also performs something else, it remains alive.
1694 // Since it is transformed to countable form, it can be aggressively
1695 // optimized by some optimizations which are in general not applicable
1696 // to a noncountable loop.
1697 //
1698 // After this step, this loop (conceptually) would look like following:
1699 // newcnt = __builtin_ctpop(x);
1700 // t = newcnt;
1701 // if (x)
1702 // do { cnt++; x &= x-1; t--) } while (t > 0);
1703 BasicBlock *Body = *(CurLoop->block_begin());
1704 {
1705 auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
1706 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
1707 Type *Ty = TripCnt->getType();
1708
1709 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
1710
1711 Builder.SetInsertPoint(LbCond);
1712 Instruction *TcDec = cast<Instruction>(
1713 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
1714 "tcdec", false, true));
1715
1716 TcPhi->addIncoming(TripCnt, PreHead);
1717 TcPhi->addIncoming(TcDec, Body);
1718
1719 CmpInst::Predicate Pred =
1720 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
1721 LbCond->setPredicate(Pred);
1722 LbCond->setOperand(0, TcDec);
1723 LbCond->setOperand(1, ConstantInt::get(Ty, 0));
1724 }
1725
1726 // Step 4: All the references to the original population counter outside
1727 // the loop are replaced with the NewCount -- the value returned from
1728 // __builtin_ctpop().
1729 CntInst->replaceUsesOutsideBlock(NewCount, Body);
1730
1731 // step 5: Forget the "non-computable" trip-count SCEV associated with the
1732 // loop. The loop would otherwise not be deleted even if it becomes empty.
1733 SE->forgetLoop(CurLoop);
1734}

/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes the class definitions of all of the subclasses of the
11// Instruction class. This is meant to be an easy way to get access to all
12// instruction subclasses.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_IR_INSTRUCTIONS_H
17#define LLVM_IR_INSTRUCTIONS_H
18
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/OperandTraits.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Use.h"
38#include "llvm/IR/User.h"
39#include "llvm/IR/Value.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/ErrorHandling.h"
43#include <cassert>
44#include <cstddef>
45#include <cstdint>
46#include <iterator>
47
48namespace llvm {
49
50class APInt;
51class ConstantInt;
52class DataLayout;
53class LLVMContext;
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59/// an instruction to allocate memory on the stack
60class AllocaInst : public UnaryInstruction {
61 Type *AllocatedType;
62
63protected:
64 // Note: Instruction needs to be a friend here to call cloneImpl.
65 friend class Instruction;
66
67 AllocaInst *cloneImpl() const;
68
69public:
70 explicit AllocaInst(Type *Ty, unsigned AddrSpace,
71 Value *ArraySize = nullptr,
72 const Twine &Name = "",
73 Instruction *InsertBefore = nullptr);
74 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
75 const Twine &Name, BasicBlock *InsertAtEnd);
76
77 AllocaInst(Type *Ty, unsigned AddrSpace,
78 const Twine &Name, Instruction *InsertBefore = nullptr);
79 AllocaInst(Type *Ty, unsigned AddrSpace,
80 const Twine &Name, BasicBlock *InsertAtEnd);
81
82 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
83 const Twine &Name = "", Instruction *InsertBefore = nullptr);
84 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
85 const Twine &Name, BasicBlock *InsertAtEnd);
86
87 /// Return true if there is an allocation size parameter to the allocation
88 /// instruction that is not 1.
89 bool isArrayAllocation() const;
90
91 /// Get the number of elements allocated. For a simple allocation of a single
92 /// element, this will return a constant 1 value.
93 const Value *getArraySize() const { return getOperand(0); }
94 Value *getArraySize() { return getOperand(0); }
95
96 /// Overload to return most specific pointer type.
97 PointerType *getType() const {
98 return cast<PointerType>(Instruction::getType());
99 }
100
101 /// Return the type that is being allocated by the instruction.
102 Type *getAllocatedType() const { return AllocatedType; }
103 /// for use only in special circumstances that need to generically
104 /// transform a whole instruction (eg: IR linking and vectorization).
105 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
106
107 /// Return the alignment of the memory that is being allocated by the
108 /// instruction.
109 unsigned getAlignment() const {
110 return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
111 }
112 void setAlignment(unsigned Align);
113
114 /// Return true if this alloca is in the entry block of the function and is a
115 /// constant size. If so, the code generator will fold it into the
116 /// prolog/epilog code, so it is basically free.
117 bool isStaticAlloca() const;
118
119 /// Return true if this alloca is used as an inalloca argument to a call. Such
120 /// allocas are never considered static even if they are in the entry block.
121 bool isUsedWithInAlloca() const {
122 return getSubclassDataFromInstruction() & 32;
123 }
124
125 /// Specify whether this alloca is used to represent the arguments to a call.
126 void setUsedWithInAlloca(bool V) {
127 setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
128 (V ? 32 : 0));
129 }
130
131 /// Return true if this alloca is used as a swifterror argument to a call.
132 bool isSwiftError() const {
133 return getSubclassDataFromInstruction() & 64;
134 }
135
136 /// Specify whether this alloca is used to represent a swifterror.
137 void setSwiftError(bool V) {
138 setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
139 (V ? 64 : 0));
140 }
141
142 // Methods for support type inquiry through isa, cast, and dyn_cast:
143 static bool classof(const Instruction *I) {
144 return (I->getOpcode() == Instruction::Alloca);
145 }
146 static bool classof(const Value *V) {
147 return isa<Instruction>(V) && classof(cast<Instruction>(V));
148 }
149
150private:
151 // Shadow Instruction::setInstructionSubclassData with a private forwarding
152 // method so that subclasses cannot accidentally use it.
153 void setInstructionSubclassData(unsigned short D) {
154 Instruction::setInstructionSubclassData(D);
155 }
156};
157
158//===----------------------------------------------------------------------===//
159// LoadInst Class
160//===----------------------------------------------------------------------===//
161
162/// An instruction for reading from memory. This uses the SubclassData field in
163/// Value to store whether or not the load is volatile.
164class LoadInst : public UnaryInstruction {
165 void AssertOK();
166
167protected:
168 // Note: Instruction needs to be a friend here to call cloneImpl.
169 friend class Instruction;
170
171 LoadInst *cloneImpl() const;
172
173public:
174 LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
175 LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
176 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
177 Instruction *InsertBefore = nullptr);
178 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
179 Instruction *InsertBefore = nullptr)
180 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
181 NameStr, isVolatile, InsertBefore) {}
182 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
183 BasicBlock *InsertAtEnd);
184 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
185 Instruction *InsertBefore = nullptr)
186 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
187 NameStr, isVolatile, Align, InsertBefore) {}
188 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
189 unsigned Align, Instruction *InsertBefore = nullptr);
190 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
191 unsigned Align, BasicBlock *InsertAtEnd);
192 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
193 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
194 Instruction *InsertBefore = nullptr)
195 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
196 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 unsigned Align, AtomicOrdering Order,
199 SyncScope::ID SSID = SyncScope::System,
200 Instruction *InsertBefore = nullptr);
201 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
202 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
203 BasicBlock *InsertAtEnd);
204 LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
205 LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
206 LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
207 bool isVolatile = false, Instruction *InsertBefore = nullptr);
208 explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
209 bool isVolatile = false,
210 Instruction *InsertBefore = nullptr)
211 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
212 NameStr, isVolatile, InsertBefore) {}
213 LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
214 BasicBlock *InsertAtEnd);
215
216 /// Return true if this is a load from a volatile memory location.
217 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
218
219 /// Specify whether this is a volatile load or not.
220 void setVolatile(bool V) {
221 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
222 (V ? 1 : 0));
223 }
224
225 /// Return the alignment of the access that is being performed.
226 unsigned getAlignment() const {
227 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
228 }
229
230 void setAlignment(unsigned Align);
231
232 /// Returns the ordering constraint of this load instruction.
233 AtomicOrdering getOrdering() const {
234 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
235 }
236
237 /// Sets the ordering constraint of this load instruction. May not be Release
238 /// or AcquireRelease.
239 void setOrdering(AtomicOrdering Ordering) {
240 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
241 ((unsigned)Ordering << 7));
242 }
243
244 /// Returns the synchronization scope ID of this load instruction.
245 SyncScope::ID getSyncScopeID() const {
246 return SSID;
247 }
248
249 /// Sets the synchronization scope ID of this load instruction.
250 void setSyncScopeID(SyncScope::ID SSID) {
251 this->SSID = SSID;
252 }
253
254 /// Sets the ordering constraint and the synchronization scope ID of this load
255 /// instruction.
256 void setAtomic(AtomicOrdering Ordering,
257 SyncScope::ID SSID = SyncScope::System) {
258 setOrdering(Ordering);
259 setSyncScopeID(SSID);
260 }
261
262 bool isSimple() const { return !isAtomic() && !isVolatile(); }
263
264 bool isUnordered() const {
265 return (getOrdering() == AtomicOrdering::NotAtomic ||
266 getOrdering() == AtomicOrdering::Unordered) &&
267 !isVolatile();
268 }
269
270 Value *getPointerOperand() { return getOperand(0); }
271 const Value *getPointerOperand() const { return getOperand(0); }
272 static unsigned getPointerOperandIndex() { return 0U; }
273 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
274
275 /// Returns the address space of the pointer operand.
276 unsigned getPointerAddressSpace() const {
277 return getPointerOperandType()->getPointerAddressSpace();
278 }
279
280 // Methods for support type inquiry through isa, cast, and dyn_cast:
281 static bool classof(const Instruction *I) {
282 return I->getOpcode() == Instruction::Load;
283 }
284 static bool classof(const Value *V) {
285 return isa<Instruction>(V) && classof(cast<Instruction>(V));
286 }
287
288private:
289 // Shadow Instruction::setInstructionSubclassData with a private forwarding
290 // method so that subclasses cannot accidentally use it.
291 void setInstructionSubclassData(unsigned short D) {
292 Instruction::setInstructionSubclassData(D);
293 }
294
295 /// The synchronization scope ID of this load instruction. Not quite enough
296 /// room in SubClassData for everything, so synchronization scope ID gets its
297 /// own field.
298 SyncScope::ID SSID;
299};
300
301//===----------------------------------------------------------------------===//
302// StoreInst Class
303//===----------------------------------------------------------------------===//
304
305/// An instruction for storing to memory.
306class StoreInst : public Instruction {
307 void AssertOK();
308
309protected:
310 // Note: Instruction needs to be a friend here to call cloneImpl.
311 friend class Instruction;
312
313 StoreInst *cloneImpl() const;
314
315public:
316 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
317 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
318 StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
319 Instruction *InsertBefore = nullptr);
320 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
321 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
322 unsigned Align, Instruction *InsertBefore = nullptr);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
324 unsigned Align, BasicBlock *InsertAtEnd);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
326 unsigned Align, AtomicOrdering Order,
327 SyncScope::ID SSID = SyncScope::System,
328 Instruction *InsertBefore = nullptr);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
330 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
331 BasicBlock *InsertAtEnd);
332
333 // allocate space for exactly two operands
334 void *operator new(size_t s) {
335 return User::operator new(s, 2);
336 }
337
338 /// Return true if this is a store to a volatile memory location.
339 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
340
341 /// Specify whether this is a volatile store or not.
342 void setVolatile(bool V) {
343 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
344 (V ? 1 : 0));
345 }
346
347 /// Transparently provide more efficient getOperand methods.
348 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
349
350 /// Return the alignment of the access that is being performed
351 unsigned getAlignment() const {
352 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
353 }
354
355 void setAlignment(unsigned Align);
356
357 /// Returns the ordering constraint of this store instruction.
358 AtomicOrdering getOrdering() const {
359 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
360 }
361
362 /// Sets the ordering constraint of this store instruction. May not be
363 /// Acquire or AcquireRelease.
364 void setOrdering(AtomicOrdering Ordering) {
365 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
366 ((unsigned)Ordering << 7));
367 }
368
369 /// Returns the synchronization scope ID of this store instruction.
370 SyncScope::ID getSyncScopeID() const {
371 return SSID;
372 }
373
374 /// Sets the synchronization scope ID of this store instruction.
375 void setSyncScopeID(SyncScope::ID SSID) {
376 this->SSID = SSID;
377 }
378
379 /// Sets the ordering constraint and the synchronization scope ID of this
380 /// store instruction.
381 void setAtomic(AtomicOrdering Ordering,
382 SyncScope::ID SSID = SyncScope::System) {
383 setOrdering(Ordering);
384 setSyncScopeID(SSID);
385 }
386
387 bool isSimple() const { return !isAtomic() && !isVolatile(); }
388
389 bool isUnordered() const {
390 return (getOrdering() == AtomicOrdering::NotAtomic ||
391 getOrdering() == AtomicOrdering::Unordered) &&
392 !isVolatile();
393 }
394
395 Value *getValueOperand() { return getOperand(0); }
396 const Value *getValueOperand() const { return getOperand(0); }
397
398 Value *getPointerOperand() { return getOperand(1); }
399 const Value *getPointerOperand() const { return getOperand(1); }
400 static unsigned getPointerOperandIndex() { return 1U; }
401 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
402
403 /// Returns the address space of the pointer operand.
404 unsigned getPointerAddressSpace() const {
405 return getPointerOperandType()->getPointerAddressSpace();
406 }
407
408 // Methods for support type inquiry through isa, cast, and dyn_cast:
409 static bool classof(const Instruction *I) {
410 return I->getOpcode() == Instruction::Store;
411 }
412 static bool classof(const Value *V) {
413 return isa<Instruction>(V) && classof(cast<Instruction>(V));
414 }
415
416private:
417 // Shadow Instruction::setInstructionSubclassData with a private forwarding
418 // method so that subclasses cannot accidentally use it.
419 void setInstructionSubclassData(unsigned short D) {
420 Instruction::setInstructionSubclassData(D);
421 }
422
423 /// The synchronization scope ID of this store instruction. Not quite enough
424 /// room in SubClassData for everything, so synchronization scope ID gets its
425 /// own field.
426 SyncScope::ID SSID;
427};
428
429template <>
430struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
431};
432
// Out-of-line bodies for StoreInst's transparent operand accessors
// (the expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)).
StoreInst::op_iterator StoreInst::op_begin() {
  return OperandTraits<StoreInst>::op_begin(this);
}
StoreInst::const_op_iterator StoreInst::op_begin() const {
  return OperandTraits<StoreInst>::op_begin(const_cast<StoreInst *>(this));
}
StoreInst::op_iterator StoreInst::op_end() {
  return OperandTraits<StoreInst>::op_end(this);
}
StoreInst::const_op_iterator StoreInst::op_end() const {
  return OperandTraits<StoreInst>::op_end(const_cast<StoreInst *>(this));
}
Value *StoreInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<StoreInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<StoreInst>::op_begin(const_cast<StoreInst *>(this))
          [i_nocapture].get());
}
void StoreInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<StoreInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<StoreInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned StoreInst::getNumOperands() const {
  return OperandTraits<StoreInst>::operands(this);
}
template <int Idx_nocapture> Use &StoreInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &StoreInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
434
435//===----------------------------------------------------------------------===//
436// FenceInst Class
437//===----------------------------------------------------------------------===//
438
439/// An instruction for ordering other memory operations.
440class FenceInst : public Instruction {
441 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
442
443protected:
444 // Note: Instruction needs to be a friend here to call cloneImpl.
445 friend class Instruction;
446
447 FenceInst *cloneImpl() const;
448
449public:
450 // Ordering may only be Acquire, Release, AcquireRelease, or
451 // SequentiallyConsistent.
452 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
453 SyncScope::ID SSID = SyncScope::System,
454 Instruction *InsertBefore = nullptr);
455 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
456 BasicBlock *InsertAtEnd);
457
458 // allocate space for exactly zero operands
459 void *operator new(size_t s) {
460 return User::operator new(s, 0);
461 }
462
463 /// Returns the ordering constraint of this fence instruction.
464 AtomicOrdering getOrdering() const {
465 return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
466 }
467
468 /// Sets the ordering constraint of this fence instruction. May only be
469 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
470 void setOrdering(AtomicOrdering Ordering) {
471 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
472 ((unsigned)Ordering << 1));
473 }
474
475 /// Returns the synchronization scope ID of this fence instruction.
476 SyncScope::ID getSyncScopeID() const {
477 return SSID;
478 }
479
480 /// Sets the synchronization scope ID of this fence instruction.
481 void setSyncScopeID(SyncScope::ID SSID) {
482 this->SSID = SSID;
483 }
484
485 // Methods for support type inquiry through isa, cast, and dyn_cast:
486 static bool classof(const Instruction *I) {
487 return I->getOpcode() == Instruction::Fence;
488 }
489 static bool classof(const Value *V) {
490 return isa<Instruction>(V) && classof(cast<Instruction>(V));
491 }
492
493private:
494 // Shadow Instruction::setInstructionSubclassData with a private forwarding
495 // method so that subclasses cannot accidentally use it.
496 void setInstructionSubclassData(unsigned short D) {
497 Instruction::setInstructionSubclassData(D);
498 }
499
500 /// The synchronization scope ID of this fence instruction. Not quite enough
501 /// room in SubClassData for everything, so synchronization scope ID gets its
502 /// own field.
503 SyncScope::ID SSID;
504};
505
506//===----------------------------------------------------------------------===//
507// AtomicCmpXchgInst Class
508//===----------------------------------------------------------------------===//
509
510/// an instruction that atomically checks whether a
511/// specified value is in a memory location, and, if it is, stores a new value
512/// there. Returns the value that was loaded.
513///
514class AtomicCmpXchgInst : public Instruction {
515 void Init(Value *Ptr, Value *Cmp, Value *NewVal,
516 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
517 SyncScope::ID SSID);
518
519protected:
520 // Note: Instruction needs to be a friend here to call cloneImpl.
521 friend class Instruction;
522
523 AtomicCmpXchgInst *cloneImpl() const;
524
525public:
526 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
527 AtomicOrdering SuccessOrdering,
528 AtomicOrdering FailureOrdering,
529 SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering,
533 SyncScope::ID SSID, BasicBlock *InsertAtEnd);
534
535 // allocate space for exactly three operands
536 void *operator new(size_t s) {
537 return User::operator new(s, 3);
538 }
539
540 /// Return true if this is a cmpxchg from a volatile memory
541 /// location.
542 ///
543 bool isVolatile() const {
544 return getSubclassDataFromInstruction() & 1;
545 }
546
547 /// Specify whether this is a volatile cmpxchg.
548 ///
549 void setVolatile(bool V) {
550 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
551 (unsigned)V);
552 }
553
554 /// Return true if this cmpxchg may spuriously fail.
555 bool isWeak() const {
556 return getSubclassDataFromInstruction() & 0x100;
557 }
558
559 void setWeak(bool IsWeak) {
560 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
561 (IsWeak << 8));
562 }
563
564 /// Transparently provide more efficient getOperand methods.
565 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
566
567 /// Returns the success ordering constraint of this cmpxchg instruction.
568 AtomicOrdering getSuccessOrdering() const {
569 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
570 }
571
572 /// Sets the success ordering constraint of this cmpxchg instruction.
573 void setSuccessOrdering(AtomicOrdering Ordering) {
574 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 575, __extension__ __PRETTY_FUNCTION__))
575 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 575, __extension__ __PRETTY_FUNCTION__))
;
576 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
577 ((unsigned)Ordering << 2));
578 }
579
580 /// Returns the failure ordering constraint of this cmpxchg instruction.
581 AtomicOrdering getFailureOrdering() const {
582 return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
583 }
584
585 /// Sets the failure ordering constraint of this cmpxchg instruction.
586 void setFailureOrdering(AtomicOrdering Ordering) {
587 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 588, __extension__ __PRETTY_FUNCTION__))
588 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 588, __extension__ __PRETTY_FUNCTION__))
;
589 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
590 ((unsigned)Ordering << 5));
591 }
592
593 /// Returns the synchronization scope ID of this cmpxchg instruction.
594 SyncScope::ID getSyncScopeID() const {
595 return SSID;
596 }
597
598 /// Sets the synchronization scope ID of this cmpxchg instruction.
599 void setSyncScopeID(SyncScope::ID SSID) {
600 this->SSID = SSID;
601 }
602
603 Value *getPointerOperand() { return getOperand(0); }
604 const Value *getPointerOperand() const { return getOperand(0); }
605 static unsigned getPointerOperandIndex() { return 0U; }
606
607 Value *getCompareOperand() { return getOperand(1); }
608 const Value *getCompareOperand() const { return getOperand(1); }
609
610 Value *getNewValOperand() { return getOperand(2); }
611 const Value *getNewValOperand() const { return getOperand(2); }
612
613 /// Returns the address space of the pointer operand.
614 unsigned getPointerAddressSpace() const {
615 return getPointerOperand()->getType()->getPointerAddressSpace();
616 }
617
618 /// Returns the strongest permitted ordering on failure, given the
619 /// desired ordering on success.
620 ///
621 /// If the comparison in a cmpxchg operation fails, there is no atomic store
622 /// so release semantics cannot be provided. So this function drops explicit
623 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
624 /// operation would remain SequentiallyConsistent.
625 static AtomicOrdering
626 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
627 switch (SuccessOrdering) {
628 default:
629 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 629)
;
630 case AtomicOrdering::Release:
631 case AtomicOrdering::Monotonic:
632 return AtomicOrdering::Monotonic;
633 case AtomicOrdering::AcquireRelease:
634 case AtomicOrdering::Acquire:
635 return AtomicOrdering::Acquire;
636 case AtomicOrdering::SequentiallyConsistent:
637 return AtomicOrdering::SequentiallyConsistent;
638 }
639 }
640
641 // Methods for support type inquiry through isa, cast, and dyn_cast:
642 static bool classof(const Instruction *I) {
643 return I->getOpcode() == Instruction::AtomicCmpXchg;
644 }
645 static bool classof(const Value *V) {
646 return isa<Instruction>(V) && classof(cast<Instruction>(V));
647 }
648
649private:
650 // Shadow Instruction::setInstructionSubclassData with a private forwarding
651 // method so that subclasses cannot accidentally use it.
652 void setInstructionSubclassData(unsigned short D) {
653 Instruction::setInstructionSubclassData(D);
654 }
655
656 /// The synchronization scope ID of this cmpxchg instruction. Not quite
657 /// enough room in SubClassData for everything, so synchronization scope ID
658 /// gets its own field.
659 SyncScope::ID SSID;
660};
661
662template <>
663struct OperandTraits<AtomicCmpXchgInst> :
664 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
665};
666
// Out-of-line bodies for AtomicCmpXchgInst's transparent operand accessors
// (the expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst,
// Value)).
AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
  return OperandTraits<AtomicCmpXchgInst>::op_begin(this);
}
AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::op_begin() const {
  return OperandTraits<AtomicCmpXchgInst>::op_begin(
      const_cast<AtomicCmpXchgInst *>(this));
}
AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_end() {
  return OperandTraits<AtomicCmpXchgInst>::op_end(this);
}
AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::op_end() const {
  return OperandTraits<AtomicCmpXchgInst>::op_end(
      const_cast<AtomicCmpXchgInst *>(this));
}
Value *AtomicCmpXchgInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<AtomicCmpXchgInst>::op_begin(
          const_cast<AtomicCmpXchgInst *>(this))[i_nocapture].get());
}
void AtomicCmpXchgInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned AtomicCmpXchgInst::getNumOperands() const {
  return OperandTraits<AtomicCmpXchgInst>::operands(this);
}
template <int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &AtomicCmpXchgInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
668
669//===----------------------------------------------------------------------===//
670// AtomicRMWInst Class
671//===----------------------------------------------------------------------===//
672
673/// an instruction that atomically reads a memory location,
674/// combines it with another value, and then stores the result back. Returns
675/// the old value.
676///
677class AtomicRMWInst : public Instruction {
678protected:
679 // Note: Instruction needs to be a friend here to call cloneImpl.
680 friend class Instruction;
681
682 AtomicRMWInst *cloneImpl() const;
683
684public:
685 /// This enumeration lists the possible modifications atomicrmw can make. In
686 /// the descriptions, 'p' is the pointer to the instruction's memory location,
687 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
688 /// instruction. These instructions always return 'old'.
689 enum BinOp {
690 /// *p = v
691 Xchg,
692 /// *p = old + v
693 Add,
694 /// *p = old - v
695 Sub,
696 /// *p = old & v
697 And,
698 /// *p = ~(old & v)
699 Nand,
700 /// *p = old | v
701 Or,
702 /// *p = old ^ v
703 Xor,
704 /// *p = old >signed v ? old : v
705 Max,
706 /// *p = old <signed v ? old : v
707 Min,
708 /// *p = old >unsigned v ? old : v
709 UMax,
710 /// *p = old <unsigned v ? old : v
711 UMin,
712
713 FIRST_BINOP = Xchg,
714 LAST_BINOP = UMin,
715 BAD_BINOP
716 };
717
718 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
719 AtomicOrdering Ordering, SyncScope::ID SSID,
720 Instruction *InsertBefore = nullptr);
721 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
722 AtomicOrdering Ordering, SyncScope::ID SSID,
723 BasicBlock *InsertAtEnd);
724
725 // allocate space for exactly two operands
726 void *operator new(size_t s) {
727 return User::operator new(s, 2);
728 }
729
730 BinOp getOperation() const {
731 return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
732 }
733
734 void setOperation(BinOp Operation) {
735 unsigned short SubclassData = getSubclassDataFromInstruction();
736 setInstructionSubclassData((SubclassData & 31) |
737 (Operation << 5));
738 }
739
740 /// Return true if this is a RMW on a volatile memory location.
741 ///
742 bool isVolatile() const {
743 return getSubclassDataFromInstruction() & 1;
744 }
745
746 /// Specify whether this is a volatile RMW or not.
747 ///
748 void setVolatile(bool V) {
749 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
750 (unsigned)V);
751 }
752
753 /// Transparently provide more efficient getOperand methods.
754 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
755
756 /// Returns the ordering constraint of this rmw instruction.
757 AtomicOrdering getOrdering() const {
758 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
759 }
760
761 /// Sets the ordering constraint of this rmw instruction.
762 void setOrdering(AtomicOrdering Ordering) {
763 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 764, __extension__ __PRETTY_FUNCTION__))
764 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 764, __extension__ __PRETTY_FUNCTION__))
;
765 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
766 ((unsigned)Ordering << 2));
767 }
768
769 /// Returns the synchronization scope ID of this rmw instruction.
770 SyncScope::ID getSyncScopeID() const {
771 return SSID;
772 }
773
774 /// Sets the synchronization scope ID of this rmw instruction.
775 void setSyncScopeID(SyncScope::ID SSID) {
776 this->SSID = SSID;
777 }
778
779 Value *getPointerOperand() { return getOperand(0); }
780 const Value *getPointerOperand() const { return getOperand(0); }
781 static unsigned getPointerOperandIndex() { return 0U; }
782
783 Value *getValOperand() { return getOperand(1); }
784 const Value *getValOperand() const { return getOperand(1); }
785
786 /// Returns the address space of the pointer operand.
787 unsigned getPointerAddressSpace() const {
788 return getPointerOperand()->getType()->getPointerAddressSpace();
789 }
790
791 // Methods for support type inquiry through isa, cast, and dyn_cast:
792 static bool classof(const Instruction *I) {
793 return I->getOpcode() == Instruction::AtomicRMW;
794 }
795 static bool classof(const Value *V) {
796 return isa<Instruction>(V) && classof(cast<Instruction>(V));
797 }
798
799private:
800 void Init(BinOp Operation, Value *Ptr, Value *Val,
801 AtomicOrdering Ordering, SyncScope::ID SSID);
802
803 // Shadow Instruction::setInstructionSubclassData with a private forwarding
804 // method so that subclasses cannot accidentally use it.
805 void setInstructionSubclassData(unsigned short D) {
806 Instruction::setInstructionSubclassData(D);
807 }
808
809 /// The synchronization scope ID of this rmw instruction. Not quite enough
810 /// room in SubClassData for everything, so synchronization scope ID gets its
811 /// own field.
812 SyncScope::ID SSID;
813};
814
815template <>
816struct OperandTraits<AtomicRMWInst>
817 : public FixedNumOperandTraits<AtomicRMWInst,2> {
818};
819
// Out-of-line bodies for AtomicRMWInst's transparent operand accessors
// (the expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst,
// Value)).
AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() {
  return OperandTraits<AtomicRMWInst>::op_begin(this);
}
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_begin() const {
  return OperandTraits<AtomicRMWInst>::op_begin(
      const_cast<AtomicRMWInst *>(this));
}
AtomicRMWInst::op_iterator AtomicRMWInst::op_end() {
  return OperandTraits<AtomicRMWInst>::op_end(this);
}
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const {
  return OperandTraits<AtomicRMWInst>::op_end(
      const_cast<AtomicRMWInst *>(this));
}
Value *AtomicRMWInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<AtomicRMWInst>::op_begin(
          const_cast<AtomicRMWInst *>(this))[i_nocapture].get());
}
void AtomicRMWInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<AtomicRMWInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned AtomicRMWInst::getNumOperands() const {
  return OperandTraits<AtomicRMWInst>::operands(this);
}
template <int Idx_nocapture> Use &AtomicRMWInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &AtomicRMWInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
821
822//===----------------------------------------------------------------------===//
823// GetElementPtrInst Class
824//===----------------------------------------------------------------------===//
825
826// checkGEPType - Simple wrapper function to give a better assertion failure
827// message on bad indexes for a gep instruction.
828//
829inline Type *checkGEPType(Type *Ty) {
830 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 830, __extension__ __PRETTY_FUNCTION__))
;
831 return Ty;
832}
833
834/// an instruction for type-safe pointer arithmetic to
835/// access elements of arrays and structs
836///
837class GetElementPtrInst : public Instruction {
838 Type *SourceElementType;
839 Type *ResultElementType;
840
841 GetElementPtrInst(const GetElementPtrInst &GEPI);
842
843 /// Constructors - Create a getelementptr instruction with a base pointer an
844 /// list of indices. The first ctor can optionally insert before an existing
845 /// instruction, the second appends the new instruction to the specified
846 /// BasicBlock.
847 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
848 ArrayRef<Value *> IdxList, unsigned Values,
849 const Twine &NameStr, Instruction *InsertBefore);
850 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
851 ArrayRef<Value *> IdxList, unsigned Values,
852 const Twine &NameStr, BasicBlock *InsertAtEnd);
853
854 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
855
856protected:
857 // Note: Instruction needs to be a friend here to call cloneImpl.
858 friend class Instruction;
859
860 GetElementPtrInst *cloneImpl() const;
861
862public:
863 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
864 ArrayRef<Value *> IdxList,
865 const Twine &NameStr = "",
866 Instruction *InsertBefore = nullptr) {
867 unsigned Values = 1 + unsigned(IdxList.size());
868 if (!PointeeType)
869 PointeeType =
870 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
871 else
872 assert((static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 874, __extension__ __PRETTY_FUNCTION__))
873 PointeeType ==(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 874, __extension__ __PRETTY_FUNCTION__))
874 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 874, __extension__ __PRETTY_FUNCTION__))
;
875 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
876 NameStr, InsertBefore);
877 }
878
879 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
880 ArrayRef<Value *> IdxList,
881 const Twine &NameStr,
882 BasicBlock *InsertAtEnd) {
883 unsigned Values = 1 + unsigned(IdxList.size());
884 if (!PointeeType)
885 PointeeType =
886 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
887 else
888 assert((static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 890, __extension__ __PRETTY_FUNCTION__))
889 PointeeType ==(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 890, __extension__ __PRETTY_FUNCTION__))
890 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 890, __extension__ __PRETTY_FUNCTION__))
;
891 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
892 NameStr, InsertAtEnd);
893 }
894
895 /// Create an "inbounds" getelementptr. See the documentation for the
896 /// "inbounds" flag in LangRef.html for details.
897 static GetElementPtrInst *CreateInBounds(Value *Ptr,
898 ArrayRef<Value *> IdxList,
899 const Twine &NameStr = "",
900 Instruction *InsertBefore = nullptr){
901 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
902 }
903
904 static GetElementPtrInst *
905 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
906 const Twine &NameStr = "",
907 Instruction *InsertBefore = nullptr) {
908 GetElementPtrInst *GEP =
909 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
910 GEP->setIsInBounds(true);
911 return GEP;
912 }
913
914 static GetElementPtrInst *CreateInBounds(Value *Ptr,
915 ArrayRef<Value *> IdxList,
916 const Twine &NameStr,
917 BasicBlock *InsertAtEnd) {
918 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
919 }
920
921 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
922 ArrayRef<Value *> IdxList,
923 const Twine &NameStr,
924 BasicBlock *InsertAtEnd) {
925 GetElementPtrInst *GEP =
926 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
927 GEP->setIsInBounds(true);
928 return GEP;
929 }
930
931 /// Transparently provide more efficient getOperand methods.
932 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
933
934 Type *getSourceElementType() const { return SourceElementType; }
935
936 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
937 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
938
939 Type *getResultElementType() const {
940 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 941, __extension__ __PRETTY_FUNCTION__))
941 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 941, __extension__ __PRETTY_FUNCTION__))
;
942 return ResultElementType;
943 }
944
945 /// Returns the address space of this instruction's pointer type.
946 unsigned getAddressSpace() const {
947 // Note that this is always the same as the pointer operand's address space
948 // and that is cheaper to compute, so cheat here.
949 return getPointerAddressSpace();
950 }
951
952 /// Returns the type of the element that would be loaded with
953 /// a load instruction with the specified parameters.
954 ///
955 /// Null is returned if the indices are invalid for the specified
956 /// pointer type.
957 ///
958 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
959 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
960 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
961
962 inline op_iterator idx_begin() { return op_begin()+1; }
963 inline const_op_iterator idx_begin() const { return op_begin()+1; }
964 inline op_iterator idx_end() { return op_end(); }
965 inline const_op_iterator idx_end() const { return op_end(); }
966
967 inline iterator_range<op_iterator> indices() {
968 return make_range(idx_begin(), idx_end());
969 }
970
971 inline iterator_range<const_op_iterator> indices() const {
972 return make_range(idx_begin(), idx_end());
973 }
974
975 Value *getPointerOperand() {
976 return getOperand(0);
977 }
978 const Value *getPointerOperand() const {
979 return getOperand(0);
980 }
981 static unsigned getPointerOperandIndex() {
982 return 0U; // get index for modifying correct operand.
983 }
984
985 /// Method to return the pointer operand as a
986 /// PointerType.
987 Type *getPointerOperandType() const {
988 return getPointerOperand()->getType();
989 }
990
991 /// Returns the address space of the pointer operand.
992 unsigned getPointerAddressSpace() const {
993 return getPointerOperandType()->getPointerAddressSpace();
994 }
995
996 /// Returns the pointer type returned by the GEP
997 /// instruction, which may be a vector of pointers.
998 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
999 return getGEPReturnType(
1000 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
1001 Ptr, IdxList);
1002 }
1003 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1004 ArrayRef<Value *> IdxList) {
1005 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1006 Ptr->getType()->getPointerAddressSpace());
1007 // Vector GEP
1008 if (Ptr->getType()->isVectorTy()) {
1009 unsigned NumElem = Ptr->getType()->getVectorNumElements();
1010 return VectorType::get(PtrTy, NumElem);
1011 }
1012 for (Value *Index : IdxList)
1013 if (Index->getType()->isVectorTy()) {
1014 unsigned NumElem = Index->getType()->getVectorNumElements();
1015 return VectorType::get(PtrTy, NumElem);
1016 }
1017 // Scalar GEP
1018 return PtrTy;
1019 }
1020
1021 unsigned getNumIndices() const { // Note: always non-negative
1022 return getNumOperands() - 1;
1023 }
1024
1025 bool hasIndices() const {
1026 return getNumOperands() > 1;
1027 }
1028
1029 /// Return true if all of the indices of this GEP are
1030 /// zeros. If so, the result pointer and the first operand have the same
1031 /// value, just potentially different types.
1032 bool hasAllZeroIndices() const;
1033
1034 /// Return true if all of the indices of this GEP are
1035 /// constant integers. If so, the result pointer and the first operand have
1036 /// a constant offset between them.
1037 bool hasAllConstantIndices() const;
1038
1039 /// Set or clear the inbounds flag on this GEP instruction.
1040 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1041 void setIsInBounds(bool b = true);
1042
1043 /// Determine whether the GEP has the inbounds flag.
1044 bool isInBounds() const;
1045
1046 /// Accumulate the constant address offset of this GEP if possible.
1047 ///
1048 /// This routine accepts an APInt into which it will accumulate the constant
1049 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1050 /// all-constant, it returns false and the value of the offset APInt is
1051 /// undefined (it is *not* preserved!). The APInt passed into this routine
1052 /// must be at least as wide as the IntPtr type for the address space of
1053 /// the base GEP pointer.
1054 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1055
1056 // Methods for support type inquiry through isa, cast, and dyn_cast:
// isa/cast/dyn_cast support: an Instruction is a GetElementPtrInst iff its
// opcode says so.
1057 static bool classof(const Instruction *I) {
1058 return (I->getOpcode() == Instruction::GetElementPtr);
1059 }
// Value-level variant: must first be an Instruction, then defer to the
// Instruction overload above.
1060 static bool classof(const Value *V) {
1061 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1062 }
1063};
1064
// GEPs have a variadic operand list with a minimum of 1 operand (the base
// pointer); indices follow. This trait powers the op_begin/op_end accessors
// generated below.
1065template <>
1066struct OperandTraits<GetElementPtrInst> :
1067 public VariadicOperandTraits<GetElementPtrInst, 1> {
1068};
1069
// Insert-before-instruction constructor. Operands live immediately before
// the object (hung-off use list), hence the op_end(this) - Values base.
// NOTE(review): the assert below is shown with its macro expansion fused
// onto the source lines by the analyzer's annotated-source renderer; it
// checks ResultElementType matches the pointee of the computed return type.
1070GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1071 ArrayRef<Value *> IdxList, unsigned Values,
1072 const Twine &NameStr,
1073 Instruction *InsertBefore)
1074 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1075 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1076 Values, InsertBefore),
1077 SourceElementType(PointeeType),
1078 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1079 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
 (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1080, __extension__ __PRETTY_FUNCTION__))
1080 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
 (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1080, __extension__ __PRETTY_FUNCTION__))
;
1081 init(Ptr, IdxList, NameStr);
1082}
1083
// Insert-at-block-end constructor; identical to the insert-before overload
// except for the insertion point. The fused text after the assert is the
// analyzer's rendering of the macro expansion, not extra code.
1084GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1085 ArrayRef<Value *> IdxList, unsigned Values,
1086 const Twine &NameStr,
1087 BasicBlock *InsertAtEnd)
1088 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1089 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1090 Values, InsertAtEnd),
1091 SourceElementType(PointeeType),
1092 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1093 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
 (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1094, __extension__ __PRETTY_FUNCTION__))
1094 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
 (0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1094, __extension__ __PRETTY_FUNCTION__))
;
1095 init(Ptr, IdxList, NameStr);
1096}
1097
// Macro invocation (original line 1098) shown with its full expansion by the
// analyzer renderer: it defines out-of-line op_begin/op_end iterators,
// range-checked getOperand/setOperand, getNumOperands, and the Op<N>()
// accessors for GetElementPtrInst in terms of OperandTraits above.
1098DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
 return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1098, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<GetElementPtrInst>::op_begin
(const_cast<GetElementPtrInst*>(this))[i_nocapture].get
()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1098, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
GetElementPtrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned GetElementPtrInst::getNumOperands() const { return
OperandTraits<GetElementPtrInst>::operands(this); } template
<int Idx_nocapture> Use &GetElementPtrInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &GetElementPtrInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1099
1100//===----------------------------------------------------------------------===//
1101// ICmpInst Class
1102//===----------------------------------------------------------------------===//
1103
1104/// This instruction compares its operands according to the predicate given
1105/// to the constructor. It only operates on integers or pointers. The operands
1106/// must be identical types.
1107/// Represent an integer comparison operator.
// Integer/pointer comparison instruction. Invariants checked by AssertOK
// (NDEBUG-only, called from every constructor): the predicate is an integer
// predicate, and both operands share an int/int-vector or ptr/ptr-vector
// type. The asserts below appear with their macro expansions fused onto the
// lines by the analyzer's annotated-source renderer.
1108class ICmpInst: public CmpInst {
1109 void AssertOK() {
1110 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1111, __extension__ __PRETTY_FUNCTION__))
1111 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1111, __extension__ __PRETTY_FUNCTION__))
;
1112 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1113, __extension__ __PRETTY_FUNCTION__))
1113 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1113, __extension__ __PRETTY_FUNCTION__))
;
1114 // Check that the operands are the right type
1115 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
 "Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
 ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
1116 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
 "Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
 ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
1117 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
 "Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
 ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
;
1118 }
1119
1120protected:
1121 // Note: Instruction needs to be a friend here to call cloneImpl.
1122 friend class Instruction;
1123
1124 /// Clone an identical ICmpInst
1125 ICmpInst *cloneImpl() const;
1126
1127public:
1128 /// Constructor with insert-before-instruction semantics.
1129 ICmpInst(
1130 Instruction *InsertBefore, ///< Where to insert
1131 Predicate pred, ///< The predicate to use for the comparison
1132 Value *LHS, ///< The left-hand-side of the expression
1133 Value *RHS, ///< The right-hand-side of the expression
1134 const Twine &NameStr = "" ///< Name of the instruction
1135 ) : CmpInst(makeCmpResultType(LHS->getType()),
1136 Instruction::ICmp, pred, LHS, RHS, NameStr,
1137 InsertBefore) {
1138#ifndef NDEBUG
1139 AssertOK();
1140#endif
1141 }
1142
1143 /// Constructor with insert-at-end semantics.
1144 ICmpInst(
1145 BasicBlock &InsertAtEnd, ///< Block to insert into.
1146 Predicate pred, ///< The predicate to use for the comparison
1147 Value *LHS, ///< The left-hand-side of the expression
1148 Value *RHS, ///< The right-hand-side of the expression
1149 const Twine &NameStr = "" ///< Name of the instruction
1150 ) : CmpInst(makeCmpResultType(LHS->getType()),
1151 Instruction::ICmp, pred, LHS, RHS, NameStr,
1152 &InsertAtEnd) {
1153#ifndef NDEBUG
1154 AssertOK();
1155#endif
1156 }
1157
1158 /// Constructor with no-insertion semantics
1159 ICmpInst(
1160 Predicate pred, ///< The predicate to use for the comparison
1161 Value *LHS, ///< The left-hand-side of the expression
1162 Value *RHS, ///< The right-hand-side of the expression
1163 const Twine &NameStr = "" ///< Name of the instruction
1164 ) : CmpInst(makeCmpResultType(LHS->getType()),
1165 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1166#ifndef NDEBUG
1167 AssertOK();
1168#endif
1169 }
1170
1171 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1172 /// @returns the predicate that would be the result if the operand were
1173 /// regarded as signed.
1174 /// Return the signed version of the predicate
1175 Predicate getSignedPredicate() const {
1176 return getSignedPredicate(getPredicate());
1177 }
1178
1179 /// This is a static version that you can use without an instruction.
1180 /// Return the signed version of the predicate.
1181 static Predicate getSignedPredicate(Predicate pred);
1182
1183 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1184 /// @returns the predicate that would be the result if the operand were
1185 /// regarded as unsigned.
1186 /// Return the unsigned version of the predicate
1187 Predicate getUnsignedPredicate() const {
1188 return getUnsignedPredicate(getPredicate());
1189 }
1190
1191 /// This is a static version that you can use without an instruction.
1192 /// Return the unsigned version of the predicate.
1193 static Predicate getUnsignedPredicate(Predicate pred);
1194
1195 /// Return true if this predicate is either EQ or NE. This also
1196 /// tests for commutativity.
1197 static bool isEquality(Predicate P) {
1198 return P == ICMP_EQ || P == ICMP_NE;
1199 }
1200
1201 /// Return true if this predicate is either EQ or NE. This also
1202 /// tests for commutativity.
1203 bool isEquality() const {
1204 return isEquality(getPredicate());
1205 }
1206
1207 /// @returns true if the predicate of this ICmpInst is commutative
1208 /// Determine if this relation is commutative.
1209 bool isCommutative() const { return isEquality(); }
1210
1211 /// Return true if the predicate is relational (not EQ or NE).
1212 ///
1213 bool isRelational() const {
1214 return !isEquality();
1215 }
1216
1217 /// Return true if the predicate is relational (not EQ or NE).
1218 ///
1219 static bool isRelational(Predicate P) {
1220 return !isEquality(P);
1221 }
1222
1223 /// Exchange the two operands to this instruction in such a way that it does
1224 /// not modify the semantics of the instruction. The predicate value may be
1225 /// changed to retain the same result if the predicate is order dependent
1226 /// (e.g. ult).
1227 /// Swap operands and adjust predicate.
1228 void swapOperands() {
1229 setPredicate(getSwappedPredicate());
1230 Op<0>().swap(Op<1>());
1231 }
1232
1233 // Methods for support type inquiry through isa, cast, and dyn_cast:
1234 static bool classof(const Instruction *I) {
1235 return I->getOpcode() == Instruction::ICmp;
1236 }
1237 static bool classof(const Value *V) {
1238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1239 }
1240};
1241
1242//===----------------------------------------------------------------------===//
1243// FCmpInst Class
1244//===----------------------------------------------------------------------===//
1245
1246/// This instruction compares its operands according to the predicate given
1247/// to the constructor. It only operates on floating point values or packed
1248/// vectors of floating point values. The operands must be identical types.
1249/// Represents a floating point comparison operator.
// Floating-point comparison instruction. AssertOK (called unconditionally
// from all three constructors, unlike ICmpInst's NDEBUG-guarded calls)
// checks: FP predicate, identical operand types, FP or FP-vector operands.
// The asserts appear with their macro expansions fused onto the lines by the
// analyzer's annotated-source renderer.
1250class FCmpInst: public CmpInst {
1251 void AssertOK() {
1252 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1252, __extension__ __PRETTY_FUNCTION__))
;
1253 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1254, __extension__ __PRETTY_FUNCTION__))
1254 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1254, __extension__ __PRETTY_FUNCTION__))
;
1255 // Check that the operands are the right type
1256 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
 void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1257, __extension__ __PRETTY_FUNCTION__))
1257 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
 void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1257, __extension__ __PRETTY_FUNCTION__))
;
1258 }
1259
1260protected:
1261 // Note: Instruction needs to be a friend here to call cloneImpl.
1262 friend class Instruction;
1263
1264 /// Clone an identical FCmpInst
1265 FCmpInst *cloneImpl() const;
1266
1267public:
1268 /// Constructor with insert-before-instruction semantics.
1269 FCmpInst(
1270 Instruction *InsertBefore, ///< Where to insert
1271 Predicate pred, ///< The predicate to use for the comparison
1272 Value *LHS, ///< The left-hand-side of the expression
1273 Value *RHS, ///< The right-hand-side of the expression
1274 const Twine &NameStr = "" ///< Name of the instruction
1275 ) : CmpInst(makeCmpResultType(LHS->getType()),
1276 Instruction::FCmp, pred, LHS, RHS, NameStr,
1277 InsertBefore) {
1278 AssertOK();
1279 }
1280
1281 /// Constructor with insert-at-end semantics.
1282 FCmpInst(
1283 BasicBlock &InsertAtEnd, ///< Block to insert into.
1284 Predicate pred, ///< The predicate to use for the comparison
1285 Value *LHS, ///< The left-hand-side of the expression
1286 Value *RHS, ///< The right-hand-side of the expression
1287 const Twine &NameStr = "" ///< Name of the instruction
1288 ) : CmpInst(makeCmpResultType(LHS->getType()),
1289 Instruction::FCmp, pred, LHS, RHS, NameStr,
1290 &InsertAtEnd) {
1291 AssertOK();
1292 }
1293
1294 /// Constructor with no-insertion semantics
1295 FCmpInst(
1296 Predicate pred, ///< The predicate to use for the comparison
1297 Value *LHS, ///< The left-hand-side of the expression
1298 Value *RHS, ///< The right-hand-side of the expression
1299 const Twine &NameStr = "" ///< Name of the instruction
1300 ) : CmpInst(makeCmpResultType(LHS->getType()),
1301 Instruction::FCmp, pred, LHS, RHS, NameStr) {
1302 AssertOK();
1303 }
1304
1305 /// @returns true if the predicate of this instruction is EQ or NE.
1306 /// Determine if this is an equality predicate.
1307 static bool isEquality(Predicate Pred) {
1308 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1309 Pred == FCMP_UNE;
1310 }
1311
1312 /// @returns true if the predicate of this instruction is EQ or NE.
1313 /// Determine if this is an equality predicate.
1314 bool isEquality() const { return isEquality(getPredicate()); }
1315
1316 /// @returns true if the predicate of this instruction is commutative.
1317 /// Determine if this is a commutative predicate.
1318 bool isCommutative() const {
1319 return isEquality() ||
1320 getPredicate() == FCMP_FALSE ||
1321 getPredicate() == FCMP_TRUE ||
1322 getPredicate() == FCMP_ORD ||
1323 getPredicate() == FCMP_UNO;
1324 }
1325
1326 /// @returns true if the predicate is relational (not EQ or NE).
1327 /// Determine if this a relational predicate.
1328 bool isRelational() const { return !isEquality(); }
1329
1330 /// Exchange the two operands to this instruction in such a way that it does
1331 /// not modify the semantics of the instruction. The predicate value may be
1332 /// changed to retain the same result if the predicate is order dependent
1333 /// (e.g. ult).
1334 /// Swap operands and adjust predicate.
1335 void swapOperands() {
1336 setPredicate(getSwappedPredicate());
1337 Op<0>().swap(Op<1>());
1338 }
1339
1340 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1341 static bool classof(const Instruction *I) {
1342 return I->getOpcode() == Instruction::FCmp;
1343 }
1344 static bool classof(const Value *V) {
1345 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1346 }
1347};
1348
1349//===----------------------------------------------------------------------===//
1350/// This class represents a function call, abstracting a target
1351/// machine's calling convention. This class uses low bit of the SubClassData
1352/// field to indicate whether or not this is a tail call. The rest of the bits
1353/// hold the calling convention of the call.
1354///
1355class CallInst : public Instruction,
1356 public OperandBundleUser<CallInst, User::op_iterator> {
1357 friend class OperandBundleUser<CallInst, User::op_iterator>;
1358
1359 AttributeList Attrs; ///< parameter attributes for call
1360 FunctionType *FTy;
1361
// Private constructors and init helpers. The untyped (Value *Func) forms
// recover the FunctionType from Func's pointer type (typed-pointer era) and
// delegate to the FunctionType-taking forms; public creation goes through
// the static Create() factories below, which size the operand allocation.
1362 CallInst(const CallInst &CI);
1363
1364 /// Construct a CallInst given a range of arguments.
1365 /// Construct a CallInst from a range of arguments
1366 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1367 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1368 Instruction *InsertBefore);
1369
1370 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1371 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1372 Instruction *InsertBefore)
1373 : CallInst(cast<FunctionType>(
1374 cast<PointerType>(Func->getType())->getElementType()),
1375 Func, Args, Bundles, NameStr, InsertBefore) {}
1376
1377 inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
1378 Instruction *InsertBefore)
1379 : CallInst(Func, Args, None, NameStr, InsertBefore) {}
1380
1381 /// Construct a CallInst given a range of arguments.
1382 /// Construct a CallInst from a range of arguments
1383 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1384 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1385 BasicBlock *InsertAtEnd);
1386
1387 explicit CallInst(Value *F, const Twine &NameStr,
1388 Instruction *InsertBefore);
1389
1390 CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
1391
1392 void init(Value *Func, ArrayRef<Value *> Args,
1393 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
1394 init(cast<FunctionType>(
1395 cast<PointerType>(Func->getType())->getElementType()),
1396 Func, Args, Bundles, NameStr);
1397 }
1398 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1399 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1400 void init(Value *Func, const Twine &NameStr);
1401
// HasDescriptor is a base-class flag (declared outside this view); true when
// operand-bundle descriptor bytes were co-allocated — TODO confirm against
// OperandBundleUser.
1402 bool hasDescriptor() const { return HasDescriptor; }
1403
1404protected:
1405 // Note: Instruction needs to be a friend here to call cloneImpl.
1406 friend class Instruction;
1407
1408 CallInst *cloneImpl() const;
1409
1410public:
// Factory overload set. Operand storage is co-allocated via placement new:
// the size argument is args + bundle inputs + 1 (the callee operand), and
// DescriptorBytes reserves space for per-bundle BundleOpInfo records.
// Overloads taking only a Value* callee derive the FunctionType from the
// callee's pointer type.
1411 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1412 ArrayRef<OperandBundleDef> Bundles = None,
1413 const Twine &NameStr = "",
1414 Instruction *InsertBefore = nullptr) {
1415 return Create(cast<FunctionType>(
1416 cast<PointerType>(Func->getType())->getElementType()),
1417 Func, Args, Bundles, NameStr, InsertBefore);
1418 }
1419
1420 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1421 const Twine &NameStr,
1422 Instruction *InsertBefore = nullptr) {
1423 return Create(cast<FunctionType>(
1424 cast<PointerType>(Func->getType())->getElementType()),
1425 Func, Args, None, NameStr, InsertBefore);
1426 }
1427
1428 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1429 const Twine &NameStr,
1430 Instruction *InsertBefore = nullptr) {
1431 return new (unsigned(Args.size() + 1))
1432 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1433 }
1434
1435 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1436 ArrayRef<OperandBundleDef> Bundles = None,
1437 const Twine &NameStr = "",
1438 Instruction *InsertBefore = nullptr) {
1439 const unsigned TotalOps =
1440 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1441 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1442
1443 return new (TotalOps, DescriptorBytes)
1444 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1445 }
1446
1447 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1448 ArrayRef<OperandBundleDef> Bundles,
1449 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1450 const unsigned TotalOps =
1451 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1452 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1453
1454 return new (TotalOps, DescriptorBytes)
1455 CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
1456 }
1457
1458 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1459 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1460 return new (unsigned(Args.size() + 1))
1461 CallInst(Func, Args, None, NameStr, InsertAtEnd);
1462 }
1463
1464 static CallInst *Create(Value *F, const Twine &NameStr = "",
1465 Instruction *InsertBefore = nullptr) {
1466 return new(1) CallInst(F, NameStr, InsertBefore);
1467 }
1468
1469 static CallInst *Create(Value *F, const Twine &NameStr,
1470 BasicBlock *InsertAtEnd) {
1471 return new(1) CallInst(F, NameStr, InsertAtEnd);
1472 }
1473
1474 /// Create a clone of \p CI with a different set of operand bundles and
1475 /// insert it before \p InsertPt.
1476 ///
1477 /// The returned call instruction is identical \p CI in every way except that
1478 /// the operand bundles for the new instruction are set to the operand bundles
1479 /// in \p Bundles.
1480 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1481 Instruction *InsertPt = nullptr);
1482
1483 /// Generate the IR for a call to malloc:
1484 /// 1. Compute the malloc call's argument as the specified type's size,
1485 /// possibly multiplied by the array size if the array size is not
1486 /// constant 1.
1487 /// 2. Call malloc with that argument.
1488 /// 3. Bitcast the result of the malloc call to the specified type.
1489 static Instruction *CreateMalloc(Instruction *InsertBefore,
1490 Type *IntPtrTy, Type *AllocTy,
1491 Value *AllocSize, Value *ArraySize = nullptr,
1492 Function* MallocF = nullptr,
1493 const Twine &Name = "");
1494 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
1495 Type *IntPtrTy, Type *AllocTy,
1496 Value *AllocSize, Value *ArraySize = nullptr,
1497 Function* MallocF = nullptr,
1498 const Twine &Name = "");
1499 static Instruction *CreateMalloc(Instruction *InsertBefore,
1500 Type *IntPtrTy, Type *AllocTy,
1501 Value *AllocSize, Value *ArraySize = nullptr,
1502 ArrayRef<OperandBundleDef> Bundles = None,
1503 Function* MallocF = nullptr,
1504 const Twine &Name = "");
1505 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
1506 Type *IntPtrTy, Type *AllocTy,
1507 Value *AllocSize, Value *ArraySize = nullptr,
1508 ArrayRef<OperandBundleDef> Bundles = None,
1509 Function* MallocF = nullptr,
1510 const Twine &Name = "");
1511 /// Generate the IR for a call to the builtin free function.
1512 static Instruction *CreateFree(Value *Source,
1513 Instruction *InsertBefore);
1514 static Instruction *CreateFree(Value *Source,
1515 BasicBlock *InsertAtEnd);
1516 static Instruction *CreateFree(Value *Source,
1517 ArrayRef<OperandBundleDef> Bundles,
1518 Instruction *InsertBefore);
1519 static Instruction *CreateFree(Value *Source,
1520 ArrayRef<OperandBundleDef> Bundles,
1521 BasicBlock *InsertAtEnd);
1522
// The callee signature is cached in FTy; mutateFunctionType keeps the
// instruction's own result type in sync with the new signature's return
// type before swapping the cache.
1523 FunctionType *getFunctionType() const { return FTy; }
1524
1525 void mutateFunctionType(FunctionType *FTy) {
1526 mutateType(FTy->getReturnType());
1527 this->FTy = FTy;
1528 }
1529
// Tail-call kind is packed into the low 2 bits of the instruction subclass
// data; the calling convention occupies the bits above (see
// getCallingConv/setCallingConv below, which shift by 2).
1530 // Note that 'musttail' implies 'tail'.
1531 enum TailCallKind { TCK_None = 0, TCK_Tail = 1, TCK_MustTail = 2,
1532 TCK_NoTail = 3 };
1533 TailCallKind getTailCallKind() const {
1534 return TailCallKind(getSubclassDataFromInstruction() & 3);
1535 }
1536
1537 bool isTailCall() const {
1538 unsigned Kind = getSubclassDataFromInstruction() & 3;
1539 return Kind == TCK_Tail || Kind == TCK_MustTail;
1540 }
1541
1542 bool isMustTailCall() const {
1543 return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
1544 }
1545
1546 bool isNoTailCall() const {
1547 return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
1548 }
1549
1550 void setTailCall(bool isTC = true) {
1551 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
1552 unsigned(isTC ? TCK_Tail : TCK_None));
1553 }
1554
1555 void setTailCallKind(TailCallKind TCK) {
1556 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
1557 unsigned(TCK));
1558 }
1559
// Macro invocation (original line 1561) shown expanded by the analyzer
// renderer: declares getOperand/setOperand, op_begin/op_end iterators,
// Op<N>() and getNumOperands; definitions come from the matching
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS elsewhere.
1560 /// Provide fast operand accessors
1561 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1562
// Operand layout is [ call args ][ operand bundle inputs ][ callee ], so the
// argument count excludes bundle operands and the trailing callee operand.
// The asserts below appear with their macro expansions fused onto the lines
// by the analyzer's annotated-source renderer.
1563 /// Return the number of call arguments.
1564 ///
1565 unsigned getNumArgOperands() const {
1566 return getNumOperands() - getNumTotalBundleOperands() - 1;
1567 }
1568
1569 /// getArgOperand/setArgOperand - Return/set the i-th call argument.
1570 ///
1571 Value *getArgOperand(unsigned i) const {
1572 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1572, __extension__ __PRETTY_FUNCTION__))
;
1573 return getOperand(i);
1574 }
1575 void setArgOperand(unsigned i, Value *v) {
1576 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1576, __extension__ __PRETTY_FUNCTION__))
;
1577 setOperand(i, v);
1578 }
1579
// Argument iterators: begin at operand 0; end excludes the bundle operands
// and the final callee operand, per the operand layout noted in the end()
// comments.
1580 /// Return the iterator pointing to the beginning of the argument list.
1581 op_iterator arg_begin() { return op_begin(); }
1582
1583 /// Return the iterator pointing to the end of the argument list.
1584 op_iterator arg_end() {
1585 // [ call args ], [ operand bundles ], callee
1586 return op_end() - getNumTotalBundleOperands() - 1;
1587 }
1588
1589 /// Iteration adapter for range-for loops.
1590 iterator_range<op_iterator> arg_operands() {
1591 return make_range(arg_begin(), arg_end());
1592 }
1593
1594 /// Return the iterator pointing to the beginning of the argument list.
1595 const_op_iterator arg_begin() const { return op_begin(); }
1596
1597 /// Return the iterator pointing to the end of the argument list.
1598 const_op_iterator arg_end() const {
1599 // [ call args ], [ operand bundles ], callee
1600 return op_end() - getNumTotalBundleOperands() - 1;
1601 }
1602
1603 /// Iteration adapter for range-for loops.
1604 iterator_range<const_op_iterator> arg_operands() const {
1605 return make_range(arg_begin(), arg_end());
1606 }
1607
// Use-level access to an argument (for rewriting the def-use edge rather
// than just reading the Value). Asserts are shown with fused macro
// expansions by the analyzer renderer.
1608 /// Wrappers for getting the \c Use of a call argument.
1609 const Use &getArgOperandUse(unsigned i) const {
1610 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1610, __extension__ __PRETTY_FUNCTION__))
;
1611 return getOperandUse(i);
1612 }
1613 Use &getArgOperandUse(unsigned i) {
1614 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1614, __extension__ __PRETTY_FUNCTION__))
;
1615 return getOperandUse(i);
1616 }
1617
1618 /// If one of the arguments has the 'returned' attribute, return its
1619 /// operand value. Otherwise, return nullptr.
1620 Value *getReturnedArgOperand() const;
1621
1622 /// getCallingConv/setCallingConv - Get or set the calling convention of this
1623 /// function call.
1624 CallingConv::ID getCallingConv() const {
1625 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
1626 }
1627 void setCallingConv(CallingConv::ID CC) {
1628 auto ID = static_cast<unsigned>(CC);
1629 assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention")(static_cast <bool> (!(ID & ~CallingConv::MaxID) &&
"Unsupported calling convention") ? void (0) : __assert_fail
("!(ID & ~CallingConv::MaxID) && \"Unsupported calling convention\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1629, __extension__ __PRETTY_FUNCTION__))
;
1630 setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
1631 (ID << 2));
1632 }
1633
1634 /// Return the parameter attributes for this call.
1635 ///
1636 AttributeList getAttributes() const { return Attrs; }
1637
1638 /// Set the parameter attributes for this call.
1639 ///
1640 void setAttributes(AttributeList A) { Attrs = A; }
1641
1642 /// adds the attribute to the list of attributes.
1643 void addAttribute(unsigned i, Attribute::AttrKind Kind);
1644
1645 /// adds the attribute to the list of attributes.
1646 void addAttribute(unsigned i, Attribute Attr);
1647
1648 /// Adds the attribute to the indicated argument
1649 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
1650
1651 /// Adds the attribute to the indicated argument
1652 void addParamAttr(unsigned ArgNo, Attribute Attr);
1653
1654 /// removes the attribute from the list of attributes.
1655 void removeAttribute(unsigned i, Attribute::AttrKind Kind);
1656
1657 /// removes the attribute from the list of attributes.
1658 void removeAttribute(unsigned i, StringRef Kind);
1659
1660 /// Removes the attribute from the given argument
1661 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
1662
1663 /// Removes the attribute from the given argument
1664 void removeParamAttr(unsigned ArgNo, StringRef Kind);
1665
1666 /// adds the dereferenceable attribute to the list of attributes.
1667 void addDereferenceableAttr(unsigned i, uint64_t Bytes);
1668
1669 /// adds the dereferenceable_or_null attribute to the list of
1670 /// attributes.
1671 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
1672
1673 /// Determine whether this call has the given attribute.
1674 bool hasFnAttr(Attribute::AttrKind Kind) const {
1675 assert(Kind != Attribute::NoBuiltin &&(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1676, __extension__ __PRETTY_FUNCTION__))
1676 "Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin")(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1676, __extension__ __PRETTY_FUNCTION__))
;
1677 return hasFnAttrImpl(Kind);
1678 }
1679
1680 /// Determine whether this call has the given attribute.
1681 bool hasFnAttr(StringRef Kind) const {
1682 return hasFnAttrImpl(Kind);
1683 }
1684
1685 /// Determine whether the return value has the given attribute.
1686 bool hasRetAttr(Attribute::AttrKind Kind) const;
1687
1688 /// Determine whether the argument or parameter has the given attribute.
1689 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1690
1691 /// Get the attribute of a given kind at a position.
1692 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1693 return getAttributes().getAttribute(i, Kind);
1694 }
1695
1696 /// Get the attribute of a given kind at a position.
1697 Attribute getAttribute(unsigned i, StringRef Kind) const {
1698 return getAttributes().getAttribute(i, Kind);
1699 }
1700
1701 /// Get the attribute of a given kind from a given arg
1702 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1703 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1703, __extension__ __PRETTY_FUNCTION__))
;
1704 return getAttributes().getParamAttr(ArgNo, Kind);
1705 }
1706
1707 /// Get the attribute of a given kind from a given arg
1708 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1709 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1709, __extension__ __PRETTY_FUNCTION__))
;
1710 return getAttributes().getParamAttr(ArgNo, Kind);
1711 }
1712
1713 /// Return true if the data operand at index \p i has the attribute \p
1714 /// A.
1715 ///
1716 /// Data operands include call arguments and values used in operand bundles,
1717 /// but does not include the callee operand. This routine dispatches to the
1718 /// underlying AttributeList or the OperandBundleUser as appropriate.
1719 ///
1720 /// The index \p i is interpreted as
1721 ///
1722 /// \p i == Attribute::ReturnIndex -> the return value
1723 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1724 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1725 /// (\p i - 1) in the operand list.
1726 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const;
1727
1728 /// Extract the alignment of the return value.
1729 unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
1730
1731 /// Extract the alignment for a call or parameter (0=unknown).
1732 unsigned getParamAlignment(unsigned ArgNo) const {
1733 return Attrs.getParamAlignment(ArgNo);
1734 }
1735
1736 /// Extract the number of dereferenceable bytes for a call or
1737 /// parameter (0=unknown).
1738 uint64_t getDereferenceableBytes(unsigned i) const {
1739 return Attrs.getDereferenceableBytes(i);
1740 }
1741
1742 /// Extract the number of dereferenceable_or_null bytes for a call or
1743 /// parameter (0=unknown).
1744 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1745 return Attrs.getDereferenceableOrNullBytes(i);
1746 }
1747
1748 /// @brief Determine if the return value is marked with NoAlias attribute.
1749 bool returnDoesNotAlias() const {
1750 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1751 }
1752
1753 /// Return true if the call should not be treated as a call to a
1754 /// builtin.
1755 bool isNoBuiltin() const {
1756 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1757 !hasFnAttrImpl(Attribute::Builtin);
1758 }
1759
1760 /// Determine if the call requires strict floating point semantics.
1761 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1762
1763 /// Return true if the call should not be inlined.
1764 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1765 void setIsNoInline() {
1766 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1767 }
1768
1769 /// Return true if the call can return twice
1770 bool canReturnTwice() const {
1771 return hasFnAttr(Attribute::ReturnsTwice);
1772 }
1773 void setCanReturnTwice() {
1774 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1775 }
1776
1777 /// Determine if the call does not access memory.
1778 bool doesNotAccessMemory() const {
1779 return hasFnAttr(Attribute::ReadNone);
1780 }
1781 void setDoesNotAccessMemory() {
1782 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1783 }
1784
1785 /// Determine if the call does not access or only reads memory.
1786 bool onlyReadsMemory() const {
1787 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1788 }
1789 void setOnlyReadsMemory() {
1790 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1791 }
1792
1793 /// Determine if the call does not access or only writes memory.
1794 bool doesNotReadMemory() const {
1795 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1796 }
1797 void setDoesNotReadMemory() {
1798 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1799 }
1800
1801 /// @brief Determine if the call can access memmory only using pointers based
1802 /// on its arguments.
1803 bool onlyAccessesArgMemory() const {
1804 return hasFnAttr(Attribute::ArgMemOnly);
1805 }
1806 void setOnlyAccessesArgMemory() {
1807 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1808 }
1809
1810 /// @brief Determine if the function may only access memory that is
1811 /// inaccessible from the IR.
1812 bool onlyAccessesInaccessibleMemory() const {
1813 return hasFnAttr(Attribute::InaccessibleMemOnly);
1814 }
1815 void setOnlyAccessesInaccessibleMemory() {
1816 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1817 }
1818
1819 /// @brief Determine if the function may only access memory that is
1820 /// either inaccessible from the IR or pointed to by its arguments.
1821 bool onlyAccessesInaccessibleMemOrArgMem() const {
1822 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1823 }
1824 void setOnlyAccessesInaccessibleMemOrArgMem() {
1825 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOrArgMemOnly);
1826 }
1827
1828 /// Determine if the call cannot return.
1829 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1830 void setDoesNotReturn() {
1831 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1832 }
1833
1834 /// Determine if the call cannot unwind.
1835 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1836 void setDoesNotThrow() {
1837 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1838 }
1839
1840 /// Determine if the call cannot be duplicated.
1841 bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
1842 void setCannotDuplicate() {
1843 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1844 }
1845
1846 /// Determine if the call is convergent
1847 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1848 void setConvergent() {
1849 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1850 }
1851 void setNotConvergent() {
1852 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1853 }
1854
1855 /// Determine if the call returns a structure through first
1856 /// pointer argument.
1857 bool hasStructRetAttr() const {
1858 if (getNumArgOperands() == 0)
1859 return false;
1860
1861 // Be friendly and also check the callee.
1862 return paramHasAttr(0, Attribute::StructRet);
1863 }
1864
1865 /// Determine if any call argument is an aggregate passed by value.
1866 bool hasByValArgument() const {
1867 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1868 }
1869
1870 /// Return the function called, or null if this is an
1871 /// indirect function invocation.
1872 ///
1873 Function *getCalledFunction() const {
1874 return dyn_cast<Function>(Op<-1>());
1875 }
1876
1877 /// Get a pointer to the function that is invoked by this
1878 /// instruction.
1879 const Value *getCalledValue() const { return Op<-1>(); }
1880 Value *getCalledValue() { return Op<-1>(); }
1881
1882 /// Set the function called.
1883 void setCalledFunction(Value* Fn) {
1884 setCalledFunction(
1885 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
1886 Fn);
1887 }
1888 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1889 this->FTy = FTy;
1890 assert(FTy == cast<FunctionType>((static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1891, __extension__ __PRETTY_FUNCTION__))
1891 cast<PointerType>(Fn->getType())->getElementType()))(static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1891, __extension__ __PRETTY_FUNCTION__))
;
1892 Op<-1>() = Fn;
1893 }
1894
1895 /// Check if this call is an inline asm statement.
1896 bool isInlineAsm() const {
1897 return isa<InlineAsm>(Op<-1>());
1898 }
1899
1900 // Methods for support type inquiry through isa, cast, and dyn_cast:
1901 static bool classof(const Instruction *I) {
1902 return I->getOpcode() == Instruction::Call;
1903 }
1904 static bool classof(const Value *V) {
1905 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1906 }
1907
1908private:
1909 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
1910 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
1911 return true;
1912
1913 // Operand bundles override attributes on the called function, but don't
1914 // override attributes directly present on the call instruction.
1915 if (isFnAttrDisallowedByOpBundle(Kind))
1916 return false;
1917
1918 if (const Function *F = getCalledFunction())
1919 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
1920 Kind);
1921 return false;
1922 }
1923
1924 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1925 // method so that subclasses cannot accidentally use it.
1926 void setInstructionSubclassData(unsigned short D) {
1927 Instruction::setInstructionSubclassData(D);
1928 }
1929};
1930
1931template <>
1932struct OperandTraits<CallInst> : public VariadicOperandTraits<CallInst, 1> {
1933};
1934
1935CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
1936 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1937 BasicBlock *InsertAtEnd)
1938 : Instruction(
1939 cast<FunctionType>(cast<PointerType>(Func->getType())
1940 ->getElementType())->getReturnType(),
1941 Instruction::Call, OperandTraits<CallInst>::op_end(this) -
1942 (Args.size() + CountBundleInputs(Bundles) + 1),
1943 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
1944 init(Func, Args, Bundles, NameStr);
1945}
1946
1947CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1948 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1949 Instruction *InsertBefore)
1950 : Instruction(Ty->getReturnType(), Instruction::Call,
1951 OperandTraits<CallInst>::op_end(this) -
1952 (Args.size() + CountBundleInputs(Bundles) + 1),
1953 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1954 InsertBefore) {
1955 init(Ty, Func, Args, Bundles, NameStr);
1956}
1957
1958// Note: if you get compile errors about private methods then
1959// please update your code to use the high-level operand
1960// interfaces. See line 943 above.
1961DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value)CallInst::op_iterator CallInst::op_begin() { return OperandTraits
<CallInst>::op_begin(this); } CallInst::const_op_iterator
CallInst::op_begin() const { return OperandTraits<CallInst
>::op_begin(const_cast<CallInst*>(this)); } CallInst
::op_iterator CallInst::op_end() { return OperandTraits<CallInst
>::op_end(this); } CallInst::const_op_iterator CallInst::op_end
() const { return OperandTraits<CallInst>::op_end(const_cast
<CallInst*>(this)); } Value *CallInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<CallInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1961, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<CallInst>::op_begin(const_cast
<CallInst*>(this))[i_nocapture].get()); } void CallInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<CallInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<CallInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1961, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
CallInst>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CallInst::getNumOperands() const { return OperandTraits<CallInst
>::operands(this); } template <int Idx_nocapture> Use
&CallInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
CallInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1962
1963//===----------------------------------------------------------------------===//
1964// SelectInst Class
1965//===----------------------------------------------------------------------===//
1966
1967/// This class represents the LLVM 'select' instruction.
1968///
1969class SelectInst : public Instruction {
1970 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1971 Instruction *InsertBefore)
1972 : Instruction(S1->getType(), Instruction::Select,
1973 &Op<0>(), 3, InsertBefore) {
1974 init(C, S1, S2);
1975 setName(NameStr);
1976 }
1977
1978 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1979 BasicBlock *InsertAtEnd)
1980 : Instruction(S1->getType(), Instruction::Select,
1981 &Op<0>(), 3, InsertAtEnd) {
1982 init(C, S1, S2);
1983 setName(NameStr);
1984 }
1985
1986 void init(Value *C, Value *S1, Value *S2) {
1987 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 1987, __extension__ __PRETTY_FUNCTION__))
;
1988 Op<0>() = C;
1989 Op<1>() = S1;
1990 Op<2>() = S2;
1991 }
1992
1993protected:
1994 // Note: Instruction needs to be a friend here to call cloneImpl.
1995 friend class Instruction;
1996
1997 SelectInst *cloneImpl() const;
1998
1999public:
2000 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2001 const Twine &NameStr = "",
2002 Instruction *InsertBefore = nullptr,
2003 Instruction *MDFrom = nullptr) {
2004 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
2005 if (MDFrom)
2006 Sel->copyMetadata(*MDFrom);
2007 return Sel;
2008 }
2009
2010 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2011 const Twine &NameStr,
2012 BasicBlock *InsertAtEnd) {
2013 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
2014 }
2015
2016 const Value *getCondition() const { return Op<0>(); }
2017 const Value *getTrueValue() const { return Op<1>(); }
2018 const Value *getFalseValue() const { return Op<2>(); }
2019 Value *getCondition() { return Op<0>(); }
2020 Value *getTrueValue() { return Op<1>(); }
2021 Value *getFalseValue() { return Op<2>(); }
2022
2023 void setCondition(Value *V) { Op<0>() = V; }
2024 void setTrueValue(Value *V) { Op<1>() = V; }
2025 void setFalseValue(Value *V) { Op<2>() = V; }
2026
2027 /// Return a string if the specified operands are invalid
2028 /// for a select operation, otherwise return null.
2029 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
2030
2031 /// Transparently provide more efficient getOperand methods.
2032 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2033
2034 OtherOps getOpcode() const {
2035 return static_cast<OtherOps>(Instruction::getOpcode());
2036 }
2037
2038 // Methods for support type inquiry through isa, cast, and dyn_cast:
2039 static bool classof(const Instruction *I) {
2040 return I->getOpcode() == Instruction::Select;
2041 }
2042 static bool classof(const Value *V) {
2043 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2044 }
2045};
2046
2047template <>
2048struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
2049};
2050
2051DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SelectInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2051, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SelectInst>::op_begin(const_cast
<SelectInst*>(this))[i_nocapture].get()); } void SelectInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SelectInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2051, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SelectInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SelectInst::getNumOperands() const { return OperandTraits
<SelectInst>::operands(this); } template <int Idx_nocapture
> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2052
2053//===----------------------------------------------------------------------===//
2054// VAArgInst Class
2055//===----------------------------------------------------------------------===//
2056
2057/// This class represents the va_arg llvm instruction, which returns
2058/// an argument of the specified type given a va_list and increments that list
2059///
2060class VAArgInst : public UnaryInstruction {
2061protected:
2062 // Note: Instruction needs to be a friend here to call cloneImpl.
2063 friend class Instruction;
2064
2065 VAArgInst *cloneImpl() const;
2066
2067public:
2068 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
2069 Instruction *InsertBefore = nullptr)
2070 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
2071 setName(NameStr);
2072 }
2073
2074 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
2075 BasicBlock *InsertAtEnd)
2076 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
2077 setName(NameStr);
2078 }
2079
2080 Value *getPointerOperand() { return getOperand(0); }
2081 const Value *getPointerOperand() const { return getOperand(0); }
2082 static unsigned getPointerOperandIndex() { return 0U; }
2083
2084 // Methods for support type inquiry through isa, cast, and dyn_cast:
2085 static bool classof(const Instruction *I) {
2086 return I->getOpcode() == VAArg;
2087 }
2088 static bool classof(const Value *V) {
2089 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2090 }
2091};
2092
2093//===----------------------------------------------------------------------===//
2094// ExtractElementInst Class
2095//===----------------------------------------------------------------------===//
2096
2097/// This instruction extracts a single (scalar)
2098/// element from a VectorType value
2099///
2100class ExtractElementInst : public Instruction {
2101 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
2102 Instruction *InsertBefore = nullptr);
2103 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
2104 BasicBlock *InsertAtEnd);
2105
2106protected:
2107 // Note: Instruction needs to be a friend here to call cloneImpl.
2108 friend class Instruction;
2109
2110 ExtractElementInst *cloneImpl() const;
2111
2112public:
2113 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2114 const Twine &NameStr = "",
2115 Instruction *InsertBefore = nullptr) {
2116 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
2117 }
2118
2119 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2120 const Twine &NameStr,
2121 BasicBlock *InsertAtEnd) {
2122 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
2123 }
2124
2125 /// Return true if an extractelement instruction can be
2126 /// formed with the specified operands.
2127 static bool isValidOperands(const Value *Vec, const Value *Idx);
2128
2129 Value *getVectorOperand() { return Op<0>(); }
2130 Value *getIndexOperand() { return Op<1>(); }
2131 const Value *getVectorOperand() const { return Op<0>(); }
2132 const Value *getIndexOperand() const { return Op<1>(); }
2133
2134 VectorType *getVectorOperandType() const {
2135 return cast<VectorType>(getVectorOperand()->getType());
2136 }
2137
2138 /// Transparently provide more efficient getOperand methods.
2139 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2140
2141 // Methods for support type inquiry through isa, cast, and dyn_cast:
2142 static bool classof(const Instruction *I) {
2143 return I->getOpcode() == Instruction::ExtractElement;
2144 }
2145 static bool classof(const Value *V) {
2146 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2147 }
2148};
2149
2150template <>
2151struct OperandTraits<ExtractElementInst> :
2152 public FixedNumOperandTraits<ExtractElementInst, 2> {
2153};
2154
2155DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast <bool> (i_nocapture < OperandTraits<
ExtractElementInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2155, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ExtractElementInst>::op_begin
(const_cast<ExtractElementInst*>(this))[i_nocapture].get
()); } void ExtractElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ExtractElementInst>::operands(this)
&& "setOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2155, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ExtractElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ExtractElementInst::getNumOperands() const { return
OperandTraits<ExtractElementInst>::operands(this); } template
<int Idx_nocapture> Use &ExtractElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ExtractElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2156
2157//===----------------------------------------------------------------------===//
2158// InsertElementInst Class
2159//===----------------------------------------------------------------------===//
2160
2161/// This instruction inserts a single (scalar)
2162/// element into a VectorType value
2163///
2164class InsertElementInst : public Instruction {
2165 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
2166 const Twine &NameStr = "",
2167 Instruction *InsertBefore = nullptr);
2168 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
2169 BasicBlock *InsertAtEnd);
2170
2171protected:
2172 // Note: Instruction needs to be a friend here to call cloneImpl.
2173 friend class Instruction;
2174
2175 InsertElementInst *cloneImpl() const;
2176
2177public:
2178 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2179 const Twine &NameStr = "",
2180 Instruction *InsertBefore = nullptr) {
2181 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
2182 }
2183
2184 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2185 const Twine &NameStr,
2186 BasicBlock *InsertAtEnd) {
2187 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
2188 }
2189
2190 /// Return true if an insertelement instruction can be
2191 /// formed with the specified operands.
2192 static bool isValidOperands(const Value *Vec, const Value *NewElt,
2193 const Value *Idx);
2194
2195 /// Overload to return most specific vector type.
2196 ///
2197 VectorType *getType() const {
2198 return cast<VectorType>(Instruction::getType());
2199 }
2200
2201 /// Transparently provide more efficient getOperand methods.
2202 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2203
2204 // Methods for support type inquiry through isa, cast, and dyn_cast:
2205 static bool classof(const Instruction *I) {
2206 return I->getOpcode() == Instruction::InsertElement;
2207 }
2208 static bool classof(const Value *V) {
2209 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2210 }
2211};
2212
2213template <>
2214struct OperandTraits<InsertElementInst> :
2215 public FixedNumOperandTraits<InsertElementInst, 3> {
2216};
2217
2218DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2218, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertElementInst>::op_begin
(const_cast<InsertElementInst*>(this))[i_nocapture].get
()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2218, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertElementInst::getNumOperands() const { return
OperandTraits<InsertElementInst>::operands(this); } template
<int Idx_nocapture> Use &InsertElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &InsertElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2219
2220//===----------------------------------------------------------------------===//
2221// ShuffleVectorInst Class
2222//===----------------------------------------------------------------------===//
2223
2224/// This instruction constructs a fixed permutation of two
2225/// input vectors.
2226///
2227class ShuffleVectorInst : public Instruction {
2228protected:
2229 // Note: Instruction needs to be a friend here to call cloneImpl.
2230 friend class Instruction;
2231
2232 ShuffleVectorInst *cloneImpl() const;
2233
2234public:
2235 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2236 const Twine &NameStr = "",
2237 Instruction *InsertBefor = nullptr);
2238 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2239 const Twine &NameStr, BasicBlock *InsertAtEnd);
2240
2241 // allocate space for exactly three operands
2242 void *operator new(size_t s) {
2243 return User::operator new(s, 3);
2244 }
2245
2246 /// Return true if a shufflevector instruction can be
2247 /// formed with the specified operands.
2248 static bool isValidOperands(const Value *V1, const Value *V2,
2249 const Value *Mask);
2250
2251 /// Overload to return most specific vector type.
2252 ///
2253 VectorType *getType() const {
2254 return cast<VectorType>(Instruction::getType());
2255 }
2256
2257 /// Transparently provide more efficient getOperand methods.
2258 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2259
2260 Constant *getMask() const {
2261 return cast<Constant>(getOperand(2));
2262 }
2263
2264 /// Return the shuffle mask value for the specified element of the mask.
2265 /// Return -1 if the element is undef.
2266 static int getMaskValue(Constant *Mask, unsigned Elt);
2267
2268 /// Return the shuffle mask value of this instruction for the given element
2269 /// index. Return -1 if the element is undef.
2270 int getMaskValue(unsigned Elt) const {
2271 return getMaskValue(getMask(), Elt);
2272 }
2273
2274 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2275 /// elements of the mask are returned as -1.
2276 static void getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result);
2277
2278 /// Return the mask for this instruction as a vector of integers. Undefined
2279 /// elements of the mask are returned as -1.
2280 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2281 return getShuffleMask(getMask(), Result);
2282 }
2283
2284 SmallVector<int, 16> getShuffleMask() const {
2285 SmallVector<int, 16> Mask;
2286 getShuffleMask(Mask);
2287 return Mask;
2288 }
2289
2290 /// Change values in a shuffle permute mask assuming the two vector operands
2291 /// of length InVecNumElts have swapped position.
2292 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2293 unsigned InVecNumElts) {
2294 for (int &Idx : Mask) {
2295 if (Idx == -1)
2296 continue;
2297 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2298 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2299, __extension__ __PRETTY_FUNCTION__))
2299 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2299, __extension__ __PRETTY_FUNCTION__))
;
2300 }
2301 }
2302
2303 // Methods for support type inquiry through isa, cast, and dyn_cast:
2304 static bool classof(const Instruction *I) {
2305 return I->getOpcode() == Instruction::ShuffleVector;
2306 }
2307 static bool classof(const Value *V) {
2308 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2309 }
2310};
2311
2312template <>
2313struct OperandTraits<ShuffleVectorInst> :
2314 public FixedNumOperandTraits<ShuffleVectorInst, 3> {
2315};
2316
2317DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2317, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ShuffleVectorInst>::op_begin
(const_cast<ShuffleVectorInst*>(this))[i_nocapture].get
()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2317, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ShuffleVectorInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ShuffleVectorInst::getNumOperands() const { return
OperandTraits<ShuffleVectorInst>::operands(this); } template
<int Idx_nocapture> Use &ShuffleVectorInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ShuffleVectorInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2318
2319//===----------------------------------------------------------------------===//
2320// ExtractValueInst Class
2321//===----------------------------------------------------------------------===//
2322
2323/// This instruction extracts a struct member or array
2324/// element value from an aggregate value.
2325///
2326class ExtractValueInst : public UnaryInstruction {
2327 SmallVector<unsigned, 4> Indices;
2328
2329 ExtractValueInst(const ExtractValueInst &EVI);
2330
2331 /// Constructors - Create a extractvalue instruction with a base aggregate
2332 /// value and a list of indices. The first ctor can optionally insert before
2333 /// an existing instruction, the second appends the new instruction to the
2334 /// specified BasicBlock.
2335 inline ExtractValueInst(Value *Agg,
2336 ArrayRef<unsigned> Idxs,
2337 const Twine &NameStr,
2338 Instruction *InsertBefore);
2339 inline ExtractValueInst(Value *Agg,
2340 ArrayRef<unsigned> Idxs,
2341 const Twine &NameStr, BasicBlock *InsertAtEnd);
2342
2343 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2344
2345protected:
2346 // Note: Instruction needs to be a friend here to call cloneImpl.
2347 friend class Instruction;
2348
2349 ExtractValueInst *cloneImpl() const;
2350
2351public:
2352 static ExtractValueInst *Create(Value *Agg,
2353 ArrayRef<unsigned> Idxs,
2354 const Twine &NameStr = "",
2355 Instruction *InsertBefore = nullptr) {
2356 return new
2357 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2358 }
2359
2360 static ExtractValueInst *Create(Value *Agg,
2361 ArrayRef<unsigned> Idxs,
2362 const Twine &NameStr,
2363 BasicBlock *InsertAtEnd) {
2364 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2365 }
2366
2367 /// Returns the type of the element that would be extracted
2368 /// with an extractvalue instruction with the specified parameters.
2369 ///
2370 /// Null is returned if the indices are invalid for the specified type.
2371 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2372
2373 using idx_iterator = const unsigned*;
2374
2375 inline idx_iterator idx_begin() const { return Indices.begin(); }
2376 inline idx_iterator idx_end() const { return Indices.end(); }
2377 inline iterator_range<idx_iterator> indices() const {
2378 return make_range(idx_begin(), idx_end());
2379 }
2380
2381 Value *getAggregateOperand() {
2382 return getOperand(0);
2383 }
2384 const Value *getAggregateOperand() const {
2385 return getOperand(0);
2386 }
2387 static unsigned getAggregateOperandIndex() {
2388 return 0U; // get index for modifying correct operand
2389 }
2390
2391 ArrayRef<unsigned> getIndices() const {
2392 return Indices;
2393 }
2394
2395 unsigned getNumIndices() const {
2396 return (unsigned)Indices.size();
2397 }
2398
2399 bool hasIndices() const {
2400 return true;
2401 }
2402
2403 // Methods for support type inquiry through isa, cast, and dyn_cast:
2404 static bool classof(const Instruction *I) {
2405 return I->getOpcode() == Instruction::ExtractValue;
2406 }
2407 static bool classof(const Value *V) {
2408 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2409 }
2410};
2411
2412ExtractValueInst::ExtractValueInst(Value *Agg,
2413 ArrayRef<unsigned> Idxs,
2414 const Twine &NameStr,
2415 Instruction *InsertBefore)
2416 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2417 ExtractValue, Agg, InsertBefore) {
2418 init(Idxs, NameStr);
2419}
2420
2421ExtractValueInst::ExtractValueInst(Value *Agg,
2422 ArrayRef<unsigned> Idxs,
2423 const Twine &NameStr,
2424 BasicBlock *InsertAtEnd)
2425 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2426 ExtractValue, Agg, InsertAtEnd) {
2427 init(Idxs, NameStr);
2428}
2429
2430//===----------------------------------------------------------------------===//
2431// InsertValueInst Class
2432//===----------------------------------------------------------------------===//
2433
2434/// This instruction inserts a struct field of array element
2435/// value into an aggregate value.
2436///
2437class InsertValueInst : public Instruction {
2438 SmallVector<unsigned, 4> Indices;
2439
2440 InsertValueInst(const InsertValueInst &IVI);
2441
2442 /// Constructors - Create a insertvalue instruction with a base aggregate
2443 /// value, a value to insert, and a list of indices. The first ctor can
2444 /// optionally insert before an existing instruction, the second appends
2445 /// the new instruction to the specified BasicBlock.
2446 inline InsertValueInst(Value *Agg, Value *Val,
2447 ArrayRef<unsigned> Idxs,
2448 const Twine &NameStr,
2449 Instruction *InsertBefore);
2450 inline InsertValueInst(Value *Agg, Value *Val,
2451 ArrayRef<unsigned> Idxs,
2452 const Twine &NameStr, BasicBlock *InsertAtEnd);
2453
2454 /// Constructors - These two constructors are convenience methods because one
2455 /// and two index insertvalue instructions are so common.
2456 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2457 const Twine &NameStr = "",
2458 Instruction *InsertBefore = nullptr);
2459 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2460 BasicBlock *InsertAtEnd);
2461
2462 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2463 const Twine &NameStr);
2464
2465protected:
2466 // Note: Instruction needs to be a friend here to call cloneImpl.
2467 friend class Instruction;
2468
2469 InsertValueInst *cloneImpl() const;
2470
2471public:
2472 // allocate space for exactly two operands
2473 void *operator new(size_t s) {
2474 return User::operator new(s, 2);
2475 }
2476
2477 static InsertValueInst *Create(Value *Agg, Value *Val,
2478 ArrayRef<unsigned> Idxs,
2479 const Twine &NameStr = "",
2480 Instruction *InsertBefore = nullptr) {
2481 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2482 }
2483
2484 static InsertValueInst *Create(Value *Agg, Value *Val,
2485 ArrayRef<unsigned> Idxs,
2486 const Twine &NameStr,
2487 BasicBlock *InsertAtEnd) {
2488 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2489 }
2490
2491 /// Transparently provide more efficient getOperand methods.
2492 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2493
2494 using idx_iterator = const unsigned*;
2495
2496 inline idx_iterator idx_begin() const { return Indices.begin(); }
2497 inline idx_iterator idx_end() const { return Indices.end(); }
2498 inline iterator_range<idx_iterator> indices() const {
2499 return make_range(idx_begin(), idx_end());
2500 }
2501
2502 Value *getAggregateOperand() {
2503 return getOperand(0);
2504 }
2505 const Value *getAggregateOperand() const {
2506 return getOperand(0);
2507 }
2508 static unsigned getAggregateOperandIndex() {
2509 return 0U; // get index for modifying correct operand
2510 }
2511
2512 Value *getInsertedValueOperand() {
2513 return getOperand(1);
2514 }
2515 const Value *getInsertedValueOperand() const {
2516 return getOperand(1);
2517 }
2518 static unsigned getInsertedValueOperandIndex() {
2519 return 1U; // get index for modifying correct operand
2520 }
2521
2522 ArrayRef<unsigned> getIndices() const {
2523 return Indices;
2524 }
2525
2526 unsigned getNumIndices() const {
2527 return (unsigned)Indices.size();
2528 }
2529
2530 bool hasIndices() const {
2531 return true;
2532 }
2533
2534 // Methods for support type inquiry through isa, cast, and dyn_cast:
2535 static bool classof(const Instruction *I) {
2536 return I->getOpcode() == Instruction::InsertValue;
2537 }
2538 static bool classof(const Value *V) {
2539 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2540 }
2541};
2542
2543template <>
2544struct OperandTraits<InsertValueInst> :
2545 public FixedNumOperandTraits<InsertValueInst, 2> {
2546};
2547
2548InsertValueInst::InsertValueInst(Value *Agg,
2549 Value *Val,
2550 ArrayRef<unsigned> Idxs,
2551 const Twine &NameStr,
2552 Instruction *InsertBefore)
2553 : Instruction(Agg->getType(), InsertValue,
2554 OperandTraits<InsertValueInst>::op_begin(this),
2555 2, InsertBefore) {
2556 init(Agg, Val, Idxs, NameStr);
2557}
2558
2559InsertValueInst::InsertValueInst(Value *Agg,
2560 Value *Val,
2561 ArrayRef<unsigned> Idxs,
2562 const Twine &NameStr,
2563 BasicBlock *InsertAtEnd)
2564 : Instruction(Agg->getType(), InsertValue,
2565 OperandTraits<InsertValueInst>::op_begin(this),
2566 2, InsertAtEnd) {
2567 init(Agg, Val, Idxs, NameStr);
2568}
2569
2570DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InsertValueInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2570, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertValueInst>::op_begin
(const_cast<InsertValueInst*>(this))[i_nocapture].get()
); } void InsertValueInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<InsertValueInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2570, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertValueInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertValueInst::getNumOperands() const { return
OperandTraits<InsertValueInst>::operands(this); } template
<int Idx_nocapture> Use &InsertValueInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &InsertValueInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
2571
2572//===----------------------------------------------------------------------===//
2573// PHINode Class
2574//===----------------------------------------------------------------------===//
2575
2576// PHINode - The PHINode class is used to represent the magical mystical PHI
2577// node, that can not exist in nature, but can be synthesized in a computer
2578// scientist's overactive imagination.
2579//
2580class PHINode : public Instruction {
2581 /// The number of operands actually allocated. NumOperands is
2582 /// the number actually in use.
2583 unsigned ReservedSpace;
2584
2585 PHINode(const PHINode &PN);
2586
2587 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2588 const Twine &NameStr = "",
2589 Instruction *InsertBefore = nullptr)
2590 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2591 ReservedSpace(NumReservedValues) {
2592 setName(NameStr);
2593 allocHungoffUses(ReservedSpace);
2594 }
2595
2596 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2597 BasicBlock *InsertAtEnd)
2598 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2599 ReservedSpace(NumReservedValues) {
2600 setName(NameStr);
2601 allocHungoffUses(ReservedSpace);
2602 }
2603
2604protected:
2605 // Note: Instruction needs to be a friend here to call cloneImpl.
2606 friend class Instruction;
2607
2608 PHINode *cloneImpl() const;
2609
2610 // allocHungoffUses - this is more complicated than the generic
2611 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2612 // values and pointers to the incoming blocks, all in one allocation.
2613 void allocHungoffUses(unsigned N) {
2614 User::allocHungoffUses(N, /* IsPhi */ true);
2615 }
2616
2617public:
2618 /// Constructors - NumReservedValues is a hint for the number of incoming
2619 /// edges that this phi node will have (use 0 if you really have no idea).
2620 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2621 const Twine &NameStr = "",
2622 Instruction *InsertBefore = nullptr) {
2623 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2624 }
2625
2626 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2627 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2628 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2629 }
2630
2631 /// Provide fast operand accessors
2632 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2633
2634 // Block iterator interface. This provides access to the list of incoming
2635 // basic blocks, which parallels the list of incoming values.
2636
2637 using block_iterator = BasicBlock **;
2638 using const_block_iterator = BasicBlock * const *;
2639
2640 block_iterator block_begin() {
2641 Use::UserRef *ref =
2642 reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
2643 return reinterpret_cast<block_iterator>(ref + 1);
2644 }
2645
2646 const_block_iterator block_begin() const {
2647 const Use::UserRef *ref =
2648 reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
2649 return reinterpret_cast<const_block_iterator>(ref + 1);
2650 }
2651
2652 block_iterator block_end() {
2653 return block_begin() + getNumOperands();
2654 }
2655
2656 const_block_iterator block_end() const {
2657 return block_begin() + getNumOperands();
2658 }
2659
2660 iterator_range<block_iterator> blocks() {
2661 return make_range(block_begin(), block_end());
2662 }
2663
2664 iterator_range<const_block_iterator> blocks() const {
2665 return make_range(block_begin(), block_end());
2666 }
2667
2668 op_range incoming_values() { return operands(); }
2669
2670 const_op_range incoming_values() const { return operands(); }
2671
2672 /// Return the number of incoming edges
2673 ///
2674 unsigned getNumIncomingValues() const { return getNumOperands(); }
2675
2676 /// Return incoming value number x
2677 ///
2678 Value *getIncomingValue(unsigned i) const {
2679 return getOperand(i);
2680 }
2681 void setIncomingValue(unsigned i, Value *V) {
2682 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2682, __extension__ __PRETTY_FUNCTION__))
;
2683 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2684, __extension__ __PRETTY_FUNCTION__))
2684 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2684, __extension__ __PRETTY_FUNCTION__))
;
2685 setOperand(i, V);
2686 }
2687
2688 static unsigned getOperandNumForIncomingValue(unsigned i) {
2689 return i;
2690 }
2691
2692 static unsigned getIncomingValueNumForOperand(unsigned i) {
2693 return i;
2694 }
2695
2696 /// Return incoming basic block number @p i.
2697 ///
2698 BasicBlock *getIncomingBlock(unsigned i) const {
2699 return block_begin()[i];
2700 }
2701
2702 /// Return incoming basic block corresponding
2703 /// to an operand of the PHI.
2704 ///
2705 BasicBlock *getIncomingBlock(const Use &U) const {
2706 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2706, __extension__ __PRETTY_FUNCTION__))
;
2707 return getIncomingBlock(unsigned(&U - op_begin()));
2708 }
2709
2710 /// Return incoming basic block corresponding
2711 /// to value use iterator.
2712 ///
2713 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2714 return getIncomingBlock(I.getUse());
2715 }
2716
2717 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2718 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2718, __extension__ __PRETTY_FUNCTION__))
;
2719 block_begin()[i] = BB;
2720 }
2721
2722 /// Add an incoming value to the end of the PHI list
2723 ///
2724 void addIncoming(Value *V, BasicBlock *BB) {
2725 if (getNumOperands() == ReservedSpace)
2726 growOperands(); // Get more space!
2727 // Initialize some new operands.
2728 setNumHungOffUseOperands(getNumOperands() + 1);
2729 setIncomingValue(getNumOperands() - 1, V);
2730 setIncomingBlock(getNumOperands() - 1, BB);
2731 }
2732
2733 /// Remove an incoming value. This is useful if a
2734 /// predecessor basic block is deleted. The value removed is returned.
2735 ///
2736 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2737 /// is true), the PHI node is destroyed and any uses of it are replaced with
2738 /// dummy values. The only time there should be zero incoming values to a PHI
2739 /// node is when the block is dead, so this strategy is sound.
2740 ///
2741 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2742
2743 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2744 int Idx = getBasicBlockIndex(BB);
2745 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2745, __extension__ __PRETTY_FUNCTION__))
;
2746 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2747 }
2748
2749 /// Return the first index of the specified basic
2750 /// block in the value list for this PHI. Returns -1 if no instance.
2751 ///
2752 int getBasicBlockIndex(const BasicBlock *BB) const {
2753 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2754 if (block_begin()[i] == BB)
2755 return i;
2756 return -1;
2757 }
2758
2759 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2760 int Idx = getBasicBlockIndex(BB);
2761 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2761, __extension__ __PRETTY_FUNCTION__))
;
17
Within the expansion of the macro 'assert':
a
Assuming 'Idx' is >= 0
2762 return getIncomingValue(Idx);
18
Calling 'PHINode::getIncomingValue'
19
Returning from 'PHINode::getIncomingValue'
2763 }
2764
2765 /// If the specified PHI node always merges together the
2766 /// same value, return the value, otherwise return null.
2767 Value *hasConstantValue() const;
2768
2769 /// Whether the specified PHI node always merges
2770 /// together the same value, assuming undefs are equal to a unique
2771 /// non-undef value.
2772 bool hasConstantOrUndefValue() const;
2773
2774 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2775 static bool classof(const Instruction *I) {
2776 return I->getOpcode() == Instruction::PHI;
2777 }
2778 static bool classof(const Value *V) {
2779 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2780 }
2781
2782private:
2783 void growOperands();
2784};
2785
2786template <>
2787struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2788};
2789
2790DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<PHINode>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2790, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<PHINode>::op_begin(const_cast
<PHINode*>(this))[i_nocapture].get()); } void PHINode::
setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<PHINode>::
operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2790, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
PHINode>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
PHINode::getNumOperands() const { return OperandTraits<PHINode
>::operands(this); } template <int Idx_nocapture> Use
&PHINode::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
PHINode::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2791
2792//===----------------------------------------------------------------------===//
2793// LandingPadInst Class
2794//===----------------------------------------------------------------------===//
2795
2796//===---------------------------------------------------------------------------
2797/// The landingpad instruction holds all of the information
2798/// necessary to generate correct exception handling. The landingpad instruction
2799/// cannot be moved from the top of a landing pad block, which itself is
2800/// accessible only from the 'unwind' edge of an invoke. This uses the
2801/// SubclassData field in Value to store whether or not the landingpad is a
2802/// cleanup.
2803///
2804class LandingPadInst : public Instruction {
2805 /// The number of operands actually allocated. NumOperands is
2806 /// the number actually in use.
2807 unsigned ReservedSpace;
2808
2809 LandingPadInst(const LandingPadInst &LP);
2810
2811public:
2812 enum ClauseType { Catch, Filter };
2813
2814private:
2815 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2816 const Twine &NameStr, Instruction *InsertBefore);
2817 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2818 const Twine &NameStr, BasicBlock *InsertAtEnd);
2819
2820 // Allocate space for exactly zero operands.
2821 void *operator new(size_t s) {
2822 return User::operator new(s);
2823 }
2824
2825 void growOperands(unsigned Size);
2826 void init(unsigned NumReservedValues, const Twine &NameStr);
2827
2828protected:
2829 // Note: Instruction needs to be a friend here to call cloneImpl.
2830 friend class Instruction;
2831
2832 LandingPadInst *cloneImpl() const;
2833
2834public:
2835 /// Constructors - NumReservedClauses is a hint for the number of incoming
2836 /// clauses that this landingpad will have (use 0 if you really have no idea).
2837 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2838 const Twine &NameStr = "",
2839 Instruction *InsertBefore = nullptr);
2840 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2841 const Twine &NameStr, BasicBlock *InsertAtEnd);
2842
2843 /// Provide fast operand accessors
2844 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2845
2846 /// Return 'true' if this landingpad instruction is a
2847 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2848 /// doesn't catch the exception.
2849 bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
2850
2851 /// Indicate that this landingpad instruction is a cleanup.
2852 void setCleanup(bool V) {
2853 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
2854 (V ? 1 : 0));
2855 }
2856
2857 /// Add a catch or filter clause to the landing pad.
2858 void addClause(Constant *ClauseVal);
2859
2860 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2861 /// determine what type of clause this is.
2862 Constant *getClause(unsigned Idx) const {
2863 return cast<Constant>(getOperandList()[Idx]);
2864 }
2865
2866 /// Return 'true' if the clause and index Idx is a catch clause.
2867 bool isCatch(unsigned Idx) const {
2868 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2869 }
2870
2871 /// Return 'true' if the clause and index Idx is a filter clause.
2872 bool isFilter(unsigned Idx) const {
2873 return isa<ArrayType>(getOperandList()[Idx]->getType());
2874 }
2875
2876 /// Get the number of clauses for this landing pad.
2877 unsigned getNumClauses() const { return getNumOperands(); }
2878
2879 /// Grow the size of the operand list to accommodate the new
2880 /// number of clauses.
2881 void reserveClauses(unsigned Size) { growOperands(Size); }
2882
2883 // Methods for support type inquiry through isa, cast, and dyn_cast:
2884 static bool classof(const Instruction *I) {
2885 return I->getOpcode() == Instruction::LandingPad;
2886 }
2887 static bool classof(const Value *V) {
2888 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2889 }
2890};
2891
2892template <>
2893struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2894};
2895
2896DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2896, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<LandingPadInst>::op_begin(
const_cast<LandingPadInst*>(this))[i_nocapture].get());
} void LandingPadInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<LandingPadInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2896, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
LandingPadInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned LandingPadInst::getNumOperands() const { return OperandTraits
<LandingPadInst>::operands(this); } template <int Idx_nocapture
> Use &LandingPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &LandingPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2897
2898//===----------------------------------------------------------------------===//
2899// ReturnInst Class
2900//===----------------------------------------------------------------------===//
2901
2902//===---------------------------------------------------------------------------
2903/// Return a value (possibly void), from a function. Execution
2904/// does not continue in this function any longer.
2905///
2906class ReturnInst : public TerminatorInst {
2907 ReturnInst(const ReturnInst &RI);
2908
2909private:
2910 // ReturnInst constructors:
2911 // ReturnInst() - 'ret void' instruction
2912 // ReturnInst( null) - 'ret void' instruction
2913 // ReturnInst(Value* X) - 'ret X' instruction
2914 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2915 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2916 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2917 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2918 //
2919 // NOTE: If the Value* passed is of type void then the constructor behaves as
2920 // if it was passed NULL.
2921 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2922 Instruction *InsertBefore = nullptr);
2923 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2924 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2925
2926protected:
2927 // Note: Instruction needs to be a friend here to call cloneImpl.
2928 friend class Instruction;
2929
2930 ReturnInst *cloneImpl() const;
2931
2932public:
2933 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2934 Instruction *InsertBefore = nullptr) {
2935 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2936 }
2937
2938 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2939 BasicBlock *InsertAtEnd) {
2940 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2941 }
2942
2943 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2944 return new(0) ReturnInst(C, InsertAtEnd);
2945 }
2946
2947 /// Provide fast operand accessors
2948 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2949
2950 /// Convenience accessor. Returns null if there is no return value.
2951 Value *getReturnValue() const {
2952 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2953 }
2954
2955 unsigned getNumSuccessors() const { return 0; }
2956
2957 // Methods for support type inquiry through isa, cast, and dyn_cast:
2958 static bool classof(const Instruction *I) {
2959 return (I->getOpcode() == Instruction::Ret);
2960 }
2961 static bool classof(const Value *V) {
2962 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2963 }
2964
2965private:
2966 friend TerminatorInst;
2967
2968 BasicBlock *getSuccessor(unsigned idx) const {
2969 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2969)
;
2970 }
2971
2972 void setSuccessor(unsigned idx, BasicBlock *B) {
2973 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2973)
;
2974 }
2975};
2976
2977template <>
2978struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
2979};
2980
2981DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ReturnInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2981, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ReturnInst>::op_begin(const_cast
<ReturnInst*>(this))[i_nocapture].get()); } void ReturnInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<ReturnInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 2981, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned ReturnInst::getNumOperands() const { return OperandTraits
<ReturnInst>::operands(this); } template <int Idx_nocapture
> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2982
2983//===----------------------------------------------------------------------===//
2984// BranchInst Class
2985//===----------------------------------------------------------------------===//
2986
2987//===---------------------------------------------------------------------------
2988/// Conditional or Unconditional Branch instruction.
2989///
2990class BranchInst : public TerminatorInst {
2991 /// Ops list - Branches are strange. The operands are ordered:
2992 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
2993 /// they don't have to check for cond/uncond branchness. These are mostly
2994 /// accessed relative from op_end().
2995 BranchInst(const BranchInst &BI);
2996 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
2997 // BranchInst(BB *B) - 'br B'
2998 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
2999 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3000 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3001 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3002 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3003 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3004 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3005 Instruction *InsertBefore = nullptr);
3006 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3007 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3008 BasicBlock *InsertAtEnd);
3009
3010 void AssertOK();
3011
3012protected:
3013 // Note: Instruction needs to be a friend here to call cloneImpl.
3014 friend class Instruction;
3015
3016 BranchInst *cloneImpl() const;
3017
3018public:
3019 static BranchInst *Create(BasicBlock *IfTrue,
3020 Instruction *InsertBefore = nullptr) {
3021 return new(1) BranchInst(IfTrue, InsertBefore);
3022 }
3023
3024 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3025 Value *Cond, Instruction *InsertBefore = nullptr) {
3026 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3027 }
3028
3029 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3030 return new(1) BranchInst(IfTrue, InsertAtEnd);
3031 }
3032
3033 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3034 Value *Cond, BasicBlock *InsertAtEnd) {
3035 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3036 }
3037
3038 /// Transparently provide more efficient getOperand methods.
3039 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3040
3041 bool isUnconditional() const { return getNumOperands() == 1; }
3042 bool isConditional() const { return getNumOperands() == 3; }
3043
3044 Value *getCondition() const {
3045 assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3045, __extension__ __PRETTY_FUNCTION__))
;
3046 return Op<-3>();
3047 }
3048
3049 void setCondition(Value *V) {
3050 assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3050, __extension__ __PRETTY_FUNCTION__))
;
3051 Op<-3>() = V;
3052 }
3053
3054 unsigned getNumSuccessors() const { return 1+isConditional(); }
3055
3056 BasicBlock *getSuccessor(unsigned i) const {
3057 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3057, __extension__ __PRETTY_FUNCTION__))
;
3058 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3059 }
3060
3061 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3062 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3062, __extension__ __PRETTY_FUNCTION__))
;
3063 *(&Op<-1>() - idx) = NewSucc;
3064 }
3065
3066 /// Swap the successors of this branch instruction.
3067 ///
3068 /// Swaps the successors of the branch instruction. This also swaps any
3069 /// branch weight metadata associated with the instruction so that it
3070 /// continues to map correctly to each operand.
3071 void swapSuccessors();
3072
3073 // Methods for support type inquiry through isa, cast, and dyn_cast:
3074 static bool classof(const Instruction *I) {
3075 return (I->getOpcode() == Instruction::Br);
3076 }
3077 static bool classof(const Value *V) {
3078 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3079 }
3080};
3081
3082template <>
3083struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3084};
3085
3086DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<BranchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3086, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<BranchInst>::op_begin(const_cast
<BranchInst*>(this))[i_nocapture].get()); } void BranchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<BranchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3086, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
BranchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned BranchInst::getNumOperands() const { return OperandTraits
<BranchInst>::operands(this); } template <int Idx_nocapture
> Use &BranchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
BranchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3087
3088//===----------------------------------------------------------------------===//
3089// SwitchInst Class
3090//===----------------------------------------------------------------------===//
3091
3092//===---------------------------------------------------------------------------
3093/// Multiway switch
3094///
3095class SwitchInst : public TerminatorInst {
3096 unsigned ReservedSpace;
3097
3098 // Operand[0] = Value to switch on
3099 // Operand[1] = Default basic block destination
3100 // Operand[2n ] = Value to match
3101 // Operand[2n+1] = BasicBlock to go to on match
3102 SwitchInst(const SwitchInst &SI);
3103
3104 /// Create a new switch instruction, specifying a value to switch on and a
3105 /// default destination. The number of additional cases can be specified here
3106 /// to make memory allocation more efficient. This constructor can also
3107 /// auto-insert before another instruction.
3108 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3109 Instruction *InsertBefore);
3110
3111 /// Create a new switch instruction, specifying a value to switch on and a
3112 /// default destination. The number of additional cases can be specified here
3113 /// to make memory allocation more efficient. This constructor also
3114 /// auto-inserts at the end of the specified BasicBlock.
3115 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3116 BasicBlock *InsertAtEnd);
3117
3118 // allocate space for exactly zero operands
3119 void *operator new(size_t s) {
3120 return User::operator new(s);
3121 }
3122
3123 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3124 void growOperands();
3125
3126protected:
3127 // Note: Instruction needs to be a friend here to call cloneImpl.
3128 friend class Instruction;
3129
3130 SwitchInst *cloneImpl() const;
3131
3132public:
3133 // -2
3134 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3135
3136 template <typename CaseHandleT> class CaseIteratorImpl;
3137
3138 /// A handle to a particular switch case. It exposes a convenient interface
3139 /// to both the case value and the successor block.
3140 ///
3141 /// We define this as a template and instantiate it to form both a const and
3142 /// non-const handle.
3143 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3144 class CaseHandleImpl {
3145 // Directly befriend both const and non-const iterators.
3146 friend class SwitchInst::CaseIteratorImpl<
3147 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3148
3149 protected:
3150 // Expose the switch type we're parameterized with to the iterator.
3151 using SwitchInstType = SwitchInstT;
3152
3153 SwitchInstT *SI;
3154 ptrdiff_t Index;
3155
3156 CaseHandleImpl() = default;
3157 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3158
3159 public:
3160 /// Resolves case value for current case.
3161 ConstantIntT *getCaseValue() const {
3162 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3163, __extension__ __PRETTY_FUNCTION__))
3163 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3163, __extension__ __PRETTY_FUNCTION__))
;
3164 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3165 }
3166
3167 /// Resolves successor for current case.
3168 BasicBlockT *getCaseSuccessor() const {
3169 assert(((unsigned)Index < SI->getNumCases() ||(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3171, __extension__ __PRETTY_FUNCTION__))
3170 (unsigned)Index == DefaultPseudoIndex) &&(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3171, __extension__ __PRETTY_FUNCTION__))
3171 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3171, __extension__ __PRETTY_FUNCTION__))
;
3172 return SI->getSuccessor(getSuccessorIndex());
3173 }
3174
3175 /// Returns number of current case.
3176 unsigned getCaseIndex() const { return Index; }
3177
3178 /// Returns TerminatorInst's successor index for current case successor.
3179 unsigned getSuccessorIndex() const {
3180 assert(((unsigned)Index == DefaultPseudoIndex ||(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3182, __extension__ __PRETTY_FUNCTION__))
3181 (unsigned)Index < SI->getNumCases()) &&(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3182, __extension__ __PRETTY_FUNCTION__))
3182 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3182, __extension__ __PRETTY_FUNCTION__))
;
3183 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3184 }
3185
3186 bool operator==(const CaseHandleImpl &RHS) const {
3187 assert(SI == RHS.SI && "Incompatible operators.")(static_cast <bool> (SI == RHS.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3187, __extension__ __PRETTY_FUNCTION__))
;
3188 return Index == RHS.Index;
3189 }
3190 };
3191
3192 using ConstCaseHandle =
3193 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3194
3195 class CaseHandle
3196 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3197 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3198
3199 public:
3200 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3201
3202 /// Sets the new value for current case.
3203 void setValue(ConstantInt *V) {
3204 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3205, __extension__ __PRETTY_FUNCTION__))
3205 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3205, __extension__ __PRETTY_FUNCTION__))
;
3206 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3207 }
3208
3209 /// Sets the new successor for current case.
3210 void setSuccessor(BasicBlock *S) {
3211 SI->setSuccessor(getSuccessorIndex(), S);
3212 }
3213 };
3214
3215 template <typename CaseHandleT>
3216 class CaseIteratorImpl
3217 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3218 std::random_access_iterator_tag,
3219 CaseHandleT> {
3220 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3221
3222 CaseHandleT Case;
3223
3224 public:
3225 /// Default constructed iterator is in an invalid state until assigned to
3226 /// a case for a particular switch.
3227 CaseIteratorImpl() = default;
3228
3229 /// Initializes case iterator for given SwitchInst and for given
3230 /// case number.
3231 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3232
3233 /// Initializes case iterator for given SwitchInst and for given
3234 /// TerminatorInst's successor index.
3235 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3236 unsigned SuccessorIndex) {
3237 assert(SuccessorIndex < SI->getNumSuccessors() &&(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3238, __extension__ __PRETTY_FUNCTION__))
3238 "Successor index # out of range!")(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3238, __extension__ __PRETTY_FUNCTION__))
;
3239 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3240 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3241 }
3242
3243 /// Support converting to the const variant. This will be a no-op for const
3244 /// variant.
3245 operator CaseIteratorImpl<ConstCaseHandle>() const {
3246 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3247 }
3248
3249 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3250 // Check index correctness after addition.
3251 // Note: Index == getNumCases() means end().
3252 assert(Case.Index + N >= 0 &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3254, __extension__ __PRETTY_FUNCTION__))
3253 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3254, __extension__ __PRETTY_FUNCTION__))
3254 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3254, __extension__ __PRETTY_FUNCTION__))
;
3255 Case.Index += N;
3256 return *this;
3257 }
3258 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3259 // Check index correctness after subtraction.
3260 // Note: Case.Index == getNumCases() means end().
3261 assert(Case.Index - N >= 0 &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3263, __extension__ __PRETTY_FUNCTION__))
3262 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3263, __extension__ __PRETTY_FUNCTION__))
3263 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3263, __extension__ __PRETTY_FUNCTION__))
;
3264 Case.Index -= N;
3265 return *this;
3266 }
3267 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3268 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3268, __extension__ __PRETTY_FUNCTION__))
;
3269 return Case.Index - RHS.Case.Index;
3270 }
3271 bool operator==(const CaseIteratorImpl &RHS) const {
3272 return Case == RHS.Case;
3273 }
3274 bool operator<(const CaseIteratorImpl &RHS) const {
3275 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3275, __extension__ __PRETTY_FUNCTION__))
;
3276 return Case.Index < RHS.Case.Index;
3277 }
3278 CaseHandleT &operator*() { return Case; }
3279 const CaseHandleT &operator*() const { return Case; }
3280 };
3281
3282 using CaseIt = CaseIteratorImpl<CaseHandle>;
3283 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3284
3285 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3286 unsigned NumCases,
3287 Instruction *InsertBefore = nullptr) {
3288 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3289 }
3290
3291 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3292 unsigned NumCases, BasicBlock *InsertAtEnd) {
3293 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3294 }
3295
3296 /// Provide fast operand accessors
3297 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3298
3299 // Accessor Methods for Switch stmt
3300 Value *getCondition() const { return getOperand(0); }
3301 void setCondition(Value *V) { setOperand(0, V); }
3302
3303 BasicBlock *getDefaultDest() const {
3304 return cast<BasicBlock>(getOperand(1));
3305 }
3306
3307 void setDefaultDest(BasicBlock *DefaultCase) {
3308 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3309 }
3310
3311 /// Return the number of 'cases' in this switch instruction, excluding the
3312 /// default case.
3313 unsigned getNumCases() const {
3314 return getNumOperands()/2 - 1;
3315 }
3316
3317 /// Returns a read/write iterator that points to the first case in the
3318 /// SwitchInst.
3319 CaseIt case_begin() {
3320 return CaseIt(this, 0);
3321 }
3322
3323 /// Returns a read-only iterator that points to the first case in the
3324 /// SwitchInst.
3325 ConstCaseIt case_begin() const {
3326 return ConstCaseIt(this, 0);
3327 }
3328
3329 /// Returns a read/write iterator that points one past the last in the
3330 /// SwitchInst.
3331 CaseIt case_end() {
3332 return CaseIt(this, getNumCases());
3333 }
3334
3335 /// Returns a read-only iterator that points one past the last in the
3336 /// SwitchInst.
3337 ConstCaseIt case_end() const {
3338 return ConstCaseIt(this, getNumCases());
3339 }
3340
3341 /// Iteration adapter for range-for loops.
3342 iterator_range<CaseIt> cases() {
3343 return make_range(case_begin(), case_end());
3344 }
3345
3346 /// Constant iteration adapter for range-for loops.
3347 iterator_range<ConstCaseIt> cases() const {
3348 return make_range(case_begin(), case_end());
3349 }
3350
3351 /// Returns an iterator that points to the default case.
3352 /// Note: this iterator allows to resolve successor only. Attempt
3353 /// to resolve case value causes an assertion.
3354 /// Also note, that increment and decrement also causes an assertion and
3355 /// makes iterator invalid.
3356 CaseIt case_default() {
3357 return CaseIt(this, DefaultPseudoIndex);
3358 }
3359 ConstCaseIt case_default() const {
3360 return ConstCaseIt(this, DefaultPseudoIndex);
3361 }
3362
3363 /// Search all of the case values for the specified constant. If it is
3364 /// explicitly handled, return the case iterator of it, otherwise return
3365 /// default case iterator to indicate that it is handled by the default
3366 /// handler.
3367 CaseIt findCaseValue(const ConstantInt *C) {
3368 CaseIt I = llvm::find_if(
3369 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3370 if (I != case_end())
3371 return I;
3372
3373 return case_default();
3374 }
3375 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3376 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3377 return Case.getCaseValue() == C;
3378 });
3379 if (I != case_end())
3380 return I;
3381
3382 return case_default();
3383 }
3384
3385 /// Finds the unique case value for a given successor. Returns null if the
3386 /// successor is not found, not unique, or is the default case.
3387 ConstantInt *findCaseDest(BasicBlock *BB) {
3388 if (BB == getDefaultDest())
3389 return nullptr;
3390
3391 ConstantInt *CI = nullptr;
3392 for (auto Case : cases()) {
3393 if (Case.getCaseSuccessor() != BB)
3394 continue;
3395
3396 if (CI)
3397 return nullptr; // Multiple cases lead to BB.
3398
3399 CI = Case.getCaseValue();
3400 }
3401
3402 return CI;
3403 }
3404
3405 /// Add an entry to the switch instruction.
3406 /// Note:
3407 /// This action invalidates case_end(). Old case_end() iterator will
3408 /// point to the added case.
3409 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3410
3411 /// This method removes the specified case and its successor from the switch
3412 /// instruction. Note that this operation may reorder the remaining cases at
3413 /// index idx and above.
3414 /// Note:
3415 /// This action invalidates iterators for all cases following the one removed,
3416 /// including the case_end() iterator. It returns an iterator for the next
3417 /// case.
3418 CaseIt removeCase(CaseIt I);
3419
3420 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3421 BasicBlock *getSuccessor(unsigned idx) const {
3422 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor idx out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3422, __extension__ __PRETTY_FUNCTION__))
;
3423 return cast<BasicBlock>(getOperand(idx*2+1));
3424 }
3425 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3426 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for switch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3426, __extension__ __PRETTY_FUNCTION__))
;
3427 setOperand(idx * 2 + 1, NewSucc);
3428 }
3429
3430 // Methods for support type inquiry through isa, cast, and dyn_cast:
3431 static bool classof(const Instruction *I) {
3432 return I->getOpcode() == Instruction::Switch;
3433 }
3434 static bool classof(const Value *V) {
3435 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3436 }
3437};
3438
3439template <>
3440struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3441};
3442
3443DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits
<SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator
SwitchInst::op_begin() const { return OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst
::op_iterator SwitchInst::op_end() { return OperandTraits<
SwitchInst>::op_end(this); } SwitchInst::const_op_iterator
SwitchInst::op_end() const { return OperandTraits<SwitchInst
>::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SwitchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3443, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SwitchInst>::op_begin(const_cast
<SwitchInst*>(this))[i_nocapture].get()); } void SwitchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SwitchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3443, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SwitchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SwitchInst::getNumOperands() const { return OperandTraits
<SwitchInst>::operands(this); } template <int Idx_nocapture
> Use &SwitchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SwitchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3444
//===----------------------------------------------------------------------===//
// IndirectBrInst Class
//===----------------------------------------------------------------------===//

3449//===---------------------------------------------------------------------------
3450/// Indirect Branch Instruction.
3451///
3452class IndirectBrInst : public TerminatorInst {
3453 unsigned ReservedSpace;
3454
3455 // Operand[0] = Address to jump to
3456 // Operand[n+1] = n-th destination
3457 IndirectBrInst(const IndirectBrInst &IBI);
3458
3459 /// Create a new indirectbr instruction, specifying an
3460 /// Address to jump to. The number of expected destinations can be specified
3461 /// here to make memory allocation more efficient. This constructor can also
3462 /// autoinsert before another instruction.
3463 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3464
3465 /// Create a new indirectbr instruction, specifying an
3466 /// Address to jump to. The number of expected destinations can be specified
3467 /// here to make memory allocation more efficient. This constructor also
3468 /// autoinserts at the end of the specified BasicBlock.
3469 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3470
3471 // allocate space for exactly zero operands
3472 void *operator new(size_t s) {
3473 return User::operator new(s);
3474 }
3475
3476 void init(Value *Address, unsigned NumDests);
3477 void growOperands();
3478
3479protected:
3480 // Note: Instruction needs to be a friend here to call cloneImpl.
3481 friend class Instruction;
3482
3483 IndirectBrInst *cloneImpl() const;
3484
3485public:
3486 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3487 Instruction *InsertBefore = nullptr) {
3488 return new IndirectBrInst(Address, NumDests, InsertBefore);
3489 }
3490
3491 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3492 BasicBlock *InsertAtEnd) {
3493 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3494 }
3495
3496 /// Provide fast operand accessors.
3497 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3498
3499 // Accessor Methods for IndirectBrInst instruction.
3500 Value *getAddress() { return getOperand(0); }
3501 const Value *getAddress() const { return getOperand(0); }
3502 void setAddress(Value *V) { setOperand(0, V); }
3503
3504 /// return the number of possible destinations in this
3505 /// indirectbr instruction.
3506 unsigned getNumDestinations() const { return getNumOperands()-1; }
3507
3508 /// Return the specified destination.
3509 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3510 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3511
3512 /// Add a destination.
3513 ///
3514 void addDestination(BasicBlock *Dest);
3515
3516 /// This method removes the specified successor from the
3517 /// indirectbr instruction.
3518 void removeDestination(unsigned i);
3519
3520 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3521 BasicBlock *getSuccessor(unsigned i) const {
3522 return cast<BasicBlock>(getOperand(i+1));
3523 }
3524 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3525 setOperand(i + 1, NewSucc);
3526 }
3527
3528 // Methods for support type inquiry through isa, cast, and dyn_cast:
3529 static bool classof(const Instruction *I) {
3530 return I->getOpcode() == Instruction::IndirectBr;
3531 }
3532 static bool classof(const Value *V) {
3533 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3534 }
3535};
3536
3537template <>
3538struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3539};
3540
3541DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return
OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst
::const_op_iterator IndirectBrInst::op_begin() const { return
OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst
::op_end() { return OperandTraits<IndirectBrInst>::op_end
(this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end
() const { return OperandTraits<IndirectBrInst>::op_end
(const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<IndirectBrInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3541, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<IndirectBrInst>::op_begin(
const_cast<IndirectBrInst*>(this))[i_nocapture].get());
} void IndirectBrInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<IndirectBrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3541, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
IndirectBrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned IndirectBrInst::getNumOperands() const { return OperandTraits
<IndirectBrInst>::operands(this); } template <int Idx_nocapture
> Use &IndirectBrInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &IndirectBrInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
3542
3543//===----------------------------------------------------------------------===//
3544// InvokeInst Class
3545//===----------------------------------------------------------------------===//
3546
3547/// Invoke instruction. The SubclassData field is used to hold the
3548/// calling convention of the call.
3549///
3550class InvokeInst : public TerminatorInst,
3551 public OperandBundleUser<InvokeInst, User::op_iterator> {
3552 friend class OperandBundleUser<InvokeInst, User::op_iterator>;
3553
3554 AttributeList Attrs;
3555 FunctionType *FTy;
3556
3557 InvokeInst(const InvokeInst &BI);
3558
3559 /// Construct an InvokeInst given a range of arguments.
3560 ///
3561 /// Construct an InvokeInst from a range of arguments
3562 inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
3563 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
3564 unsigned Values, const Twine &NameStr,
3565 Instruction *InsertBefore)
3566 : InvokeInst(cast<FunctionType>(
3567 cast<PointerType>(Func->getType())->getElementType()),
3568 Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
3569 InsertBefore) {}
3570
3571 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3572 BasicBlock *IfException, ArrayRef<Value *> Args,
3573 ArrayRef<OperandBundleDef> Bundles, unsigned Values,
3574 const Twine &NameStr, Instruction *InsertBefore);
3575 /// Construct an InvokeInst given a range of arguments.
3576 ///
3577 /// Construct an InvokeInst from a range of arguments
3578 inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
3579 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
3580 unsigned Values, const Twine &NameStr,
3581 BasicBlock *InsertAtEnd);
3582
3583 bool hasDescriptor() const { return HasDescriptor; }
3584
3585 void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
3586 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
3587 const Twine &NameStr) {
3588 init(cast<FunctionType>(
3589 cast<PointerType>(Func->getType())->getElementType()),
3590 Func, IfNormal, IfException, Args, Bundles, NameStr);
3591 }
3592
3593 void init(FunctionType *FTy, Value *Func, BasicBlock *IfNormal,
3594 BasicBlock *IfException, ArrayRef<Value *> Args,
3595 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3596
3597protected:
3598 // Note: Instruction needs to be a friend here to call cloneImpl.
3599 friend class Instruction;
3600
3601 InvokeInst *cloneImpl() const;
3602
3603public:
3604 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3605 BasicBlock *IfException, ArrayRef<Value *> Args,
3606 const Twine &NameStr,
3607 Instruction *InsertBefore = nullptr) {
3608 return Create(cast<FunctionType>(
3609 cast<PointerType>(Func->getType())->getElementType()),
3610 Func, IfNormal, IfException, Args, None, NameStr,
3611 InsertBefore);
3612 }
3613
3614 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3615 BasicBlock *IfException, ArrayRef<Value *> Args,
3616 ArrayRef<OperandBundleDef> Bundles = None,
3617 const Twine &NameStr = "",
3618 Instruction *InsertBefore = nullptr) {
3619 return Create(cast<FunctionType>(
3620 cast<PointerType>(Func->getType())->getElementType()),
3621 Func, IfNormal, IfException, Args, Bundles, NameStr,
3622 InsertBefore);
3623 }
3624
3625 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3626 BasicBlock *IfException, ArrayRef<Value *> Args,
3627 const Twine &NameStr,
3628 Instruction *InsertBefore = nullptr) {
3629 unsigned Values = unsigned(Args.size()) + 3;
3630 return new (Values) InvokeInst(Ty, Func, IfNormal, IfException, Args, None,
3631 Values, NameStr, InsertBefore);
3632 }
3633
3634 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3635 BasicBlock *IfException, ArrayRef<Value *> Args,
3636 ArrayRef<OperandBundleDef> Bundles = None,
3637 const Twine &NameStr = "",
3638 Instruction *InsertBefore = nullptr) {
3639 unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
3640 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3641
3642 return new (Values, DescriptorBytes)
3643 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, Values,
3644 NameStr, InsertBefore);
3645 }
3646
3647 static InvokeInst *Create(Value *Func,
3648 BasicBlock *IfNormal, BasicBlock *IfException,
3649 ArrayRef<Value *> Args, const Twine &NameStr,
3650 BasicBlock *InsertAtEnd) {
3651 unsigned Values = unsigned(Args.size()) + 3;
3652 return new (Values) InvokeInst(Func, IfNormal, IfException, Args, None,
3653 Values, NameStr, InsertAtEnd);
3654 }
3655
3656 static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
3657 BasicBlock *IfException, ArrayRef<Value *> Args,
3658 ArrayRef<OperandBundleDef> Bundles,
3659 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3660 unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
3661 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3662
3663 return new (Values, DescriptorBytes)
3664 InvokeInst(Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
3665 InsertAtEnd);
3666 }
3667
3668 /// Create a clone of \p II with a different set of operand bundles and
3669 /// insert it before \p InsertPt.
3670 ///
3671 /// The returned invoke instruction is identical to \p II in every way except
3672 /// that the operand bundles for the new instruction are set to the operand
3673 /// bundles in \p Bundles.
3674 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3675 Instruction *InsertPt = nullptr);
3676
3677 /// Provide fast operand accessors
3678 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3679
3680 FunctionType *getFunctionType() const { return FTy; }
3681
3682 void mutateFunctionType(FunctionType *FTy) {
3683 mutateType(FTy->getReturnType());
3684 this->FTy = FTy;
3685 }
3686
3687 /// Return the number of invoke arguments.
3688 ///
3689 unsigned getNumArgOperands() const {
3690 return getNumOperands() - getNumTotalBundleOperands() - 3;
3691 }
3692
3693 /// getArgOperand/setArgOperand - Return/set the i-th invoke argument.
3694 ///
3695 Value *getArgOperand(unsigned i) const {
3696 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3696, __extension__ __PRETTY_FUNCTION__))
;
3697 return getOperand(i);
3698 }
3699 void setArgOperand(unsigned i, Value *v) {
3700 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3700, __extension__ __PRETTY_FUNCTION__))
;
3701 setOperand(i, v);
3702 }
3703
3704 /// Return the iterator pointing to the beginning of the argument list.
3705 op_iterator arg_begin() { return op_begin(); }
3706
3707 /// Return the iterator pointing to the end of the argument list.
3708 op_iterator arg_end() {
3709 // [ invoke args ], [ operand bundles ], normal dest, unwind dest, callee
3710 return op_end() - getNumTotalBundleOperands() - 3;
3711 }
3712
3713 /// Iteration adapter for range-for loops.
3714 iterator_range<op_iterator> arg_operands() {
3715 return make_range(arg_begin(), arg_end());
3716 }
3717
3718 /// Return the iterator pointing to the beginning of the argument list.
3719 const_op_iterator arg_begin() const { return op_begin(); }
3720
3721 /// Return the iterator pointing to the end of the argument list.
3722 const_op_iterator arg_end() const {
3723 // [ invoke args ], [ operand bundles ], normal dest, unwind dest, callee
3724 return op_end() - getNumTotalBundleOperands() - 3;
3725 }
3726
3727 /// Iteration adapter for range-for loops.
3728 iterator_range<const_op_iterator> arg_operands() const {
3729 return make_range(arg_begin(), arg_end());
3730 }
3731
3732 /// Wrappers for getting the \c Use of a invoke argument.
3733 const Use &getArgOperandUse(unsigned i) const {
3734 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3734, __extension__ __PRETTY_FUNCTION__))
;
3735 return getOperandUse(i);
3736 }
3737 Use &getArgOperandUse(unsigned i) {
3738 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3738, __extension__ __PRETTY_FUNCTION__))
;
3739 return getOperandUse(i);
3740 }
3741
3742 /// If one of the arguments has the 'returned' attribute, return its
3743 /// operand value. Otherwise, return nullptr.
3744 Value *getReturnedArgOperand() const;
3745
3746 /// getCallingConv/setCallingConv - Get or set the calling convention of this
3747 /// function call.
3748 CallingConv::ID getCallingConv() const {
3749 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction());
3750 }
3751 void setCallingConv(CallingConv::ID CC) {
3752 auto ID = static_cast<unsigned>(CC);
3753 assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention")(static_cast <bool> (!(ID & ~CallingConv::MaxID) &&
"Unsupported calling convention") ? void (0) : __assert_fail
("!(ID & ~CallingConv::MaxID) && \"Unsupported calling convention\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3753, __extension__ __PRETTY_FUNCTION__))
;
3754 setInstructionSubclassData(ID);
3755 }
3756
3757 /// Return the parameter attributes for this invoke.
3758 ///
3759 AttributeList getAttributes() const { return Attrs; }
3760
3761 /// Set the parameter attributes for this invoke.
3762 ///
3763 void setAttributes(AttributeList A) { Attrs = A; }
3764
3765 /// adds the attribute to the list of attributes.
3766 void addAttribute(unsigned i, Attribute::AttrKind Kind);
3767
3768 /// adds the attribute to the list of attributes.
3769 void addAttribute(unsigned i, Attribute Attr);
3770
3771 /// Adds the attribute to the indicated argument
3772 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
3773
3774 /// removes the attribute from the list of attributes.
3775 void removeAttribute(unsigned i, Attribute::AttrKind Kind);
3776
3777 /// removes the attribute from the list of attributes.
3778 void removeAttribute(unsigned i, StringRef Kind);
3779
3780 /// Removes the attribute from the given argument
3781 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
3782
3783 /// adds the dereferenceable attribute to the list of attributes.
3784 void addDereferenceableAttr(unsigned i, uint64_t Bytes);
3785
3786 /// adds the dereferenceable_or_null attribute to the list of
3787 /// attributes.
3788 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
3789
3790 /// Determine whether this call has the given attribute.
3791 bool hasFnAttr(Attribute::AttrKind Kind) const {
3792 assert(Kind != Attribute::NoBuiltin &&(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3793, __extension__ __PRETTY_FUNCTION__))
3793 "Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin")(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3793, __extension__ __PRETTY_FUNCTION__))
;
3794 return hasFnAttrImpl(Kind);
3795 }
3796
3797 /// Determine whether this call has the given attribute.
3798 bool hasFnAttr(StringRef Kind) const {
3799 return hasFnAttrImpl(Kind);
3800 }
3801
3802 /// Determine whether the return value has the given attribute.
3803 bool hasRetAttr(Attribute::AttrKind Kind) const;
3804
3805 /// Determine whether the argument or parameter has the given attribute.
3806 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
3807
3808 /// Get the attribute of a given kind at a position.
3809 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
3810 return getAttributes().getAttribute(i, Kind);
3811 }
3812
3813 /// Get the attribute of a given kind at a position.
3814 Attribute getAttribute(unsigned i, StringRef Kind) const {
3815 return getAttributes().getAttribute(i, Kind);
3816 }
3817
3818 /// Return true if the data operand at index \p i has the attribute \p
3819 /// A.
3820 ///
3821 /// Data operands include invoke arguments and values used in operand bundles,
3822 /// but does not include the invokee operand, or the two successor blocks.
3823 /// This routine dispatches to the underlying AttributeList or the
3824 /// OperandBundleUser as appropriate.
3825 ///
3826 /// The index \p i is interpreted as
3827 ///
3828 /// \p i == Attribute::ReturnIndex -> the return value
3829 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
3830 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
3831 /// (\p i - 1) in the operand list.
3832 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const;
3833
3834 /// Extract the alignment of the return value.
3835 unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
3836
3837 /// Extract the alignment for a call or parameter (0=unknown).
3838 unsigned getParamAlignment(unsigned ArgNo) const {
3839 return Attrs.getParamAlignment(ArgNo);
3840 }
3841
3842 /// Extract the number of dereferenceable bytes for a call or
3843 /// parameter (0=unknown).
3844 uint64_t getDereferenceableBytes(unsigned i) const {
3845 return Attrs.getDereferenceableBytes(i);
3846 }
3847
3848 /// Extract the number of dereferenceable_or_null bytes for a call or
3849 /// parameter (0=unknown).
3850 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
3851 return Attrs.getDereferenceableOrNullBytes(i);
3852 }
3853
3854 /// @brief Determine if the return value is marked with NoAlias attribute.
3855 bool returnDoesNotAlias() const {
3856 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
3857 }
3858
3859 /// Return true if the call should not be treated as a call to a
3860 /// builtin.
3861 bool isNoBuiltin() const {
3862 // We assert in hasFnAttr if one passes in Attribute::NoBuiltin, so we have
3863 // to check it by hand.
3864 return hasFnAttrImpl(Attribute::NoBuiltin) &&
3865 !hasFnAttrImpl(Attribute::Builtin);
3866 }
3867
3868 /// Determine if the call requires strict floating point semantics.
3869 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
3870
3871 /// Return true if the call should not be inlined.
3872 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
3873 void setIsNoInline() {
3874 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
3875 }
3876
3877 /// Determine if the call does not access memory.
3878 bool doesNotAccessMemory() const {
3879 return hasFnAttr(Attribute::ReadNone);
3880 }
3881 void setDoesNotAccessMemory() {
3882 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
3883 }
3884
3885 /// Determine if the call does not access or only reads memory.
3886 bool onlyReadsMemory() const {
3887 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
3888 }
3889 void setOnlyReadsMemory() {
3890 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
3891 }
3892
3893 /// Determine if the call does not access or only writes memory.
3894 bool doesNotReadMemory() const {
3895 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
3896 }
3897 void setDoesNotReadMemory() {
3898 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
3899 }
3900
3901 /// @brief Determine if the call access memmory only using it's pointer
3902 /// arguments.
3903 bool onlyAccessesArgMemory() const {
3904 return hasFnAttr(Attribute::ArgMemOnly);
3905 }
3906 void setOnlyAccessesArgMemory() {
3907 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
3908 }
3909
3910 /// @brief Determine if the function may only access memory that is
3911 /// inaccessible from the IR.
3912 bool onlyAccessesInaccessibleMemory() const {
3913 return hasFnAttr(Attribute::InaccessibleMemOnly);
3914 }
3915 void setOnlyAccessesInaccessibleMemory() {
3916 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
3917 }
3918
3919 /// @brief Determine if the function may only access memory that is
3920 /// either inaccessible from the IR or pointed to by its arguments.
3921 bool onlyAccessesInaccessibleMemOrArgMem() const {
3922 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
3923 }
3924 void setOnlyAccessesInaccessibleMemOrArgMem() {
3925 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOrArgMemOnly);
3926 }
3927
3928 /// Determine if the call cannot return.
3929 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
3930 void setDoesNotReturn() {
3931 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
3932 }
3933
3934 /// Determine if the call cannot unwind.
3935 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
3936 void setDoesNotThrow() {
3937 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
3938 }
3939
3940 /// Determine if the invoke cannot be duplicated.
3941 bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
3942 void setCannotDuplicate() {
3943 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
3944 }
3945
3946 /// Determine if the invoke is convergent
3947 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
3948 void setConvergent() {
3949 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
3950 }
3951 void setNotConvergent() {
3952 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
3953 }
3954
3955 /// Determine if the call returns a structure through first
3956 /// pointer argument.
3957 bool hasStructRetAttr() const {
3958 if (getNumArgOperands() == 0)
3959 return false;
3960
3961 // Be friendly and also check the callee.
3962 return paramHasAttr(0, Attribute::StructRet);
3963 }
3964
3965 /// Determine if any call argument is an aggregate passed by value.
3966 bool hasByValArgument() const {
3967 return Attrs.hasAttrSomewhere(Attribute::ByVal);
3968 }
3969
3970 /// Return the function called, or null if this is an
3971 /// indirect function invocation.
3972 ///
3973 Function *getCalledFunction() const {
3974 return dyn_cast<Function>(Op<-3>());
3975 }
3976
3977 /// Get a pointer to the function that is invoked by this
3978 /// instruction
3979 const Value *getCalledValue() const { return Op<-3>(); }
3980 Value *getCalledValue() { return Op<-3>(); }
3981
3982 /// Set the function called.
3983 void setCalledFunction(Value* Fn) {
3984 setCalledFunction(
3985 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
3986 Fn);
3987 }
3988 void setCalledFunction(FunctionType *FTy, Value *Fn) {
3989 this->FTy = FTy;
3990 assert(FTy == cast<FunctionType>((static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3991, __extension__ __PRETTY_FUNCTION__))
3991 cast<PointerType>(Fn->getType())->getElementType()))(static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 3991, __extension__ __PRETTY_FUNCTION__))
;
3992 Op<-3>() = Fn;
3993 }
3994
3995 // get*Dest - Return the destination basic blocks...
3996 BasicBlock *getNormalDest() const {
3997 return cast<BasicBlock>(Op<-2>());
3998 }
3999 BasicBlock *getUnwindDest() const {
4000 return cast<BasicBlock>(Op<-1>());
4001 }
4002 void setNormalDest(BasicBlock *B) {
4003 Op<-2>() = reinterpret_cast<Value*>(B);
4004 }
4005 void setUnwindDest(BasicBlock *B) {
4006 Op<-1>() = reinterpret_cast<Value*>(B);
4007 }
4008
4009 /// Get the landingpad instruction from the landing pad
4010 /// block (the unwind destination).
4011 LandingPadInst *getLandingPadInst() const;
4012
4013 BasicBlock *getSuccessor(unsigned i) const {
4014 assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4014, __extension__ __PRETTY_FUNCTION__))
;
4015 return i == 0 ? getNormalDest() : getUnwindDest();
4016 }
4017
4018 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4019 assert(idx < 2 && "Successor # out of range for invoke!")(static_cast <bool> (idx < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("idx < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4019, __extension__ __PRETTY_FUNCTION__))
;
4020 *(&Op<-2>() + idx) = reinterpret_cast<Value*>(NewSucc);
4021 }
4022
4023 unsigned getNumSuccessors() const { return 2; }
4024
4025 // Methods for support type inquiry through isa, cast, and dyn_cast:
4026 static bool classof(const Instruction *I) {
4027 return (I->getOpcode() == Instruction::Invoke);
4028 }
4029 static bool classof(const Value *V) {
4030 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4031 }
4032
4033private:
4034 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
4035 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
4036 return true;
4037
4038 // Operand bundles override attributes on the called function, but don't
4039 // override attributes directly present on the invoke instruction.
4040 if (isFnAttrDisallowedByOpBundle(Kind))
4041 return false;
4042
4043 if (const Function *F = getCalledFunction())
4044 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
4045 Kind);
4046 return false;
4047 }
4048
4049 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4050 // method so that subclasses cannot accidentally use it.
4051 void setInstructionSubclassData(unsigned short D) {
4052 Instruction::setInstructionSubclassData(D);
4053 }
4054};
4055
4056template <>
4057struct OperandTraits<InvokeInst> : public VariadicOperandTraits<InvokeInst, 3> {
4058};
4059
4060InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4061 BasicBlock *IfException, ArrayRef<Value *> Args,
4062 ArrayRef<OperandBundleDef> Bundles, unsigned Values,
4063 const Twine &NameStr, Instruction *InsertBefore)
4064 : TerminatorInst(Ty->getReturnType(), Instruction::Invoke,
4065 OperandTraits<InvokeInst>::op_end(this) - Values, Values,
4066 InsertBefore) {
4067 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4068}
4069
4070InvokeInst::InvokeInst(Value *Func, BasicBlock *IfNormal,
4071 BasicBlock *IfException, ArrayRef<Value *> Args,
4072 ArrayRef<OperandBundleDef> Bundles, unsigned Values,
4073 const Twine &NameStr, BasicBlock *InsertAtEnd)
4074 : TerminatorInst(
4075 cast<FunctionType>(cast<PointerType>(Func->getType())
4076 ->getElementType())->getReturnType(),
4077 Instruction::Invoke, OperandTraits<InvokeInst>::op_end(this) - Values,
4078 Values, InsertAtEnd) {
4079 init(Func, IfNormal, IfException, Args, Bundles, NameStr);
4080}
4081
4082DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value)InvokeInst::op_iterator InvokeInst::op_begin() { return OperandTraits
<InvokeInst>::op_begin(this); } InvokeInst::const_op_iterator
InvokeInst::op_begin() const { return OperandTraits<InvokeInst
>::op_begin(const_cast<InvokeInst*>(this)); } InvokeInst
::op_iterator InvokeInst::op_end() { return OperandTraits<
InvokeInst>::op_end(this); } InvokeInst::const_op_iterator
InvokeInst::op_end() const { return OperandTraits<InvokeInst
>::op_end(const_cast<InvokeInst*>(this)); } Value *InvokeInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InvokeInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<InvokeInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4082, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InvokeInst>::op_begin(const_cast
<InvokeInst*>(this))[i_nocapture].get()); } void InvokeInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<InvokeInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InvokeInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4082, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InvokeInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned InvokeInst::getNumOperands() const { return OperandTraits
<InvokeInst>::operands(this); } template <int Idx_nocapture
> Use &InvokeInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
InvokeInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
4083
4084//===----------------------------------------------------------------------===//
4085// ResumeInst Class
4086//===----------------------------------------------------------------------===//
4087
4088//===---------------------------------------------------------------------------
4089/// Resume the propagation of an exception.
4090///
4091class ResumeInst : public TerminatorInst {
4092 ResumeInst(const ResumeInst &RI);
4093
4094 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4095 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4096
4097protected:
4098 // Note: Instruction needs to be a friend here to call cloneImpl.
4099 friend class Instruction;
4100
4101 ResumeInst *cloneImpl() const;
4102
4103public:
4104 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4105 return new(1) ResumeInst(Exn, InsertBefore);
4106 }
4107
4108 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4109 return new(1) ResumeInst(Exn, InsertAtEnd);
4110 }
4111
4112 /// Provide fast operand accessors
4113 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4114
4115 /// Convenience accessor.
4116 Value *getValue() const { return Op<0>(); }
4117
4118 unsigned getNumSuccessors() const { return 0; }
4119
4120 // Methods for support type inquiry through isa, cast, and dyn_cast:
4121 static bool classof(const Instruction *I) {
4122 return I->getOpcode() == Instruction::Resume;
4123 }
4124 static bool classof(const Value *V) {
4125 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4126 }
4127
4128private:
4129 friend TerminatorInst;
4130
4131 BasicBlock *getSuccessor(unsigned idx) const {
4132 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4132)
;
4133 }
4134
4135 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4136 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4136)
;
4137 }
4138};
4139
4140template <>
4141struct OperandTraits<ResumeInst> :
4142 public FixedNumOperandTraits<ResumeInst, 1> {
4143};
4144
4145DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits
<ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator
ResumeInst::op_begin() const { return OperandTraits<ResumeInst
>::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst
::op_iterator ResumeInst::op_end() { return OperandTraits<
ResumeInst>::op_end(this); } ResumeInst::const_op_iterator
ResumeInst::op_end() const { return OperandTraits<ResumeInst
>::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ResumeInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4145, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ResumeInst>::op_begin(const_cast
<ResumeInst*>(this))[i_nocapture].get()); } void ResumeInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<ResumeInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4145, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ResumeInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned ResumeInst::getNumOperands() const { return OperandTraits
<ResumeInst>::operands(this); } template <int Idx_nocapture
> Use &ResumeInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ResumeInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
4146
4147//===----------------------------------------------------------------------===//
4148// CatchSwitchInst Class
4149//===----------------------------------------------------------------------===//
4150class CatchSwitchInst : public TerminatorInst {
4151 /// The number of operands actually allocated. NumOperands is
4152 /// the number actually in use.
4153 unsigned ReservedSpace;
4154
4155 // Operand[0] = Outer scope
4156 // Operand[1] = Unwind block destination
4157 // Operand[n] = BasicBlock to go to on match
4158 CatchSwitchInst(const CatchSwitchInst &CSI);
4159
4160 /// Create a new switch instruction, specifying a
4161 /// default destination. The number of additional handlers can be specified
4162 /// here to make memory allocation more efficient.
4163 /// This constructor can also autoinsert before another instruction.
4164 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4165 unsigned NumHandlers, const Twine &NameStr,
4166 Instruction *InsertBefore);
4167
4168 /// Create a new switch instruction, specifying a
4169 /// default destination. The number of additional handlers can be specified
4170 /// here to make memory allocation more efficient.
4171 /// This constructor also autoinserts at the end of the specified BasicBlock.
4172 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4173 unsigned NumHandlers, const Twine &NameStr,
4174 BasicBlock *InsertAtEnd);
4175
4176 // allocate space for exactly zero operands
4177 void *operator new(size_t s) { return User::operator new(s); }
4178
4179 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4180 void growOperands(unsigned Size);
4181
4182protected:
4183 // Note: Instruction needs to be a friend here to call cloneImpl.
4184 friend class Instruction;
4185
4186 CatchSwitchInst *cloneImpl() const;
4187
4188public:
4189 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4190 unsigned NumHandlers,
4191 const Twine &NameStr = "",
4192 Instruction *InsertBefore = nullptr) {
4193 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4194 InsertBefore);
4195 }
4196
4197 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4198 unsigned NumHandlers, const Twine &NameStr,
4199 BasicBlock *InsertAtEnd) {
4200 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4201 InsertAtEnd);
4202 }
4203
4204 /// Provide fast operand accessors
4205 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4206
4207 // Accessor Methods for CatchSwitch stmt
4208 Value *getParentPad() const { return getOperand(0); }
4209 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4210
4211 // Accessor Methods for CatchSwitch stmt
4212 bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
4213 bool unwindsToCaller() const { return !hasUnwindDest(); }
4214 BasicBlock *getUnwindDest() const {
4215 if (hasUnwindDest())
4216 return cast<BasicBlock>(getOperand(1));
4217 return nullptr;
4218 }
4219 void setUnwindDest(BasicBlock *UnwindDest) {
4220 assert(UnwindDest)(static_cast <bool> (UnwindDest) ? void (0) : __assert_fail
("UnwindDest", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4220, __extension__ __PRETTY_FUNCTION__))
;
4221 assert(hasUnwindDest())(static_cast <bool> (hasUnwindDest()) ? void (0) : __assert_fail
("hasUnwindDest()", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4221, __extension__ __PRETTY_FUNCTION__))
;
4222 setOperand(1, UnwindDest);
4223 }
4224
4225 /// return the number of 'handlers' in this catchswitch
4226 /// instruction, except the default handler
4227 unsigned getNumHandlers() const {
4228 if (hasUnwindDest())
4229 return getNumOperands() - 2;
4230 return getNumOperands() - 1;
4231 }
4232
4233private:
4234 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4235 static const BasicBlock *handler_helper(const Value *V) {
4236 return cast<BasicBlock>(V);
4237 }
4238
4239public:
4240 using DerefFnTy = BasicBlock *(*)(Value *);
4241 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4242 using handler_range = iterator_range<handler_iterator>;
4243 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4244 using const_handler_iterator =
4245 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4246 using const_handler_range = iterator_range<const_handler_iterator>;
4247
4248 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4249 handler_iterator handler_begin() {
4250 op_iterator It = op_begin() + 1;
4251 if (hasUnwindDest())
4252 ++It;
4253 return handler_iterator(It, DerefFnTy(handler_helper));
4254 }
4255
4256 /// Returns an iterator that points to the first handler in the
4257 /// CatchSwitchInst.
4258 const_handler_iterator handler_begin() const {
4259 const_op_iterator It = op_begin() + 1;
4260 if (hasUnwindDest())
4261 ++It;
4262 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4263 }
4264
4265 /// Returns a read-only iterator that points one past the last
4266 /// handler in the CatchSwitchInst.
4267 handler_iterator handler_end() {
4268 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4269 }
4270
4271 /// Returns an iterator that points one past the last handler in the
4272 /// CatchSwitchInst.
4273 const_handler_iterator handler_end() const {
4274 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4275 }
4276
4277 /// iteration adapter for range-for loops.
4278 handler_range handlers() {
4279 return make_range(handler_begin(), handler_end());
4280 }
4281
4282 /// iteration adapter for range-for loops.
4283 const_handler_range handlers() const {
4284 return make_range(handler_begin(), handler_end());
4285 }
4286
4287 /// Add an entry to the switch instruction...
4288 /// Note:
4289 /// This action invalidates handler_end(). Old handler_end() iterator will
4290 /// point to the added handler.
4291 void addHandler(BasicBlock *Dest);
4292
4293 void removeHandler(handler_iterator HI);
4294
4295 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4296 BasicBlock *getSuccessor(unsigned Idx) const {
4297 assert(Idx < getNumSuccessors() &&(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4298, __extension__ __PRETTY_FUNCTION__))
4298 "Successor # out of range for catchswitch!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4298, __extension__ __PRETTY_FUNCTION__))
;
4299 return cast<BasicBlock>(getOperand(Idx + 1));
4300 }
4301 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4302 assert(Idx < getNumSuccessors() &&(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4303, __extension__ __PRETTY_FUNCTION__))
4303 "Successor # out of range for catchswitch!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4303, __extension__ __PRETTY_FUNCTION__))
;
4304 setOperand(Idx + 1, NewSucc);
4305 }
4306
4307 // Methods for support type inquiry through isa, cast, and dyn_cast:
4308 static bool classof(const Instruction *I) {
4309 return I->getOpcode() == Instruction::CatchSwitch;
4310 }
4311 static bool classof(const Value *V) {
4312 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4313 }
4314};
4315
4316template <>
4317struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4318
4319DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return
OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst
::const_op_iterator CatchSwitchInst::op_begin() const { return
OperandTraits<CatchSwitchInst>::op_begin(const_cast<
CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst
::op_end() { return OperandTraits<CatchSwitchInst>::op_end
(this); } CatchSwitchInst::const_op_iterator CatchSwitchInst::
op_end() const { return OperandTraits<CatchSwitchInst>::
op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<CatchSwitchInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4319, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<CatchSwitchInst>::op_begin
(const_cast<CatchSwitchInst*>(this))[i_nocapture].get()
); } void CatchSwitchInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<CatchSwitchInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4319, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
CatchSwitchInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned CatchSwitchInst::getNumOperands() const { return
OperandTraits<CatchSwitchInst>::operands(this); } template
<int Idx_nocapture> Use &CatchSwitchInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &CatchSwitchInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
4320
4321//===----------------------------------------------------------------------===//
4322// CleanupPadInst Class
4323//===----------------------------------------------------------------------===//
4324class CleanupPadInst : public FuncletPadInst {
4325private:
4326 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4327 unsigned Values, const Twine &NameStr,
4328 Instruction *InsertBefore)
4329 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4330 NameStr, InsertBefore) {}
4331 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4332 unsigned Values, const Twine &NameStr,
4333 BasicBlock *InsertAtEnd)
4334 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4335 NameStr, InsertAtEnd) {}
4336
4337public:
4338 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4339 const Twine &NameStr = "",
4340 Instruction *InsertBefore = nullptr) {
4341 unsigned Values = 1 + Args.size();
4342 return new (Values)
4343 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4344 }
4345
4346 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4347 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4348 unsigned Values = 1 + Args.size();
4349 return new (Values)
4350 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4351 }
4352
4353 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4354 static bool classof(const Instruction *I) {
4355 return I->getOpcode() == Instruction::CleanupPad;
4356 }
4357 static bool classof(const Value *V) {
4358 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4359 }
4360};
4361
4362//===----------------------------------------------------------------------===//
4363// CatchPadInst Class
4364//===----------------------------------------------------------------------===//
4365class CatchPadInst : public FuncletPadInst {
4366private:
4367 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4368 unsigned Values, const Twine &NameStr,
4369 Instruction *InsertBefore)
4370 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4371 NameStr, InsertBefore) {}
4372 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4373 unsigned Values, const Twine &NameStr,
4374 BasicBlock *InsertAtEnd)
4375 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4376 NameStr, InsertAtEnd) {}
4377
4378public:
4379 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4380 const Twine &NameStr = "",
4381 Instruction *InsertBefore = nullptr) {
4382 unsigned Values = 1 + Args.size();
4383 return new (Values)
4384 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4385 }
4386
4387 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4388 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4389 unsigned Values = 1 + Args.size();
4390 return new (Values)
4391 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4392 }
4393
4394 /// Convenience accessors
4395 CatchSwitchInst *getCatchSwitch() const {
4396 return cast<CatchSwitchInst>(Op<-1>());
4397 }
4398 void setCatchSwitch(Value *CatchSwitch) {
4399 assert(CatchSwitch)(static_cast <bool> (CatchSwitch) ? void (0) : __assert_fail
("CatchSwitch", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4399, __extension__ __PRETTY_FUNCTION__))
;
4400 Op<-1>() = CatchSwitch;
4401 }
4402
4403 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4404 static bool classof(const Instruction *I) {
4405 return I->getOpcode() == Instruction::CatchPad;
4406 }
4407 static bool classof(const Value *V) {
4408 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4409 }
4410};
4411
4412//===----------------------------------------------------------------------===//
4413// CatchReturnInst Class
4414//===----------------------------------------------------------------------===//
4415
4416class CatchReturnInst : public TerminatorInst {
4417 CatchReturnInst(const CatchReturnInst &RI);
4418 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4419 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4420
4421 void init(Value *CatchPad, BasicBlock *BB);
4422
4423protected:
4424 // Note: Instruction needs to be a friend here to call cloneImpl.
4425 friend class Instruction;
4426
4427 CatchReturnInst *cloneImpl() const;
4428
4429public:
4430 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4431 Instruction *InsertBefore = nullptr) {
4432 assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail
("CatchPad", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4432, __extension__ __PRETTY_FUNCTION__))
;
4433 assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4433, __extension__ __PRETTY_FUNCTION__))
;
4434 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4435 }
4436
4437 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4438 BasicBlock *InsertAtEnd) {
4439 assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail
("CatchPad", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4439, __extension__ __PRETTY_FUNCTION__))
;
4440 assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4440, __extension__ __PRETTY_FUNCTION__))
;
4441 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4442 }
4443
4444 /// Provide fast operand accessors
4445 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4446
4447 /// Convenience accessors.
4448 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4449 void setCatchPad(CatchPadInst *CatchPad) {
4450 assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail
("CatchPad", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4450, __extension__ __PRETTY_FUNCTION__))
;
4451 Op<0>() = CatchPad;
4452 }
4453
4454 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4455 void setSuccessor(BasicBlock *NewSucc) {
4456 assert(NewSucc)(static_cast <bool> (NewSucc) ? void (0) : __assert_fail
("NewSucc", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4456, __extension__ __PRETTY_FUNCTION__))
;
4457 Op<1>() = NewSucc;
4458 }
4459 unsigned getNumSuccessors() const { return 1; }
4460
4461 /// Get the parentPad of this catchret's catchpad's catchswitch.
4462 /// The successor block is implicitly a member of this funclet.
4463 Value *getCatchSwitchParentPad() const {
4464 return getCatchPad()->getCatchSwitch()->getParentPad();
4465 }
4466
4467 // Methods for support type inquiry through isa, cast, and dyn_cast:
4468 static bool classof(const Instruction *I) {
4469 return (I->getOpcode() == Instruction::CatchRet);
4470 }
4471 static bool classof(const Value *V) {
4472 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4473 }
4474
4475private:
4476 friend TerminatorInst;
4477
4478 BasicBlock *getSuccessor(unsigned Idx) const {
4479 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchret!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4479, __extension__ __PRETTY_FUNCTION__))
;
4480 return getSuccessor();
4481 }
4482
4483 void setSuccessor(unsigned Idx, BasicBlock *B) {
4484 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchret!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4484, __extension__ __PRETTY_FUNCTION__))
;
4485 setSuccessor(B);
4486 }
4487};
4488
4489template <>
4490struct OperandTraits<CatchReturnInst>
4491 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4492
// Out-of-line transparent operand accessor definitions for CatchReturnInst
// (expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)).
CatchReturnInst::op_iterator CatchReturnInst::op_begin() {
  return OperandTraits<CatchReturnInst>::op_begin(this);
}
CatchReturnInst::const_op_iterator CatchReturnInst::op_begin() const {
  return OperandTraits<CatchReturnInst>::op_begin(
      const_cast<CatchReturnInst *>(this));
}
CatchReturnInst::op_iterator CatchReturnInst::op_end() {
  return OperandTraits<CatchReturnInst>::op_end(this);
}
CatchReturnInst::const_op_iterator CatchReturnInst::op_end() const {
  return OperandTraits<CatchReturnInst>::op_end(
      const_cast<CatchReturnInst *>(this));
}
Value *CatchReturnInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<CatchReturnInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<CatchReturnInst>::op_begin(
          const_cast<CatchReturnInst *>(this))[i_nocapture].get());
}
void CatchReturnInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<CatchReturnInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<CatchReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned CatchReturnInst::getNumOperands() const {
  return OperandTraits<CatchReturnInst>::operands(this);
}
template <int Idx_nocapture> Use &CatchReturnInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &CatchReturnInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
4494
4495//===----------------------------------------------------------------------===//
4496// CleanupReturnInst Class
4497//===----------------------------------------------------------------------===//
4498
4499class CleanupReturnInst : public TerminatorInst {
4500private:
4501 CleanupReturnInst(const CleanupReturnInst &RI);
4502 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4503 Instruction *InsertBefore = nullptr);
4504 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4505 BasicBlock *InsertAtEnd);
4506
4507 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4508
4509protected:
4510 // Note: Instruction needs to be a friend here to call cloneImpl.
4511 friend class Instruction;
4512
4513 CleanupReturnInst *cloneImpl() const;
4514
4515public:
4516 static CleanupReturnInst *Create(Value *CleanupPad,
4517 BasicBlock *UnwindBB = nullptr,
4518 Instruction *InsertBefore = nullptr) {
4519 assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail
("CleanupPad", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4519, __extension__ __PRETTY_FUNCTION__))
;
4520 unsigned Values = 1;
4521 if (UnwindBB)
4522 ++Values;
4523 return new (Values)
4524 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4525 }
4526
4527 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4528 BasicBlock *InsertAtEnd) {
4529 assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail
("CleanupPad", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4529, __extension__ __PRETTY_FUNCTION__))
;
4530 unsigned Values = 1;
4531 if (UnwindBB)
4532 ++Values;
4533 return new (Values)
4534 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4535 }
4536
4537 /// Provide fast operand accessors
4538 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4539
4540 bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
4541 bool unwindsToCaller() const { return !hasUnwindDest(); }
4542
4543 /// Convenience accessor.
4544 CleanupPadInst *getCleanupPad() const {
4545 return cast<CleanupPadInst>(Op<0>());
4546 }
4547 void setCleanupPad(CleanupPadInst *CleanupPad) {
4548 assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail
("CleanupPad", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4548, __extension__ __PRETTY_FUNCTION__))
;
4549 Op<0>() = CleanupPad;
4550 }
4551
4552 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4553
4554 BasicBlock *getUnwindDest() const {
4555 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4556 }
4557 void setUnwindDest(BasicBlock *NewDest) {
4558 assert(NewDest)(static_cast <bool> (NewDest) ? void (0) : __assert_fail
("NewDest", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4558, __extension__ __PRETTY_FUNCTION__))
;
4559 assert(hasUnwindDest())(static_cast <bool> (hasUnwindDest()) ? void (0) : __assert_fail
("hasUnwindDest()", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4559, __extension__ __PRETTY_FUNCTION__))
;
4560 Op<1>() = NewDest;
4561 }
4562
4563 // Methods for support type inquiry through isa, cast, and dyn_cast:
4564 static bool classof(const Instruction *I) {
4565 return (I->getOpcode() == Instruction::CleanupRet);
4566 }
4567 static bool classof(const Value *V) {
4568 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4569 }
4570
4571private:
4572 friend TerminatorInst;
4573
4574 BasicBlock *getSuccessor(unsigned Idx) const {
4575 assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail
("Idx == 0", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4575, __extension__ __PRETTY_FUNCTION__))
;
4576 return getUnwindDest();
4577 }
4578
4579 void setSuccessor(unsigned Idx, BasicBlock *B) {
4580 assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail
("Idx == 0", "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4580, __extension__ __PRETTY_FUNCTION__))
;
4581 setUnwindDest(B);
4582 }
4583
4584 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4585 // method so that subclasses cannot accidentally use it.
4586 void setInstructionSubclassData(unsigned short D) {
4587 Instruction::setInstructionSubclassData(D);
4588 }
4589};
4590
4591template <>
4592struct OperandTraits<CleanupReturnInst>
4593 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4594
// Out-of-line transparent operand accessor definitions for CleanupReturnInst
// (expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)).
CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() {
  return OperandTraits<CleanupReturnInst>::op_begin(this);
}
CleanupReturnInst::const_op_iterator CleanupReturnInst::op_begin() const {
  return OperandTraits<CleanupReturnInst>::op_begin(
      const_cast<CleanupReturnInst *>(this));
}
CleanupReturnInst::op_iterator CleanupReturnInst::op_end() {
  return OperandTraits<CleanupReturnInst>::op_end(this);
}
CleanupReturnInst::const_op_iterator CleanupReturnInst::op_end() const {
  return OperandTraits<CleanupReturnInst>::op_end(
      const_cast<CleanupReturnInst *>(this));
}
Value *CleanupReturnInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<CleanupReturnInst>::op_begin(
          const_cast<CleanupReturnInst *>(this))[i_nocapture].get());
}
void CleanupReturnInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<CleanupReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned CleanupReturnInst::getNumOperands() const {
  return OperandTraits<CleanupReturnInst>::operands(this);
}
template <int Idx_nocapture> Use &CleanupReturnInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &CleanupReturnInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
4596
4597//===----------------------------------------------------------------------===//
4598// UnreachableInst Class
4599//===----------------------------------------------------------------------===//
4600
4601//===---------------------------------------------------------------------------
4602/// This function has undefined behavior. In particular, the
4603/// presence of this instruction indicates some higher level knowledge that the
4604/// end of the block cannot be reached.
4605///
4606class UnreachableInst : public TerminatorInst {
4607protected:
4608 // Note: Instruction needs to be a friend here to call cloneImpl.
4609 friend class Instruction;
4610
4611 UnreachableInst *cloneImpl() const;
4612
4613public:
4614 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4615 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4616
4617 // allocate space for exactly zero operands
4618 void *operator new(size_t s) {
4619 return User::operator new(s, 0);
4620 }
4621
4622 unsigned getNumSuccessors() const { return 0; }
4623
4624 // Methods for support type inquiry through isa, cast, and dyn_cast:
4625 static bool classof(const Instruction *I) {
4626 return I->getOpcode() == Instruction::Unreachable;
4627 }
4628 static bool classof(const Value *V) {
4629 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4630 }
4631
4632private:
4633 friend TerminatorInst;
4634
4635 BasicBlock *getSuccessor(unsigned idx) const {
4636 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4636)
;
4637 }
4638
4639 void setSuccessor(unsigned idx, BasicBlock *B) {
4640 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-6.0~svn318601/include/llvm/IR/Instructions.h"
, 4640)
;
4641 }
4642};
4643
4644//===----------------------------------------------------------------------===//
4645// TruncInst Class
4646//===----------------------------------------------------------------------===//
4647
4648/// This class represents a truncation of integer types.
4649class TruncInst : public CastInst {
4650protected:
4651 // Note: Instruction needs to be a friend here to call cloneImpl.
4652 friend class Instruction;
4653
4654 /// Clone an identical TruncInst
4655 TruncInst *cloneImpl() const;
4656
4657public:
4658 /// Constructor with insert-before-instruction semantics
4659 TruncInst(
4660 Value *S, ///< The value to be truncated
4661 Type *Ty, ///< The (smaller) type to truncate to
4662 const Twine &NameStr = "", ///< A name for the new instruction
4663 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4664 );
4665
4666 /// Constructor with insert-at-end-of-block semantics
4667 TruncInst(
4668 Value *S, ///< The value to be truncated
4669 Type *Ty, ///< The (smaller) type to truncate to
4670 const Twine &NameStr, ///< A name for the new instruction
4671 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4672 );
4673
4674 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4675 static bool classof(const Instruction *I) {
4676 return I->getOpcode() == Trunc;
4677 }
4678 static bool classof(const Value *V) {
4679 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4680 }
4681};
4682
4683//===----------------------------------------------------------------------===//
4684// ZExtInst Class
4685//===----------------------------------------------------------------------===//
4686
4687/// This class represents zero extension of integer types.
4688class ZExtInst : public CastInst {
4689protected:
4690 // Note: Instruction needs to be a friend here to call cloneImpl.
4691 friend class Instruction;
4692
4693 /// Clone an identical ZExtInst
4694 ZExtInst *cloneImpl() const;
4695
4696public:
4697 /// Constructor with insert-before-instruction semantics
4698 ZExtInst(
4699 Value *S, ///< The value to be zero extended
4700 Type *Ty, ///< The type to zero extend to
4701 const Twine &NameStr = "", ///< A name for the new instruction
4702 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4703 );
4704
4705 /// Constructor with insert-at-end semantics.
4706 ZExtInst(
4707 Value *S, ///< The value to be zero extended
4708 Type *Ty, ///< The type to zero extend to
4709 const Twine &NameStr, ///< A name for the new instruction
4710 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4711 );
4712
4713 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4714 static bool classof(const Instruction *I) {
4715 return I->getOpcode() == ZExt;
4716 }
4717 static bool classof(const Value *V) {
4718 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4719 }
4720};
4721
4722//===----------------------------------------------------------------------===//
4723// SExtInst Class
4724//===----------------------------------------------------------------------===//
4725
4726/// This class represents a sign extension of integer types.
4727class SExtInst : public CastInst {
4728protected:
4729 // Note: Instruction needs to be a friend here to call cloneImpl.
4730 friend class Instruction;
4731
4732 /// Clone an identical SExtInst
4733 SExtInst *cloneImpl() const;
4734
4735public:
4736 /// Constructor with insert-before-instruction semantics
4737 SExtInst(
4738 Value *S, ///< The value to be sign extended
4739 Type *Ty, ///< The type to sign extend to
4740 const Twine &NameStr = "", ///< A name for the new instruction
4741 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4742 );
4743
4744 /// Constructor with insert-at-end-of-block semantics
4745 SExtInst(
4746 Value *S, ///< The value to be sign extended
4747 Type *Ty, ///< The type to sign extend to
4748 const Twine &NameStr, ///< A name for the new instruction
4749 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4750 );
4751
4752 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4753 static bool classof(const Instruction *I) {
4754 return I->getOpcode() == SExt;
4755 }
4756 static bool classof(const Value *V) {
4757 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4758 }
4759};
4760
4761//===----------------------------------------------------------------------===//
4762// FPTruncInst Class
4763//===----------------------------------------------------------------------===//
4764
4765/// This class represents a truncation of floating point types.
4766class FPTruncInst : public CastInst {
4767protected:
4768 // Note: Instruction needs to be a friend here to call cloneImpl.
4769 friend class Instruction;
4770
4771 /// Clone an identical FPTruncInst
4772 FPTruncInst *cloneImpl() const;
4773
4774public:
4775 /// Constructor with insert-before-instruction semantics
4776 FPTruncInst(
4777 Value *S, ///< The value to be truncated
4778 Type *Ty, ///< The type to truncate to
4779 const Twine &NameStr = "", ///< A name for the new instruction
4780 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4781 );
4782
4783 /// Constructor with insert-before-instruction semantics
4784 FPTruncInst(
4785 Value *S, ///< The value to be truncated
4786 Type *Ty, ///< The type to truncate to
4787 const Twine &NameStr, ///< A name for the new instruction
4788 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4789 );
4790
4791 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4792 static bool classof(const Instruction *I) {
4793 return I->getOpcode() == FPTrunc;
4794 }
4795 static bool classof(const Value *V) {
4796 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4797 }
4798};
4799
4800//===----------------------------------------------------------------------===//
4801// FPExtInst Class
4802//===----------------------------------------------------------------------===//
4803
4804/// This class represents an extension of floating point types.
4805class FPExtInst : public CastInst {
4806protected:
4807 // Note: Instruction needs to be a friend here to call cloneImpl.
4808 friend class Instruction;
4809
4810 /// Clone an identical FPExtInst
4811 FPExtInst *cloneImpl() const;
4812
4813public:
4814 /// Constructor with insert-before-instruction semantics
4815 FPExtInst(
4816 Value *S, ///< The value to be extended
4817 Type *Ty, ///< The type to extend to
4818 const Twine &NameStr = "", ///< A name for the new instruction
4819 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4820 );
4821
4822 /// Constructor with insert-at-end-of-block semantics
4823 FPExtInst(
4824 Value *S, ///< The value to be extended
4825 Type *Ty, ///< The type to extend to
4826 const Twine &NameStr, ///< A name for the new instruction
4827 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4828 );
4829
4830 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4831 static bool classof(const Instruction *I) {
4832 return I->getOpcode() == FPExt;
4833 }
4834 static bool classof(const Value *V) {
4835 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4836 }
4837};
4838
4839//===----------------------------------------------------------------------===//
4840// UIToFPInst Class
4841//===----------------------------------------------------------------------===//
4842
4843/// This class represents a cast unsigned integer to floating point.
4844class UIToFPInst : public CastInst {
4845protected:
4846 // Note: Instruction needs to be a friend here to call cloneImpl.
4847 friend class Instruction;
4848
4849 /// Clone an identical UIToFPInst
4850 UIToFPInst *cloneImpl() const;
4851
4852public:
4853 /// Constructor with insert-before-instruction semantics
4854 UIToFPInst(
4855 Value *S, ///< The value to be converted
4856 Type *Ty, ///< The type to convert to
4857 const Twine &NameStr = "", ///< A name for the new instruction
4858 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4859 );
4860
4861 /// Constructor with insert-at-end-of-block semantics
4862 UIToFPInst(
4863 Value *S, ///< The value to be converted
4864 Type *Ty, ///< The type to convert to
4865 const Twine &NameStr, ///< A name for the new instruction
4866 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4867 );
4868
4869 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4870 static bool classof(const Instruction *I) {
4871 return I->getOpcode() == UIToFP;
4872 }
4873 static bool classof(const Value *V) {
4874 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4875 }
4876};
4877
4878//===----------------------------------------------------------------------===//
4879// SIToFPInst Class
4880//===----------------------------------------------------------------------===//
4881
4882/// This class represents a cast from signed integer to floating point.
4883class SIToFPInst : public CastInst {
4884protected:
4885 // Note: Instruction needs to be a friend here to call cloneImpl.
4886 friend class Instruction;
4887
4888 /// Clone an identical SIToFPInst
4889 SIToFPInst *cloneImpl() const;
4890
4891public:
4892 /// Constructor with insert-before-instruction semantics
4893 SIToFPInst(
4894 Value *S, ///< The value to be converted
4895 Type *Ty, ///< The type to convert to
4896 const Twine &NameStr = "", ///< A name for the new instruction
4897 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4898 );
4899
4900 /// Constructor with insert-at-end-of-block semantics
4901 SIToFPInst(
4902 Value *S, ///< The value to be converted
4903 Type *Ty, ///< The type to convert to
4904 const Twine &NameStr, ///< A name for the new instruction
4905 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4906 );
4907
4908 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4909 static bool classof(const Instruction *I) {
4910 return I->getOpcode() == SIToFP;
4911 }
4912 static bool classof(const Value *V) {
4913 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4914 }
4915};
4916
4917//===----------------------------------------------------------------------===//
4918// FPToUIInst Class
4919//===----------------------------------------------------------------------===//
4920
4921/// This class represents a cast from floating point to unsigned integer
4922class FPToUIInst : public CastInst {
4923protected:
4924 // Note: Instruction needs to be a friend here to call cloneImpl.
4925 friend class Instruction;
4926
4927 /// Clone an identical FPToUIInst
4928 FPToUIInst *cloneImpl() const;
4929
4930public:
4931 /// Constructor with insert-before-instruction semantics
4932 FPToUIInst(
4933 Value *S, ///< The value to be converted
4934 Type *Ty, ///< The type to convert to
4935 const Twine &NameStr = "", ///< A name for the new instruction
4936 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4937 );
4938
4939 /// Constructor with insert-at-end-of-block semantics
4940 FPToUIInst(
4941 Value *S, ///< The value to be converted
4942 Type *Ty, ///< The type to convert to
4943 const Twine &NameStr, ///< A name for the new instruction
4944 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
4945 );
4946
4947 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4948 static bool classof(const Instruction *I) {
4949 return I->getOpcode() == FPToUI;
4950 }
4951 static bool classof(const Value *V) {
4952 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4953 }
4954};
4955
4956//===----------------------------------------------------------------------===//
4957// FPToSIInst Class
4958//===----------------------------------------------------------------------===//
4959
4960/// This class represents a cast from floating point to signed integer.
4961class FPToSIInst : public CastInst {
4962protected:
4963 // Note: Instruction needs to be a friend here to call cloneImpl.
4964 friend class Instruction;
4965
4966 /// Clone an identical FPToSIInst
4967 FPToSIInst *cloneImpl() const;
4968
4969public:
4970 /// Constructor with insert-before-instruction semantics
4971 FPToSIInst(
4972 Value *S, ///< The value to be converted
4973 Type *Ty, ///< The type to convert to
4974 const Twine &NameStr = "", ///< A name for the new instruction
4975 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4976 );
4977
4978 /// Constructor with insert-at-end-of-block semantics
4979 FPToSIInst(
4980 Value *S, ///< The value to be converted
4981 Type *Ty, ///< The type to convert to
4982 const Twine &NameStr, ///< A name for the new instruction
4983 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4984 );
4985
4986 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4987 static bool classof(const Instruction *I) {
4988 return I->getOpcode() == FPToSI;
4989 }
4990 static bool classof(const Value *V) {
4991 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4992 }
4993};
4994
4995//===----------------------------------------------------------------------===//
4996// IntToPtrInst Class
4997//===----------------------------------------------------------------------===//
4998
4999/// This class represents a cast from an integer to a pointer.
5000class IntToPtrInst : public CastInst {
5001public:
5002 // Note: Instruction needs to be a friend here to call cloneImpl.
5003 friend class Instruction;
5004
5005 /// Constructor with insert-before-instruction semantics
5006 IntToPtrInst(
5007 Value *S, ///< The value to be converted
5008 Type *Ty, ///< The type to convert to
5009 const Twine &NameStr = "", ///< A name for the new instruction
5010 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5011 );
5012
5013 /// Constructor with insert-at-end-of-block semantics
5014 IntToPtrInst(
5015 Value *S, ///< The value to be converted
5016 Type *Ty, ///< The type to convert to
5017 const Twine &NameStr, ///< A name for the new instruction
5018 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5019 );
5020
5021 /// Clone an identical IntToPtrInst.
5022 IntToPtrInst *cloneImpl() const;
5023
5024 /// Returns the address space of this instruction's pointer type.
5025 unsigned getAddressSpace() const {
5026 return getType()->getPointerAddressSpace();
5027 }
5028
5029 // Methods for support type inquiry through isa, cast, and dyn_cast:
5030 static bool classof(const Instruction *I) {
5031 return I->getOpcode() == IntToPtr;
5032 }
5033 static bool classof(const Value *V) {
5034 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5035 }
5036};
5037
5038//===----------------------------------------------------------------------===//
5039// PtrToIntInst Class
5040//===----------------------------------------------------------------------===//
5041
5042/// This class represents a cast from a pointer to an integer.
5043class PtrToIntInst : public CastInst {
5044protected:
5045 // Note: Instruction needs to be a friend here to call cloneImpl.
5046 friend class Instruction;
5047
5048 /// Clone an identical PtrToIntInst.
5049 PtrToIntInst *cloneImpl() const;
5050
5051public:
5052 /// Constructor with insert-before-instruction semantics
5053 PtrToIntInst(
5054 Value *S, ///< The value to be converted
5055 Type *Ty, ///< The type to convert to
5056 const Twine &NameStr = "", ///< A name for the new instruction
5057 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5058 );
5059
5060 /// Constructor with insert-at-end-of-block semantics
5061 PtrToIntInst(
5062 Value *S, ///< The value to be converted
5063 Type *Ty, ///< The type to convert to
5064 const Twine &NameStr, ///< A name for the new instruction
5065 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5066 );
5067
5068 /// Gets the pointer operand.
5069 Value *getPointerOperand() { return getOperand(0); }
5070 /// Gets the pointer operand.
5071 const Value *getPointerOperand() const { return getOperand(0); }
5072 /// Gets the operand index of the pointer operand.
5073 static unsigned getPointerOperandIndex() { return 0U; }
5074
5075 /// Returns the address space of the pointer operand.
5076 unsigned getPointerAddressSpace() const {
5077 return getPointerOperand()->getType()->getPointerAddressSpace();
5078 }
5079
5080 // Methods for support type inquiry through isa, cast, and dyn_cast:
5081 static bool classof(const Instruction *I) {
5082 return I->getOpcode() == PtrToInt;
5083 }
5084 static bool classof(const Value *V) {
5085 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5086 }
5087};
5088
5089//===----------------------------------------------------------------------===//
5090// BitCastInst Class
5091//===----------------------------------------------------------------------===//
5092
5093/// This class represents a no-op cast from one type to another; it changes
5093/// no bits of the operand and only reinterprets its type, so the source and
5093/// destination types are presumably size-equivalent (see the LLVM LangRef
5093/// rules for `bitcast` for the exact type constraints).
5094class BitCastInst : public CastInst {
5095protected:
5096 // Note: Instruction needs to be a friend here to call cloneImpl.
5097 friend class Instruction;

5099 /// Clone an identical BitCastInst.
5100 BitCastInst *cloneImpl() const;

5102public:
5103 /// Constructor with insert-before-instruction semantics
5104 BitCastInst(
5105 Value *S, ///< The value to be cast
5106 Type *Ty, ///< The type to cast to
5107 const Twine &NameStr = "", ///< A name for the new instruction
5108 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5109 );

5111 /// Constructor with insert-at-end-of-block semantics
5112 BitCastInst(
5113 Value *S, ///< The value to be cast
5114 Type *Ty, ///< The type to cast to
5115 const Twine &NameStr, ///< A name for the new instruction
5116 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5117 );

5119 // Methods for support type inquiry through isa, cast, and dyn_cast:
5120 static bool classof(const Instruction *I) {
5121 return I->getOpcode() == BitCast;
5122 }
5123 static bool classof(const Value *V) {
5124 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5125 }
5126};
5127
5128//===----------------------------------------------------------------------===//
5129// AddrSpaceCastInst Class
5130//===----------------------------------------------------------------------===//
5131
5132/// This class represents a conversion between pointers from one address space
5133/// to another. Unlike a bitcast, the source and destination pointer types may
5133/// differ in representation; the operand and result address spaces are
5133/// exposed via getSrcAddressSpace() / getDestAddressSpace() below.
5134class AddrSpaceCastInst : public CastInst {
5135protected:
5136 // Note: Instruction needs to be a friend here to call cloneImpl.
5137 friend class Instruction;

5139 /// Clone an identical AddrSpaceCastInst.
5140 AddrSpaceCastInst *cloneImpl() const;

5142public:
5143 /// Constructor with insert-before-instruction semantics
5144 AddrSpaceCastInst(
5145 Value *S, ///< The value to be cast
5146 Type *Ty, ///< The type to cast to
5147 const Twine &NameStr = "", ///< A name for the new instruction
5148 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5149 );

5151 /// Constructor with insert-at-end-of-block semantics
5152 AddrSpaceCastInst(
5153 Value *S, ///< The value to be cast
5154 Type *Ty, ///< The type to cast to
5155 const Twine &NameStr, ///< A name for the new instruction
5156 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5157 );

5159 // Methods for support type inquiry through isa, cast, and dyn_cast:
5160 static bool classof(const Instruction *I) {
5161 return I->getOpcode() == AddrSpaceCast;
5162 }
5163 static bool classof(const Value *V) {
5164 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5165 }

5167 /// Gets the pointer operand (the source pointer being cast).
5168 Value *getPointerOperand() {
5169 return getOperand(0);
5170 }

5172 /// Gets the pointer operand (const overload).
5173 const Value *getPointerOperand() const {
5174 return getOperand(0);
5175 }

5177 /// Gets the operand index of the pointer operand.
5178 static unsigned getPointerOperandIndex() {
5179 return 0U;
5180 }

5182 /// Returns the address space of the pointer operand, i.e. the address
5182 /// space the cast converts FROM.
5183 unsigned getSrcAddressSpace() const {
5184 return getPointerOperand()->getType()->getPointerAddressSpace();
5185 }

5187 /// Returns the address space of the result, i.e. the address space the
5187 /// cast converts TO.
5188 unsigned getDestAddressSpace() const {
5189 return getType()->getPointerAddressSpace();
5190 }
5191};
5192
5193} // end namespace llvm
5194
5195#endif // LLVM_IR_INSTRUCTIONS_H