Bug Summary

File: lib/Transforms/Scalar/LoopIdiomRecognize.cpp
Warning: line 1434, column 48
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LoopIdiomRecognize.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn338205/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem 
/usr/lib/gcc/x86_64-linux-gnu/8/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Transforms/Scalar -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-07-29-043837-17923-1 -x c++ /build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp -faddrsig

/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp

1//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass implements an idiom recognizer that transforms simple loops into a
11// non-loop form. In cases that this kicks in, it can be a significant
12// performance win.
13//
14// If compiling for code size we avoid idiom recognition if the resulting
15// code could be larger than the code for the original loop. One way this could
16// happen is if the loop is not removable after idiom recognition due to the
17// presence of non-idiom instructions. The initial implementation of the
18// heuristics applies to idioms in multi-block loops.
19//
20//===----------------------------------------------------------------------===//
21//
22// TODO List:
23//
24// Future loop memory idioms to recognize:
25// memcmp, memmove, strlen, etc.
26// Future floating point idioms to recognize in -ffast-math mode:
27// fpowi
28// Future integer operation idioms to recognize:
29// ctpop, ctlz, cttz
30//
31// Beware that isel's default lowering for ctpop is highly inefficient for
32// i64 and larger types when i64 is legal and the value has few bits set. It
33// would be good to enhance isel to emit a loop for ctpop in this case.
34//
35// This could recognize common matrix multiplies and dot product idioms and
36// replace them with calls to BLAS (if linked in??).
37//
38//===----------------------------------------------------------------------===//
39
40#include "llvm/ADT/APInt.h"
41#include "llvm/ADT/ArrayRef.h"
42#include "llvm/ADT/DenseMap.h"
43#include "llvm/ADT/MapVector.h"
44#include "llvm/ADT/SetVector.h"
45#include "llvm/ADT/SmallPtrSet.h"
46#include "llvm/ADT/SmallVector.h"
47#include "llvm/ADT/Statistic.h"
48#include "llvm/ADT/StringRef.h"
49#include "llvm/Analysis/AliasAnalysis.h"
50#include "llvm/Analysis/LoopAccessAnalysis.h"
51#include "llvm/Analysis/LoopInfo.h"
52#include "llvm/Analysis/LoopPass.h"
53#include "llvm/Analysis/MemoryLocation.h"
54#include "llvm/Analysis/ScalarEvolution.h"
55#include "llvm/Analysis/ScalarEvolutionExpander.h"
56#include "llvm/Analysis/ScalarEvolutionExpressions.h"
57#include "llvm/Analysis/TargetLibraryInfo.h"
58#include "llvm/Analysis/TargetTransformInfo.h"
59#include "llvm/Transforms/Utils/Local.h"
60#include "llvm/Analysis/ValueTracking.h"
61#include "llvm/IR/Attributes.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/Constant.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DataLayout.h"
66#include "llvm/IR/DebugLoc.h"
67#include "llvm/IR/DerivedTypes.h"
68#include "llvm/IR/Dominators.h"
69#include "llvm/IR/GlobalValue.h"
70#include "llvm/IR/GlobalVariable.h"
71#include "llvm/IR/IRBuilder.h"
72#include "llvm/IR/InstrTypes.h"
73#include "llvm/IR/Instruction.h"
74#include "llvm/IR/Instructions.h"
75#include "llvm/IR/IntrinsicInst.h"
76#include "llvm/IR/Intrinsics.h"
77#include "llvm/IR/LLVMContext.h"
78#include "llvm/IR/Module.h"
79#include "llvm/IR/PassManager.h"
80#include "llvm/IR/Type.h"
81#include "llvm/IR/User.h"
82#include "llvm/IR/Value.h"
83#include "llvm/IR/ValueHandle.h"
84#include "llvm/Pass.h"
85#include "llvm/Support/Casting.h"
86#include "llvm/Support/CommandLine.h"
87#include "llvm/Support/Debug.h"
88#include "llvm/Support/raw_ostream.h"
89#include "llvm/Transforms/Scalar.h"
90#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
91#include "llvm/Transforms/Utils/BuildLibCalls.h"
92#include "llvm/Transforms/Utils/LoopUtils.h"
93#include <algorithm>
94#include <cassert>
95#include <cstdint>
96#include <utility>
97#include <vector>
98
99using namespace llvm;
100
101#define DEBUG_TYPE"loop-idiom" "loop-idiom"
102
103STATISTIC(NumMemSet, "Number of memset's formed from loop stores")static llvm::Statistic NumMemSet = {"loop-idiom", "NumMemSet"
, "Number of memset's formed from loop stores", {0}, {false}}
;
104STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores")static llvm::Statistic NumMemCpy = {"loop-idiom", "NumMemCpy"
, "Number of memcpy's formed from loop load+stores", {0}, {false
}}
;
105
106static cl::opt<bool> UseLIRCodeSizeHeurs(
107 "use-lir-code-size-heurs",
108 cl::desc("Use loop idiom recognition code size heuristics when compiling"
109 "with -Os/-Oz"),
110 cl::init(true), cl::Hidden);
111
112namespace {
113
114class LoopIdiomRecognize {
115 Loop *CurLoop = nullptr;
116 AliasAnalysis *AA;
117 DominatorTree *DT;
118 LoopInfo *LI;
119 ScalarEvolution *SE;
120 TargetLibraryInfo *TLI;
121 const TargetTransformInfo *TTI;
122 const DataLayout *DL;
123 bool ApplyCodeSizeHeuristics;
124
125public:
126 explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
127 LoopInfo *LI, ScalarEvolution *SE,
128 TargetLibraryInfo *TLI,
129 const TargetTransformInfo *TTI,
130 const DataLayout *DL)
131 : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL) {}
132
133 bool runOnLoop(Loop *L);
134
135private:
136 using StoreList = SmallVector<StoreInst *, 8>;
137 using StoreListMap = MapVector<Value *, StoreList>;
138
139 StoreListMap StoreRefsForMemset;
140 StoreListMap StoreRefsForMemsetPattern;
141 StoreList StoreRefsForMemcpy;
142 bool HasMemset;
143 bool HasMemsetPattern;
144 bool HasMemcpy;
145
146 /// Return code for isLegalStore()
147 enum LegalStoreKind {
148 None = 0,
149 Memset,
150 MemsetPattern,
151 Memcpy,
152 UnorderedAtomicMemcpy,
153 DontUse // Dummy retval never to be used. Allows catching errors in retval
154 // handling.
155 };
156
157 /// \name Countable Loop Idiom Handling
158 /// @{
159
160 bool runOnCountableLoop();
161 bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
162 SmallVectorImpl<BasicBlock *> &ExitBlocks);
163
164 void collectStores(BasicBlock *BB);
165 LegalStoreKind isLegalStore(StoreInst *SI);
166 bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
167 bool ForMemset);
168 bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
169
170 bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
171 unsigned StoreAlignment, Value *StoredVal,
172 Instruction *TheStore,
173 SmallPtrSetImpl<Instruction *> &Stores,
174 const SCEVAddRecExpr *Ev, const SCEV *BECount,
175 bool NegStride, bool IsLoopMemset = false);
176 bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
177 bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
178 bool IsLoopMemset = false);
179
180 /// @}
181 /// \name Noncountable Loop Idiom Handling
182 /// @{
183
184 bool runOnNoncountableLoop();
185
186 bool recognizePopcount();
187 void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
188 PHINode *CntPhi, Value *Var);
189 bool recognizeAndInsertCTLZ();
190 void transformLoopToCountable(BasicBlock *PreCondBB, Instruction *CntInst,
191 PHINode *CntPhi, Value *Var, Instruction *DefX,
192 const DebugLoc &DL, bool ZeroCheck,
193 bool IsCntPhiUsedOutsideLoop);
194
195 /// @}
196};
197
198class LoopIdiomRecognizeLegacyPass : public LoopPass {
199public:
200 static char ID;
201
202 explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
203 initializeLoopIdiomRecognizeLegacyPassPass(
204 *PassRegistry::getPassRegistry());
205 }
206
207 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
208 if (skipLoop(L))
209 return false;
210
211 AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
212 DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
213 LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
214 ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
215 TargetLibraryInfo *TLI =
216 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
217 const TargetTransformInfo *TTI =
218 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
219 *L->getHeader()->getParent());
220 const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
221
222 LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
223 return LIR.runOnLoop(L);
224 }
225
226 /// This transformation requires natural loop information & requires that
227 /// loop preheaders be inserted into the CFG.
228 void getAnalysisUsage(AnalysisUsage &AU) const override {
229 AU.addRequired<TargetLibraryInfoWrapperPass>();
230 AU.addRequired<TargetTransformInfoWrapperPass>();
231 getLoopAnalysisUsage(AU);
232 }
233};
234
235} // end anonymous namespace
236
237char LoopIdiomRecognizeLegacyPass::ID = 0;
238
239PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
240 LoopStandardAnalysisResults &AR,
241 LPMUpdater &) {
242 const auto *DL = &L.getHeader()->getModule()->getDataLayout();
243
244 LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL);
245 if (!LIR.runOnLoop(&L))
1
Calling 'LoopIdiomRecognize::runOnLoop'
246 return PreservedAnalyses::all();
247
248 return getLoopPassPreservedAnalyses();
249}
250
251INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",static void *initializeLoopIdiomRecognizeLegacyPassPassOnce(PassRegistry
&Registry) {
252 "Recognize loop idioms", false, false)static void *initializeLoopIdiomRecognizeLegacyPassPassOnce(PassRegistry
&Registry) {
253INITIALIZE_PASS_DEPENDENCY(LoopPass)initializeLoopPassPass(Registry);
254INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)initializeTargetLibraryInfoWrapperPassPass(Registry);
255INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)initializeTargetTransformInfoWrapperPassPass(Registry);
256INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",PassInfo *PI = new PassInfo( "Recognize loop idioms", "loop-idiom"
, &LoopIdiomRecognizeLegacyPass::ID, PassInfo::NormalCtor_t
(callDefaultCtor<LoopIdiomRecognizeLegacyPass>), false,
false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeLoopIdiomRecognizeLegacyPassPassFlag
; void llvm::initializeLoopIdiomRecognizeLegacyPassPass(PassRegistry
&Registry) { llvm::call_once(InitializeLoopIdiomRecognizeLegacyPassPassFlag
, initializeLoopIdiomRecognizeLegacyPassPassOnce, std::ref(Registry
)); }
257 "Recognize loop idioms", false, false)PassInfo *PI = new PassInfo( "Recognize loop idioms", "loop-idiom"
, &LoopIdiomRecognizeLegacyPass::ID, PassInfo::NormalCtor_t
(callDefaultCtor<LoopIdiomRecognizeLegacyPass>), false,
false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeLoopIdiomRecognizeLegacyPassPassFlag
; void llvm::initializeLoopIdiomRecognizeLegacyPassPass(PassRegistry
&Registry) { llvm::call_once(InitializeLoopIdiomRecognizeLegacyPassPassFlag
, initializeLoopIdiomRecognizeLegacyPassPassOnce, std::ref(Registry
)); }
258
259Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }
260
261static void deleteDeadInstruction(Instruction *I) {
262 I->replaceAllUsesWith(UndefValue::get(I->getType()));
263 I->eraseFromParent();
264}
265
266//===----------------------------------------------------------------------===//
267//
268// Implementation of LoopIdiomRecognize
269//
270//===----------------------------------------------------------------------===//
271
272bool LoopIdiomRecognize::runOnLoop(Loop *L) {
273 CurLoop = L;
274 // If the loop could not be converted to canonical form, it must have an
275 // indirectbr in it, just give up.
276 if (!L->getLoopPreheader())
2
Assuming the condition is false
3
Taking false branch
277 return false;
278
279 // Disable loop idiom recognition if the function's name is a common idiom.
280 StringRef Name = L->getHeader()->getParent()->getName();
281 if (Name == "memset" || Name == "memcpy")
4
Assuming the condition is false
5
Assuming the condition is false
6
Taking false branch
282 return false;
283
284 // Determine if code size heuristics need to be applied.
285 ApplyCodeSizeHeuristics =
286 L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;
7
Assuming the condition is false
287
288 HasMemset = TLI->has(LibFunc_memset);
289 HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
290 HasMemcpy = TLI->has(LibFunc_memcpy);
291
292 if (HasMemset || HasMemsetPattern || HasMemcpy)
8
Taking false branch
293 if (SE->hasLoopInvariantBackedgeTakenCount(L))
294 return runOnCountableLoop();
295
296 return runOnNoncountableLoop();
9
Calling 'LoopIdiomRecognize::runOnNoncountableLoop'
297}
298
299bool LoopIdiomRecognize::runOnCountableLoop() {
300 const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
301 assert(!isa<SCEVCouldNotCompute>(BECount) &&(static_cast <bool> (!isa<SCEVCouldNotCompute>(BECount
) && "runOnCountableLoop() called on a loop without a predictable"
"backedge-taken count") ? void (0) : __assert_fail ("!isa<SCEVCouldNotCompute>(BECount) && \"runOnCountableLoop() called on a loop without a predictable\" \"backedge-taken count\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 303, __extension__ __PRETTY_FUNCTION__))
302 "runOnCountableLoop() called on a loop without a predictable"(static_cast <bool> (!isa<SCEVCouldNotCompute>(BECount
) && "runOnCountableLoop() called on a loop without a predictable"
"backedge-taken count") ? void (0) : __assert_fail ("!isa<SCEVCouldNotCompute>(BECount) && \"runOnCountableLoop() called on a loop without a predictable\" \"backedge-taken count\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 303, __extension__ __PRETTY_FUNCTION__))
303 "backedge-taken count")(static_cast <bool> (!isa<SCEVCouldNotCompute>(BECount
) && "runOnCountableLoop() called on a loop without a predictable"
"backedge-taken count") ? void (0) : __assert_fail ("!isa<SCEVCouldNotCompute>(BECount) && \"runOnCountableLoop() called on a loop without a predictable\" \"backedge-taken count\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 303, __extension__ __PRETTY_FUNCTION__))
;
304
305 // If this loop executes exactly one time, then it should be peeled, not
306 // optimized by this pass.
307 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
308 if (BECst->getAPInt() == 0)
309 return false;
310
311 SmallVector<BasicBlock *, 8> ExitBlocks;
312 CurLoop->getUniqueExitBlocks(ExitBlocks);
313
314 LLVM_DEBUG(dbgs() << "loop-idiom Scanning: F["do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << "loop-idiom Scanning: F[" <<
CurLoop->getHeader()->getParent()->getName() <<
"] Loop %" << CurLoop->getHeader()->getName() <<
"\n"; } } while (false)
315 << CurLoop->getHeader()->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << "loop-idiom Scanning: F[" <<
CurLoop->getHeader()->getParent()->getName() <<
"] Loop %" << CurLoop->getHeader()->getName() <<
"\n"; } } while (false)
316 << "] Loop %" << CurLoop->getHeader()->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << "loop-idiom Scanning: F[" <<
CurLoop->getHeader()->getParent()->getName() <<
"] Loop %" << CurLoop->getHeader()->getName() <<
"\n"; } } while (false)
;
317
318 bool MadeChange = false;
319
320 // The following transforms hoist stores/memsets into the loop pre-header.
321 // Give up if the loop has instructions may throw.
322 LoopSafetyInfo SafetyInfo;
323 computeLoopSafetyInfo(&SafetyInfo, CurLoop);
324 if (SafetyInfo.MayThrow)
325 return MadeChange;
326
327 // Scan all the blocks in the loop that are not in subloops.
328 for (auto *BB : CurLoop->getBlocks()) {
329 // Ignore blocks in subloops.
330 if (LI->getLoopFor(BB) != CurLoop)
331 continue;
332
333 MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
334 }
335 return MadeChange;
336}
337
338static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
339 const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
340 return ConstStride->getAPInt();
341}
342
343/// getMemSetPatternValue - If a strided store of the specified value is safe to
344/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
345/// be passed in. Otherwise, return null.
346///
347/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
348/// just replicate their input array and then pass on to memset_pattern16.
349static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
350 // If the value isn't a constant, we can't promote it to being in a constant
351 // array. We could theoretically do a store to an alloca or something, but
352 // that doesn't seem worthwhile.
353 Constant *C = dyn_cast<Constant>(V);
354 if (!C)
355 return nullptr;
356
357 // Only handle simple values that are a power of two bytes in size.
358 uint64_t Size = DL->getTypeSizeInBits(V->getType());
359 if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
360 return nullptr;
361
362 // Don't care enough about darwin/ppc to implement this.
363 if (DL->isBigEndian())
364 return nullptr;
365
366 // Convert to size in bytes.
367 Size /= 8;
368
369 // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
370 // if the top and bottom are the same (e.g. for vectors and large integers).
371 if (Size > 16)
372 return nullptr;
373
374 // If the constant is exactly 16 bytes, just use it.
375 if (Size == 16)
376 return C;
377
378 // Otherwise, we'll use an array of the constants.
379 unsigned ArraySize = 16 / Size;
380 ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
381 return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
382}
383
384LoopIdiomRecognize::LegalStoreKind
385LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
386 // Don't touch volatile stores.
387 if (SI->isVolatile())
388 return LegalStoreKind::None;
389 // We only want simple or unordered-atomic stores.
390 if (!SI->isUnordered())
391 return LegalStoreKind::None;
392
393 // Don't convert stores of non-integral pointer types to memsets (which stores
394 // integers).
395 if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
396 return LegalStoreKind::None;
397
398 // Avoid merging nontemporal stores.
399 if (SI->getMetadata(LLVMContext::MD_nontemporal))
400 return LegalStoreKind::None;
401
402 Value *StoredVal = SI->getValueOperand();
403 Value *StorePtr = SI->getPointerOperand();
404
405 // Reject stores that are so large that they overflow an unsigned.
406 uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
407 if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
408 return LegalStoreKind::None;
409
410 // See if the pointer expression is an AddRec like {base,+,1} on the current
411 // loop, which indicates a strided store. If we have something else, it's a
412 // random store we can't handle.
413 const SCEVAddRecExpr *StoreEv =
414 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
415 if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
416 return LegalStoreKind::None;
417
418 // Check to see if we have a constant stride.
419 if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
420 return LegalStoreKind::None;
421
422 // See if the store can be turned into a memset.
423
424 // If the stored value is a byte-wise value (like i32 -1), then it may be
425 // turned into a memset of i8 -1, assuming that all the consecutive bytes
426 // are stored. A store of i32 0x01020304 can never be turned into a memset,
427 // but it can be turned into memset_pattern if the target supports it.
428 Value *SplatValue = isBytewiseValue(StoredVal);
429 Constant *PatternValue = nullptr;
430
431 // Note: memset and memset_pattern on unordered-atomic is yet not supported
432 bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
433
434 // If we're allowed to form a memset, and the stored value would be
435 // acceptable for memset, use it.
436 if (!UnorderedAtomic && HasMemset && SplatValue &&
437 // Verify that the stored value is loop invariant. If not, we can't
438 // promote the memset.
439 CurLoop->isLoopInvariant(SplatValue)) {
440 // It looks like we can use SplatValue.
441 return LegalStoreKind::Memset;
442 } else if (!UnorderedAtomic && HasMemsetPattern &&
443 // Don't create memset_pattern16s with address spaces.
444 StorePtr->getType()->getPointerAddressSpace() == 0 &&
445 (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
446 // It looks like we can use PatternValue!
447 return LegalStoreKind::MemsetPattern;
448 }
449
450 // Otherwise, see if the store can be turned into a memcpy.
451 if (HasMemcpy) {
452 // Check to see if the stride matches the size of the store. If so, then we
453 // know that every byte is touched in the loop.
454 APInt Stride = getStoreStride(StoreEv);
455 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
456 if (StoreSize != Stride && StoreSize != -Stride)
457 return LegalStoreKind::None;
458
459 // The store must be feeding a non-volatile load.
460 LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
461
462 // Only allow non-volatile loads
463 if (!LI || LI->isVolatile())
464 return LegalStoreKind::None;
465 // Only allow simple or unordered-atomic loads
466 if (!LI->isUnordered())
467 return LegalStoreKind::None;
468
469 // See if the pointer expression is an AddRec like {base,+,1} on the current
470 // loop, which indicates a strided load. If we have something else, it's a
471 // random load we can't handle.
472 const SCEVAddRecExpr *LoadEv =
473 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
474 if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
475 return LegalStoreKind::None;
476
477 // The store and load must share the same stride.
478 if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
479 return LegalStoreKind::None;
480
481 // Success. This store can be converted into a memcpy.
482 UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
483 return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
484 : LegalStoreKind::Memcpy;
485 }
486 // This store can't be transformed into a memset/memcpy.
487 return LegalStoreKind::None;
488}
489
490void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
491 StoreRefsForMemset.clear();
492 StoreRefsForMemsetPattern.clear();
493 StoreRefsForMemcpy.clear();
494 for (Instruction &I : *BB) {
495 StoreInst *SI = dyn_cast<StoreInst>(&I);
496 if (!SI)
497 continue;
498
499 // Make sure this is a strided store with a constant stride.
500 switch (isLegalStore(SI)) {
501 case LegalStoreKind::None:
502 // Nothing to do
503 break;
504 case LegalStoreKind::Memset: {
505 // Find the base pointer.
506 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
507 StoreRefsForMemset[Ptr].push_back(SI);
508 } break;
509 case LegalStoreKind::MemsetPattern: {
510 // Find the base pointer.
511 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
512 StoreRefsForMemsetPattern[Ptr].push_back(SI);
513 } break;
514 case LegalStoreKind::Memcpy:
515 case LegalStoreKind::UnorderedAtomicMemcpy:
516 StoreRefsForMemcpy.push_back(SI);
517 break;
518 default:
519 assert(false && "unhandled return value")(static_cast <bool> (false && "unhandled return value"
) ? void (0) : __assert_fail ("false && \"unhandled return value\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 519, __extension__ __PRETTY_FUNCTION__))
;
520 break;
521 }
522 }
523}
524
525/// runOnLoopBlock - Process the specified block, which lives in a counted loop
526/// with the specified backedge count. This block is known to be in the current
527/// loop and not in any subloops.
528bool LoopIdiomRecognize::runOnLoopBlock(
529 BasicBlock *BB, const SCEV *BECount,
530 SmallVectorImpl<BasicBlock *> &ExitBlocks) {
531 // We can only promote stores in this block if they are unconditionally
532 // executed in the loop. For a block to be unconditionally executed, it has
533 // to dominate all the exit blocks of the loop. Verify this now.
534 for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
535 if (!DT->dominates(BB, ExitBlocks[i]))
536 return false;
537
538 bool MadeChange = false;
539 // Look for store instructions, which may be optimized to memset/memcpy.
540 collectStores(BB);
541
542 // Look for a single store or sets of stores with a common base, which can be
543 // optimized into a memset (memset_pattern). The latter most commonly happens
544 // with structs and handunrolled loops.
545 for (auto &SL : StoreRefsForMemset)
546 MadeChange |= processLoopStores(SL.second, BECount, true);
547
548 for (auto &SL : StoreRefsForMemsetPattern)
549 MadeChange |= processLoopStores(SL.second, BECount, false);
550
551 // Optimize the store into a memcpy, if it feeds an similarly strided load.
552 for (auto &SI : StoreRefsForMemcpy)
553 MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);
554
555 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
556 Instruction *Inst = &*I++;
557 // Look for memset instructions, which may be optimized to a larger memset.
558 if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
559 WeakTrackingVH InstPtr(&*I);
560 if (!processLoopMemSet(MSI, BECount))
561 continue;
562 MadeChange = true;
563
564 // If processing the memset invalidated our iterator, start over from the
565 // top of the block.
566 if (!InstPtr)
567 I = BB->begin();
568 continue;
569 }
570 }
571
572 return MadeChange;
573}
574
575/// processLoopStores - See if this store(s) can be promoted to a memset.
576bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
577 const SCEV *BECount,
578 bool ForMemset) {
579 // Try to find consecutive stores that can be transformed into memsets.
580 SetVector<StoreInst *> Heads, Tails;
581 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
582
583 // Do a quadratic search on all of the given stores and find
584 // all of the pairs of stores that follow each other.
585 SmallVector<unsigned, 16> IndexQueue;
586 for (unsigned i = 0, e = SL.size(); i < e; ++i) {
587 assert(SL[i]->isSimple() && "Expected only non-volatile stores.")(static_cast <bool> (SL[i]->isSimple() && "Expected only non-volatile stores."
) ? void (0) : __assert_fail ("SL[i]->isSimple() && \"Expected only non-volatile stores.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 587, __extension__ __PRETTY_FUNCTION__))
;
588
589 Value *FirstStoredVal = SL[i]->getValueOperand();
590 Value *FirstStorePtr = SL[i]->getPointerOperand();
591 const SCEVAddRecExpr *FirstStoreEv =
592 cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
593 APInt FirstStride = getStoreStride(FirstStoreEv);
594 unsigned FirstStoreSize = DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());
595
596 // See if we can optimize just this store in isolation.
597 if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
598 Heads.insert(SL[i]);
599 continue;
600 }
601
602 Value *FirstSplatValue = nullptr;
603 Constant *FirstPatternValue = nullptr;
604
605 if (ForMemset)
606 FirstSplatValue = isBytewiseValue(FirstStoredVal);
607 else
608 FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);
609
610 assert((FirstSplatValue || FirstPatternValue) &&(static_cast <bool> ((FirstSplatValue || FirstPatternValue
) && "Expected either splat value or pattern value.")
? void (0) : __assert_fail ("(FirstSplatValue || FirstPatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 611, __extension__ __PRETTY_FUNCTION__))
611 "Expected either splat value or pattern value.")(static_cast <bool> ((FirstSplatValue || FirstPatternValue
) && "Expected either splat value or pattern value.")
? void (0) : __assert_fail ("(FirstSplatValue || FirstPatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 611, __extension__ __PRETTY_FUNCTION__))
;
612
613 IndexQueue.clear();
614 // If a store has multiple consecutive store candidates, search Stores
615 // array according to the sequence: from i+1 to e, then from i-1 to 0.
616 // This is because usually pairing with immediate succeeding or preceding
617 // candidate create the best chance to find memset opportunity.
618 unsigned j = 0;
619 for (j = i + 1; j < e; ++j)
620 IndexQueue.push_back(j);
621 for (j = i; j > 0; --j)
622 IndexQueue.push_back(j - 1);
623
624 for (auto &k : IndexQueue) {
625 assert(SL[k]->isSimple() && "Expected only non-volatile stores.")(static_cast <bool> (SL[k]->isSimple() && "Expected only non-volatile stores."
) ? void (0) : __assert_fail ("SL[k]->isSimple() && \"Expected only non-volatile stores.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 625, __extension__ __PRETTY_FUNCTION__))
;
626 Value *SecondStorePtr = SL[k]->getPointerOperand();
627 const SCEVAddRecExpr *SecondStoreEv =
628 cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
629 APInt SecondStride = getStoreStride(SecondStoreEv);
630
631 if (FirstStride != SecondStride)
632 continue;
633
634 Value *SecondStoredVal = SL[k]->getValueOperand();
635 Value *SecondSplatValue = nullptr;
636 Constant *SecondPatternValue = nullptr;
637
638 if (ForMemset)
639 SecondSplatValue = isBytewiseValue(SecondStoredVal);
640 else
641 SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);
642
643 assert((SecondSplatValue || SecondPatternValue) &&(static_cast <bool> ((SecondSplatValue || SecondPatternValue
) && "Expected either splat value or pattern value.")
? void (0) : __assert_fail ("(SecondSplatValue || SecondPatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 644, __extension__ __PRETTY_FUNCTION__))
644 "Expected either splat value or pattern value.")(static_cast <bool> ((SecondSplatValue || SecondPatternValue
) && "Expected either splat value or pattern value.")
? void (0) : __assert_fail ("(SecondSplatValue || SecondPatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 644, __extension__ __PRETTY_FUNCTION__))
;
645
646 if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
647 if (ForMemset) {
648 if (FirstSplatValue != SecondSplatValue)
649 continue;
650 } else {
651 if (FirstPatternValue != SecondPatternValue)
652 continue;
653 }
654 Tails.insert(SL[k]);
655 Heads.insert(SL[i]);
656 ConsecutiveChain[SL[i]] = SL[k];
657 break;
658 }
659 }
660 }
661
662 // We may run into multiple chains that merge into a single chain. We mark the
663 // stores that we transformed so that we don't visit the same store twice.
664 SmallPtrSet<Value *, 16> TransformedStores;
665 bool Changed = false;
666
667 // For stores that start but don't end a link in the chain:
668 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
669 it != e; ++it) {
670 if (Tails.count(*it))
671 continue;
672
673 // We found a store instr that starts a chain. Now follow the chain and try
674 // to transform it.
675 SmallPtrSet<Instruction *, 8> AdjacentStores;
676 StoreInst *I = *it;
677
678 StoreInst *HeadStore = I;
679 unsigned StoreSize = 0;
680
681 // Collect the chain into a list.
682 while (Tails.count(I) || Heads.count(I)) {
683 if (TransformedStores.count(I))
684 break;
685 AdjacentStores.insert(I);
686
687 StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
688 // Move to the next value in the chain.
689 I = ConsecutiveChain[I];
690 }
691
692 Value *StoredVal = HeadStore->getValueOperand();
693 Value *StorePtr = HeadStore->getPointerOperand();
694 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
695 APInt Stride = getStoreStride(StoreEv);
696
697 // Check to see if the stride matches the size of the stores. If so, then
698 // we know that every byte is touched in the loop.
699 if (StoreSize != Stride && StoreSize != -Stride)
700 continue;
701
702 bool NegStride = StoreSize == -Stride;
703
704 if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
705 StoredVal, HeadStore, AdjacentStores, StoreEv,
706 BECount, NegStride)) {
707 TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
708 Changed = true;
709 }
710 }
711
712 return Changed;
713}
714
715/// processLoopMemSet - See if this memset can be promoted to a large memset.
716bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
717 const SCEV *BECount) {
718 // We can only handle non-volatile memsets with a constant size.
719 if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
720 return false;
721
722 // If we're not allowed to hack on memset, we fail.
723 if (!HasMemset)
724 return false;
725
726 Value *Pointer = MSI->getDest();
727
728 // See if the pointer expression is an AddRec like {base,+,1} on the current
729 // loop, which indicates a strided store. If we have something else, it's a
730 // random store we can't handle.
731 const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
732 if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
733 return false;
734
735 // Reject memsets that are so large that they overflow an unsigned.
736 uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
737 if ((SizeInBytes >> 32) != 0)
738 return false;
739
740 // Check to see if the stride matches the size of the memset. If so, then we
741 // know that every byte is touched in the loop.
742 const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
743 if (!ConstStride)
744 return false;
745
746 APInt Stride = ConstStride->getAPInt();
747 if (SizeInBytes != Stride && SizeInBytes != -Stride)
748 return false;
749
750 // Verify that the memset value is loop invariant. If not, we can't promote
751 // the memset.
752 Value *SplatValue = MSI->getValue();
753 if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
754 return false;
755
756 SmallPtrSet<Instruction *, 1> MSIs;
757 MSIs.insert(MSI);
758 bool NegStride = SizeInBytes == -Stride;
759 return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
760 MSI->getDestAlignment(), SplatValue, MSI, MSIs,
761 Ev, BECount, NegStride, /*IsLoopMemset=*/true);
762}
763
764/// mayLoopAccessLocation - Return true if the specified loop might access the
765/// specified pointer location, which is a loop-strided access. The 'Access'
766/// argument specifies what the verboten forms of access are (read or write).
767static bool
768mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
769 const SCEV *BECount, unsigned StoreSize,
770 AliasAnalysis &AA,
771 SmallPtrSetImpl<Instruction *> &IgnoredStores) {
772 // Get the location that may be stored across the loop. Since the access is
773 // strided positively through memory, we say that the modified location starts
774 // at the pointer and has infinite size.
775 uint64_t AccessSize = MemoryLocation::UnknownSize;
776
777 // If the loop iterates a fixed number of times, we can refine the access size
778 // to be exactly the size of the memset, which is (BECount+1)*StoreSize
779 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
780 AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
781
782 // TODO: For this to be really effective, we have to dive into the pointer
783 // operand in the store. Store to &A[i] of 100 will always return may alias
784 // with store of &A[100], we need to StoreLoc to be "A" with size of 100,
785 // which will then no-alias a store to &A[100].
786 MemoryLocation StoreLoc(Ptr, AccessSize);
787
788 for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
789 ++BI)
790 for (Instruction &I : **BI)
791 if (IgnoredStores.count(&I) == 0 &&
792 isModOrRefSet(
793 intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
794 return true;
795
796 return false;
797}
798
799// If we have a negative stride, Start refers to the end of the memory location
800// we're trying to memset. Therefore, we need to recompute the base pointer,
801// which is just Start - BECount*Size.
802static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
803 Type *IntPtr, unsigned StoreSize,
804 ScalarEvolution *SE) {
805 const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
806 if (StoreSize != 1)
807 Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
808 SCEV::FlagNUW);
809 return SE->getMinusSCEV(Start, Index);
810}
811
812/// Compute the number of bytes as a SCEV from the backedge taken count.
813///
814/// This also maps the SCEV into the provided type and tries to handle the
815/// computation in a way that will fold cleanly.
816static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
817 unsigned StoreSize, Loop *CurLoop,
818 const DataLayout *DL, ScalarEvolution *SE) {
819 const SCEV *NumBytesS;
820 // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
821 // pointer size if it isn't already.
822 //
823 // If we're going to need to zero extend the BE count, check if we can add
824 // one to it prior to zero extending without overflow. Provided this is safe,
825 // it allows better simplification of the +1.
826 if (DL->getTypeSizeInBits(BECount->getType()) <
827 DL->getTypeSizeInBits(IntPtr) &&
828 SE->isLoopEntryGuardedByCond(
829 CurLoop, ICmpInst::ICMP_NE, BECount,
830 SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
831 NumBytesS = SE->getZeroExtendExpr(
832 SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
833 IntPtr);
834 } else {
835 NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
836 SE->getOne(IntPtr), SCEV::FlagNUW);
837 }
838
839 // And scale it based on the store size.
840 if (StoreSize != 1) {
841 NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
842 SCEV::FlagNUW);
843 }
844 return NumBytesS;
845}
846
847/// processLoopStridedStore - We see a strided store of some value. If we can
848/// transform this into a memset or memset_pattern in the loop preheader, do so.
849bool LoopIdiomRecognize::processLoopStridedStore(
850 Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
851 Value *StoredVal, Instruction *TheStore,
852 SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
853 const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
854 Value *SplatValue = isBytewiseValue(StoredVal);
855 Constant *PatternValue = nullptr;
856
857 if (!SplatValue)
858 PatternValue = getMemSetPatternValue(StoredVal, DL);
859
860 assert((SplatValue || PatternValue) &&(static_cast <bool> ((SplatValue || PatternValue) &&
"Expected either splat value or pattern value.") ? void (0) :
__assert_fail ("(SplatValue || PatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 861, __extension__ __PRETTY_FUNCTION__))
861 "Expected either splat value or pattern value.")(static_cast <bool> ((SplatValue || PatternValue) &&
"Expected either splat value or pattern value.") ? void (0) :
__assert_fail ("(SplatValue || PatternValue) && \"Expected either splat value or pattern value.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 861, __extension__ __PRETTY_FUNCTION__))
;
862
863 // The trip count of the loop and the base pointer of the addrec SCEV is
864 // guaranteed to be loop invariant, which means that it should dominate the
865 // header. This allows us to insert code for it in the preheader.
866 unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
867 BasicBlock *Preheader = CurLoop->getLoopPreheader();
868 IRBuilder<> Builder(Preheader->getTerminator());
869 SCEVExpander Expander(*SE, *DL, "loop-idiom");
870
871 Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
872 Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
873
874 const SCEV *Start = Ev->getStart();
875 // Handle negative strided loops.
876 if (NegStride)
877 Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
878
879 // TODO: ideally we should still be able to generate memset if SCEV expander
880 // is taught to generate the dependencies at the latest point.
881 if (!isSafeToExpand(Start, *SE))
882 return false;
883
884 // Okay, we have a strided store "p[i]" of a splattable value. We can turn
885 // this into a memset in the loop preheader now if we want. However, this
886 // would be unsafe to do if there is anything else in the loop that may read
887 // or write to the aliased location. Check for any overlap by generating the
888 // base pointer and checking the region.
889 Value *BasePtr =
890 Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
891 if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
892 StoreSize, *AA, Stores)) {
893 Expander.clear();
894 // If we generated new code for the base pointer, clean up.
895 RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
896 return false;
897 }
898
899 if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
900 return false;
901
902 // Okay, everything looks good, insert the memset.
903
904 const SCEV *NumBytesS =
905 getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);
906
907 // TODO: ideally we should still be able to generate memset if SCEV expander
908 // is taught to generate the dependencies at the latest point.
909 if (!isSafeToExpand(NumBytesS, *SE))
910 return false;
911
912 Value *NumBytes =
913 Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
914
915 CallInst *NewCall;
916 if (SplatValue) {
917 NewCall =
918 Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
919 } else {
920 // Everything is emitted in default address space
921 Type *Int8PtrTy = DestInt8PtrTy;
922
923 Module *M = TheStore->getModule();
924 Value *MSP =
925 M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
926 Int8PtrTy, Int8PtrTy, IntPtr);
927 inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);
928
929 // Otherwise we should form a memset_pattern16. PatternValue is known to be
930 // an constant array of 16-bytes. Plop the value into a mergable global.
931 GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
932 GlobalValue::PrivateLinkage,
933 PatternValue, ".memset_pattern");
934 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
935 GV->setAlignment(16);
936 Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
937 NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
938 }
939
940 LLVM_DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memset: " <<
*NewCall << "\n" << " from store to: " <<
*Ev << " at: " << *TheStore << "\n"; } } while
(false)
941 << " from store to: " << *Ev << " at: " << *TheStoredo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memset: " <<
*NewCall << "\n" << " from store to: " <<
*Ev << " at: " << *TheStore << "\n"; } } while
(false)
942 << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memset: " <<
*NewCall << "\n" << " from store to: " <<
*Ev << " at: " << *TheStore << "\n"; } } while
(false)
;
943 NewCall->setDebugLoc(TheStore->getDebugLoc());
944
945 // Okay, the memset has been formed. Zap the original store and anything that
946 // feeds into it.
947 for (auto *I : Stores)
948 deleteDeadInstruction(I);
949 ++NumMemSet;
950 return true;
951}
952
953/// If the stored value is a strided load in the same loop with the same stride
954/// this may be transformable into a memcpy. This kicks in for stuff like
955/// for (i) A[i] = B[i];
956bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
957 const SCEV *BECount) {
958 assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.")(static_cast <bool> (SI->isUnordered() && "Expected only non-volatile non-ordered stores."
) ? void (0) : __assert_fail ("SI->isUnordered() && \"Expected only non-volatile non-ordered stores.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 958, __extension__ __PRETTY_FUNCTION__))
;
959
960 Value *StorePtr = SI->getPointerOperand();
961 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
962 APInt Stride = getStoreStride(StoreEv);
963 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
964 bool NegStride = StoreSize == -Stride;
965
966 // The store must be feeding a non-volatile load.
967 LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
968 assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.")(static_cast <bool> (LI->isUnordered() && "Expected only non-volatile non-ordered loads."
) ? void (0) : __assert_fail ("LI->isUnordered() && \"Expected only non-volatile non-ordered loads.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Scalar/LoopIdiomRecognize.cpp"
, 968, __extension__ __PRETTY_FUNCTION__))
;
969
970 // See if the pointer expression is an AddRec like {base,+,1} on the current
971 // loop, which indicates a strided load. If we have something else, it's a
972 // random load we can't handle.
973 const SCEVAddRecExpr *LoadEv =
974 cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
975
976 // The trip count of the loop and the base pointer of the addrec SCEV is
977 // guaranteed to be loop invariant, which means that it should dominate the
978 // header. This allows us to insert code for it in the preheader.
979 BasicBlock *Preheader = CurLoop->getLoopPreheader();
980 IRBuilder<> Builder(Preheader->getTerminator());
981 SCEVExpander Expander(*SE, *DL, "loop-idiom");
982
983 const SCEV *StrStart = StoreEv->getStart();
984 unsigned StrAS = SI->getPointerAddressSpace();
985 Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
986
987 // Handle negative strided loops.
988 if (NegStride)
989 StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
990
991 // Okay, we have a strided store "p[i]" of a loaded value. We can turn
992 // this into a memcpy in the loop preheader now if we want. However, this
993 // would be unsafe to do if there is anything else in the loop that may read
994 // or write the memory region we're storing to. This includes the load that
995 // feeds the stores. Check for an alias by generating the base address and
996 // checking everything.
997 Value *StoreBasePtr = Expander.expandCodeFor(
998 StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());
999
1000 SmallPtrSet<Instruction *, 1> Stores;
1001 Stores.insert(SI);
1002 if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
1003 StoreSize, *AA, Stores)) {
1004 Expander.clear();
1005 // If we generated new code for the base pointer, clean up.
1006 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1007 return false;
1008 }
1009
1010 const SCEV *LdStart = LoadEv->getStart();
1011 unsigned LdAS = LI->getPointerAddressSpace();
1012
1013 // Handle negative strided loops.
1014 if (NegStride)
1015 LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
1016
1017 // For a memcpy, we have to make sure that the input array is not being
1018 // mutated by the loop.
1019 Value *LoadBasePtr = Expander.expandCodeFor(
1020 LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
1021
1022 if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
1023 StoreSize, *AA, Stores)) {
1024 Expander.clear();
1025 // If we generated new code for the base pointer, clean up.
1026 RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
1027 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1028 return false;
1029 }
1030
1031 if (avoidLIRForMultiBlockLoop())
1032 return false;
1033
1034 // Okay, everything is safe, we can transform this!
1035
1036 const SCEV *NumBytesS =
1037 getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);
1038
1039 Value *NumBytes =
1040 Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
1041
1042 CallInst *NewCall = nullptr;
1043 // Check whether to generate an unordered atomic memcpy:
1044 // If the load or store are atomic, then they must necessarily be unordered
1045 // by previous checks.
1046 if (!SI->isAtomic() && !LI->isAtomic())
1047 NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
1048 LoadBasePtr, LI->getAlignment(), NumBytes);
1049 else {
1050 // We cannot allow unaligned ops for unordered load/store, so reject
1051 // anything where the alignment isn't at least the element size.
1052 unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
1053 if (Align < StoreSize)
1054 return false;
1055
1056 // If the element.atomic memcpy is not lowered into explicit
1057 // loads/stores later, then it will be lowered into an element-size
1058 // specific lib call. If the lib call doesn't exist for our store size, then
1059 // we shouldn't generate the memcpy.
1060 if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
1061 return false;
1062
1063 // Create the call.
1064 // Note that unordered atomic loads/stores are *required* by the spec to
1065 // have an alignment but non-atomic loads/stores may not.
1066 NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
1067 StoreBasePtr, SI->getAlignment(), LoadBasePtr, LI->getAlignment(),
1068 NumBytes, StoreSize);
1069 }
1070 NewCall->setDebugLoc(SI->getDebugLoc());
1071
1072 LLVM_DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memcpy: " <<
*NewCall << "\n" << " from load ptr=" <<
*LoadEv << " at: " << *LI << "\n" <<
" from store ptr=" << *StoreEv << " at: " <<
*SI << "\n"; } } while (false)
1073 << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memcpy: " <<
*NewCall << "\n" << " from load ptr=" <<
*LoadEv << " at: " << *LI << "\n" <<
" from store ptr=" << *StoreEv << " at: " <<
*SI << "\n"; } } while (false)
1074 << " from store ptr=" << *StoreEv << " at: " << *SIdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memcpy: " <<
*NewCall << "\n" << " from load ptr=" <<
*LoadEv << " at: " << *LI << "\n" <<
" from store ptr=" << *StoreEv << " at: " <<
*SI << "\n"; } } while (false)
1075 << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " Formed memcpy: " <<
*NewCall << "\n" << " from load ptr=" <<
*LoadEv << " at: " << *LI << "\n" <<
" from store ptr=" << *StoreEv << " at: " <<
*SI << "\n"; } } while (false)
;
1076
1077 // Okay, the memcpy has been formed. Zap the original store and anything that
1078 // feeds into it.
1079 deleteDeadInstruction(SI);
1080 ++NumMemCpy;
1081 return true;
1082}
1083
1084// When compiling for codesize we avoid idiom recognition for a multi-block loop
1085// unless it is a loop_memset idiom or a memset/memcpy idiom in a nested loop.
1086//
1087bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
1088 bool IsLoopMemset) {
1089 if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
1090 if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
1091 LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " " << CurLoop->getHeader
()->getParent()->getName() << " : LIR " << (
IsMemset ? "Memset" : "Memcpy") << " avoided: multi-block top-level loop\n"
; } } while (false)
1092 << " : LIR " << (IsMemset ? "Memset" : "Memcpy")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " " << CurLoop->getHeader
()->getParent()->getName() << " : LIR " << (
IsMemset ? "Memset" : "Memcpy") << " avoided: multi-block top-level loop\n"
; } } while (false)
1093 << " avoided: multi-block top-level loop\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-idiom")) { dbgs() << " " << CurLoop->getHeader
()->getParent()->getName() << " : LIR " << (
IsMemset ? "Memset" : "Memcpy") << " avoided: multi-block top-level loop\n"
; } } while (false)
;
1094 return true;
1095 }
1096 }
1097
1098 return false;
1099}
1100
1101bool LoopIdiomRecognize::runOnNoncountableLoop() {
1102 return recognizePopcount() || recognizeAndInsertCTLZ();
10
Calling 'LoopIdiomRecognize::recognizeAndInsertCTLZ'
1103}
1104
1105/// Check if the given conditional branch is based on the comparison between
1106/// a variable and zero, and if the variable is non-zero, the control yields to
1107/// the loop entry. If the branch matches the behavior, the variable involved
1108/// in the comparison is returned. This function will be called to see if the
1109/// precondition and postcondition of the loop are in desirable form.
1110static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
1111 if (!BI || !BI->isConditional())
1112 return nullptr;
1113
1114 ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
1115 if (!Cond)
1116 return nullptr;
1117
1118 ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
1119 if (!CmpZero || !CmpZero->isZero())
1120 return nullptr;
1121
1122 ICmpInst::Predicate Pred = Cond->getPredicate();
1123 if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
1124 (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
1125 return Cond->getOperand(0);
1126
1127 return nullptr;
1128}
1129
1130// Check if the recurrence variable `VarX` is in the right form to create
1131// the idiom. Returns the value coerced to a PHINode if so.
1132static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
1133 BasicBlock *LoopEntry) {
1134 auto *PhiX = dyn_cast<PHINode>(VarX);
1135 if (PhiX && PhiX->getParent() == LoopEntry &&
1136 (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
1137 return PhiX;
1138 return nullptr;
1139}
1140
1141/// Return true iff the idiom is detected in the loop.
1142///
1143/// Additionally:
1144/// 1) \p CntInst is set to the instruction counting the population bit.
1145/// 2) \p CntPhi is set to the corresponding phi node.
1146/// 3) \p Var is set to the value whose population bits are being counted.
1147///
1148/// The core idiom we are trying to detect is:
1149/// \code
1150/// if (x0 != 0)
1151/// goto loop-exit // the precondition of the loop
1152/// cnt0 = init-val;
1153/// do {
1154/// x1 = phi (x0, x2);
1155/// cnt1 = phi(cnt0, cnt2);
1156///
1157/// cnt2 = cnt1 + 1;
1158/// ...
1159/// x2 = x1 & (x1 - 1);
1160/// ...
1161/// } while(x != 0);
1162///
1163/// loop-exit:
1164/// \endcode
1165static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
1166 Instruction *&CntInst, PHINode *&CntPhi,
1167 Value *&Var) {
1168 // step 1: Check to see if the look-back branch match this pattern:
1169 // "if (a!=0) goto loop-entry".
1170 BasicBlock *LoopEntry;
1171 Instruction *DefX2, *CountInst;
1172 Value *VarX1, *VarX0;
1173 PHINode *PhiX, *CountPhi;
1174
1175 DefX2 = CountInst = nullptr;
1176 VarX1 = VarX0 = nullptr;
1177 PhiX = CountPhi = nullptr;
1178 LoopEntry = *(CurLoop->block_begin());
1179
1180 // step 1: Check if the loop-back branch is in desirable form.
1181 {
1182 if (Value *T = matchCondition(
1183 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1184 DefX2 = dyn_cast<Instruction>(T);
1185 else
1186 return false;
1187 }
1188
1189 // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
1190 {
1191 if (!DefX2 || DefX2->getOpcode() != Instruction::And)
1192 return false;
1193
1194 BinaryOperator *SubOneOp;
1195
1196 if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
1197 VarX1 = DefX2->getOperand(1);
1198 else {
1199 VarX1 = DefX2->getOperand(0);
1200 SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
1201 }
1202 if (!SubOneOp || SubOneOp->getOperand(0) != VarX1)
1203 return false;
1204
1205 ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1));
1206 if (!Dec ||
1207 !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
1208 (SubOneOp->getOpcode() == Instruction::Add &&
1209 Dec->isMinusOne()))) {
1210 return false;
1211 }
1212 }
1213
1214 // step 3: Check the recurrence of variable X
1215 PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
1216 if (!PhiX)
1217 return false;
1218
1219 // step 4: Find the instruction which count the population: cnt2 = cnt1 + 1
1220 {
1221 CountInst = nullptr;
1222 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1223 IterE = LoopEntry->end();
1224 Iter != IterE; Iter++) {
1225 Instruction *Inst = &*Iter;
1226 if (Inst->getOpcode() != Instruction::Add)
1227 continue;
1228
1229 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1230 if (!Inc || !Inc->isOne())
1231 continue;
1232
1233 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1234 if (!Phi)
1235 continue;
1236
1237 // Check if the result of the instruction is live of the loop.
1238 bool LiveOutLoop = false;
1239 for (User *U : Inst->users()) {
1240 if ((cast<Instruction>(U))->getParent() != LoopEntry) {
1241 LiveOutLoop = true;
1242 break;
1243 }
1244 }
1245
1246 if (LiveOutLoop) {
1247 CountInst = Inst;
1248 CountPhi = Phi;
1249 break;
1250 }
1251 }
1252
1253 if (!CountInst)
1254 return false;
1255 }
1256
1257 // step 5: check if the precondition is in this form:
1258 // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
1259 {
1260 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1261 Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
1262 if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
1263 return false;
1264
1265 CntInst = CountInst;
1266 CntPhi = CountPhi;
1267 Var = T;
1268 }
1269
1270 return true;
1271}
1272
1273/// Return true if the idiom is detected in the loop.
1274///
1275/// Additionally:
1276/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
1277/// or nullptr if there is no such.
1278/// 2) \p CntPhi is set to the corresponding phi node
1279/// or nullptr if there is no such.
1280/// 3) \p Var is set to the value whose CTLZ could be used.
1281/// 4) \p DefX is set to the instruction calculating Loop exit condition.
1282///
1283/// The core idiom we are trying to detect is:
1284/// \code
1285/// if (x0 == 0)
1286/// goto loop-exit // the precondition of the loop
1287/// cnt0 = init-val;
1288/// do {
1289/// x = phi (x0, x.next); //PhiX
1290/// cnt = phi(cnt0, cnt.next);
1291///
1292/// cnt.next = cnt + 1;
1293/// ...
1294/// x.next = x >> 1; // DefX
1295/// ...
1296/// } while(x.next != 0);
1297///
1298/// loop-exit:
1299/// \endcode
1300static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
1301 Instruction *&CntInst, PHINode *&CntPhi,
1302 Instruction *&DefX) {
1303 BasicBlock *LoopEntry;
1304 Value *VarX = nullptr;
1305
1306 DefX = nullptr;
1307 PhiX = nullptr;
1308 CntInst = nullptr;
1309 CntPhi = nullptr;
1310 LoopEntry = *(CurLoop->block_begin());
1311
1312 // step 1: Check if the loop-back branch is in desirable form.
1313 if (Value *T = matchCondition(
1314 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1315 DefX = dyn_cast<Instruction>(T);
1316 else
1317 return false;
1318
1319 // step 2: detect instructions corresponding to "x.next = x >> 1"
1320 if (!DefX || (DefX->getOpcode() != Instruction::AShr &&
1321 DefX->getOpcode() != Instruction::LShr))
1322 return false;
1323 ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
1324 if (!Shft || !Shft->isOne())
1325 return false;
1326 VarX = DefX->getOperand(0);
1327
1328 // step 3: Check the recurrence of variable X
1329 PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
1330 if (!PhiX)
1331 return false;
1332
1333 // step 4: Find the instruction which count the CTLZ: cnt.next = cnt + 1
1334 // TODO: We can skip the step. If loop trip count is known (CTLZ),
1335 // then all uses of "cnt.next" could be optimized to the trip count
1336 // plus "cnt0". Currently it is not optimized.
1337 // This step could be used to detect POPCNT instruction:
1338 // cnt.next = cnt + (x.next & 1)
1339 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1340 IterE = LoopEntry->end();
1341 Iter != IterE; Iter++) {
1342 Instruction *Inst = &*Iter;
1343 if (Inst->getOpcode() != Instruction::Add)
1344 continue;
1345
1346 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1347 if (!Inc || !Inc->isOne())
1348 continue;
1349
1350 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1351 if (!Phi)
1352 continue;
1353
1354 CntInst = Inst;
1355 CntPhi = Phi;
1356 break;
1357 }
1358 if (!CntInst)
1359 return false;
1360
1361 return true;
1362}
1363
1364/// Recognize CTLZ idiom in a non-countable loop and convert the loop
1365/// to countable (with CTLZ trip count).
1366/// If CTLZ inserted as a new trip count returns true; otherwise, returns false.
1367bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
1368 // Give up if the loop has multiple blocks or multiple backedges.
1369 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
11
Assuming the condition is false
12
Assuming the condition is false
13
Taking false branch
1370 return false;
1371
1372 Instruction *CntInst, *DefX;
1373 PHINode *CntPhi, *PhiX;
1374 if (!detectCTLZIdiom(CurLoop, PhiX, CntInst, CntPhi, DefX))
14
Taking false branch
1375 return false;
1376
1377 bool IsCntPhiUsedOutsideLoop = false;
1378 for (User *U : CntPhi->users())
1379 if (!CurLoop->contains(cast<Instruction>(U))) {
1380 IsCntPhiUsedOutsideLoop = true;
1381 break;
1382 }
1383 bool IsCntInstUsedOutsideLoop = false;
1384 for (User *U : CntInst->users())
1385 if (!CurLoop->contains(cast<Instruction>(U))) {
1386 IsCntInstUsedOutsideLoop = true;
1387 break;
1388 }
1389 // If both CntInst and CntPhi are used outside the loop the profitability
1390 // is questionable.
1391 if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
1392 return false;
1393
1394 // For some CPUs result of CTLZ(X) intrinsic is undefined
1395 // when X is 0. If we can not guarantee X != 0, we need to check this
1396 // when expand.
1397 bool ZeroCheck = false;
1398 // It is safe to assume Preheader exist as it was checked in
1399 // parent function RunOnLoop.
1400 BasicBlock *PH = CurLoop->getLoopPreheader();
1401 Value *InitX = PhiX->getIncomingValueForBlock(PH);
15
Calling 'PHINode::getIncomingValueForBlock'
19
Returning from 'PHINode::getIncomingValueForBlock'
20
'InitX' initialized here
1402
1403 // Make sure the initial value can't be negative otherwise the ashr in the
1404 // loop might never reach zero which would make the loop infinite.
1405 if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, *DL))
21
Assuming the condition is false
1406 return false;
1407
1408 // If we are using the count instruction outside the loop, make sure we
1409 // have a zero check as a precondition. Without the check the loop would run
1410 // one iteration for before any check of the input value. This means 0 and 1
1411 // would have identical behavior in the original loop and thus
1412 if (!IsCntPhiUsedOutsideLoop) {
22
Taking true branch
1413 auto *PreCondBB = PH->getSinglePredecessor();
1414 if (!PreCondBB)
23
Assuming 'PreCondBB' is non-null
24
Taking false branch
1415 return false;
1416 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1417 if (!PreCondBI)
25
Assuming 'PreCondBI' is non-null
26
Taking false branch
1418 return false;
1419 if (matchCondition(PreCondBI, PH) != InitX)
27
Assuming the condition is false
28
Assuming pointer value is null
29
Taking false branch
1420 return false;
1421 ZeroCheck = true;
1422 }
1423
1424 // Check if CTLZ intrinsic is profitable. Assume it is always profitable
1425 // if we delete the loop (the loop has only 6 instructions):
1426 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
1427 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
1428 // %shr = ashr %n.addr.0, 1
1429 // %tobool = icmp eq %shr, 0
1430 // %inc = add nsw %i.0, 1
1431 // br i1 %tobool
1432
1433 const Value *Args[] =
1434 {InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
30
'?' condition is true
31
Called C++ object pointer is null
1435 : ConstantInt::getFalse(InitX->getContext())};
1436 if (CurLoop->getHeader()->size() != 6 &&
1437 TTI->getIntrinsicCost(Intrinsic::ctlz, InitX->getType(), Args) >
1438 TargetTransformInfo::TCC_Basic)
1439 return false;
1440
1441 transformLoopToCountable(PH, CntInst, CntPhi, InitX, DefX,
1442 DefX->getDebugLoc(), ZeroCheck,
1443 IsCntPhiUsedOutsideLoop);
1444 return true;
1445}
1446
1447/// Recognizes a population count idiom in a non-countable loop.
1448///
1449/// If detected, transforms the relevant code to issue the popcount intrinsic
1450/// function call, and returns true; otherwise, returns false.
1451bool LoopIdiomRecognize::recognizePopcount() {
1452 if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
1453 return false;
1454
1455 // Counting population are usually conducted by few arithmetic instructions.
1456 // Such instructions can be easily "absorbed" by vacant slots in a
1457 // non-compact loop. Therefore, recognizing popcount idiom only makes sense
1458 // in a compact loop.
1459
1460 // Give up if the loop has multiple blocks or multiple backedges.
1461 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
1462 return false;
1463
1464 BasicBlock *LoopBody = *(CurLoop->block_begin());
1465 if (LoopBody->size() >= 20) {
1466 // The loop is too big, bail out.
1467 return false;
1468 }
1469
1470 // It should have a preheader containing nothing but an unconditional branch.
1471 BasicBlock *PH = CurLoop->getLoopPreheader();
1472 if (!PH || &PH->front() != PH->getTerminator())
1473 return false;
1474 auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
1475 if (!EntryBI || EntryBI->isConditional())
1476 return false;
1477
1478 // It should have a precondition block where the generated popcount intrinsic
1479 // function can be inserted.
1480 auto *PreCondBB = PH->getSinglePredecessor();
1481 if (!PreCondBB)
1482 return false;
1483 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1484 if (!PreCondBI || PreCondBI->isUnconditional())
1485 return false;
1486
1487 Instruction *CntInst;
1488 PHINode *CntPhi;
1489 Value *Val;
1490 if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
1491 return false;
1492
1493 transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
1494 return true;
1495}
1496
1497static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1498 const DebugLoc &DL) {
1499 Value *Ops[] = {Val};
1500 Type *Tys[] = {Val->getType()};
1501
1502 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1503 Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
1504 CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1505 CI->setDebugLoc(DL);
1506
1507 return CI;
1508}
1509
1510static CallInst *createCTLZIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1511 const DebugLoc &DL, bool ZeroCheck) {
1512 Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
1513 Type *Tys[] = {Val->getType()};
1514
1515 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1516 Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
1517 CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1518 CI->setDebugLoc(DL);
1519
1520 return CI;
1521}
1522
/// Transform the following loop:
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   LOOP_BODY
///   Br: loop if (DefX != 0)
/// Use(CntPhi) or Use(CntInst)
///
/// Into:
/// If CntPhi used outside the loop:
///   CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
///   Count = CountPrev + 1
/// else
///   Count = BitWidth(InitX) - CTLZ(InitX)
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   PhiCount = PHI [Count, Dec]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   Dec = PhiCount - 1
///   LOOP_BODY
///   Br: loop if (Dec != 0)
/// Use(CountPrev + Cnt0) // Use(CntPhi)
/// or
/// Use(Count + Cnt0) // Use(CntInst)
///
/// If LOOP_BODY is empty the loop will be deleted.
/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
void LoopIdiomRecognize::transformLoopToCountable(
    BasicBlock *Preheader, Instruction *CntInst, PHINode *CntPhi, Value *InitX,
    Instruction *DefX, const DebugLoc &DL, bool ZeroCheck,
    bool IsCntPhiUsedOutsideLoop) {
  BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());

  // Step 1: Insert the CTLZ instruction at the end of the preheader block
  //   Count = BitWidth - CTLZ(InitX);
  // If there are uses of CntPhi create:
  //   CountPrev = BitWidth - CTLZ(InitX >> 1);
  IRBuilder<> Builder(PreheaderBr);
  Builder.SetCurrentDebugLocation(DL);
  Value *CTLZ, *Count, *CountPrev, *NewCount, *InitXNext;

  // When the pre-increment counter (CntPhi) is the live-out value, base the
  // count on InitX shifted once, mirroring the loop's first iteration. The
  // shift kind must match DefX (ashr vs. lshr) to preserve semantics.
  if (IsCntPhiUsedOutsideLoop) {
    if (DefX->getOpcode() == Instruction::AShr)
      InitXNext =
          Builder.CreateAShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::LShr)
      InitXNext =
          Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else
      llvm_unreachable("Unexpected opcode!");
  } else
    InitXNext = InitX;
  CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
  // Count = BitWidth - CTLZ(InitXNext): the number of significant bits, i.e.
  // the trip count of the original shift loop.
  Count = Builder.CreateSub(
      ConstantInt::get(CTLZ->getType(),
                       CTLZ->getType()->getIntegerBitWidth()),
      CTLZ);
  if (IsCntPhiUsedOutsideLoop) {
    CountPrev = Count;
    Count = Builder.CreateAdd(
        CountPrev,
        ConstantInt::get(CountPrev->getType(), 1));
  }
  // NewCount replaces the out-of-loop uses; pick the pre- or post-increment
  // value and convert it to the counter's type.
  if (IsCntPhiUsedOutsideLoop)
    NewCount = Builder.CreateZExtOrTrunc(CountPrev,
                                         cast<IntegerType>(CntInst->getType()));
  else
    NewCount = Builder.CreateZExtOrTrunc(Count,
                                         cast<IntegerType>(CntInst->getType()));

  // If the CTLZ counter's initial value is not zero, insert Add Inst.
  Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
  ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
  if (!InitConst || !InitConst->isZero())
    NewCount = Builder.CreateAdd(NewCount, CntInitVal);

  // Step 2: Insert new IV and loop condition:
  // loop:
  //   ...
  //   PhiCount = PHI [Count, Dec]
  //   ...
  //   Dec = PhiCount - 1
  //   ...
  //   Br: loop if (Dec != 0)
  BasicBlock *Body = *(CurLoop->block_begin());
  auto *LbBr = cast<BranchInst>(Body->getTerminator());
  ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
  Type *Ty = Count->getType();

  PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

  Builder.SetInsertPoint(LbCond);
  Instruction *TcDec = cast<Instruction>(
      Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                        "tcdec", false, true));

  TcPhi->addIncoming(Count, Preheader);
  TcPhi->addIncoming(TcDec, Body);

  // Reuse the existing loop-back compare, retargeting it at the new IV. The
  // predicate depends on whether successor 0 is the loop body (branch
  // polarity).
  CmpInst::Predicate Pred =
      (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  LbCond->setPredicate(Pred);
  LbCond->setOperand(0, TcDec);
  LbCond->setOperand(1, ConstantInt::get(Ty, 0));

  // Step 3: All the references to the original counter outside
  //  the loop are replaced with the NewCount -- the value returned from
  //  __builtin_ctlz(x).
  if (IsCntPhiUsedOutsideLoop)
    CntPhi->replaceUsesOutsideBlock(NewCount, Body);
  else
    CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // step 4: Forget the "non-computable" trip-count SCEV associated with the
  //   loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}
1644
/// Rewrite a detected popcount loop: insert llvm.ctpop in the precondition
/// block, retarget the precondition and loop-back compares at the popcount
/// result, and replace out-of-loop uses of the counter with it, turning the
/// loop into a countable one.
void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
                                                 Instruction *CntInst,
                                                 PHINode *CntPhi, Value *Var) {
  BasicBlock *PreHead = CurLoop->getLoopPreheader();
  auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator());
  const DebugLoc &DL = CntInst->getDebugLoc();

  // Assuming before transformation, the loop is following:
  //  if (x) // the precondition
  //     do { cnt++; x &= x - 1; } while(x);

  // Step 1: Insert the ctpop instruction at the end of the precondition block
  IRBuilder<> Builder(PreCondBr);
  Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
  {
    PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    // Convert the popcount to the counter's type before mixing them.
    NewCount = PopCntZext =
      Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));

    if (NewCount != PopCnt)
      (cast<Instruction>(NewCount))->setDebugLoc(DL);

    // TripCnt is exactly the number of iterations the loop has
    TripCnt = NewCount;

    // If the population counter's initial value is not zero, insert Add Inst.
    Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    if (!InitConst || !InitConst->isZero()) {
      NewCount = Builder.CreateAdd(NewCount, CntInitVal);
      (cast<Instruction>(NewCount))->setDebugLoc(DL);
    }
  }

  // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
  //   "if (NewCount == 0) loop-exit". Without this change, the intrinsic
  //   function would be partial dead code, and downstream passes will drag
  //   it back from the precondition block to the preheader.
  {
    ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());

    // Keep the operand order of the original compare: swap if Var was the
    // second operand rather than the first.
    Value *Opnd0 = PopCntZext;
    Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    if (PreCond->getOperand(0) != Var)
      std::swap(Opnd0, Opnd1);

    ICmpInst *NewPreCond = cast<ICmpInst>(
        Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    PreCondBr->setCondition(NewPreCond);

    RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  }

  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enable us to convert the loop from noncountable
  // loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts population, the entire loop becomes dead after
  //    the transformation. It is a lot easier to prove a countable loop dead
  //    than to prove a noncountable one. (In some C dialects, an infinite loop
  //    isn't dead even if it computes nothing useful. In general, DCE needs
  //    to prove a noncountable loop finite before safely delete it.)
  //
  //  - If the loop also performs something else, it remains alive.
  //    Since it is transformed to countable form, it can be aggressively
  //    optimized by some optimizations which are in general not applicable
  //    to a noncountable loop.
  //
  // After this step, this loop (conceptually) would look like following:
  //   newcnt = __builtin_ctpop(x);
  //   t = newcnt;
  //   if (x)
  //     do { cnt++; x &= x-1; t--) } while (t > 0);
  BasicBlock *Body = *(CurLoop->block_begin());
  {
    auto *LbBr = cast<BranchInst>(Body->getTerminator());
    ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
    Type *Ty = TripCnt->getType();

    // New IV counting down from the trip count.
    PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

    Builder.SetInsertPoint(LbCond);
    Instruction *TcDec = cast<Instruction>(
        Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                          "tcdec", false, true));

    TcPhi->addIncoming(TripCnt, PreHead);
    TcPhi->addIncoming(TcDec, Body);

    // Retarget the loop-back compare; predicate depends on branch polarity.
    CmpInst::Predicate Pred =
        (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
    LbCond->setPredicate(Pred);
    LbCond->setOperand(0, TcDec);
    LbCond->setOperand(1, ConstantInt::get(Ty, 0));
  }

  // Step 4: All the references to the original population counter outside
  //  the loop are replaced with the NewCount -- the value returned from
  //  __builtin_ctpop().
  CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // step 5: Forget the "non-computable" trip-count SCEV associated with the
  //   loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}

/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes the class definitions of all of the subclasses of the
11// Instruction class. This is meant to be an easy way to get access to all
12// instruction subclasses.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_IR_INSTRUCTIONS_H
17#define LLVM_IR_INSTRUCTIONS_H
18
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/OperandTraits.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Use.h"
38#include "llvm/IR/User.h"
39#include "llvm/IR/Value.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/ErrorHandling.h"
43#include <cassert>
44#include <cstddef>
45#include <cstdint>
46#include <iterator>
47
48namespace llvm {
49
50class APInt;
51class ConstantInt;
52class DataLayout;
53class LLVMContext;
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
/// an instruction to allocate memory on the stack
class AllocaInst : public UnaryInstruction {
  Type *AllocatedType;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace,
                      Value *ArraySize = nullptr,
                      const Twine &Name = "",
                      Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Get allocation size in bits. Returns None if size can't be determined,
  /// e.g. in case of a VLA.
  Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  // The alignment is kept log2-encoded in the low 5 bits of the subclass
  // data: stored value k decodes to (1 << k) >> 1, so 0 means "unspecified".
  unsigned getAlignment() const {
    return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
  }
  void setAlignment(unsigned Align);

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  // Flag lives in bit 5 (mask 32) of the subclass data.
  bool isUsedWithInAlloca() const {
    return getSubclassDataFromInstruction() & 32;
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
                               (V ? 32 : 0));
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  // Flag lives in bit 6 (mask 64) of the subclass data.
  bool isSwiftError() const {
    return getSubclassDataFromInstruction() & 64;
  }

  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
                               (V ? 64 : 0));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }
};
161
162//===----------------------------------------------------------------------===//
163// LoadInst Class
164//===----------------------------------------------------------------------===//
165
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
// Subclass-data layout used below: bit 0 = volatile flag, bits 1-5 =
// log2-encoded alignment, bits 7-9 = atomic ordering. The sync scope does not
// fit and gets its own SSID member at the end of the class.
class LoadInst : public UnaryInstruction {
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
  LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
           Instruction *InsertBefore = nullptr);
  // The Value*-only constructors derive the loaded type from the pointer's
  // pointee type and forward to the explicitly-typed overloads.
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
           Instruction *InsertBefore = nullptr)
      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
                 NameStr, isVolatile, InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
           Instruction *InsertBefore = nullptr)
      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
                 NameStr, isVolatile, Align, InsertBefore) {}
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, Instruction *InsertBefore = nullptr);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
           AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr)
      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
                 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
  LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
           bool isVolatile = false, Instruction *InsertBefore = nullptr);
  explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
                    bool isVolatile = false,
                    Instruction *InsertBefore = nullptr)
      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
                 NameStr, isVolatile, InsertBefore) {}
  LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (V ? 1 : 0));
  }

  /// Return the alignment of the access that is being performed.
  // Log2-encoded in bits 1-5; stored 0 decodes to "no alignment specified".
  unsigned getAlignment() const {
    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
  }

  void setAlignment(unsigned Align);

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
  }

  /// Sets the ordering constraint of this load instruction.  May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
                               ((unsigned)Ordering << 7));
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this load instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
304
305//===----------------------------------------------------------------------===//
306// StoreInst Class
307//===----------------------------------------------------------------------===//
308
/// An instruction for storing to memory.
// Subclass-data layout mirrors LoadInst: bit 0 = volatile flag, bits 1-5 =
// log2-encoded alignment, bits 7-9 = atomic ordering; sync scope in SSID.
class StoreInst : public Instruction {
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, AtomicOrdering Order,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (V ? 1 : 0));
  }

  /// Transparently provide more efficient getOperand methods.
  // NOTE(review): this line appeared macro-expanded in the analyzed dump;
  // reconstructed here as the macro invocation from the original header.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the alignment of the access that is being performed
  // Log2-encoded in bits 1-5; stored 0 decodes to "no alignment specified".
  unsigned getAlignment() const {
    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
  }

  void setAlignment(unsigned Align);

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
  }

  /// Sets the ordering constraint of this store instruction.  May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
                               ((unsigned)Ordering << 7));
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this store instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
432
// StoreInst has exactly two operands (value, pointer).
template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

// Out-of-line definitions of StoreInst's operand accessors (op_begin/op_end,
// getOperand/setOperand, getNumOperands, Op<>) generated from OperandTraits.
// NOTE(review): this appeared macro-expanded in the analyzed dump;
// reconstructed here as the macro invocation from the original header.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
 444class FenceInst : public Instruction {
 445 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
 446
 447protected:
 448 // Note: Instruction needs to be a friend here to call cloneImpl.
 449 friend class Instruction;
 450
 451 FenceInst *cloneImpl() const;
 452
 453public:
 454 // Ordering may only be Acquire, Release, AcquireRelease, or
 455 // SequentiallyConsistent.
 456 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
 457 SyncScope::ID SSID = SyncScope::System,
 458 Instruction *InsertBefore = nullptr);
 459 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
 460 BasicBlock *InsertAtEnd);
 461
 462 // allocate space for exactly zero operands
 463 void *operator new(size_t s) {
 464 return User::operator new(s, 0);
 465 }
 466
 467 /// Returns the ordering constraint of this fence instruction.
 468 AtomicOrdering getOrdering() const {
 // The ordering is stored in SubclassData starting at bit 1 (see
 // setOrdering below, which preserves bit 0).
 469 return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
 470 }
 471
 472 /// Sets the ordering constraint of this fence instruction. May only be
 473 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
 474 void setOrdering(AtomicOrdering Ordering) {
 475 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
 476 ((unsigned)Ordering << 1));
 477 }
 478
 479 /// Returns the synchronization scope ID of this fence instruction.
 480 SyncScope::ID getSyncScopeID() const {
 481 return SSID;
 482 }
 483
 484 /// Sets the synchronization scope ID of this fence instruction.
 485 void setSyncScopeID(SyncScope::ID SSID) {
 486 this->SSID = SSID;
 487 }
 488
 489 // Methods for support type inquiry through isa, cast, and dyn_cast:
 490 static bool classof(const Instruction *I) {
 491 return I->getOpcode() == Instruction::Fence;
 492 }
 493 static bool classof(const Value *V) {
 494 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 495 }
 496
 497private:
 498 // Shadow Instruction::setInstructionSubclassData with a private forwarding
 499 // method so that subclasses cannot accidentally use it.
 500 void setInstructionSubclassData(unsigned short D) {
 501 Instruction::setInstructionSubclassData(D);
 502 }
 503
 504 /// The synchronization scope ID of this fence instruction. Not quite enough
 505 /// room in SubClassData for everything, so synchronization scope ID gets its
 506 /// own field.
 507 SyncScope::ID SSID;
 508};
509
510//===----------------------------------------------------------------------===//
511// AtomicCmpXchgInst Class
512//===----------------------------------------------------------------------===//
513
514/// an instruction that atomically checks whether a
515/// specified value is in a memory location, and, if it is, stores a new value
516/// there. Returns the value that was loaded.
517///
 518class AtomicCmpXchgInst : public Instruction {
 519 void Init(Value *Ptr, Value *Cmp, Value *NewVal,
 520 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
 521 SyncScope::ID SSID);
 522
 523protected:
 524 // Note: Instruction needs to be a friend here to call cloneImpl.
 525 friend class Instruction;
 526
 527 AtomicCmpXchgInst *cloneImpl() const;
 528
 529public:
 530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
 531 AtomicOrdering SuccessOrdering,
 532 AtomicOrdering FailureOrdering,
 533 SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
 534 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
 535 AtomicOrdering SuccessOrdering,
 536 AtomicOrdering FailureOrdering,
 537 SyncScope::ID SSID, BasicBlock *InsertAtEnd);
 538
 539 // allocate space for exactly three operands
 540 void *operator new(size_t s) {
 541 return User::operator new(s, 3);
 542 }
 543
 // SubclassData bit layout, as established by the accessors below:
 //   bit 0      = volatile flag
 //   bits 2-4   = success ordering
 //   bits 5-7   = failure ordering
 //   bit 8      = weak flag
 544 /// Return true if this is a cmpxchg from a volatile memory
 545 /// location.
 546 ///
 547 bool isVolatile() const {
 548 return getSubclassDataFromInstruction() & 1;
 549 }
 550
 551 /// Specify whether this is a volatile cmpxchg.
 552 ///
 553 void setVolatile(bool V) {
 554 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
 555 (unsigned)V);
 556 }
 557
 558 /// Return true if this cmpxchg may spuriously fail.
 559 bool isWeak() const {
 560 return getSubclassDataFromInstruction() & 0x100;
 561 }
 562
 /// Specify whether this cmpxchg may spuriously fail (stored in bit 8).
 563 void setWeak(bool IsWeak) {
 564 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
 565 (IsWeak << 8));
 566 }
 567
 568 /// Transparently provide more efficient getOperand methods.
 569 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
 570
 571 /// Returns the success ordering constraint of this cmpxchg instruction.
 572 AtomicOrdering getSuccessOrdering() const {
 573 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
 574 }
 575
 576 /// Sets the success ordering constraint of this cmpxchg instruction.
 577 void setSuccessOrdering(AtomicOrdering Ordering) {
 578 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 579, __extension__ __PRETTY_FUNCTION__))
 579 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 579, __extension__ __PRETTY_FUNCTION__))
;
 580 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
 581 ((unsigned)Ordering << 2));
 582 }
 583
 584 /// Returns the failure ordering constraint of this cmpxchg instruction.
 585 AtomicOrdering getFailureOrdering() const {
 586 return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
 587 }
 588
 589 /// Sets the failure ordering constraint of this cmpxchg instruction.
 590 void setFailureOrdering(AtomicOrdering Ordering) {
 591 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 592, __extension__ __PRETTY_FUNCTION__))
 592 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 592, __extension__ __PRETTY_FUNCTION__))
;
 593 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
 594 ((unsigned)Ordering << 5));
 595 }
 596
 597 /// Returns the synchronization scope ID of this cmpxchg instruction.
 598 SyncScope::ID getSyncScopeID() const {
 599 return SSID;
 600 }
 601
 602 /// Sets the synchronization scope ID of this cmpxchg instruction.
 603 void setSyncScopeID(SyncScope::ID SSID) {
 604 this->SSID = SSID;
 605 }
 606
 // Operand order: 0 = pointer, 1 = compare value, 2 = new value.
 607 Value *getPointerOperand() { return getOperand(0); }
 608 const Value *getPointerOperand() const { return getOperand(0); }
 609 static unsigned getPointerOperandIndex() { return 0U; }
 610
 611 Value *getCompareOperand() { return getOperand(1); }
 612 const Value *getCompareOperand() const { return getOperand(1); }
 613
 614 Value *getNewValOperand() { return getOperand(2); }
 615 const Value *getNewValOperand() const { return getOperand(2); }
 616
 617 /// Returns the address space of the pointer operand.
 618 unsigned getPointerAddressSpace() const {
 619 return getPointerOperand()->getType()->getPointerAddressSpace();
 620 }
 621
 622 /// Returns the strongest permitted ordering on failure, given the
 623 /// desired ordering on success.
 624 ///
 625 /// If the comparison in a cmpxchg operation fails, there is no atomic store
 626 /// so release semantics cannot be provided. So this function drops explicit
 627 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
 628 /// operation would remain SequentiallyConsistent.
 629 static AtomicOrdering
 630 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
 631 switch (SuccessOrdering) {
 632 default:
 633 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 633)
;
 634 case AtomicOrdering::Release:
 635 case AtomicOrdering::Monotonic:
 636 return AtomicOrdering::Monotonic;
 637 case AtomicOrdering::AcquireRelease:
 638 case AtomicOrdering::Acquire:
 639 return AtomicOrdering::Acquire;
 640 case AtomicOrdering::SequentiallyConsistent:
 641 return AtomicOrdering::SequentiallyConsistent;
 642 }
 643 }
 644
 645 // Methods for support type inquiry through isa, cast, and dyn_cast:
 646 static bool classof(const Instruction *I) {
 647 return I->getOpcode() == Instruction::AtomicCmpXchg;
 648 }
 649 static bool classof(const Value *V) {
 650 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 651 }
 652
 653private:
 654 // Shadow Instruction::setInstructionSubclassData with a private forwarding
 655 // method so that subclasses cannot accidentally use it.
 656 void setInstructionSubclassData(unsigned short D) {
 657 Instruction::setInstructionSubclassData(D);
 658 }
 659
 660 /// The synchronization scope ID of this cmpxchg instruction. Not quite
 661 /// enough room in SubClassData for everything, so synchronization scope ID
 662 /// gets its own field.
 663 SyncScope::ID SSID;
 664};
665
// AtomicCmpXchgInst always has exactly three operands (pointer, compare
// value, new value), so it uses the fixed-size co-allocated operand layout.
 666template <>
 667struct OperandTraits<AtomicCmpXchgInst> :
 668 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
 669};
670
// Macro expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS: emits the
// out-of-line operand accessors declared inside AtomicCmpXchgInst by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
 671DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 671, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicCmpXchgInst>::op_begin
(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture].get
()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 671, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicCmpXchgInst::getNumOperands() const { return
OperandTraits<AtomicCmpXchgInst>::operands(this); } template
<int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &AtomicCmpXchgInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
672
673//===----------------------------------------------------------------------===//
674// AtomicRMWInst Class
675//===----------------------------------------------------------------------===//
676
677/// an instruction that atomically reads a memory location,
678/// combines it with another value, and then stores the result back. Returns
679/// the old value.
680///
 681class AtomicRMWInst : public Instruction {
 682protected:
 683 // Note: Instruction needs to be a friend here to call cloneImpl.
 684 friend class Instruction;
 685
 686 AtomicRMWInst *cloneImpl() const;
 687
 688public:
 689 /// This enumeration lists the possible modifications atomicrmw can make. In
 690 /// the descriptions, 'p' is the pointer to the instruction's memory location,
 691 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
 692 /// instruction. These instructions always return 'old'.
 693 enum BinOp {
 694 /// *p = v
 695 Xchg,
 696 /// *p = old + v
 697 Add,
 698 /// *p = old - v
 699 Sub,
 700 /// *p = old & v
 701 And,
 702 /// *p = ~(old & v)
 703 Nand,
 704 /// *p = old | v
 705 Or,
 706 /// *p = old ^ v
 707 Xor,
 708 /// *p = old >signed v ? old : v
 709 Max,
 710 /// *p = old <signed v ? old : v
 711 Min,
 712 /// *p = old >unsigned v ? old : v
 713 UMax,
 714 /// *p = old <unsigned v ? old : v
 715 UMin,
 716
 717 FIRST_BINOP = Xchg,
 718 LAST_BINOP = UMin,
 719 BAD_BINOP
 720 };
 721
 722 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
 723 AtomicOrdering Ordering, SyncScope::ID SSID,
 724 Instruction *InsertBefore = nullptr);
 725 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
 726 AtomicOrdering Ordering, SyncScope::ID SSID,
 727 BasicBlock *InsertAtEnd);
 728
 729 // allocate space for exactly two operands
 730 void *operator new(size_t s) {
 731 return User::operator new(s, 2);
 732 }
 733
 // SubclassData bit layout, as established by the accessors below:
 //   bit 0    = volatile flag
 //   bits 2-4 = atomic ordering
 //   bits 5+  = BinOp operation
 /// Returns the binary operation this instruction performs.
 734 BinOp getOperation() const {
 735 return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
 736 }
 737
 /// Sets the binary operation; the low 5 SubclassData bits (volatile and
 /// ordering) are preserved.
 738 void setOperation(BinOp Operation) {
 739 unsigned short SubclassData = getSubclassDataFromInstruction();
 740 setInstructionSubclassData((SubclassData & 31) |
 741 (Operation << 5));
 742 }
 743
 744 /// Return true if this is a RMW on a volatile memory location.
 745 ///
 746 bool isVolatile() const {
 747 return getSubclassDataFromInstruction() & 1;
 748 }
 749
 750 /// Specify whether this is a volatile RMW or not.
 751 ///
 752 void setVolatile(bool V) {
 753 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
 754 (unsigned)V);
 755 }
 756
 757 /// Transparently provide more efficient getOperand methods.
 758 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
 759
 760 /// Returns the ordering constraint of this rmw instruction.
 761 AtomicOrdering getOrdering() const {
 762 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
 763 }
 764
 765 /// Sets the ordering constraint of this rmw instruction.
 766 void setOrdering(AtomicOrdering Ordering) {
 767 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 768, __extension__ __PRETTY_FUNCTION__))
 768 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 768, __extension__ __PRETTY_FUNCTION__))
;
 769 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
 770 ((unsigned)Ordering << 2));
 771 }
 772
 773 /// Returns the synchronization scope ID of this rmw instruction.
 774 SyncScope::ID getSyncScopeID() const {
 775 return SSID;
 776 }
 777
 778 /// Sets the synchronization scope ID of this rmw instruction.
 779 void setSyncScopeID(SyncScope::ID SSID) {
 780 this->SSID = SSID;
 781 }
 782
 // Operand order: 0 = pointer, 1 = value to combine with *p.
 783 Value *getPointerOperand() { return getOperand(0); }
 784 const Value *getPointerOperand() const { return getOperand(0); }
 785 static unsigned getPointerOperandIndex() { return 0U; }
 786
 787 Value *getValOperand() { return getOperand(1); }
 788 const Value *getValOperand() const { return getOperand(1); }
 789
 790 /// Returns the address space of the pointer operand.
 791 unsigned getPointerAddressSpace() const {
 792 return getPointerOperand()->getType()->getPointerAddressSpace();
 793 }
 794
 795 // Methods for support type inquiry through isa, cast, and dyn_cast:
 796 static bool classof(const Instruction *I) {
 797 return I->getOpcode() == Instruction::AtomicRMW;
 798 }
 799 static bool classof(const Value *V) {
 800 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 801 }
 802
 803private:
 804 void Init(BinOp Operation, Value *Ptr, Value *Val,
 805 AtomicOrdering Ordering, SyncScope::ID SSID);
 806
 807 // Shadow Instruction::setInstructionSubclassData with a private forwarding
 808 // method so that subclasses cannot accidentally use it.
 809 void setInstructionSubclassData(unsigned short D) {
 810 Instruction::setInstructionSubclassData(D);
 811 }
 812
 813 /// The synchronization scope ID of this rmw instruction. Not quite enough
 814 /// room in SubClassData for everything, so synchronization scope ID gets its
 815 /// own field.
 816 SyncScope::ID SSID;
 817};
818
// AtomicRMWInst always has exactly two operands (pointer and value), so it
// uses the fixed-size co-allocated operand layout.
 819template <>
 820struct OperandTraits<AtomicRMWInst>
 821 : public FixedNumOperandTraits<AtomicRMWInst,2> {
 822};
823
// Macro expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS: emits the
// out-of-line operand accessors declared inside AtomicRMWInst by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
 824DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 824, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicRMWInst>::op_begin(const_cast
<AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<AtomicRMWInst
>::operands(this) && "setOperand() out of range!")
? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 824, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicRMWInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicRMWInst::getNumOperands() const { return OperandTraits
<AtomicRMWInst>::operands(this); } template <int Idx_nocapture
> Use &AtomicRMWInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &AtomicRMWInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
825
826//===----------------------------------------------------------------------===//
827// GetElementPtrInst Class
828//===----------------------------------------------------------------------===//
829
830// checkGEPType - Simple wrapper function to give a better assertion failure
831// message on bad indexes for a gep instruction.
832//
 833inline Type *checkGEPType(Type *Ty) {
 // Ty is null when getIndexedType rejected the GEP's indices for the
 // pointee type; the assert (debug builds only) turns that into a
 // readable failure instead of a later null dereference.
 834 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 834, __extension__ __PRETTY_FUNCTION__))
;
 835 return Ty;
 836}
837
838/// an instruction for type-safe pointer arithmetic to
839/// access elements of arrays and structs
840///
 841class GetElementPtrInst : public Instruction {
 // Cached pointee type of operand 0 and element type of the GEP's result;
 // kept alongside the IR so they survive while pointer types are opaque-ing.
 842 Type *SourceElementType;
 843 Type *ResultElementType;
 844
 845 GetElementPtrInst(const GetElementPtrInst &GEPI);
 846
 847 /// Constructors - Create a getelementptr instruction with a base pointer and
 848 /// a list of indices. The first ctor can optionally insert before an existing
 849 /// instruction, the second appends the new instruction to the specified
 850 /// BasicBlock.
 851 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
 852 ArrayRef<Value *> IdxList, unsigned Values,
 853 const Twine &NameStr, Instruction *InsertBefore);
 854 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
 855 ArrayRef<Value *> IdxList, unsigned Values,
 856 const Twine &NameStr, BasicBlock *InsertAtEnd);
 857
 858 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
 859
 860protected:
 861 // Note: Instruction needs to be a friend here to call cloneImpl.
 862 friend class Instruction;
 863
 864 GetElementPtrInst *cloneImpl() const;
 865
 866public:
 // If PointeeType is null it is deduced from Ptr's (scalar) pointer type;
 // otherwise it must match that type (asserted in debug builds below).
 // Values = 1 (the pointer operand) + number of indices; it sizes the
 // co-allocated operand array via placement new.
 867 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
 868 ArrayRef<Value *> IdxList,
 869 const Twine &NameStr = "",
 870 Instruction *InsertBefore = nullptr) {
 871 unsigned Values = 1 + unsigned(IdxList.size());
 872 if (!PointeeType)
 873 PointeeType =
 874 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
 875 else
 876 assert((static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 878, __extension__ __PRETTY_FUNCTION__))
 877 PointeeType ==(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 878, __extension__ __PRETTY_FUNCTION__))
 878 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 878, __extension__ __PRETTY_FUNCTION__))
;
 879 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
 880 NameStr, InsertBefore);
 881 }
 882
 // Same as above, but appends the new instruction to InsertAtEnd.
 883 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
 884 ArrayRef<Value *> IdxList,
 885 const Twine &NameStr,
 886 BasicBlock *InsertAtEnd) {
 887 unsigned Values = 1 + unsigned(IdxList.size());
 888 if (!PointeeType)
 889 PointeeType =
 890 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
 891 else
 892 assert((static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 894, __extension__ __PRETTY_FUNCTION__))
 893 PointeeType ==(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 894, __extension__ __PRETTY_FUNCTION__))
 894 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 894, __extension__ __PRETTY_FUNCTION__))
;
 895 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
 896 NameStr, InsertAtEnd);
 897 }
 898
 899 /// Create an "inbounds" getelementptr. See the documentation for the
 900 /// "inbounds" flag in LangRef.html for details.
 901 static GetElementPtrInst *CreateInBounds(Value *Ptr,
 902 ArrayRef<Value *> IdxList,
 903 const Twine &NameStr = "",
 904 Instruction *InsertBefore = nullptr){
 905 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
 906 }
 907
 // All CreateInBounds overloads forward to Create and then set the
 // inbounds flag on the new instruction.
 908 static GetElementPtrInst *
 909 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
 910 const Twine &NameStr = "",
 911 Instruction *InsertBefore = nullptr) {
 912 GetElementPtrInst *GEP =
 913 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
 914 GEP->setIsInBounds(true);
 915 return GEP;
 916 }
 917
 918 static GetElementPtrInst *CreateInBounds(Value *Ptr,
 919 ArrayRef<Value *> IdxList,
 920 const Twine &NameStr,
 921 BasicBlock *InsertAtEnd) {
 922 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
 923 }
 924
 925 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
 926 ArrayRef<Value *> IdxList,
 927 const Twine &NameStr,
 928 BasicBlock *InsertAtEnd) {
 929 GetElementPtrInst *GEP =
 930 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
 931 GEP->setIsInBounds(true);
 932 return GEP;
 933 }
 934
 935 /// Transparently provide more efficient getOperand methods.
 936 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
 937
 /// Returns the type the GEP indexes into (the pointee type of operand 0).
 938 Type *getSourceElementType() const { return SourceElementType; }
 939
 // Raw setters for the cached types; callers are responsible for keeping
 // them consistent with the operand and result types.
 940 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
 941 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
 942
 /// Returns the element type of the value the GEP produces; asserted (in
 /// debug builds) to match the pointee type of the GEP's result type.
 943 Type *getResultElementType() const {
 944 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 945, __extension__ __PRETTY_FUNCTION__))
 945 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 945, __extension__ __PRETTY_FUNCTION__))
;
 946 return ResultElementType;
 947 }
 948
 949 /// Returns the address space of this instruction's pointer type.
 950 unsigned getAddressSpace() const {
 951 // Note that this is always the same as the pointer operand's address space
 952 // and that is cheaper to compute, so cheat here.
 953 return getPointerAddressSpace();
 954 }
 955
 956 /// Returns the type of the element that would be loaded with
 957 /// a load instruction with the specified parameters.
 958 ///
 959 /// Null is returned if the indices are invalid for the specified
 960 /// pointer type.
 961 ///
 962 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
 963 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
 964 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
 965
 // Index operands are everything after operand 0 (the base pointer).
 966 inline op_iterator idx_begin() { return op_begin()+1; }
 967 inline const_op_iterator idx_begin() const { return op_begin()+1; }
 968 inline op_iterator idx_end() { return op_end(); }
 969 inline const_op_iterator idx_end() const { return op_end(); }
 970
 971 inline iterator_range<op_iterator> indices() {
 972 return make_range(idx_begin(), idx_end());
 973 }
 974
 975 inline iterator_range<const_op_iterator> indices() const {
 976 return make_range(idx_begin(), idx_end());
 977 }
 978
 979 Value *getPointerOperand() {
 980 return getOperand(0);
 981 }
 982 const Value *getPointerOperand() const {
 983 return getOperand(0);
 984 }
 985 static unsigned getPointerOperandIndex() {
 986 return 0U; // get index for modifying correct operand.
 987 }
 988
 989 /// Method to return the pointer operand as a
 990 /// PointerType.
 991 Type *getPointerOperandType() const {
 992 return getPointerOperand()->getType();
 993 }
 994
 995 /// Returns the address space of the pointer operand.
 996 unsigned getPointerAddressSpace() const {
 997 return getPointerOperandType()->getPointerAddressSpace();
 998 }
 999
 1000 /// Returns the pointer type returned by the GEP
 1001 /// instruction, which may be a vector of pointers.
 1002 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
 1003 return getGEPReturnType(
 1004 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
 1005 Ptr, IdxList);
 1006 }
 // The result is a vector of pointers when either the base pointer or any
 // index is a vector; otherwise it is a scalar pointer.
 1007 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
 1008 ArrayRef<Value *> IdxList) {
 1009 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
 1010 Ptr->getType()->getPointerAddressSpace());
 1011 // Vector GEP
 1012 if (Ptr->getType()->isVectorTy()) {
 1013 unsigned NumElem = Ptr->getType()->getVectorNumElements();
 1014 return VectorType::get(PtrTy, NumElem);
 1015 }
 1016 for (Value *Index : IdxList)
 1017 if (Index->getType()->isVectorTy()) {
 1018 unsigned NumElem = Index->getType()->getVectorNumElements();
 1019 return VectorType::get(PtrTy, NumElem);
 1020 }
 1021 // Scalar GEP
 1022 return PtrTy;
 1023 }
 1024
 1025 unsigned getNumIndices() const { // Note: always non-negative
 1026 return getNumOperands() - 1;
 1027 }
 1028
 1029 bool hasIndices() const {
 1030 return getNumOperands() > 1;
 1031 }
 1032
 1033 /// Return true if all of the indices of this GEP are
 1034 /// zeros. If so, the result pointer and the first operand have the same
 1035 /// value, just potentially different types.
 1036 bool hasAllZeroIndices() const;
 1037
 1038 /// Return true if all of the indices of this GEP are
 1039 /// constant integers. If so, the result pointer and the first operand have
 1040 /// a constant offset between them.
 1041 bool hasAllConstantIndices() const;
 1042
 1043 /// Set or clear the inbounds flag on this GEP instruction.
 1044 /// See LangRef.html for the meaning of inbounds on a getelementptr.
 1045 void setIsInBounds(bool b = true);
 1046
 1047 /// Determine whether the GEP has the inbounds flag.
 1048 bool isInBounds() const;
 1049
 1050 /// Accumulate the constant address offset of this GEP if possible.
 1051 ///
 1052 /// This routine accepts an APInt into which it will accumulate the constant
 1053 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
 1054 /// all-constant, it returns false and the value of the offset APInt is
 1055 /// undefined (it is *not* preserved!). The APInt passed into this routine
 1056 /// must be at least as wide as the IntPtr type for the address space of
 1057 /// the base GEP pointer.
 1058 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
 1059
 1060 // Methods for support type inquiry through isa, cast, and dyn_cast:
 1061 static bool classof(const Instruction *I) {
 1062 return (I->getOpcode() == Instruction::GetElementPtr);
 1063 }
 1064 static bool classof(const Value *V) {
 1065 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 1066 }
 1067};
1068
1069template <>
1070struct OperandTraits<GetElementPtrInst> :
1071 public VariadicOperandTraits<GetElementPtrInst, 1> {
1072};
1073
1074GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1075 ArrayRef<Value *> IdxList, unsigned Values,
1076 const Twine &NameStr,
1077 Instruction *InsertBefore)
1078 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1079 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1080 Values, InsertBefore),
1081 SourceElementType(PointeeType),
1082 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1083 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1084, __extension__ __PRETTY_FUNCTION__))
1084 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1084, __extension__ __PRETTY_FUNCTION__))
;
1085 init(Ptr, IdxList, NameStr);
1086}
1087
1088GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1089 ArrayRef<Value *> IdxList, unsigned Values,
1090 const Twine &NameStr,
1091 BasicBlock *InsertAtEnd)
1092 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1093 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1094 Values, InsertAtEnd),
1095 SourceElementType(PointeeType),
1096 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1097 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1098, __extension__ __PRETTY_FUNCTION__))
1098 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1098, __extension__ __PRETTY_FUNCTION__))
;
1099 init(Ptr, IdxList, NameStr);
1100}
1101
1102DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1102, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<GetElementPtrInst>::op_begin
(const_cast<GetElementPtrInst*>(this))[i_nocapture].get
()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1102, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
GetElementPtrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned GetElementPtrInst::getNumOperands() const { return
OperandTraits<GetElementPtrInst>::operands(this); } template
<int Idx_nocapture> Use &GetElementPtrInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &GetElementPtrInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1103
1104//===----------------------------------------------------------------------===//
1105// ICmpInst Class
1106//===----------------------------------------------------------------------===//
1107
1108/// This instruction compares its operands according to the predicate given
1109/// to the constructor. It only operates on integers or pointers. The operands
1110/// must be identical types.
1111/// Represent an integer comparison operator.
1112class ICmpInst: public CmpInst {
1113 void AssertOK() {
1114 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1115, __extension__ __PRETTY_FUNCTION__))
1115 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1115, __extension__ __PRETTY_FUNCTION__))
;
1116 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
1117 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
;
1118 // Check that the operands are the right type
1119 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1121, __extension__ __PRETTY_FUNCTION__))
1120 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1121, __extension__ __PRETTY_FUNCTION__))
1121 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1121, __extension__ __PRETTY_FUNCTION__))
;
1122 }
1123
1124protected:
1125 // Note: Instruction needs to be a friend here to call cloneImpl.
1126 friend class Instruction;
1127
1128 /// Clone an identical ICmpInst
1129 ICmpInst *cloneImpl() const;
1130
1131public:
1132 /// Constructor with insert-before-instruction semantics.
1133 ICmpInst(
1134 Instruction *InsertBefore, ///< Where to insert
1135 Predicate pred, ///< The predicate to use for the comparison
1136 Value *LHS, ///< The left-hand-side of the expression
1137 Value *RHS, ///< The right-hand-side of the expression
1138 const Twine &NameStr = "" ///< Name of the instruction
1139 ) : CmpInst(makeCmpResultType(LHS->getType()),
1140 Instruction::ICmp, pred, LHS, RHS, NameStr,
1141 InsertBefore) {
1142#ifndef NDEBUG
1143 AssertOK();
1144#endif
1145 }
1146
1147 /// Constructor with insert-at-end semantics.
1148 ICmpInst(
1149 BasicBlock &InsertAtEnd, ///< Block to insert into.
1150 Predicate pred, ///< The predicate to use for the comparison
1151 Value *LHS, ///< The left-hand-side of the expression
1152 Value *RHS, ///< The right-hand-side of the expression
1153 const Twine &NameStr = "" ///< Name of the instruction
1154 ) : CmpInst(makeCmpResultType(LHS->getType()),
1155 Instruction::ICmp, pred, LHS, RHS, NameStr,
1156 &InsertAtEnd) {
1157#ifndef NDEBUG
1158 AssertOK();
1159#endif
1160 }
1161
1162 /// Constructor with no-insertion semantics
1163 ICmpInst(
1164 Predicate pred, ///< The predicate to use for the comparison
1165 Value *LHS, ///< The left-hand-side of the expression
1166 Value *RHS, ///< The right-hand-side of the expression
1167 const Twine &NameStr = "" ///< Name of the instruction
1168 ) : CmpInst(makeCmpResultType(LHS->getType()),
1169 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1170#ifndef NDEBUG
1171 AssertOK();
1172#endif
1173 }
1174
1175 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1176 /// @returns the predicate that would be the result if the operand were
1177 /// regarded as signed.
1178 /// Return the signed version of the predicate
1179 Predicate getSignedPredicate() const {
1180 return getSignedPredicate(getPredicate());
1181 }
1182
1183 /// This is a static version that you can use without an instruction.
1184 /// Return the signed version of the predicate.
1185 static Predicate getSignedPredicate(Predicate pred);
1186
1187 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1188 /// @returns the predicate that would be the result if the operand were
1189 /// regarded as unsigned.
1190 /// Return the unsigned version of the predicate
1191 Predicate getUnsignedPredicate() const {
1192 return getUnsignedPredicate(getPredicate());
1193 }
1194
1195 /// This is a static version that you can use without an instruction.
1196 /// Return the unsigned version of the predicate.
1197 static Predicate getUnsignedPredicate(Predicate pred);
1198
1199 /// Return true if this predicate is either EQ or NE. This also
1200 /// tests for commutativity.
1201 static bool isEquality(Predicate P) {
1202 return P == ICMP_EQ || P == ICMP_NE;
1203 }
1204
1205 /// Return true if this predicate is either EQ or NE. This also
1206 /// tests for commutativity.
1207 bool isEquality() const {
1208 return isEquality(getPredicate());
1209 }
1210
1211 /// @returns true if the predicate of this ICmpInst is commutative
1212 /// Determine if this relation is commutative.
1213 bool isCommutative() const { return isEquality(); }
1214
1215 /// Return true if the predicate is relational (not EQ or NE).
1216 ///
1217 bool isRelational() const {
1218 return !isEquality();
1219 }
1220
1221 /// Return true if the predicate is relational (not EQ or NE).
1222 ///
1223 static bool isRelational(Predicate P) {
1224 return !isEquality(P);
1225 }
1226
1227 /// Exchange the two operands to this instruction in such a way that it does
1228 /// not modify the semantics of the instruction. The predicate value may be
1229 /// changed to retain the same result if the predicate is order dependent
1230 /// (e.g. ult).
1231 /// Swap operands and adjust predicate.
1232 void swapOperands() {
1233 setPredicate(getSwappedPredicate());
1234 Op<0>().swap(Op<1>());
1235 }
1236
1237 // Methods for support type inquiry through isa, cast, and dyn_cast:
1238 static bool classof(const Instruction *I) {
1239 return I->getOpcode() == Instruction::ICmp;
1240 }
1241 static bool classof(const Value *V) {
1242 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1243 }
1244};
1245
1246//===----------------------------------------------------------------------===//
1247// FCmpInst Class
1248//===----------------------------------------------------------------------===//
1249
1250/// This instruction compares its operands according to the predicate given
1251/// to the constructor. It only operates on floating point values or packed
1252/// vectors of floating point values. The operands must be identical types.
1253/// Represents a floating point comparison operator.
1254class FCmpInst: public CmpInst {
1255 void AssertOK() {
1256 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1256, __extension__ __PRETTY_FUNCTION__))
;
1257 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1258, __extension__ __PRETTY_FUNCTION__))
1258 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1258, __extension__ __PRETTY_FUNCTION__))
;
1259 // Check that the operands are the right type
1260 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1261, __extension__ __PRETTY_FUNCTION__))
1261 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1261, __extension__ __PRETTY_FUNCTION__))
;
1262 }
1263
1264protected:
1265 // Note: Instruction needs to be a friend here to call cloneImpl.
1266 friend class Instruction;
1267
1268 /// Clone an identical FCmpInst
1269 FCmpInst *cloneImpl() const;
1270
1271public:
1272 /// Constructor with insert-before-instruction semantics.
1273 FCmpInst(
1274 Instruction *InsertBefore, ///< Where to insert
1275 Predicate pred, ///< The predicate to use for the comparison
1276 Value *LHS, ///< The left-hand-side of the expression
1277 Value *RHS, ///< The right-hand-side of the expression
1278 const Twine &NameStr = "" ///< Name of the instruction
1279 ) : CmpInst(makeCmpResultType(LHS->getType()),
1280 Instruction::FCmp, pred, LHS, RHS, NameStr,
1281 InsertBefore) {
1282 AssertOK();
1283 }
1284
1285 /// Constructor with insert-at-end semantics.
1286 FCmpInst(
1287 BasicBlock &InsertAtEnd, ///< Block to insert into.
1288 Predicate pred, ///< The predicate to use for the comparison
1289 Value *LHS, ///< The left-hand-side of the expression
1290 Value *RHS, ///< The right-hand-side of the expression
1291 const Twine &NameStr = "" ///< Name of the instruction
1292 ) : CmpInst(makeCmpResultType(LHS->getType()),
1293 Instruction::FCmp, pred, LHS, RHS, NameStr,
1294 &InsertAtEnd) {
1295 AssertOK();
1296 }
1297
1298 /// Constructor with no-insertion semantics
1299 FCmpInst(
1300 Predicate pred, ///< The predicate to use for the comparison
1301 Value *LHS, ///< The left-hand-side of the expression
1302 Value *RHS, ///< The right-hand-side of the expression
1303 const Twine &NameStr = "" ///< Name of the instruction
1304 ) : CmpInst(makeCmpResultType(LHS->getType()),
1305 Instruction::FCmp, pred, LHS, RHS, NameStr) {
1306 AssertOK();
1307 }
1308
1309 /// @returns true if the predicate of this instruction is EQ or NE.
1310 /// Determine if this is an equality predicate.
1311 static bool isEquality(Predicate Pred) {
1312 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1313 Pred == FCMP_UNE;
1314 }
1315
1316 /// @returns true if the predicate of this instruction is EQ or NE.
1317 /// Determine if this is an equality predicate.
1318 bool isEquality() const { return isEquality(getPredicate()); }
1319
1320 /// @returns true if the predicate of this instruction is commutative.
1321 /// Determine if this is a commutative predicate.
1322 bool isCommutative() const {
1323 return isEquality() ||
1324 getPredicate() == FCMP_FALSE ||
1325 getPredicate() == FCMP_TRUE ||
1326 getPredicate() == FCMP_ORD ||
1327 getPredicate() == FCMP_UNO;
1328 }
1329
1330 /// @returns true if the predicate is relational (not EQ or NE).
1331 /// Determine if this a relational predicate.
1332 bool isRelational() const { return !isEquality(); }
1333
1334 /// Exchange the two operands to this instruction in such a way that it does
1335 /// not modify the semantics of the instruction. The predicate value may be
1336 /// changed to retain the same result if the predicate is order dependent
1337 /// (e.g. ult).
1338 /// Swap operands and adjust predicate.
1339 void swapOperands() {
1340 setPredicate(getSwappedPredicate());
1341 Op<0>().swap(Op<1>());
1342 }
1343
1344 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1345 static bool classof(const Instruction *I) {
1346 return I->getOpcode() == Instruction::FCmp;
1347 }
1348 static bool classof(const Value *V) {
1349 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1350 }
1351};
1352
1353class CallInst;
1354class InvokeInst;
1355
1356template <class T> struct CallBaseParent { using type = Instruction; };
1357
1358template <> struct CallBaseParent<InvokeInst> { using type = TerminatorInst; };
1359
1360//===----------------------------------------------------------------------===//
1361/// Base class for all callable instructions (InvokeInst and CallInst)
1362/// Holds everything related to calling a function, abstracting from the base
1363/// type @p BaseInstTy and the concrete instruction @p InstTy
1364///
1365template <class InstTy>
1366class CallBase : public CallBaseParent<InstTy>::type,
1367 public OperandBundleUser<InstTy, User::op_iterator> {
1368protected:
1369 AttributeList Attrs; ///< parameter attributes for callable
1370 FunctionType *FTy;
1371 using BaseInstTy = typename CallBaseParent<InstTy>::type;
1372
1373 template <class... ArgsTy>
1374 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1375 : BaseInstTy(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1376 bool hasDescriptor() const { return Value::HasDescriptor; }
1377
1378 using BaseInstTy::BaseInstTy;
1379
1380 using OperandBundleUser<InstTy,
1381 User::op_iterator>::isFnAttrDisallowedByOpBundle;
1382 using OperandBundleUser<InstTy, User::op_iterator>::getNumTotalBundleOperands;
1383 using OperandBundleUser<InstTy, User::op_iterator>::bundleOperandHasAttr;
1384 using Instruction::getSubclassDataFromInstruction;
1385 using Instruction::setInstructionSubclassData;
1386
1387public:
1388 using Instruction::getContext;
1389 using OperandBundleUser<InstTy, User::op_iterator>::hasOperandBundles;
1390 using OperandBundleUser<InstTy,
1391 User::op_iterator>::getBundleOperandsStartIndex;
1392
1393 static bool classof(const Instruction *I) {
1394 llvm_unreachable(::llvm::llvm_unreachable_internal("CallBase is not meant to be used as part of the classof hierarchy"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1395)
1395 "CallBase is not meant to be used as part of the classof hierarchy")::llvm::llvm_unreachable_internal("CallBase is not meant to be used as part of the classof hierarchy"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1395)
;
1396 }
1397
1398public:
1399 /// Return the parameter attributes for this call.
1400 ///
1401 AttributeList getAttributes() const { return Attrs; }
1402
1403 /// Set the parameter attributes for this call.
1404 ///
1405 void setAttributes(AttributeList A) { Attrs = A; }
1406
1407 FunctionType *getFunctionType() const { return FTy; }
1408
1409 void mutateFunctionType(FunctionType *FTy) {
1410 Value::mutateType(FTy->getReturnType());
1411 this->FTy = FTy;
1412 }
1413
1414 /// Return the number of call arguments.
1415 ///
1416 unsigned getNumArgOperands() const {
1417 return getNumOperands() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1418 }
1419
1420 /// getArgOperand/setArgOperand - Return/set the i-th call argument.
1421 ///
1422 Value *getArgOperand(unsigned i) const {
1423 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1423, __extension__ __PRETTY_FUNCTION__))
;
1424 return getOperand(i);
1425 }
1426 void setArgOperand(unsigned i, Value *v) {
1427 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1427, __extension__ __PRETTY_FUNCTION__))
;
1428 setOperand(i, v);
1429 }
1430
1431 /// Return the iterator pointing to the beginning of the argument list.
1432 User::op_iterator arg_begin() { return op_begin(); }
1433
1434 /// Return the iterator pointing to the end of the argument list.
1435 User::op_iterator arg_end() {
1436 // [ call args ], [ operand bundles ], callee
1437 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1438 }
1439
1440 /// Iteration adapter for range-for loops.
1441 iterator_range<User::op_iterator> arg_operands() {
1442 return make_range(arg_begin(), arg_end());
1443 }
1444
1445 /// Return the iterator pointing to the beginning of the argument list.
1446 User::const_op_iterator arg_begin() const { return op_begin(); }
1447
1448 /// Return the iterator pointing to the end of the argument list.
1449 User::const_op_iterator arg_end() const {
1450 // [ call args ], [ operand bundles ], callee
1451 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1452 }
1453
1454 /// Iteration adapter for range-for loops.
1455 iterator_range<User::const_op_iterator> arg_operands() const {
1456 return make_range(arg_begin(), arg_end());
1457 }
1458
1459 /// Wrappers for getting the \c Use of a call argument.
1460 const Use &getArgOperandUse(unsigned i) const {
1461 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1461, __extension__ __PRETTY_FUNCTION__))
;
1462 return User::getOperandUse(i);
1463 }
1464 Use &getArgOperandUse(unsigned i) {
1465 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1465, __extension__ __PRETTY_FUNCTION__))
;
1466 return User::getOperandUse(i);
1467 }
1468
1469 /// If one of the arguments has the 'returned' attribute, return its
1470 /// operand value. Otherwise, return nullptr.
1471 Value *getReturnedArgOperand() const {
1472 unsigned Index;
1473
1474 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
1475 return getArgOperand(Index - AttributeList::FirstArgIndex);
1476 if (const Function *F = getCalledFunction())
1477 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
1478 Index)
1479 return getArgOperand(Index - AttributeList::FirstArgIndex);
1480
1481 return nullptr;
1482 }
1483
1484 User::op_iterator op_begin() {
1485 return OperandTraits<CallBase>::op_begin(this);
1486 }
1487
1488 User::const_op_iterator op_begin() const {
1489 return OperandTraits<CallBase>::op_begin(const_cast<CallBase *>(this));
1490 }
1491
1492 User::op_iterator op_end() { return OperandTraits<CallBase>::op_end(this); }
1493
1494 User::const_op_iterator op_end() const {
1495 return OperandTraits<CallBase>::op_end(const_cast<CallBase *>(this));
1496 }
1497
1498 Value *getOperand(unsigned i_nocapture) const {
1499 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1500, __extension__ __PRETTY_FUNCTION__))
1500 "getOperand() out of range!")(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1500, __extension__ __PRETTY_FUNCTION__))
;
1501 return cast_or_null<Value>(OperandTraits<CallBase>::op_begin(
1502 const_cast<CallBase *>(this))[i_nocapture]
1503 .get());
1504 }
1505
1506 void setOperand(unsigned i_nocapture, Value *Val_nocapture) {
1507 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1508, __extension__ __PRETTY_FUNCTION__))
1508 "setOperand() out of range!")(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1508, __extension__ __PRETTY_FUNCTION__))
;
1509 OperandTraits<CallBase>::op_begin(this)[i_nocapture] = Val_nocapture;
1510 }
1511
1512 unsigned getNumOperands() const {
1513 return OperandTraits<CallBase>::operands(this);
1514 }
1515 template <int Idx_nocapture> Use &Op() {
1516 return User::OpFrom<Idx_nocapture>(this);
1517 }
1518 template <int Idx_nocapture> const Use &Op() const {
1519 return User::OpFrom<Idx_nocapture>(this);
1520 }
1521
1522 /// Return the function called, or null if this is an
1523 /// indirect function invocation.
1524 ///
1525 Function *getCalledFunction() const {
1526 return dyn_cast<Function>(Op<-InstTy::ArgOffset>());
1527 }
1528
1529 /// Determine whether this call has the given attribute.
1530 bool hasFnAttr(Attribute::AttrKind Kind) const {
1531 assert(Kind != Attribute::NoBuiltin &&(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1532, __extension__ __PRETTY_FUNCTION__))
1532 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin")(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1532, __extension__ __PRETTY_FUNCTION__))
;
1533 return hasFnAttrImpl(Kind);
1534 }
1535
1536 /// Determine whether this call has the given attribute.
1537 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1538
1539 /// getCallingConv/setCallingConv - Get or set the calling convention of this
1540 /// function call.
1541 CallingConv::ID getCallingConv() const {
1542 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
1543 }
1544 void setCallingConv(CallingConv::ID CC) {
1545 auto ID = static_cast<unsigned>(CC);
1546 assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention")(static_cast <bool> (!(ID & ~CallingConv::MaxID) &&
"Unsupported calling convention") ? void (0) : __assert_fail
("!(ID & ~CallingConv::MaxID) && \"Unsupported calling convention\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1546, __extension__ __PRETTY_FUNCTION__))
;
1547 setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
1548 (ID << 2));
1549 }
1550
1551
1552 /// adds the attribute to the list of attributes.
1553 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1554 AttributeList PAL = getAttributes();
1555 PAL = PAL.addAttribute(getContext(), i, Kind);
1556 setAttributes(PAL);
1557 }
1558
1559 /// adds the attribute to the list of attributes.
1560 void addAttribute(unsigned i, Attribute Attr) {
1561 AttributeList PAL = getAttributes();
1562 PAL = PAL.addAttribute(getContext(), i, Attr);
1563 setAttributes(PAL);
1564 }
1565
1566 /// Adds the attribute to the indicated argument
1567 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1568 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1568, __extension__ __PRETTY_FUNCTION__))
;
1569 AttributeList PAL = getAttributes();
1570 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1571 setAttributes(PAL);
1572 }
1573
1574 /// Adds the attribute to the indicated argument
1575 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1576 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1576, __extension__ __PRETTY_FUNCTION__))
;
1577 AttributeList PAL = getAttributes();
1578 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1579 setAttributes(PAL);
1580 }
1581
1582 /// removes the attribute from the list of attributes.
1583 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1584 AttributeList PAL = getAttributes();
1585 PAL = PAL.removeAttribute(getContext(), i, Kind);
1586 setAttributes(PAL);
1587 }
1588
1589 /// removes the attribute from the list of attributes.
1590 void removeAttribute(unsigned i, StringRef Kind) {
1591 AttributeList PAL = getAttributes();
1592 PAL = PAL.removeAttribute(getContext(), i, Kind);
1593 setAttributes(PAL);
1594 }
1595
1596 /// Removes the attribute from the given argument
1597 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1598 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1598, __extension__ __PRETTY_FUNCTION__))
;
1599 AttributeList PAL = getAttributes();
1600 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1601 setAttributes(PAL);
1602 }
1603
1604 /// Removes the attribute from the given argument
1605 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1606 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1606, __extension__ __PRETTY_FUNCTION__))
;
1607 AttributeList PAL = getAttributes();
1608 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1609 setAttributes(PAL);
1610 }
1611
1612 /// adds the dereferenceable attribute to the list of attributes.
1613 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1614 AttributeList PAL = getAttributes();
1615 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1616 setAttributes(PAL);
1617 }
1618
1619 /// adds the dereferenceable_or_null attribute to the list of
1620 /// attributes.
1621 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1622 AttributeList PAL = getAttributes();
1623 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1624 setAttributes(PAL);
1625 }
1626
1627 /// Determine whether the return value has the given attribute.
1628 bool hasRetAttr(Attribute::AttrKind Kind) const {
1629 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
1630 return true;
1631
1632 // Look at the callee, if available.
1633 if (const Function *F = getCalledFunction())
1634 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
1635 return false;
1636 }
1637
1638 /// Determine whether the argument or parameter has the given attribute.
1639 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1640 assert(ArgNo < getNumArgOperands() && "Param index out of bounds!")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Param index out of bounds!") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Param index out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1640, __extension__ __PRETTY_FUNCTION__))
;
1641
1642 if (Attrs.hasParamAttribute(ArgNo, Kind))
1643 return true;
1644 if (const Function *F = getCalledFunction())
1645 return F->getAttributes().hasParamAttribute(ArgNo, Kind);
1646 return false;
1647 }
1648
1649 /// Get the attribute of a given kind at a position.
1650 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1651 return getAttributes().getAttribute(i, Kind);
1652 }
1653
1654 /// Get the attribute of a given kind at a position.
1655 Attribute getAttribute(unsigned i, StringRef Kind) const {
1656 return getAttributes().getAttribute(i, Kind);
1657 }
1658
1659 /// Get the attribute of a given kind from a given arg
1660 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1661 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1661, __extension__ __PRETTY_FUNCTION__))
;
1662 return getAttributes().getParamAttr(ArgNo, Kind);
1663 }
1664
1665 /// Get the attribute of a given kind from a given arg
1666 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1667 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1667, __extension__ __PRETTY_FUNCTION__))
;
1668 return getAttributes().getParamAttr(ArgNo, Kind);
1669 }
1670 /// Return true if the data operand at index \p i has the attribute \p
1671 /// A.
1672 ///
1673 /// Data operands include call arguments and values used in operand bundles,
1674 /// but does not include the callee operand. This routine dispatches to the
1675 /// underlying AttributeList or the OperandBundleUser as appropriate.
1676 ///
1677 /// The index \p i is interpreted as
1678 ///
1679 /// \p i == Attribute::ReturnIndex -> the return value
1680 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1681 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1682 /// (\p i - 1) in the operand list.
1683 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1684 // There are getNumOperands() - (InstTy::ArgOffset - 1) data operands.
1685 // The last operand is the callee.
1686 assert(i < (getNumOperands() - InstTy::ArgOffset + 1) &&(static_cast <bool> (i < (getNumOperands() - InstTy::
ArgOffset + 1) && "Data operand index out of bounds!"
) ? void (0) : __assert_fail ("i < (getNumOperands() - InstTy::ArgOffset + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1687, __extension__ __PRETTY_FUNCTION__))
1687 "Data operand index out of bounds!")(static_cast <bool> (i < (getNumOperands() - InstTy::
ArgOffset + 1) && "Data operand index out of bounds!"
) ? void (0) : __assert_fail ("i < (getNumOperands() - InstTy::ArgOffset + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1687, __extension__ __PRETTY_FUNCTION__))
;
1688
1689 // The attribute A can either be directly specified, if the operand in
1690 // question is a call argument; or be indirectly implied by the kind of its
1691 // containing operand bundle, if the operand is a bundle operand.
1692
1693 if (i == AttributeList::ReturnIndex)
1694 return hasRetAttr(Kind);
1695
1696 // FIXME: Avoid these i - 1 calculations and update the API to use
1697 // zero-based indices.
1698 if (i < (getNumArgOperands() + 1))
1699 return paramHasAttr(i - 1, Kind);
1700
1701 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&(static_cast <bool> (hasOperandBundles() && i >=
(getBundleOperandsStartIndex() + 1) && "Must be either a call argument or an operand bundle!"
) ? void (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1702, __extension__ __PRETTY_FUNCTION__))
1702 "Must be either a call argument or an operand bundle!")(static_cast <bool> (hasOperandBundles() && i >=
(getBundleOperandsStartIndex() + 1) && "Must be either a call argument or an operand bundle!"
) ? void (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1702, __extension__ __PRETTY_FUNCTION__))
;
1703 return bundleOperandHasAttr(i - 1, Kind);
1704 }
1705
1706 /// Extract the alignment of the return value.
1707 unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
1708
1709 /// Extract the alignment for a call or parameter (0=unknown).
1710 unsigned getParamAlignment(unsigned ArgNo) const {
1711 return Attrs.getParamAlignment(ArgNo);
1712 }
1713
1714 /// Extract the number of dereferenceable bytes for a call or
1715 /// parameter (0=unknown).
1716 uint64_t getDereferenceableBytes(unsigned i) const {
1717 return Attrs.getDereferenceableBytes(i);
1718 }
1719
1720 /// Extract the number of dereferenceable_or_null bytes for a call or
1721 /// parameter (0=unknown).
1722 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1723 return Attrs.getDereferenceableOrNullBytes(i);
1724 }
1725
1726 /// Determine if the return value is marked with NoAlias attribute.
1727 bool returnDoesNotAlias() const {
1728 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1729 }
1730
1731 /// Return true if the call should not be treated as a call to a
1732 /// builtin.
1733 bool isNoBuiltin() const {
1734 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1735 !hasFnAttrImpl(Attribute::Builtin);
1736 }
1737
1738 /// Determine if the call requires strict floating point semantics.
1739 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1740
1741 /// Return true if the call should not be inlined.
1742 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1743 void setIsNoInline() {
1744 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1745 }
1746 /// Determine if the call does not access memory.
1747 bool doesNotAccessMemory() const {
1748 return hasFnAttr(Attribute::ReadNone);
1749 }
1750 void setDoesNotAccessMemory() {
1751 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1752 }
1753
1754 /// Determine if the call does not access or only reads memory.
1755 bool onlyReadsMemory() const {
1756 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1757 }
1758 void setOnlyReadsMemory() {
1759 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1760 }
1761
1762 /// Determine if the call does not access or only writes memory.
1763 bool doesNotReadMemory() const {
1764 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1765 }
1766 void setDoesNotReadMemory() {
1767 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1768 }
1769
1770 /// Determine if the call can access memmory only using pointers based
1771 /// on its arguments.
1772 bool onlyAccessesArgMemory() const {
1773 return hasFnAttr(Attribute::ArgMemOnly);
1774 }
1775 void setOnlyAccessesArgMemory() {
1776 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1777 }
1778
1779 /// Determine if the function may only access memory that is
1780 /// inaccessible from the IR.
1781 bool onlyAccessesInaccessibleMemory() const {
1782 return hasFnAttr(Attribute::InaccessibleMemOnly);
1783 }
1784 void setOnlyAccessesInaccessibleMemory() {
1785 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1786 }
1787
1788 /// Determine if the function may only access memory that is
1789 /// either inaccessible from the IR or pointed to by its arguments.
1790 bool onlyAccessesInaccessibleMemOrArgMem() const {
1791 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1792 }
1793 void setOnlyAccessesInaccessibleMemOrArgMem() {
1794 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOrArgMemOnly);
1795 }
1796 /// Determine if the call cannot return.
1797 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1798 void setDoesNotReturn() {
1799 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1800 }
1801
1802 /// Determine if the call should not perform indirect branch tracking.
1803 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1804
1805 /// Determine if the call cannot unwind.
1806 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1807 void setDoesNotThrow() {
1808 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1809 }
1810
1811 /// Determine if the invoke cannot be duplicated.
1812 bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
1813 void setCannotDuplicate() {
1814 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1815 }
1816
1817 /// Determine if the invoke is convergent
1818 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1819 void setConvergent() {
1820 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1821 }
1822 void setNotConvergent() {
1823 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1824 }
1825
1826 /// Determine if the call returns a structure through first
1827 /// pointer argument.
1828 bool hasStructRetAttr() const {
1829 if (getNumArgOperands() == 0)
1830 return false;
1831
1832 // Be friendly and also check the callee.
1833 return paramHasAttr(0, Attribute::StructRet);
1834 }
1835
1836 /// Determine if any call argument is an aggregate passed by value.
1837 bool hasByValArgument() const {
1838 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1839 }
1840 /// Get a pointer to the function that is invoked by this
1841 /// instruction.
1842 const Value *getCalledValue() const { return Op<-InstTy::ArgOffset>(); }
1843 Value *getCalledValue() { return Op<-InstTy::ArgOffset>(); }
1844
1845 /// Set the function called.
1846 void setCalledFunction(Value* Fn) {
1847 setCalledFunction(
1848 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
1849 Fn);
1850 }
1851 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1852 this->FTy = FTy;
1853 assert(FTy == cast<FunctionType>((static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1854, __extension__ __PRETTY_FUNCTION__))
1854 cast<PointerType>(Fn->getType())->getElementType()))(static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1854, __extension__ __PRETTY_FUNCTION__))
;
1855 Op<-InstTy::ArgOffset>() = Fn;
1856 }
1857
1858protected:
1859 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
1860 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
1861 return true;
1862
1863 // Operand bundles override attributes on the called function, but don't
1864 // override attributes directly present on the call instruction.
1865 if (isFnAttrDisallowedByOpBundle(Kind))
1866 return false;
1867
1868 if (const Function *F = getCalledFunction())
1869 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
1870 Kind);
1871 return false;
1872 }
1873};
1874
1875//===----------------------------------------------------------------------===//
1876/// This class represents a function call, abstracting a target
1877/// machine's calling convention. This class uses low bit of the SubClassData
1878/// field to indicate whether or not this is a tail call. The rest of the bits
1879/// hold the calling convention of the call.
1880///
1881class CallInst : public CallBase<CallInst> {
1882 friend class OperandBundleUser<CallInst, User::op_iterator>;
1883
1884 CallInst(const CallInst &CI);
1885
1886 /// Construct a CallInst given a range of arguments.
1887 /// Construct a CallInst from a range of arguments
1888 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1889 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1890 Instruction *InsertBefore);
1891
1892 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1893 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1894 Instruction *InsertBefore)
1895 : CallInst(cast<FunctionType>(
1896 cast<PointerType>(Func->getType())->getElementType()),
1897 Func, Args, Bundles, NameStr, InsertBefore) {}
1898
1899 inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
1900 Instruction *InsertBefore)
1901 : CallInst(Func, Args, None, NameStr, InsertBefore) {}
1902
1903 /// Construct a CallInst given a range of arguments.
1904 /// Construct a CallInst from a range of arguments
1905 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1906 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1907 BasicBlock *InsertAtEnd);
1908
1909 explicit CallInst(Value *F, const Twine &NameStr, Instruction *InsertBefore);
1910
1911 CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
1912
1913 void init(Value *Func, ArrayRef<Value *> Args,
1914 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
1915 init(cast<FunctionType>(
1916 cast<PointerType>(Func->getType())->getElementType()),
1917 Func, Args, Bundles, NameStr);
1918 }
1919 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1920 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1921 void init(Value *Func, const Twine &NameStr);
1922
1923protected:
1924 // Note: Instruction needs to be a friend here to call cloneImpl.
1925 friend class Instruction;
1926
1927 CallInst *cloneImpl() const;
1928
1929public:
1930 static constexpr int ArgOffset = 1;
1931
1932 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1933 ArrayRef<OperandBundleDef> Bundles = None,
1934 const Twine &NameStr = "",
1935 Instruction *InsertBefore = nullptr) {
1936 return Create(cast<FunctionType>(
1937 cast<PointerType>(Func->getType())->getElementType()),
1938 Func, Args, Bundles, NameStr, InsertBefore);
1939 }
1940
1941 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1942 const Twine &NameStr,
1943 Instruction *InsertBefore = nullptr) {
1944 return Create(cast<FunctionType>(
1945 cast<PointerType>(Func->getType())->getElementType()),
1946 Func, Args, None, NameStr, InsertBefore);
1947 }
1948
1949 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1950 const Twine &NameStr,
1951 Instruction *InsertBefore = nullptr) {
1952 return new (unsigned(Args.size() + 1))
1953 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1954 }
1955
1956 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1957 ArrayRef<OperandBundleDef> Bundles = None,
1958 const Twine &NameStr = "",
1959 Instruction *InsertBefore = nullptr) {
1960 const unsigned TotalOps =
1961 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1962 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1963
1964 return new (TotalOps, DescriptorBytes)
1965 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1966 }
1967
1968 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1969 ArrayRef<OperandBundleDef> Bundles,
1970 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1971 const unsigned TotalOps =
1972 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1973 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1974
1975 return new (TotalOps, DescriptorBytes)
1976 CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
1977 }
1978
1979 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1980 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1981 return new (unsigned(Args.size() + 1))
1982 CallInst(Func, Args, None, NameStr, InsertAtEnd);
1983 }
1984
1985 static CallInst *Create(Value *F, const Twine &NameStr = "",
1986 Instruction *InsertBefore = nullptr) {
1987 return new (1) CallInst(F, NameStr, InsertBefore);
1988 }
1989
1990 static CallInst *Create(Value *F, const Twine &NameStr,
1991 BasicBlock *InsertAtEnd) {
1992 return new (1) CallInst(F, NameStr, InsertAtEnd);
1993 }
1994
1995 /// Create a clone of \p CI with a different set of operand bundles and
1996 /// insert it before \p InsertPt.
1997 ///
1998 /// The returned call instruction is identical \p CI in every way except that
1999 /// the operand bundles for the new instruction are set to the operand bundles
2000 /// in \p Bundles.
2001 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
2002 Instruction *InsertPt = nullptr);
2003
2004 /// Generate the IR for a call to malloc:
2005 /// 1. Compute the malloc call's argument as the specified type's size,
2006 /// possibly multiplied by the array size if the array size is not
2007 /// constant 1.
2008 /// 2. Call malloc with that argument.
2009 /// 3. Bitcast the result of the malloc call to the specified type.
2010 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2011 Type *AllocTy, Value *AllocSize,
2012 Value *ArraySize = nullptr,
2013 Function *MallocF = nullptr,
2014 const Twine &Name = "");
2015 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2016 Type *AllocTy, Value *AllocSize,
2017 Value *ArraySize = nullptr,
2018 Function *MallocF = nullptr,
2019 const Twine &Name = "");
2020 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2021 Type *AllocTy, Value *AllocSize,
2022 Value *ArraySize = nullptr,
2023 ArrayRef<OperandBundleDef> Bundles = None,
2024 Function *MallocF = nullptr,
2025 const Twine &Name = "");
2026 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2027 Type *AllocTy, Value *AllocSize,
2028 Value *ArraySize = nullptr,
2029 ArrayRef<OperandBundleDef> Bundles = None,
2030 Function *MallocF = nullptr,
2031 const Twine &Name = "");
2032 /// Generate the IR for a call to the builtin free function.
2033 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
2034 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
2035 static Instruction *CreateFree(Value *Source,
2036 ArrayRef<OperandBundleDef> Bundles,
2037 Instruction *InsertBefore);
2038 static Instruction *CreateFree(Value *Source,
2039 ArrayRef<OperandBundleDef> Bundles,
2040 BasicBlock *InsertAtEnd);
2041
2042 // Note that 'musttail' implies 'tail'.
2043 enum TailCallKind {
2044 TCK_None = 0,
2045 TCK_Tail = 1,
2046 TCK_MustTail = 2,
2047 TCK_NoTail = 3
2048 };
2049 TailCallKind getTailCallKind() const {
2050 return TailCallKind(getSubclassDataFromInstruction() & 3);
2051 }
2052
2053 bool isTailCall() const {
2054 unsigned Kind = getSubclassDataFromInstruction() & 3;
2055 return Kind == TCK_Tail || Kind == TCK_MustTail;
2056 }
2057
2058 bool isMustTailCall() const {
2059 return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
2060 }
2061
2062 bool isNoTailCall() const {
2063 return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
2064 }
2065
2066 void setTailCall(bool isTC = true) {
2067 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2068 unsigned(isTC ? TCK_Tail : TCK_None));
2069 }
2070
2071 void setTailCallKind(TailCallKind TCK) {
2072 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2073 unsigned(TCK));
2074 }
2075
2076 /// Return true if the call can return twice
2077 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
2078 void setCanReturnTwice() {
2079 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
2080 }
2081
2082 /// Check if this call is an inline asm statement.
2083 bool isInlineAsm() const { return isa<InlineAsm>(Op<-1>()); }
2084
2085 // Methods for support type inquiry through isa, cast, and dyn_cast:
2086 static bool classof(const Instruction *I) {
2087 return I->getOpcode() == Instruction::Call;
2088 }
2089 static bool classof(const Value *V) {
2090 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2091 }
2092
2093private:
2094 // Shadow Instruction::setInstructionSubclassData with a private forwarding
2095 // method so that subclasses cannot accidentally use it.
2096 void setInstructionSubclassData(unsigned short D) {
2097 Instruction::setInstructionSubclassData(D);
2098 }
2099};
2100
2101template <>
2102struct OperandTraits<CallBase<CallInst>>
2103 : public VariadicOperandTraits<CallBase<CallInst>, 1> {};
2104
2105CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
2106 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2107 BasicBlock *InsertAtEnd)
2108 : CallBase<CallInst>(
2109 cast<FunctionType>(
2110 cast<PointerType>(Func->getType())->getElementType())
2111 ->getReturnType(),
2112 Instruction::Call,
2113 OperandTraits<CallBase<CallInst>>::op_end(this) -
2114 (Args.size() + CountBundleInputs(Bundles) + 1),
2115 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
2116 init(Func, Args, Bundles, NameStr);
2117}
2118
2119CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
2120 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2121 Instruction *InsertBefore)
2122 : CallBase<CallInst>(Ty->getReturnType(), Instruction::Call,
2123 OperandTraits<CallBase<CallInst>>::op_end(this) -
2124 (Args.size() + CountBundleInputs(Bundles) + 1),
2125 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
2126 InsertBefore) {
2127 init(Ty, Func, Args, Bundles, NameStr);
2128}
2129
2130//===----------------------------------------------------------------------===//
2131// SelectInst Class
2132//===----------------------------------------------------------------------===//
2133
2134/// This class represents the LLVM 'select' instruction.
2135///
2136class SelectInst : public Instruction {
2137 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2138 Instruction *InsertBefore)
2139 : Instruction(S1->getType(), Instruction::Select,
2140 &Op<0>(), 3, InsertBefore) {
2141 init(C, S1, S2);
2142 setName(NameStr);
2143 }
2144
2145 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2146 BasicBlock *InsertAtEnd)
2147 : Instruction(S1->getType(), Instruction::Select,
2148 &Op<0>(), 3, InsertAtEnd) {
2149 init(C, S1, S2);
2150 setName(NameStr);
2151 }
2152
2153 void init(Value *C, Value *S1, Value *S2) {
2154 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2154, __extension__ __PRETTY_FUNCTION__))
;
2155 Op<0>() = C;
2156 Op<1>() = S1;
2157 Op<2>() = S2;
2158 }
2159
2160protected:
2161 // Note: Instruction needs to be a friend here to call cloneImpl.
2162 friend class Instruction;
2163
2164 SelectInst *cloneImpl() const;
2165
2166public:
2167 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2168 const Twine &NameStr = "",
2169 Instruction *InsertBefore = nullptr,
2170 Instruction *MDFrom = nullptr) {
2171 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
2172 if (MDFrom)
2173 Sel->copyMetadata(*MDFrom);
2174 return Sel;
2175 }
2176
2177 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2178 const Twine &NameStr,
2179 BasicBlock *InsertAtEnd) {
2180 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
2181 }
2182
2183 const Value *getCondition() const { return Op<0>(); }
2184 const Value *getTrueValue() const { return Op<1>(); }
2185 const Value *getFalseValue() const { return Op<2>(); }
2186 Value *getCondition() { return Op<0>(); }
2187 Value *getTrueValue() { return Op<1>(); }
2188 Value *getFalseValue() { return Op<2>(); }
2189
2190 void setCondition(Value *V) { Op<0>() = V; }
2191 void setTrueValue(Value *V) { Op<1>() = V; }
2192 void setFalseValue(Value *V) { Op<2>() = V; }
2193
2194 /// Return a string if the specified operands are invalid
2195 /// for a select operation, otherwise return null.
2196 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
2197
2198 /// Transparently provide more efficient getOperand methods.
2199 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2200
2201 OtherOps getOpcode() const {
2202 return static_cast<OtherOps>(Instruction::getOpcode());
2203 }
2204
2205 // Methods for support type inquiry through isa, cast, and dyn_cast:
2206 static bool classof(const Instruction *I) {
2207 return I->getOpcode() == Instruction::Select;
2208 }
2209 static bool classof(const Value *V) {
2210 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2211 }
2212};
2213
2214template <>
2215struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
2216};
2217
2218DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SelectInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2218, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SelectInst>::op_begin(const_cast
<SelectInst*>(this))[i_nocapture].get()); } void SelectInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SelectInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2218, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SelectInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SelectInst::getNumOperands() const { return OperandTraits
<SelectInst>::operands(this); } template <int Idx_nocapture
> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2219
2220//===----------------------------------------------------------------------===//
2221// VAArgInst Class
2222//===----------------------------------------------------------------------===//
2223
2224/// This class represents the va_arg llvm instruction, which returns
2225/// an argument of the specified type given a va_list and increments that list
2226///
2227class VAArgInst : public UnaryInstruction {
2228protected:
2229 // Note: Instruction needs to be a friend here to call cloneImpl.
2230 friend class Instruction;
2231
2232 VAArgInst *cloneImpl() const;
2233
2234public:
2235 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
2236 Instruction *InsertBefore = nullptr)
2237 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
2238 setName(NameStr);
2239 }
2240
2241 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
2242 BasicBlock *InsertAtEnd)
2243 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
2244 setName(NameStr);
2245 }
2246
2247 Value *getPointerOperand() { return getOperand(0); }
2248 const Value *getPointerOperand() const { return getOperand(0); }
2249 static unsigned getPointerOperandIndex() { return 0U; }
2250
2251 // Methods for support type inquiry through isa, cast, and dyn_cast:
2252 static bool classof(const Instruction *I) {
2253 return I->getOpcode() == VAArg;
2254 }
2255 static bool classof(const Value *V) {
2256 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2257 }
2258};
2259
2260//===----------------------------------------------------------------------===//
2261// ExtractElementInst Class
2262//===----------------------------------------------------------------------===//
2263
2264/// This instruction extracts a single (scalar)
2265/// element from a VectorType value
2266///
2267class ExtractElementInst : public Instruction {
2268 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
2269 Instruction *InsertBefore = nullptr);
2270 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
2271 BasicBlock *InsertAtEnd);
2272
2273protected:
2274 // Note: Instruction needs to be a friend here to call cloneImpl.
2275 friend class Instruction;
2276
2277 ExtractElementInst *cloneImpl() const;
2278
2279public:
2280 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2281 const Twine &NameStr = "",
2282 Instruction *InsertBefore = nullptr) {
2283 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
2284 }
2285
2286 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2287 const Twine &NameStr,
2288 BasicBlock *InsertAtEnd) {
2289 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
2290 }
2291
2292 /// Return true if an extractelement instruction can be
2293 /// formed with the specified operands.
2294 static bool isValidOperands(const Value *Vec, const Value *Idx);
2295
2296 Value *getVectorOperand() { return Op<0>(); }
2297 Value *getIndexOperand() { return Op<1>(); }
2298 const Value *getVectorOperand() const { return Op<0>(); }
2299 const Value *getIndexOperand() const { return Op<1>(); }
2300
2301 VectorType *getVectorOperandType() const {
2302 return cast<VectorType>(getVectorOperand()->getType());
2303 }
2304
2305 /// Transparently provide more efficient getOperand methods.
2306 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2307
2308 // Methods for support type inquiry through isa, cast, and dyn_cast:
2309 static bool classof(const Instruction *I) {
2310 return I->getOpcode() == Instruction::ExtractElement;
2311 }
2312 static bool classof(const Value *V) {
2313 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2314 }
2315};
2316
2317template <>
2318struct OperandTraits<ExtractElementInst> :
2319 public FixedNumOperandTraits<ExtractElementInst, 2> {
2320};
2321
2322DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast <bool> (i_nocapture < OperandTraits<
ExtractElementInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2322, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ExtractElementInst>::op_begin
(const_cast<ExtractElementInst*>(this))[i_nocapture].get
()); } void ExtractElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ExtractElementInst>::operands(this)
&& "setOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2322, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ExtractElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ExtractElementInst::getNumOperands() const { return
OperandTraits<ExtractElementInst>::operands(this); } template
<int Idx_nocapture> Use &ExtractElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ExtractElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2323
2324//===----------------------------------------------------------------------===//
2325// InsertElementInst Class
2326//===----------------------------------------------------------------------===//
2327
2328/// This instruction inserts a single (scalar)
2329/// element into a VectorType value
2330///
2331class InsertElementInst : public Instruction {
2332 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
2333 const Twine &NameStr = "",
2334 Instruction *InsertBefore = nullptr);
2335 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
2336 BasicBlock *InsertAtEnd);
2337
2338protected:
2339 // Note: Instruction needs to be a friend here to call cloneImpl.
2340 friend class Instruction;
2341
2342 InsertElementInst *cloneImpl() const;
2343
2344public:
2345 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2346 const Twine &NameStr = "",
2347 Instruction *InsertBefore = nullptr) {
2348 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
2349 }
2350
2351 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2352 const Twine &NameStr,
2353 BasicBlock *InsertAtEnd) {
2354 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
2355 }
2356
2357 /// Return true if an insertelement instruction can be
2358 /// formed with the specified operands.
2359 static bool isValidOperands(const Value *Vec, const Value *NewElt,
2360 const Value *Idx);
2361
2362 /// Overload to return most specific vector type.
2363 ///
2364 VectorType *getType() const {
2365 return cast<VectorType>(Instruction::getType());
2366 }
2367
2368 /// Transparently provide more efficient getOperand methods.
2369 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2370
2371 // Methods for support type inquiry through isa, cast, and dyn_cast:
2372 static bool classof(const Instruction *I) {
2373 return I->getOpcode() == Instruction::InsertElement;
2374 }
2375 static bool classof(const Value *V) {
2376 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2377 }
2378};
2379
2380template <>
2381struct OperandTraits<InsertElementInst> :
2382 public FixedNumOperandTraits<InsertElementInst, 3> {
2383};
2384
2385DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2385, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertElementInst>::op_begin
(const_cast<InsertElementInst*>(this))[i_nocapture].get
()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2385, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertElementInst::getNumOperands() const { return
OperandTraits<InsertElementInst>::operands(this); } template
<int Idx_nocapture> Use &InsertElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &InsertElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2386
2387//===----------------------------------------------------------------------===//
2388// ShuffleVectorInst Class
2389//===----------------------------------------------------------------------===//
2390
2391/// This instruction constructs a fixed permutation of two
2392/// input vectors.
2393///
2394class ShuffleVectorInst : public Instruction {
2395protected:
2396 // Note: Instruction needs to be a friend here to call cloneImpl.
2397 friend class Instruction;
2398
2399 ShuffleVectorInst *cloneImpl() const;
2400
2401public:
2402 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2403 const Twine &NameStr = "",
2404 Instruction *InsertBefor = nullptr);
2405 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2406 const Twine &NameStr, BasicBlock *InsertAtEnd);
2407
2408 // allocate space for exactly three operands
2409 void *operator new(size_t s) {
2410 return User::operator new(s, 3);
2411 }
2412
2413 /// Return true if a shufflevector instruction can be
2414 /// formed with the specified operands.
2415 static bool isValidOperands(const Value *V1, const Value *V2,
2416 const Value *Mask);
2417
2418 /// Overload to return most specific vector type.
2419 ///
2420 VectorType *getType() const {
2421 return cast<VectorType>(Instruction::getType());
2422 }
2423
2424 /// Transparently provide more efficient getOperand methods.
2425 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2426
2427 Constant *getMask() const {
2428 return cast<Constant>(getOperand(2));
2429 }
2430
2431 /// Return the shuffle mask value for the specified element of the mask.
2432 /// Return -1 if the element is undef.
2433 static int getMaskValue(const Constant *Mask, unsigned Elt);
2434
2435 /// Return the shuffle mask value of this instruction for the given element
2436 /// index. Return -1 if the element is undef.
2437 int getMaskValue(unsigned Elt) const {
2438 return getMaskValue(getMask(), Elt);
2439 }
2440
2441 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2442 /// elements of the mask are returned as -1.
2443 static void getShuffleMask(const Constant *Mask,
2444 SmallVectorImpl<int> &Result);
2445
2446 /// Return the mask for this instruction as a vector of integers. Undefined
2447 /// elements of the mask are returned as -1.
2448 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2449 return getShuffleMask(getMask(), Result);
2450 }
2451
2452 SmallVector<int, 16> getShuffleMask() const {
2453 SmallVector<int, 16> Mask;
2454 getShuffleMask(Mask);
2455 return Mask;
2456 }
2457
2458 /// Return true if this shuffle returns a vector with a different number of
2459 /// elements than its source elements.
2460 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2>
2461 bool changesLength() const {
2462 unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
2463 unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
2464 return NumSourceElts != NumMaskElts;
2465 }
2466
2467 /// Return true if this shuffle mask chooses elements from exactly one source
2468 /// vector.
2469 /// Example: <7,5,undef,7>
2470 /// This assumes that vector operands are the same length as the mask.
2471 static bool isSingleSourceMask(ArrayRef<int> Mask);
2472 static bool isSingleSourceMask(const Constant *Mask) {
2473 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2473, __extension__ __PRETTY_FUNCTION__))
;
2474 SmallVector<int, 16> MaskAsInts;
2475 getShuffleMask(Mask, MaskAsInts);
2476 return isSingleSourceMask(MaskAsInts);
2477 }
2478
2479 /// Return true if this shuffle chooses elements from exactly one source
2480 /// vector without changing the length of that vector.
2481 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2482 /// TODO: Optionally allow length-changing shuffles.
2483 bool isSingleSource() const {
2484 return !changesLength() && isSingleSourceMask(getMask());
2485 }
2486
2487 /// Return true if this shuffle mask chooses elements from exactly one source
2488 /// vector without lane crossings. A shuffle using this mask is not
2489 /// necessarily a no-op because it may change the number of elements from its
2490 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2491 /// Example: <undef,undef,2,3>
2492 static bool isIdentityMask(ArrayRef<int> Mask);
2493 static bool isIdentityMask(const Constant *Mask) {
2494 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2494, __extension__ __PRETTY_FUNCTION__))
;
2495 SmallVector<int, 16> MaskAsInts;
2496 getShuffleMask(Mask, MaskAsInts);
2497 return isIdentityMask(MaskAsInts);
2498 }
2499
2500 /// Return true if this shuffle mask chooses elements from exactly one source
2501 /// vector without lane crossings and does not change the number of elements
2502 /// from its input vectors.
2503 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2504 /// TODO: Optionally allow length-changing shuffles.
2505 bool isIdentity() const {
2506 return !changesLength() && isIdentityMask(getShuffleMask());
2507 }
2508
2509 /// Return true if this shuffle mask chooses elements from its source vectors
2510 /// without lane crossings. A shuffle using this mask would be
2511 /// equivalent to a vector select with a constant condition operand.
2512 /// Example: <4,1,6,undef>
2513 /// This returns false if the mask does not choose from both input vectors.
2514 /// In that case, the shuffle is better classified as an identity shuffle.
2515 /// This assumes that vector operands are the same length as the mask
2516 /// (a length-changing shuffle can never be equivalent to a vector select).
2517 static bool isSelectMask(ArrayRef<int> Mask);
2518 static bool isSelectMask(const Constant *Mask) {
2519 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2519, __extension__ __PRETTY_FUNCTION__))
;
2520 SmallVector<int, 16> MaskAsInts;
2521 getShuffleMask(Mask, MaskAsInts);
2522 return isSelectMask(MaskAsInts);
2523 }
2524
2525 /// Return true if this shuffle chooses elements from its source vectors
2526 /// without lane crossings and all operands have the same number of elements.
2527 /// In other words, this shuffle is equivalent to a vector select with a
2528 /// constant condition operand.
2529 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2530 /// This returns false if the mask does not choose from both input vectors.
2531 /// In that case, the shuffle is better classified as an identity shuffle.
2532 /// TODO: Optionally allow length-changing shuffles.
2533 bool isSelect() const {
2534 return !changesLength() && isSelectMask(getMask());
2535 }
2536
2537 /// Return true if this shuffle mask swaps the order of elements from exactly
2538 /// one source vector.
2539 /// Example: <7,6,undef,4>
2540 /// This assumes that vector operands are the same length as the mask.
2541 static bool isReverseMask(ArrayRef<int> Mask);
2542 static bool isReverseMask(const Constant *Mask) {
2543 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2543, __extension__ __PRETTY_FUNCTION__))
;
2544 SmallVector<int, 16> MaskAsInts;
2545 getShuffleMask(Mask, MaskAsInts);
2546 return isReverseMask(MaskAsInts);
2547 }
2548
2549 /// Return true if this shuffle swaps the order of elements from exactly
2550 /// one source vector.
2551 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2552 /// TODO: Optionally allow length-changing shuffles.
2553 bool isReverse() const {
2554 return !changesLength() && isReverseMask(getMask());
2555 }
2556
2557 /// Return true if this shuffle mask chooses all elements with the same value
2558 /// as the first element of exactly one source vector.
2559 /// Example: <4,undef,undef,4>
2560 /// This assumes that vector operands are the same length as the mask.
2561 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2562 static bool isZeroEltSplatMask(const Constant *Mask) {
2563 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2563, __extension__ __PRETTY_FUNCTION__))
;
2564 SmallVector<int, 16> MaskAsInts;
2565 getShuffleMask(Mask, MaskAsInts);
2566 return isZeroEltSplatMask(MaskAsInts);
2567 }
2568
2569 /// Return true if all elements of this shuffle are the same value as the
2570 /// first element of exactly one source vector without changing the length
2571 /// of that vector.
2572 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2573 /// TODO: Optionally allow length-changing shuffles.
2574 /// TODO: Optionally allow splats from other elements.
2575 bool isZeroEltSplat() const {
2576 return !changesLength() && isZeroEltSplatMask(getMask());
2577 }
2578
2579 /// Return true if this shuffle mask is a transpose mask.
2580 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2581 /// even- or odd-numbered vector elements from two n-dimensional source
2582 /// vectors and write each result into consecutive elements of an
2583 /// n-dimensional destination vector. Two shuffles are necessary to complete
2584 /// the transpose, one for the even elements and another for the odd elements.
2585 /// This description closely follows how the TRN1 and TRN2 AArch64
2586 /// instructions operate.
2587 ///
2588 /// For example, a simple 2x2 matrix can be transposed with:
2589 ///
2590 /// ; Original matrix
2591 /// m0 = < a, b >
2592 /// m1 = < c, d >
2593 ///
2594 /// ; Transposed matrix
2595 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2596 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2597 ///
2598 /// For matrices having greater than n columns, the resulting nx2 transposed
2599 /// matrix is stored in two result vectors such that one vector contains
2600 /// interleaved elements from all the even-numbered rows and the other vector
2601 /// contains interleaved elements from all the odd-numbered rows. For example,
2602 /// a 2x4 matrix can be transposed with:
2603 ///
2604 /// ; Original matrix
2605 /// m0 = < a, b, c, d >
2606 /// m1 = < e, f, g, h >
2607 ///
2608 /// ; Transposed matrix
2609 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2610 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2611 static bool isTransposeMask(ArrayRef<int> Mask);
2612 static bool isTransposeMask(const Constant *Mask) {
2613 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2613, __extension__ __PRETTY_FUNCTION__))
;
2614 SmallVector<int, 16> MaskAsInts;
2615 getShuffleMask(Mask, MaskAsInts);
2616 return isTransposeMask(MaskAsInts);
2617 }
2618
2619 /// Return true if this shuffle transposes the elements of its inputs without
2620 /// changing the length of the vectors. This operation may also be known as a
2621 /// merge or interleave. See the description for isTransposeMask() for the
2622 /// exact specification.
2623 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2624 bool isTranspose() const {
2625 return !changesLength() && isTransposeMask(getMask());
2626 }
2627
2628 /// Change values in a shuffle permute mask assuming the two vector operands
2629 /// of length InVecNumElts have swapped position.
2630 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2631 unsigned InVecNumElts) {
2632 for (int &Idx : Mask) {
2633 if (Idx == -1)
2634 continue;
2635 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2636 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2637, __extension__ __PRETTY_FUNCTION__))
2637 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2637, __extension__ __PRETTY_FUNCTION__))
;
2638 }
2639 }
2640
2641 // Methods for support type inquiry through isa, cast, and dyn_cast:
2642 static bool classof(const Instruction *I) {
2643 return I->getOpcode() == Instruction::ShuffleVector;
2644 }
2645 static bool classof(const Value *V) {
2646 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2647 }
2648};
2649
2650template <>
2651struct OperandTraits<ShuffleVectorInst> :
2652 public FixedNumOperandTraits<ShuffleVectorInst, 3> {
2653};
2654
2655DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2655, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ShuffleVectorInst>::op_begin
(const_cast<ShuffleVectorInst*>(this))[i_nocapture].get
()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2655, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ShuffleVectorInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ShuffleVectorInst::getNumOperands() const { return
OperandTraits<ShuffleVectorInst>::operands(this); } template
<int Idx_nocapture> Use &ShuffleVectorInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ShuffleVectorInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2656
2657//===----------------------------------------------------------------------===//
2658// ExtractValueInst Class
2659//===----------------------------------------------------------------------===//
2660
2661/// This instruction extracts a struct member or array
2662/// element value from an aggregate value.
2663///
2664class ExtractValueInst : public UnaryInstruction {
2665 SmallVector<unsigned, 4> Indices;
2666
2667 ExtractValueInst(const ExtractValueInst &EVI);
2668
2669 /// Constructors - Create a extractvalue instruction with a base aggregate
2670 /// value and a list of indices. The first ctor can optionally insert before
2671 /// an existing instruction, the second appends the new instruction to the
2672 /// specified BasicBlock.
2673 inline ExtractValueInst(Value *Agg,
2674 ArrayRef<unsigned> Idxs,
2675 const Twine &NameStr,
2676 Instruction *InsertBefore);
2677 inline ExtractValueInst(Value *Agg,
2678 ArrayRef<unsigned> Idxs,
2679 const Twine &NameStr, BasicBlock *InsertAtEnd);
2680
2681 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2682
2683protected:
2684 // Note: Instruction needs to be a friend here to call cloneImpl.
2685 friend class Instruction;
2686
2687 ExtractValueInst *cloneImpl() const;
2688
2689public:
2690 static ExtractValueInst *Create(Value *Agg,
2691 ArrayRef<unsigned> Idxs,
2692 const Twine &NameStr = "",
2693 Instruction *InsertBefore = nullptr) {
2694 return new
2695 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2696 }
2697
2698 static ExtractValueInst *Create(Value *Agg,
2699 ArrayRef<unsigned> Idxs,
2700 const Twine &NameStr,
2701 BasicBlock *InsertAtEnd) {
2702 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2703 }
2704
2705 /// Returns the type of the element that would be extracted
2706 /// with an extractvalue instruction with the specified parameters.
2707 ///
2708 /// Null is returned if the indices are invalid for the specified type.
2709 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2710
2711 using idx_iterator = const unsigned*;
2712
2713 inline idx_iterator idx_begin() const { return Indices.begin(); }
2714 inline idx_iterator idx_end() const { return Indices.end(); }
2715 inline iterator_range<idx_iterator> indices() const {
2716 return make_range(idx_begin(), idx_end());
2717 }
2718
2719 Value *getAggregateOperand() {
2720 return getOperand(0);
2721 }
2722 const Value *getAggregateOperand() const {
2723 return getOperand(0);
2724 }
2725 static unsigned getAggregateOperandIndex() {
2726 return 0U; // get index for modifying correct operand
2727 }
2728
2729 ArrayRef<unsigned> getIndices() const {
2730 return Indices;
2731 }
2732
2733 unsigned getNumIndices() const {
2734 return (unsigned)Indices.size();
2735 }
2736
2737 bool hasIndices() const {
2738 return true;
2739 }
2740
2741 // Methods for support type inquiry through isa, cast, and dyn_cast:
2742 static bool classof(const Instruction *I) {
2743 return I->getOpcode() == Instruction::ExtractValue;
2744 }
2745 static bool classof(const Value *V) {
2746 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2747 }
2748};
2749
2750ExtractValueInst::ExtractValueInst(Value *Agg,
2751 ArrayRef<unsigned> Idxs,
2752 const Twine &NameStr,
2753 Instruction *InsertBefore)
2754 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2755 ExtractValue, Agg, InsertBefore) {
2756 init(Idxs, NameStr);
2757}
2758
2759ExtractValueInst::ExtractValueInst(Value *Agg,
2760 ArrayRef<unsigned> Idxs,
2761 const Twine &NameStr,
2762 BasicBlock *InsertAtEnd)
2763 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2764 ExtractValue, Agg, InsertAtEnd) {
2765 init(Idxs, NameStr);
2766}
2767
2768//===----------------------------------------------------------------------===//
2769// InsertValueInst Class
2770//===----------------------------------------------------------------------===//
2771
 2772/// This instruction inserts a struct field or array element
 2773/// value into an aggregate value.
2774///
2775class InsertValueInst : public Instruction {
// Constant index path into the aggregate; stored inline for up to 4 indices.
2776 SmallVector<unsigned, 4> Indices;
2777
2778 InsertValueInst(const InsertValueInst &IVI);
2779
2780 /// Constructors - Create a insertvalue instruction with a base aggregate
2781 /// value, a value to insert, and a list of indices. The first ctor can
2782 /// optionally insert before an existing instruction, the second appends
2783 /// the new instruction to the specified BasicBlock.
2784 inline InsertValueInst(Value *Agg, Value *Val,
2785 ArrayRef<unsigned> Idxs,
2786 const Twine &NameStr,
2787 Instruction *InsertBefore);
2788 inline InsertValueInst(Value *Agg, Value *Val,
2789 ArrayRef<unsigned> Idxs,
2790 const Twine &NameStr, BasicBlock *InsertAtEnd);
2791
2792 /// Constructors - These two constructors are convenience methods because one
2793 /// and two index insertvalue instructions are so common.
2794 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2795 const Twine &NameStr = "",
2796 Instruction *InsertBefore = nullptr);
2797 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2798 BasicBlock *InsertAtEnd);
2799
2800 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2801 const Twine &NameStr);
2802
2803protected:
2804 // Note: Instruction needs to be a friend here to call cloneImpl.
2805 friend class Instruction;
2806
2807 InsertValueInst *cloneImpl() const;
2808
2809public:
2810 // allocate space for exactly two operands
2811 void *operator new(size_t s) {
2812 return User::operator new(s, 2);
2813 }
2814
// Factory methods (construction goes through these since the ctors are
// private): create and optionally position the new instruction.
2815 static InsertValueInst *Create(Value *Agg, Value *Val,
2816 ArrayRef<unsigned> Idxs,
2817 const Twine &NameStr = "",
2818 Instruction *InsertBefore = nullptr) {
2819 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2820 }
2821
2822 static InsertValueInst *Create(Value *Agg, Value *Val,
2823 ArrayRef<unsigned> Idxs,
2824 const Twine &NameStr,
2825 BasicBlock *InsertAtEnd) {
2826 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2827 }
2828
2829 /// Transparently provide more efficient getOperand methods.
2830 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2831
2832 using idx_iterator = const unsigned*;
2833
// Iterator/range access over the constant index list.
2834 inline idx_iterator idx_begin() const { return Indices.begin(); }
2835 inline idx_iterator idx_end() const { return Indices.end(); }
2836 inline iterator_range<idx_iterator> indices() const {
2837 return make_range(idx_begin(), idx_end());
2838 }
2839
// Operand 0 is the aggregate being updated; operand 1 is the value inserted.
2840 Value *getAggregateOperand() {
2841 return getOperand(0);
2842 }
2843 const Value *getAggregateOperand() const {
2844 return getOperand(0);
2845 }
2846 static unsigned getAggregateOperandIndex() {
2847 return 0U; // get index for modifying correct operand
2848 }
2849
2850 Value *getInsertedValueOperand() {
2851 return getOperand(1);
2852 }
2853 const Value *getInsertedValueOperand() const {
2854 return getOperand(1);
2855 }
2856 static unsigned getInsertedValueOperandIndex() {
2857 return 1U; // get index for modifying correct operand
2858 }
2859
2860 ArrayRef<unsigned> getIndices() const {
2861 return Indices;
2862 }
2863
2864 unsigned getNumIndices() const {
2865 return (unsigned)Indices.size();
2866 }
2867
// Always true: an insertvalue cannot be created without indices.
2868 bool hasIndices() const {
2869 return true;
2870 }
2871
2872 // Methods for support type inquiry through isa, cast, and dyn_cast:
2873 static bool classof(const Instruction *I) {
2874 return I->getOpcode() == Instruction::InsertValue;
2875 }
2876 static bool classof(const Value *V) {
2877 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2878 }
2879};
2880
// InsertValueInst always has exactly two operands (aggregate + value).
2881template <>
2882struct OperandTraits<InsertValueInst> :
2883 public FixedNumOperandTraits<InsertValueInst, 2> {
2884};
2885
// Out-of-line inline ctor: the result type is the aggregate's own type (an
// insertvalue produces an updated copy of the aggregate). init() records the
// two operands, the index list, and the name.
2886InsertValueInst::InsertValueInst(Value *Agg,
2887 Value *Val,
2888 ArrayRef<unsigned> Idxs,
2889 const Twine &NameStr,
2890 Instruction *InsertBefore)
2891 : Instruction(Agg->getType(), InsertValue,
2892 OperandTraits<InsertValueInst>::op_begin(this),
2893 2, InsertBefore) {
2894 init(Agg, Val, Idxs, NameStr);
2895}
2896
// Same as the InsertBefore ctor above, but appends to the given basic block.
2897InsertValueInst::InsertValueInst(Value *Agg,
2898 Value *Val,
2899 ArrayRef<unsigned> Idxs,
2900 const Twine &NameStr,
2901 BasicBlock *InsertAtEnd)
2902 : Instruction(Agg->getType(), InsertValue,
2903 OperandTraits<InsertValueInst>::op_begin(this),
2904 2, InsertAtEnd) {
2905 init(Agg, Val, Idxs, NameStr);
2906}
2907
// Macro-generated (shown expanded by the analyzer): out-of-line definitions of
// the operand accessors declared inside InsertValueInst, with range asserts.
2908DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InsertValueInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2908, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertValueInst>::op_begin
(const_cast<InsertValueInst*>(this))[i_nocapture].get()
); } void InsertValueInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<InsertValueInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2908, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertValueInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertValueInst::getNumOperands() const { return
OperandTraits<InsertValueInst>::operands(this); } template
<int Idx_nocapture> Use &InsertValueInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &InsertValueInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
2909
2910//===----------------------------------------------------------------------===//
2911// PHINode Class
2912//===----------------------------------------------------------------------===//
2913
2914// PHINode - The PHINode class is used to represent the magical mystical PHI
2915// node, that can not exist in nature, but can be synthesized in a computer
2916// scientist's overactive imagination.
2917//
2918class PHINode : public Instruction {
2919 /// The number of operands actually allocated. NumOperands is
2920 /// the number actually in use.
2921 unsigned ReservedSpace;
2922
2923 PHINode(const PHINode &PN);
2924
// Private ctors; construction goes through the Create() factories below.
// Operands are "hung off" (allocated separately) so the incoming list can grow.
2925 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2926 const Twine &NameStr = "",
2927 Instruction *InsertBefore = nullptr)
2928 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2929 ReservedSpace(NumReservedValues) {
2930 setName(NameStr);
2931 allocHungoffUses(ReservedSpace);
2932 }
2933
2934 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2935 BasicBlock *InsertAtEnd)
2936 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2937 ReservedSpace(NumReservedValues) {
2938 setName(NameStr);
2939 allocHungoffUses(ReservedSpace);
2940 }
2941
2942protected:
2943 // Note: Instruction needs to be a friend here to call cloneImpl.
2944 friend class Instruction;
2945
2946 PHINode *cloneImpl() const;
2947
2948 // allocHungoffUses - this is more complicated than the generic
2949 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2950 // values and pointers to the incoming blocks, all in one allocation.
2951 void allocHungoffUses(unsigned N) {
2952 User::allocHungoffUses(N, /* IsPhi */ true);
2953 }
2954
2955public:
2956 /// Constructors - NumReservedValues is a hint for the number of incoming
2957 /// edges that this phi node will have (use 0 if you really have no idea).
2958 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2959 const Twine &NameStr = "",
2960 Instruction *InsertBefore = nullptr) {
2961 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2962 }
2963
2964 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2965 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2966 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2967 }
2968
2969 /// Provide fast operand accessors
2970 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2971
2972 // Block iterator interface. This provides access to the list of incoming
2973 // basic blocks, which parallels the list of incoming values.
2974
2975 using block_iterator = BasicBlock **;
2976 using const_block_iterator = BasicBlock * const *;
2977
// The incoming-block pointers live immediately after the ReservedSpace Use
// slots (and one UserRef), inside the same hung-off allocation.
2978 block_iterator block_begin() {
2979 Use::UserRef *ref =
2980 reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
2981 return reinterpret_cast<block_iterator>(ref + 1);
2982 }
2983
2984 const_block_iterator block_begin() const {
2985 const Use::UserRef *ref =
2986 reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
2987 return reinterpret_cast<const_block_iterator>(ref + 1);
2988 }
2989
2990 block_iterator block_end() {
2991 return block_begin() + getNumOperands();
2992 }
2993
2994 const_block_iterator block_end() const {
2995 return block_begin() + getNumOperands();
2996 }
2997
2998 iterator_range<block_iterator> blocks() {
2999 return make_range(block_begin(), block_end());
3000 }
3001
3002 iterator_range<const_block_iterator> blocks() const {
3003 return make_range(block_begin(), block_end());
3004 }
3005
3006 op_range incoming_values() { return operands(); }
3007
3008 const_op_range incoming_values() const { return operands(); }
3009
3010 /// Return the number of incoming edges
3011 ///
3012 unsigned getNumIncomingValues() const { return getNumOperands(); }
3013
3014 /// Return incoming value number x
3015 ///
3016 Value *getIncomingValue(unsigned i) const {
3017 return getOperand(i);
3018 }
3019 void setIncomingValue(unsigned i, Value *V) {
3020 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3020, __extension__ __PRETTY_FUNCTION__))
;
3021 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3022, __extension__ __PRETTY_FUNCTION__))
3022 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3022, __extension__ __PRETTY_FUNCTION__))
;
3023 setOperand(i, V);
3024 }
3025
3026 static unsigned getOperandNumForIncomingValue(unsigned i) {
3027 return i;
3028 }
3029
3030 static unsigned getIncomingValueNumForOperand(unsigned i) {
3031 return i;
3032 }
3033
3034 /// Return incoming basic block number @p i.
3035 ///
3036 BasicBlock *getIncomingBlock(unsigned i) const {
3037 return block_begin()[i];
3038 }
3039
3040 /// Return incoming basic block corresponding
3041 /// to an operand of the PHI.
3042 ///
3043 BasicBlock *getIncomingBlock(const Use &U) const {
3044 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3044, __extension__ __PRETTY_FUNCTION__))
;
3045 return getIncomingBlock(unsigned(&U - op_begin()));
3046 }
3047
3048 /// Return incoming basic block corresponding
3049 /// to value use iterator.
3050 ///
3051 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
3052 return getIncomingBlock(I.getUse());
3053 }
3054
3055 void setIncomingBlock(unsigned i, BasicBlock *BB) {
3056 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3056, __extension__ __PRETTY_FUNCTION__))
;
3057 block_begin()[i] = BB;
3058 }
3059
3060 /// Add an incoming value to the end of the PHI list
3061 ///
3062 void addIncoming(Value *V, BasicBlock *BB) {
3063 if (getNumOperands() == ReservedSpace)
3064 growOperands(); // Get more space!
3065 // Initialize some new operands.
3066 setNumHungOffUseOperands(getNumOperands() + 1);
3067 setIncomingValue(getNumOperands() - 1, V);
3068 setIncomingBlock(getNumOperands() - 1, BB);
3069 }
3070
3071 /// Remove an incoming value. This is useful if a
3072 /// predecessor basic block is deleted. The value removed is returned.
3073 ///
3074 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
3075 /// is true), the PHI node is destroyed and any uses of it are replaced with
3076 /// dummy values. The only time there should be zero incoming values to a PHI
3077 /// node is when the block is dead, so this strategy is sound.
3078 ///
3079 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
3080
3081 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
3082 int Idx = getBasicBlockIndex(BB);
3083 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3083, __extension__ __PRETTY_FUNCTION__))
;
3084 return removeIncomingValue(Idx, DeletePHIIfEmpty);
3085 }
3086
3087 /// Return the first index of the specified basic
3088 /// block in the value list for this PHI. Returns -1 if no instance.
3089 ///
3090 int getBasicBlockIndex(const BasicBlock *BB) const {
3091 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
3092 if (block_begin()[i] == BB)
3093 return i;
3094 return -1;
3095 }
3096
// NOTE(review): getBasicBlockIndex() returns -1 when BB is not a predecessor
// (line 3094 above); the assert below is the only guard, so in builds where
// asserts compile away a bad BB would index operands with (unsigned)-1.
// Callers must pass a genuine incoming block — confirm at each call site.
3097 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
3098 int Idx = getBasicBlockIndex(BB);
3099 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3099, __extension__ __PRETTY_FUNCTION__))
;
16
Within the expansion of the macro 'assert':
a
Assuming 'Idx' is >= 0
3100 return getIncomingValue(Idx);
17
Calling 'PHINode::getIncomingValue'
18
Returning from 'PHINode::getIncomingValue'
3101 }
3102
3103 /// If the specified PHI node always merges together the
3104 /// same value, return the value, otherwise return null.
3105 Value *hasConstantValue() const;
3106
3107 /// Whether the specified PHI node always merges
3108 /// together the same value, assuming undefs are equal to a unique
3109 /// non-undef value.
3110 bool hasConstantOrUndefValue() const;
3111
3112 /// Methods for support type inquiry through isa, cast, and dyn_cast:
3113 static bool classof(const Instruction *I) {
3114 return I->getOpcode() == Instruction::PHI;
3115 }
3116 static bool classof(const Value *V) {
3117 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3118 }
3119
3120private:
3121 void growOperands();
3122};
3123
// PHI operands are hung off (variable count); 2 is the initial reservation.
3124template <>
3125struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
3126};
3127
// Macro-generated (shown expanded by the analyzer): out-of-line definitions of
// PHINode's operand accessors, with range asserts on get/setOperand.
3128DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<PHINode>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3128, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<PHINode>::op_begin(const_cast
<PHINode*>(this))[i_nocapture].get()); } void PHINode::
setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<PHINode>::
operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3128, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
PHINode>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
PHINode::getNumOperands() const { return OperandTraits<PHINode
>::operands(this); } template <int Idx_nocapture> Use
&PHINode::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
PHINode::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3129
3130//===----------------------------------------------------------------------===//
3131// LandingPadInst Class
3132//===----------------------------------------------------------------------===//
3133
3134//===---------------------------------------------------------------------------
3135/// The landingpad instruction holds all of the information
3136/// necessary to generate correct exception handling. The landingpad instruction
3137/// cannot be moved from the top of a landing pad block, which itself is
3138/// accessible only from the 'unwind' edge of an invoke. This uses the
3139/// SubclassData field in Value to store whether or not the landingpad is a
3140/// cleanup.
3141///
3142class LandingPadInst : public Instruction {
3143 /// The number of operands actually allocated. NumOperands is
3144 /// the number actually in use.
3145 unsigned ReservedSpace;
3146
3147 LandingPadInst(const LandingPadInst &LP);
3148
3149public:
3150 enum ClauseType { Catch, Filter };
3151
3152private:
3153 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
3154 const Twine &NameStr, Instruction *InsertBefore);
3155 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
3156 const Twine &NameStr, BasicBlock *InsertAtEnd);
3157
3158 // Allocate space for exactly zero operands.
3159 void *operator new(size_t s) {
3160 return User::operator new(s);
3161 }
3162
3163 void growOperands(unsigned Size);
3164 void init(unsigned NumReservedValues, const Twine &NameStr);
3165
3166protected:
3167 // Note: Instruction needs to be a friend here to call cloneImpl.
3168 friend class Instruction;
3169
3170 LandingPadInst *cloneImpl() const;
3171
3172public:
3173 /// Constructors - NumReservedClauses is a hint for the number of incoming
3174 /// clauses that this landingpad will have (use 0 if you really have no idea).
3175 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3176 const Twine &NameStr = "",
3177 Instruction *InsertBefore = nullptr);
3178 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3179 const Twine &NameStr, BasicBlock *InsertAtEnd);
3180
3181 /// Provide fast operand accessors
3182 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3183
3184 /// Return 'true' if this landingpad instruction is a
3185 /// cleanup. I.e., it should be run when unwinding even if its landing pad
3186 /// doesn't catch the exception.
// The cleanup flag lives in bit 0 of the instruction's subclass data.
3187 bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
3188
3189 /// Indicate that this landingpad instruction is a cleanup.
3190 void setCleanup(bool V) {
3191 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
3192 (V ? 1 : 0));
3193 }
3194
3195 /// Add a catch or filter clause to the landing pad.
3196 void addClause(Constant *ClauseVal);
3197
3198 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3199 /// determine what type of clause this is.
3200 Constant *getClause(unsigned Idx) const {
3201 return cast<Constant>(getOperandList()[Idx]);
3202 }
3203
3204 /// Return 'true' if the clause and index Idx is a catch clause.
// Catch vs. filter is distinguished by the clause's type: filters are arrays.
3205 bool isCatch(unsigned Idx) const {
3206 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3207 }
3208
3209 /// Return 'true' if the clause and index Idx is a filter clause.
3210 bool isFilter(unsigned Idx) const {
3211 return isa<ArrayType>(getOperandList()[Idx]->getType());
3212 }
3213
3214 /// Get the number of clauses for this landing pad.
3215 unsigned getNumClauses() const { return getNumOperands(); }
3216
3217 /// Grow the size of the operand list to accommodate the new
3218 /// number of clauses.
3219 void reserveClauses(unsigned Size) { growOperands(Size); }
3220
3221 // Methods for support type inquiry through isa, cast, and dyn_cast:
3222 static bool classof(const Instruction *I) {
3223 return I->getOpcode() == Instruction::LandingPad;
3224 }
3225 static bool classof(const Value *V) {
3226 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3227 }
3228};
3229
// Landingpad clauses are hung-off operands; 1 is the initial reservation.
3230template <>
3231struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
3232};
3233
// Macro-generated (shown expanded by the analyzer): out-of-line definitions of
// LandingPadInst's operand accessors, with range asserts on get/setOperand.
3234DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3234, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<LandingPadInst>::op_begin(
const_cast<LandingPadInst*>(this))[i_nocapture].get());
} void LandingPadInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<LandingPadInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3234, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
LandingPadInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned LandingPadInst::getNumOperands() const { return OperandTraits
<LandingPadInst>::operands(this); } template <int Idx_nocapture
> Use &LandingPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &LandingPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
3235
3236//===----------------------------------------------------------------------===//
3237// ReturnInst Class
3238//===----------------------------------------------------------------------===//
3239
3240//===---------------------------------------------------------------------------
3241/// Return a value (possibly void), from a function. Execution
3242/// does not continue in this function any longer.
3243///
3244class ReturnInst : public TerminatorInst {
3245 ReturnInst(const ReturnInst &RI);
3246
3247private:
3248 // ReturnInst constructors:
3249 // ReturnInst() - 'ret void' instruction
3250 // ReturnInst( null) - 'ret void' instruction
3251 // ReturnInst(Value* X) - 'ret X' instruction
3252 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3253 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3254 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3255 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3256 //
3257 // NOTE: If the Value* passed is of type void then the constructor behaves as
3258 // if it was passed NULL.
3259 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3260 Instruction *InsertBefore = nullptr);
3261 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3262 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3263
3264protected:
3265 // Note: Instruction needs to be a friend here to call cloneImpl.
3266 friend class Instruction;
3267
3268 ReturnInst *cloneImpl() const;
3269
3270public:
// Factories: '!!retVal' passes 1 or 0 to placement new, allocating one
// operand slot only when a return value is present.
3271 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3272 Instruction *InsertBefore = nullptr) {
3273 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3274 }
3275
3276 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3277 BasicBlock *InsertAtEnd) {
3278 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3279 }
3280
3281 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3282 return new(0) ReturnInst(C, InsertAtEnd);
3283 }
3284
3285 /// Provide fast operand accessors
3286 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3287
3288 /// Convenience accessor. Returns null if there is no return value.
3289 Value *getReturnValue() const {
3290 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3291 }
3292
// 'ret' ends the function: it never transfers control to another block.
3293 unsigned getNumSuccessors() const { return 0; }
3294
3295 // Methods for support type inquiry through isa, cast, and dyn_cast:
3296 static bool classof(const Instruction *I) {
3297 return (I->getOpcode() == Instruction::Ret);
3298 }
3299 static bool classof(const Value *V) {
3300 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3301 }
3302
3303private:
3304 friend TerminatorInst;
3305
// Required by the TerminatorInst interface, but unreachable for 'ret'.
3306 BasicBlock *getSuccessor(unsigned idx) const {
3307 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3307)
;
3308 }
3309
3310 void setSuccessor(unsigned idx, BasicBlock *B) {
3311 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3311)
;
3312 }
3313};
3314
// ReturnInst has a variable operand count (0 for 'ret void', 1 otherwise).
3315template <>
3316struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3317};
3318
// Macro-generated (shown expanded by the analyzer): out-of-line definitions of
// ReturnInst's operand accessors, with range asserts on get/setOperand.
3319DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ReturnInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3319, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ReturnInst>::op_begin(const_cast
<ReturnInst*>(this))[i_nocapture].get()); } void ReturnInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<ReturnInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3319, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned ReturnInst::getNumOperands() const { return OperandTraits
<ReturnInst>::operands(this); } template <int Idx_nocapture
> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3320
3321//===----------------------------------------------------------------------===//
3322// BranchInst Class
3323//===----------------------------------------------------------------------===//
3324
3325//===---------------------------------------------------------------------------
3326/// Conditional or Unconditional Branch instruction.
3327///
3328class BranchInst : public TerminatorInst {
3329 /// Ops list - Branches are strange. The operands are ordered:
3330 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3331 /// they don't have to check for cond/uncond branchness. These are mostly
3332 /// accessed relative from op_end().
3333 BranchInst(const BranchInst &BI);
3334 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3335 // BranchInst(BB *B) - 'br B'
3336 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3337 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3338 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3339 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3340 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3341 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3342 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3343 Instruction *InsertBefore = nullptr);
3344 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3345 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3346 BasicBlock *InsertAtEnd);
3347
3348 void AssertOK();
3349
3350protected:
3351 // Note: Instruction needs to be a friend here to call cloneImpl.
3352 friend class Instruction;
3353
3354 BranchInst *cloneImpl() const;
3355
3356public:
3357 static BranchInst *Create(BasicBlock *IfTrue,
3358 Instruction *InsertBefore = nullptr) {
3359 return new(1) BranchInst(IfTrue, InsertBefore);
3360 }
3361
3362 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3363 Value *Cond, Instruction *InsertBefore = nullptr) {
3364 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3365 }
3366
3367 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3368 return new(1) BranchInst(IfTrue, InsertAtEnd);
3369 }
3370
3371 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3372 Value *Cond, BasicBlock *InsertAtEnd) {
3373 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3374 }
3375
3376 /// Transparently provide more efficient getOperand methods.
3377 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3378
3379 bool isUnconditional() const { return getNumOperands() == 1; }
3380 bool isConditional() const { return getNumOperands() == 3; }
3381
3382 Value *getCondition() const {
3383 assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3383, __extension__ __PRETTY_FUNCTION__))
;
3384 return Op<-3>();
3385 }
3386
3387 void setCondition(Value *V) {
3388 assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3388, __extension__ __PRETTY_FUNCTION__))
;
3389 Op<-3>() = V;
3390 }
3391
3392 unsigned getNumSuccessors() const { return 1+isConditional(); }
3393
3394 BasicBlock *getSuccessor(unsigned i) const {
3395 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3395, __extension__ __PRETTY_FUNCTION__))
;
3396 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3397 }
3398
3399 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3400 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3400, __extension__ __PRETTY_FUNCTION__))
;
3401 *(&Op<-1>() - idx) = NewSucc;
3402 }
3403
3404 /// Swap the successors of this branch instruction.
3405 ///
3406 /// Swaps the successors of the branch instruction. This also swaps any
3407 /// branch weight metadata associated with the instruction so that it
3408 /// continues to map correctly to each operand.
3409 void swapSuccessors();
3410
3411 // Methods for support type inquiry through isa, cast, and dyn_cast:
3412 static bool classof(const Instruction *I) {
3413 return (I->getOpcode() == Instruction::Br);
3414 }
3415 static bool classof(const Value *V) {
3416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3417 }
3418};
3419
3420template <>
3421struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3422};
3423
3424DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<BranchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3424, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<BranchInst>::op_begin(const_cast
<BranchInst*>(this))[i_nocapture].get()); } void BranchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<BranchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3424, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
BranchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned BranchInst::getNumOperands() const { return OperandTraits
<BranchInst>::operands(this); } template <int Idx_nocapture
> Use &BranchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
BranchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3425
3426//===----------------------------------------------------------------------===//
3427// SwitchInst Class
3428//===----------------------------------------------------------------------===//
3429
3430//===---------------------------------------------------------------------------
3431/// Multiway switch
3432///
3433class SwitchInst : public TerminatorInst {
3434 unsigned ReservedSpace;
3435
3436 // Operand[0] = Value to switch on
3437 // Operand[1] = Default basic block destination
3438 // Operand[2n ] = Value to match
3439 // Operand[2n+1] = BasicBlock to go to on match
3440 SwitchInst(const SwitchInst &SI);
3441
3442 /// Create a new switch instruction, specifying a value to switch on and a
3443 /// default destination. The number of additional cases can be specified here
3444 /// to make memory allocation more efficient. This constructor can also
3445 /// auto-insert before another instruction.
3446 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3447 Instruction *InsertBefore);
3448
3449 /// Create a new switch instruction, specifying a value to switch on and a
3450 /// default destination. The number of additional cases can be specified here
3451 /// to make memory allocation more efficient. This constructor also
3452 /// auto-inserts at the end of the specified BasicBlock.
3453 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3454 BasicBlock *InsertAtEnd);
3455
3456 // allocate space for exactly zero operands
3457 void *operator new(size_t s) {
3458 return User::operator new(s);
3459 }
3460
3461 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3462 void growOperands();
3463
3464protected:
3465 // Note: Instruction needs to be a friend here to call cloneImpl.
3466 friend class Instruction;
3467
3468 SwitchInst *cloneImpl() const;
3469
3470public:
3471 // -2
3472 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3473
3474 template <typename CaseHandleT> class CaseIteratorImpl;
3475
3476 /// A handle to a particular switch case. It exposes a convenient interface
3477 /// to both the case value and the successor block.
3478 ///
3479 /// We define this as a template and instantiate it to form both a const and
3480 /// non-const handle.
3481 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3482 class CaseHandleImpl {
3483 // Directly befriend both const and non-const iterators.
3484 friend class SwitchInst::CaseIteratorImpl<
3485 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3486
3487 protected:
3488 // Expose the switch type we're parameterized with to the iterator.
3489 using SwitchInstType = SwitchInstT;
3490
3491 SwitchInstT *SI;
3492 ptrdiff_t Index;
3493
3494 CaseHandleImpl() = default;
3495 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3496
3497 public:
3498 /// Resolves case value for current case.
3499 ConstantIntT *getCaseValue() const {
3500 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3501, __extension__ __PRETTY_FUNCTION__))
3501 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3501, __extension__ __PRETTY_FUNCTION__))
;
3502 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3503 }
3504
3505 /// Resolves successor for current case.
3506 BasicBlockT *getCaseSuccessor() const {
3507 assert(((unsigned)Index < SI->getNumCases() ||(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3509, __extension__ __PRETTY_FUNCTION__))
3508 (unsigned)Index == DefaultPseudoIndex) &&(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3509, __extension__ __PRETTY_FUNCTION__))
3509 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3509, __extension__ __PRETTY_FUNCTION__))
;
3510 return SI->getSuccessor(getSuccessorIndex());
3511 }
3512
3513 /// Returns number of current case.
3514 unsigned getCaseIndex() const { return Index; }
3515
3516 /// Returns TerminatorInst's successor index for current case successor.
3517 unsigned getSuccessorIndex() const {
3518 assert(((unsigned)Index == DefaultPseudoIndex ||(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3520, __extension__ __PRETTY_FUNCTION__))
3519 (unsigned)Index < SI->getNumCases()) &&(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3520, __extension__ __PRETTY_FUNCTION__))
3520 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3520, __extension__ __PRETTY_FUNCTION__))
;
3521 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3522 }
3523
3524 bool operator==(const CaseHandleImpl &RHS) const {
3525 assert(SI == RHS.SI && "Incompatible operators.")(static_cast <bool> (SI == RHS.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3525, __extension__ __PRETTY_FUNCTION__))
;
3526 return Index == RHS.Index;
3527 }
3528 };
3529
3530 using ConstCaseHandle =
3531 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3532
3533 class CaseHandle
3534 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3535 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3536
3537 public:
3538 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3539
3540 /// Sets the new value for current case.
3541 void setValue(ConstantInt *V) {
3542 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3543, __extension__ __PRETTY_FUNCTION__))
3543 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3543, __extension__ __PRETTY_FUNCTION__))
;
3544 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3545 }
3546
3547 /// Sets the new successor for current case.
3548 void setSuccessor(BasicBlock *S) {
3549 SI->setSuccessor(getSuccessorIndex(), S);
3550 }
3551 };
3552
3553 template <typename CaseHandleT>
3554 class CaseIteratorImpl
3555 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3556 std::random_access_iterator_tag,
3557 CaseHandleT> {
3558 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3559
3560 CaseHandleT Case;
3561
3562 public:
3563 /// Default constructed iterator is in an invalid state until assigned to
3564 /// a case for a particular switch.
3565 CaseIteratorImpl() = default;
3566
3567 /// Initializes case iterator for given SwitchInst and for given
3568 /// case number.
3569 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3570
3571 /// Initializes case iterator for given SwitchInst and for given
3572 /// TerminatorInst's successor index.
3573 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3574 unsigned SuccessorIndex) {
3575 assert(SuccessorIndex < SI->getNumSuccessors() &&(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3576, __extension__ __PRETTY_FUNCTION__))
3576 "Successor index # out of range!")(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3576, __extension__ __PRETTY_FUNCTION__))
;
3577 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3578 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3579 }
3580
3581 /// Support converting to the const variant. This will be a no-op for const
3582 /// variant.
3583 operator CaseIteratorImpl<ConstCaseHandle>() const {
3584 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3585 }
3586
3587 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3588 // Check index correctness after addition.
3589 // Note: Index == getNumCases() means end().
3590 assert(Case.Index + N >= 0 &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3592, __extension__ __PRETTY_FUNCTION__))
3591 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3592, __extension__ __PRETTY_FUNCTION__))
3592 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3592, __extension__ __PRETTY_FUNCTION__))
;
3593 Case.Index += N;
3594 return *this;
3595 }
3596 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3597 // Check index correctness after subtraction.
3598 // Note: Case.Index == getNumCases() means end().
3599 assert(Case.Index - N >= 0 &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3601, __extension__ __PRETTY_FUNCTION__))
3600 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3601, __extension__ __PRETTY_FUNCTION__))
3601 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3601, __extension__ __PRETTY_FUNCTION__))
;
3602 Case.Index -= N;
3603 return *this;
3604 }
3605 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3606 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3606, __extension__ __PRETTY_FUNCTION__))
;
3607 return Case.Index - RHS.Case.Index;
3608 }
3609 bool operator==(const CaseIteratorImpl &RHS) const {
3610 return Case == RHS.Case;
3611 }
3612 bool operator<(const CaseIteratorImpl &RHS) const {
3613 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3613, __extension__ __PRETTY_FUNCTION__))
;
3614 return Case.Index < RHS.Case.Index;
3615 }
3616 CaseHandleT &operator*() { return Case; }
3617 const CaseHandleT &operator*() const { return Case; }
3618 };
3619
3620 using CaseIt = CaseIteratorImpl<CaseHandle>;
3621 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3622
3623 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3624 unsigned NumCases,
3625 Instruction *InsertBefore = nullptr) {
3626 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3627 }
3628
3629 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3630 unsigned NumCases, BasicBlock *InsertAtEnd) {
3631 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3632 }
3633
3634 /// Provide fast operand accessors
3635 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3636
3637 // Accessor Methods for Switch stmt
3638 Value *getCondition() const { return getOperand(0); }
3639 void setCondition(Value *V) { setOperand(0, V); }
3640
3641 BasicBlock *getDefaultDest() const {
3642 return cast<BasicBlock>(getOperand(1));
3643 }
3644
3645 void setDefaultDest(BasicBlock *DefaultCase) {
3646 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3647 }
3648
3649 /// Return the number of 'cases' in this switch instruction, excluding the
3650 /// default case.
3651 unsigned getNumCases() const {
3652 return getNumOperands()/2 - 1;
3653 }
3654
3655 /// Returns a read/write iterator that points to the first case in the
3656 /// SwitchInst.
3657 CaseIt case_begin() {
3658 return CaseIt(this, 0);
3659 }
3660
3661 /// Returns a read-only iterator that points to the first case in the
3662 /// SwitchInst.
3663 ConstCaseIt case_begin() const {
3664 return ConstCaseIt(this, 0);
3665 }
3666
3667 /// Returns a read/write iterator that points one past the last in the
3668 /// SwitchInst.
3669 CaseIt case_end() {
3670 return CaseIt(this, getNumCases());
3671 }
3672
3673 /// Returns a read-only iterator that points one past the last in the
3674 /// SwitchInst.
3675 ConstCaseIt case_end() const {
3676 return ConstCaseIt(this, getNumCases());
3677 }
3678
3679 /// Iteration adapter for range-for loops.
3680 iterator_range<CaseIt> cases() {
3681 return make_range(case_begin(), case_end());
3682 }
3683
3684 /// Constant iteration adapter for range-for loops.
3685 iterator_range<ConstCaseIt> cases() const {
3686 return make_range(case_begin(), case_end());
3687 }
3688
3689 /// Returns an iterator that points to the default case.
3690 /// Note: this iterator allows to resolve successor only. Attempt
3691 /// to resolve case value causes an assertion.
3692 /// Also note, that increment and decrement also causes an assertion and
3693 /// makes iterator invalid.
3694 CaseIt case_default() {
3695 return CaseIt(this, DefaultPseudoIndex);
3696 }
3697 ConstCaseIt case_default() const {
3698 return ConstCaseIt(this, DefaultPseudoIndex);
3699 }
3700
3701 /// Search all of the case values for the specified constant. If it is
3702 /// explicitly handled, return the case iterator of it, otherwise return
3703 /// default case iterator to indicate that it is handled by the default
3704 /// handler.
3705 CaseIt findCaseValue(const ConstantInt *C) {
3706 CaseIt I = llvm::find_if(
3707 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3708 if (I != case_end())
3709 return I;
3710
3711 return case_default();
3712 }
3713 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3714 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3715 return Case.getCaseValue() == C;
3716 });
3717 if (I != case_end())
3718 return I;
3719
3720 return case_default();
3721 }
3722
3723 /// Finds the unique case value for a given successor. Returns null if the
3724 /// successor is not found, not unique, or is the default case.
3725 ConstantInt *findCaseDest(BasicBlock *BB) {
3726 if (BB == getDefaultDest())
3727 return nullptr;
3728
3729 ConstantInt *CI = nullptr;
3730 for (auto Case : cases()) {
3731 if (Case.getCaseSuccessor() != BB)
3732 continue;
3733
3734 if (CI)
3735 return nullptr; // Multiple cases lead to BB.
3736
3737 CI = Case.getCaseValue();
3738 }
3739
3740 return CI;
3741 }
3742
3743 /// Add an entry to the switch instruction.
3744 /// Note:
3745 /// This action invalidates case_end(). Old case_end() iterator will
3746 /// point to the added case.
3747 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3748
3749 /// This method removes the specified case and its successor from the switch
3750 /// instruction. Note that this operation may reorder the remaining cases at
3751 /// index idx and above.
3752 /// Note:
3753 /// This action invalidates iterators for all cases following the one removed,
3754 /// including the case_end() iterator. It returns an iterator for the next
3755 /// case.
3756 CaseIt removeCase(CaseIt I);
3757
3758 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3759 BasicBlock *getSuccessor(unsigned idx) const {
3760 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor idx out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 3760, __extension__ __PRETTY_FUNCTION__))
;
3761 return cast<BasicBlock>(getOperand(idx*2+1));
3762 }
3763 void setSuccessor(unsigned