Bug Summary

File: lib/Transforms/Utils/Local.cpp
Warning: line 140, column 7
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name Local.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem 
/usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Transforms/Utils -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp

/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp

1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This family of functions perform various local transformations to the
11// program.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Analysis/Utils/Local.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/DenseSet.h"
20#include "llvm/ADT/Hashing.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/Optional.h"
23#include "llvm/ADT/STLExtras.h"
24#include "llvm/ADT/SetVector.h"
25#include "llvm/ADT/SmallPtrSet.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/ADT/Statistic.h"
28#include "llvm/ADT/TinyPtrVector.h"
29#include "llvm/Analysis/ConstantFolding.h"
30#include "llvm/Analysis/EHPersonalities.h"
31#include "llvm/Analysis/InstructionSimplify.h"
32#include "llvm/Analysis/LazyValueInfo.h"
33#include "llvm/Analysis/MemoryBuiltins.h"
34#include "llvm/Analysis/TargetLibraryInfo.h"
35#include "llvm/Analysis/ValueTracking.h"
36#include "llvm/BinaryFormat/Dwarf.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/Attributes.h"
39#include "llvm/IR/BasicBlock.h"
40#include "llvm/IR/CFG.h"
41#include "llvm/IR/CallSite.h"
42#include "llvm/IR/Constant.h"
43#include "llvm/IR/ConstantRange.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DIBuilder.h"
46#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/DebugInfoMetadata.h"
48#include "llvm/IR/DebugLoc.h"
49#include "llvm/IR/DerivedTypes.h"
50#include "llvm/IR/Dominators.h"
51#include "llvm/IR/Function.h"
52#include "llvm/IR/GetElementPtrTypeIterator.h"
53#include "llvm/IR/GlobalObject.h"
54#include "llvm/IR/IRBuilder.h"
55#include "llvm/IR/InstrTypes.h"
56#include "llvm/IR/Instruction.h"
57#include "llvm/IR/Instructions.h"
58#include "llvm/IR/IntrinsicInst.h"
59#include "llvm/IR/Intrinsics.h"
60#include "llvm/IR/LLVMContext.h"
61#include "llvm/IR/MDBuilder.h"
62#include "llvm/IR/Metadata.h"
63#include "llvm/IR/Module.h"
64#include "llvm/IR/Operator.h"
65#include "llvm/IR/PatternMatch.h"
66#include "llvm/IR/Type.h"
67#include "llvm/IR/Use.h"
68#include "llvm/IR/User.h"
69#include "llvm/IR/Value.h"
70#include "llvm/IR/ValueHandle.h"
71#include "llvm/Support/Casting.h"
72#include "llvm/Support/Debug.h"
73#include "llvm/Support/ErrorHandling.h"
74#include "llvm/Support/KnownBits.h"
75#include "llvm/Support/raw_ostream.h"
76#include "llvm/Transforms/Utils/ValueMapper.h"
77#include <algorithm>
78#include <cassert>
79#include <climits>
80#include <cstdint>
81#include <iterator>
82#include <map>
83#include <utility>
84
85using namespace llvm;
86using namespace llvm::PatternMatch;
87
88#define DEBUG_TYPE"local" "local"
89
90STATISTIC(NumRemoved, "Number of unreachable basic blocks removed")static llvm::Statistic NumRemoved = {"local", "NumRemoved", "Number of unreachable basic blocks removed"
, {0}, {false}}
;
91
92//===----------------------------------------------------------------------===//
93// Local constant propagation.
94//
95
96/// ConstantFoldTerminator - If a terminator instruction is predicated on a
97/// constant value, convert it into an unconditional branch to the constant
98/// destination. This is a nontrivial operation because the successors of this
99/// basic block must have their PHI nodes updated.
100/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
101/// conditions and indirectbr addresses this might make dead if
102/// DeleteDeadConditions is true.
103bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
104 const TargetLibraryInfo *TLI,
105 DeferredDominance *DDT) {
106 TerminatorInst *T = BB->getTerminator();
107 IRBuilder<> Builder(T);
108
109 // Branch - See if we are conditional jumping on constant
110 if (auto *BI = dyn_cast<BranchInst>(T)) {
5
Assuming 'BI' is non-null
6
Taking true branch
111 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
7
Taking false branch
112 BasicBlock *Dest1 = BI->getSuccessor(0);
8
Calling 'BranchInst::getSuccessor'
15
Returning from 'BranchInst::getSuccessor'
16
'Dest1' initialized here
113 BasicBlock *Dest2 = BI->getSuccessor(1);
114
115 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
17
Taking false branch
116 // Are we branching on constant?
117 // YES. Change to unconditional branch...
118 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
119 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
120
121 // Let the basic block know that we are letting go of it. Based on this,
122 // it will adjust it's PHI nodes.
123 OldDest->removePredecessor(BB);
124
125 // Replace the conditional branch with an unconditional one.
126 Builder.CreateBr(Destination);
127 BI->eraseFromParent();
128 if (DDT)
129 DDT->deleteEdge(BB, OldDest);
130 return true;
131 }
132
133 if (Dest2 == Dest1) { // Conditional branch to same location?
18
Assuming 'Dest2' is equal to 'Dest1'
19
Assuming pointer value is null
20
Taking true branch
134 // This branch matches something like this:
135 // br bool %cond, label %Dest, label %Dest
136 // and changes it into: br label %Dest
137
138 // Let the basic block know that we are letting go of one copy of it.
139 assert(BI->getParent() && "Terminator not inserted in block!")(static_cast <bool> (BI->getParent() && "Terminator not inserted in block!"
) ? void (0) : __assert_fail ("BI->getParent() && \"Terminator not inserted in block!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 139, __extension__ __PRETTY_FUNCTION__))
;
140 Dest1->removePredecessor(BI->getParent());
21
Called C++ object pointer is null
141
142 // Replace the conditional branch with an unconditional one.
143 Builder.CreateBr(Dest1);
144 Value *Cond = BI->getCondition();
145 BI->eraseFromParent();
146 if (DeleteDeadConditions)
147 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
148 return true;
149 }
150 return false;
151 }
152
153 if (auto *SI = dyn_cast<SwitchInst>(T)) {
154 // If we are switching on a constant, we can convert the switch to an
155 // unconditional branch.
156 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
157 BasicBlock *DefaultDest = SI->getDefaultDest();
158 BasicBlock *TheOnlyDest = DefaultDest;
159
160 // If the default is unreachable, ignore it when searching for TheOnlyDest.
161 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
162 SI->getNumCases() > 0) {
163 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
164 }
165
166 // Figure out which case it goes to.
167 for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
168 // Found case matching a constant operand?
169 if (i->getCaseValue() == CI) {
170 TheOnlyDest = i->getCaseSuccessor();
171 break;
172 }
173
174 // Check to see if this branch is going to the same place as the default
175 // dest. If so, eliminate it as an explicit compare.
176 if (i->getCaseSuccessor() == DefaultDest) {
177 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
178 unsigned NCases = SI->getNumCases();
179 // Fold the case metadata into the default if there will be any branches
180 // left, unless the metadata doesn't match the switch.
181 if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
182 // Collect branch weights into a vector.
183 SmallVector<uint32_t, 8> Weights;
184 for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
185 ++MD_i) {
186 auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
187 Weights.push_back(CI->getValue().getZExtValue());
188 }
189 // Merge weight of this case to the default weight.
190 unsigned idx = i->getCaseIndex();
191 Weights[0] += Weights[idx+1];
192 // Remove weight for this case.
193 std::swap(Weights[idx+1], Weights.back());
194 Weights.pop_back();
195 SI->setMetadata(LLVMContext::MD_prof,
196 MDBuilder(BB->getContext()).
197 createBranchWeights(Weights));
198 }
199 // Remove this entry.
200 BasicBlock *ParentBB = SI->getParent();
201 DefaultDest->removePredecessor(ParentBB);
202 i = SI->removeCase(i);
203 e = SI->case_end();
204 if (DDT)
205 DDT->deleteEdge(ParentBB, DefaultDest);
206 continue;
207 }
208
209 // Otherwise, check to see if the switch only branches to one destination.
210 // We do this by reseting "TheOnlyDest" to null when we find two non-equal
211 // destinations.
212 if (i->getCaseSuccessor() != TheOnlyDest)
213 TheOnlyDest = nullptr;
214
215 // Increment this iterator as we haven't removed the case.
216 ++i;
217 }
218
219 if (CI && !TheOnlyDest) {
220 // Branching on a constant, but not any of the cases, go to the default
221 // successor.
222 TheOnlyDest = SI->getDefaultDest();
223 }
224
225 // If we found a single destination that we can fold the switch into, do so
226 // now.
227 if (TheOnlyDest) {
228 // Insert the new branch.
229 Builder.CreateBr(TheOnlyDest);
230 BasicBlock *BB = SI->getParent();
231 std::vector <DominatorTree::UpdateType> Updates;
232 if (DDT)
233 Updates.reserve(SI->getNumSuccessors() - 1);
234
235 // Remove entries from PHI nodes which we no longer branch to...
236 for (BasicBlock *Succ : SI->successors()) {
237 // Found case matching a constant operand?
238 if (Succ == TheOnlyDest) {
239 TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
240 } else {
241 Succ->removePredecessor(BB);
242 if (DDT)
243 Updates.push_back({DominatorTree::Delete, BB, Succ});
244 }
245 }
246
247 // Delete the old switch.
248 Value *Cond = SI->getCondition();
249 SI->eraseFromParent();
250 if (DeleteDeadConditions)
251 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
252 if (DDT)
253 DDT->applyUpdates(Updates);
254 return true;
255 }
256
257 if (SI->getNumCases() == 1) {
258 // Otherwise, we can fold this switch into a conditional branch
259 // instruction if it has only one non-default destination.
260 auto FirstCase = *SI->case_begin();
261 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
262 FirstCase.getCaseValue(), "cond");
263
264 // Insert the new branch.
265 BranchInst *NewBr = Builder.CreateCondBr(Cond,
266 FirstCase.getCaseSuccessor(),
267 SI->getDefaultDest());
268 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
269 if (MD && MD->getNumOperands() == 3) {
270 ConstantInt *SICase =
271 mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
272 ConstantInt *SIDef =
273 mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
274 assert(SICase && SIDef)(static_cast <bool> (SICase && SIDef) ? void (0
) : __assert_fail ("SICase && SIDef", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 274, __extension__ __PRETTY_FUNCTION__))
;
275 // The TrueWeight should be the weight for the single case of SI.
276 NewBr->setMetadata(LLVMContext::MD_prof,
277 MDBuilder(BB->getContext()).
278 createBranchWeights(SICase->getValue().getZExtValue(),
279 SIDef->getValue().getZExtValue()));
280 }
281
282 // Update make.implicit metadata to the newly-created conditional branch.
283 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
284 if (MakeImplicitMD)
285 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
286
287 // Delete the old switch.
288 SI->eraseFromParent();
289 return true;
290 }
291 return false;
292 }
293
294 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
295 // indirectbr blockaddress(@F, @BB) -> br label @BB
296 if (auto *BA =
297 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
298 BasicBlock *TheOnlyDest = BA->getBasicBlock();
299 std::vector <DominatorTree::UpdateType> Updates;
300 if (DDT)
301 Updates.reserve(IBI->getNumDestinations() - 1);
302
303 // Insert the new branch.
304 Builder.CreateBr(TheOnlyDest);
305
306 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
307 if (IBI->getDestination(i) == TheOnlyDest) {
308 TheOnlyDest = nullptr;
309 } else {
310 BasicBlock *ParentBB = IBI->getParent();
311 BasicBlock *DestBB = IBI->getDestination(i);
312 DestBB->removePredecessor(ParentBB);
313 if (DDT)
314 Updates.push_back({DominatorTree::Delete, ParentBB, DestBB});
315 }
316 }
317 Value *Address = IBI->getAddress();
318 IBI->eraseFromParent();
319 if (DeleteDeadConditions)
320 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
321
322 // If we didn't find our destination in the IBI successor list, then we
323 // have undefined behavior. Replace the unconditional branch with an
324 // 'unreachable' instruction.
325 if (TheOnlyDest) {
326 BB->getTerminator()->eraseFromParent();
327 new UnreachableInst(BB->getContext(), BB);
328 }
329
330 if (DDT)
331 DDT->applyUpdates(Updates);
332 return true;
333 }
334 }
335
336 return false;
337}
338
339//===----------------------------------------------------------------------===//
340// Local dead code elimination.
341//
342
343/// isInstructionTriviallyDead - Return true if the result produced by the
344/// instruction is not used, and the instruction has no side effects.
345///
346bool llvm::isInstructionTriviallyDead(Instruction *I,
347 const TargetLibraryInfo *TLI) {
348 if (!I->use_empty())
349 return false;
350 return wouldInstructionBeTriviallyDead(I, TLI);
351}
352
353bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
354 const TargetLibraryInfo *TLI) {
355 if (isa<TerminatorInst>(I))
356 return false;
357
358 // We don't want the landingpad-like instructions removed by anything this
359 // general.
360 if (I->isEHPad())
361 return false;
362
363 // We don't want debug info removed by anything this general, unless
364 // debug info is empty.
365 if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
366 if (DDI->getAddress())
367 return false;
368 return true;
369 }
370 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
371 if (DVI->getValue())
372 return false;
373 return true;
374 }
375
376 if (!I->mayHaveSideEffects())
377 return true;
378
379 // Special case intrinsics that "may have side effects" but can be deleted
380 // when dead.
381 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
382 // Safe to delete llvm.stacksave if dead.
383 if (II->getIntrinsicID() == Intrinsic::stacksave)
384 return true;
385
386 // Lifetime intrinsics are dead when their right-hand is undef.
387 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
388 II->getIntrinsicID() == Intrinsic::lifetime_end)
389 return isa<UndefValue>(II->getArgOperand(1));
390
391 // Assumptions are dead if their condition is trivially true. Guards on
392 // true are operationally no-ops. In the future we can consider more
393 // sophisticated tradeoffs for guards considering potential for check
394 // widening, but for now we keep things simple.
395 if (II->getIntrinsicID() == Intrinsic::assume ||
396 II->getIntrinsicID() == Intrinsic::experimental_guard) {
397 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
398 return !Cond->isZero();
399
400 return false;
401 }
402 }
403
404 if (isAllocLikeFn(I, TLI))
405 return true;
406
407 if (CallInst *CI = isFreeCall(I, TLI))
408 if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
409 return C->isNullValue() || isa<UndefValue>(C);
410
411 if (CallSite CS = CallSite(I))
412 if (isMathLibCallNoop(CS, TLI))
413 return true;
414
415 return false;
416}
417
418/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
419/// trivially dead instruction, delete it. If that makes any of its operands
420/// trivially dead, delete them too, recursively. Return true if any
421/// instructions were deleted.
422bool
423llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
424 const TargetLibraryInfo *TLI) {
425 Instruction *I = dyn_cast<Instruction>(V);
426 if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
427 return false;
428
429 SmallVector<Instruction*, 16> DeadInsts;
430 DeadInsts.push_back(I);
431
432 do {
433 I = DeadInsts.pop_back_val();
434 salvageDebugInfo(*I);
435
436 // Null out all of the instruction's operands to see if any operand becomes
437 // dead as we go.
438 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
439 Value *OpV = I->getOperand(i);
440 I->setOperand(i, nullptr);
441
442 if (!OpV->use_empty()) continue;
443
444 // If the operand is an instruction that became dead as we nulled out the
445 // operand, and if it is 'trivially' dead, delete it in a future loop
446 // iteration.
447 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
448 if (isInstructionTriviallyDead(OpI, TLI))
449 DeadInsts.push_back(OpI);
450 }
451
452 I->eraseFromParent();
453 } while (!DeadInsts.empty());
454
455 return true;
456}
457
458/// areAllUsesEqual - Check whether the uses of a value are all the same.
459/// This is similar to Instruction::hasOneUse() except this will also return
460/// true when there are no uses or multiple uses that all refer to the same
461/// value.
462static bool areAllUsesEqual(Instruction *I) {
463 Value::user_iterator UI = I->user_begin();
464 Value::user_iterator UE = I->user_end();
465 if (UI == UE)
466 return true;
467
468 User *TheUse = *UI;
469 for (++UI; UI != UE; ++UI) {
470 if (*UI != TheUse)
471 return false;
472 }
473 return true;
474}
475
476/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
477/// dead PHI node, due to being a def-use chain of single-use nodes that
478/// either forms a cycle or is terminated by a trivially dead instruction,
479/// delete it. If that makes any of its operands trivially dead, delete them
480/// too, recursively. Return true if a change was made.
481bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
482 const TargetLibraryInfo *TLI) {
483 SmallPtrSet<Instruction*, 4> Visited;
484 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
485 I = cast<Instruction>(*I->user_begin())) {
486 if (I->use_empty())
487 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
488
489 // If we find an instruction more than once, we're on a cycle that
490 // won't prove fruitful.
491 if (!Visited.insert(I).second) {
492 // Break the cycle and delete the instruction and its operands.
493 I->replaceAllUsesWith(UndefValue::get(I->getType()));
494 (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
495 return true;
496 }
497 }
498 return false;
499}
500
501static bool
502simplifyAndDCEInstruction(Instruction *I,
503 SmallSetVector<Instruction *, 16> &WorkList,
504 const DataLayout &DL,
505 const TargetLibraryInfo *TLI) {
506 if (isInstructionTriviallyDead(I, TLI)) {
507 salvageDebugInfo(*I);
508
509 // Null out all of the instruction's operands to see if any operand becomes
510 // dead as we go.
511 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
512 Value *OpV = I->getOperand(i);
513 I->setOperand(i, nullptr);
514
515 if (!OpV->use_empty() || I == OpV)
516 continue;
517
518 // If the operand is an instruction that became dead as we nulled out the
519 // operand, and if it is 'trivially' dead, delete it in a future loop
520 // iteration.
521 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
522 if (isInstructionTriviallyDead(OpI, TLI))
523 WorkList.insert(OpI);
524 }
525
526 I->eraseFromParent();
527
528 return true;
529 }
530
531 if (Value *SimpleV = SimplifyInstruction(I, DL)) {
532 // Add the users to the worklist. CAREFUL: an instruction can use itself,
533 // in the case of a phi node.
534 for (User *U : I->users()) {
535 if (U != I) {
536 WorkList.insert(cast<Instruction>(U));
537 }
538 }
539
540 // Replace the instruction with its simplified value.
541 bool Changed = false;
542 if (!I->use_empty()) {
543 I->replaceAllUsesWith(SimpleV);
544 Changed = true;
545 }
546 if (isInstructionTriviallyDead(I, TLI)) {
547 I->eraseFromParent();
548 Changed = true;
549 }
550 return Changed;
551 }
552 return false;
553}
554
555/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
556/// simplify any instructions in it and recursively delete dead instructions.
557///
558/// This returns true if it changed the code, note that it can delete
559/// instructions in other blocks as well in this block.
560bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
561 const TargetLibraryInfo *TLI) {
562 bool MadeChange = false;
563 const DataLayout &DL = BB->getModule()->getDataLayout();
564
565#ifndef NDEBUG
566 // In debug builds, ensure that the terminator of the block is never replaced
567 // or deleted by these simplifications. The idea of simplification is that it
568 // cannot introduce new instructions, and there is no way to replace the
569 // terminator of a block without introducing a new instruction.
570 AssertingVH<Instruction> TerminatorVH(&BB->back());
571#endif
572
573 SmallSetVector<Instruction *, 16> WorkList;
574 // Iterate over the original function, only adding insts to the worklist
575 // if they actually need to be revisited. This avoids having to pre-init
576 // the worklist with the entire function's worth of instructions.
577 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
578 BI != E;) {
579 assert(!BI->isTerminator())(static_cast <bool> (!BI->isTerminator()) ? void (0)
: __assert_fail ("!BI->isTerminator()", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 579, __extension__ __PRETTY_FUNCTION__))
;
580 Instruction *I = &*BI;
581 ++BI;
582
583 // We're visiting this instruction now, so make sure it's not in the
584 // worklist from an earlier visit.
585 if (!WorkList.count(I))
586 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
587 }
588
589 while (!WorkList.empty()) {
590 Instruction *I = WorkList.pop_back_val();
591 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
592 }
593 return MadeChange;
594}
595
596//===----------------------------------------------------------------------===//
597// Control Flow Graph Restructuring.
598//
599
600/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
601/// method is called when we're about to delete Pred as a predecessor of BB. If
602/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
603///
604/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
605/// nodes that collapse into identity values. For example, if we have:
606/// x = phi(1, 0, 0, 0)
607/// y = and x, z
608///
609/// .. and delete the predecessor corresponding to the '1', this will attempt to
610/// recursively fold the and to 0.
611void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
612 DeferredDominance *DDT) {
613 // This only adjusts blocks with PHI nodes.
614 if (!isa<PHINode>(BB->begin()))
615 return;
616
617 // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
618 // them down. This will leave us with single entry phi nodes and other phis
619 // that can be removed.
620 BB->removePredecessor(Pred, true);
621
622 WeakTrackingVH PhiIt = &BB->front();
623 while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
624 PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
625 Value *OldPhiIt = PhiIt;
626
627 if (!recursivelySimplifyInstruction(PN))
628 continue;
629
630 // If recursive simplification ended up deleting the next PHI node we would
631 // iterate to, then our iterator is invalid, restart scanning from the top
632 // of the block.
633 if (PhiIt != OldPhiIt) PhiIt = &BB->front();
634 }
635 if (DDT)
636 DDT->deleteEdge(Pred, BB);
637}
638
639/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
640/// predecessor is known to have one successor (DestBB!). Eliminate the edge
641/// between them, moving the instructions in the predecessor into DestBB and
642/// deleting the predecessor block.
643void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, DominatorTree *DT,
644 DeferredDominance *DDT) {
645 assert(!(DT && DDT) && "Cannot call with both DT and DDT.")(static_cast <bool> (!(DT && DDT) && "Cannot call with both DT and DDT."
) ? void (0) : __assert_fail ("!(DT && DDT) && \"Cannot call with both DT and DDT.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 645, __extension__ __PRETTY_FUNCTION__))
;
646
647 // If BB has single-entry PHI nodes, fold them.
648 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
649 Value *NewVal = PN->getIncomingValue(0);
650 // Replace self referencing PHI with undef, it must be dead.
651 if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
652 PN->replaceAllUsesWith(NewVal);
653 PN->eraseFromParent();
654 }
655
656 BasicBlock *PredBB = DestBB->getSinglePredecessor();
657 assert(PredBB && "Block doesn't have a single predecessor!")(static_cast <bool> (PredBB && "Block doesn't have a single predecessor!"
) ? void (0) : __assert_fail ("PredBB && \"Block doesn't have a single predecessor!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 657, __extension__ __PRETTY_FUNCTION__))
;
658
659 bool ReplaceEntryBB = false;
660 if (PredBB == &DestBB->getParent()->getEntryBlock())
661 ReplaceEntryBB = true;
662
663 // Deferred DT update: Collect all the edges that enter PredBB. These
664 // dominator edges will be redirected to DestBB.
665 std::vector <DominatorTree::UpdateType> Updates;
666 if (DDT && !ReplaceEntryBB) {
667 Updates.reserve(1 +
668 (2 * std::distance(pred_begin(PredBB), pred_end(PredBB))));
669 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
670 for (auto I = pred_begin(PredBB), E = pred_end(PredBB); I != E; ++I) {
671 Updates.push_back({DominatorTree::Delete, *I, PredBB});
672 // This predecessor of PredBB may already have DestBB as a successor.
673 if (llvm::find(successors(*I), DestBB) == succ_end(*I))
674 Updates.push_back({DominatorTree::Insert, *I, DestBB});
675 }
676 }
677
678 // Zap anything that took the address of DestBB. Not doing this will give the
679 // address an invalid value.
680 if (DestBB->hasAddressTaken()) {
681 BlockAddress *BA = BlockAddress::get(DestBB);
682 Constant *Replacement =
683 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
684 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
685 BA->getType()));
686 BA->destroyConstant();
687 }
688
689 // Anything that branched to PredBB now branches to DestBB.
690 PredBB->replaceAllUsesWith(DestBB);
691
692 // Splice all the instructions from PredBB to DestBB.
693 PredBB->getTerminator()->eraseFromParent();
694 DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
695
696 // If the PredBB is the entry block of the function, move DestBB up to
697 // become the entry block after we erase PredBB.
698 if (ReplaceEntryBB)
699 DestBB->moveAfter(PredBB);
700
701 if (DT) {
702 // For some irreducible CFG we end up having forward-unreachable blocks
703 // so check if getNode returns a valid node before updating the domtree.
704 if (DomTreeNode *DTN = DT->getNode(PredBB)) {
705 BasicBlock *PredBBIDom = DTN->getIDom()->getBlock();
706 DT->changeImmediateDominator(DestBB, PredBBIDom);
707 DT->eraseNode(PredBB);
708 }
709 }
710
711 if (DDT) {
712 DDT->deleteBB(PredBB); // Deferred deletion of BB.
713 if (ReplaceEntryBB)
714 // The entry block was removed and there is no external interface for the
715 // dominator tree to be notified of this change. In this corner-case we
716 // recalculate the entire tree.
717 DDT->recalculate(*(DestBB->getParent()));
718 else
719 DDT->applyUpdates(Updates);
720 } else {
721 PredBB->eraseFromParent(); // Nuke BB.
722 }
723}
724
725/// CanMergeValues - Return true if we can choose one of these values to use
726/// in place of the other. Note that we will always choose the non-undef
727/// value to keep.
728static bool CanMergeValues(Value *First, Value *Second) {
729 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
730}
731
732/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
733/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
734///
735/// Assumption: Succ is the single successor for BB.
736static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
737 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!")(static_cast <bool> (*succ_begin(BB) == Succ &&
"Succ is not successor of BB!") ? void (0) : __assert_fail (
"*succ_begin(BB) == Succ && \"Succ is not successor of BB!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 737, __extension__ __PRETTY_FUNCTION__))
;
738
739 DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Looking to fold " << BB->
getName() << " into " << Succ->getName() <<
"\n"; } } while (false)
740 << Succ->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Looking to fold " << BB->
getName() << " into " << Succ->getName() <<
"\n"; } } while (false)
;
741 // Shortcut, if there is only a single predecessor it must be BB and merging
742 // is always safe
743 if (Succ->getSinglePredecessor()) return true;
744
745 // Make a list of the predecessors of BB
746 SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
747
748 // Look at all the phi nodes in Succ, to see if they present a conflict when
749 // merging these blocks
750 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
751 PHINode *PN = cast<PHINode>(I);
752
753 // If the incoming value from BB is again a PHINode in
754 // BB which has the same incoming value for *PI as PN does, we can
755 // merge the phi nodes and then the blocks can still be merged
756 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
757 if (BBPN && BBPN->getParent() == BB) {
758 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
759 BasicBlock *IBB = PN->getIncomingBlock(PI);
760 if (BBPreds.count(IBB) &&
761 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
762 PN->getIncomingValue(PI))) {
763 DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
764 << Succ->getName() << " is conflicting with "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
765 << BBPN->getName() << " with regard to common predecessor "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
766 << IBB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
;
767 return false;
768 }
769 }
770 } else {
771 Value* Val = PN->getIncomingValueForBlock(BB);
772 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
773 // See if the incoming value for the common predecessor is equal to the
774 // one for BB, in which case this phi node will not prevent the merging
775 // of the block.
776 BasicBlock *IBB = PN->getIncomingBlock(PI);
777 if (BBPreds.count(IBB) &&
778 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
779 DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
780 << Succ->getName() << " is conflicting with regard to common "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
781 << "predecessor " << IBB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
;
782 return false;
783 }
784 }
785 }
786 }
787
788 return true;
789}
790
791using PredBlockVector = SmallVector<BasicBlock *, 16>;
792using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
793
794/// \brief Determines the value to use as the phi node input for a block.
795///
796/// Select between \p OldVal any value that we know flows from \p BB
797/// to a particular phi on the basis of which one (if either) is not
798/// undef. Update IncomingValues based on the selected value.
799///
800/// \param OldVal The value we are considering selecting.
801/// \param BB The block that the value flows in from.
802/// \param IncomingValues A map from block-to-value for other phi inputs
803/// that we have examined.
804///
805/// \returns the selected value.
806static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
807 IncomingValueMap &IncomingValues) {
808 if (!isa<UndefValue>(OldVal)) {
809 assert((!IncomingValues.count(BB) ||(static_cast <bool> ((!IncomingValues.count(BB) || IncomingValues
.find(BB)->second == OldVal) && "Expected OldVal to match incoming value from BB!"
) ? void (0) : __assert_fail ("(!IncomingValues.count(BB) || IncomingValues.find(BB)->second == OldVal) && \"Expected OldVal to match incoming value from BB!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 811, __extension__ __PRETTY_FUNCTION__))
810 IncomingValues.find(BB)->second == OldVal) &&(static_cast <bool> ((!IncomingValues.count(BB) || IncomingValues
.find(BB)->second == OldVal) && "Expected OldVal to match incoming value from BB!"
) ? void (0) : __assert_fail ("(!IncomingValues.count(BB) || IncomingValues.find(BB)->second == OldVal) && \"Expected OldVal to match incoming value from BB!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 811, __extension__ __PRETTY_FUNCTION__))
811 "Expected OldVal to match incoming value from BB!")(static_cast <bool> ((!IncomingValues.count(BB) || IncomingValues
.find(BB)->second == OldVal) && "Expected OldVal to match incoming value from BB!"
) ? void (0) : __assert_fail ("(!IncomingValues.count(BB) || IncomingValues.find(BB)->second == OldVal) && \"Expected OldVal to match incoming value from BB!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 811, __extension__ __PRETTY_FUNCTION__))
;
812
813 IncomingValues.insert(std::make_pair(BB, OldVal));
814 return OldVal;
815 }
816
817 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
818 if (It != IncomingValues.end()) return It->second;
819
820 return OldVal;
821}
822
823/// \brief Create a map from block to value for the operands of a
824/// given phi.
825///
826/// Create a map from block to value for each non-undef value flowing
827/// into \p PN.
828///
829/// \param PN The phi we are collecting the map for.
830/// \param IncomingValues [out] The map from block to value for this phi.
831static void gatherIncomingValuesToPhi(PHINode *PN,
832 IncomingValueMap &IncomingValues) {
833 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
834 BasicBlock *BB = PN->getIncomingBlock(i);
835 Value *V = PN->getIncomingValue(i);
836
837 if (!isa<UndefValue>(V))
838 IncomingValues.insert(std::make_pair(BB, V));
839 }
840}
841
842/// \brief Replace the incoming undef values to a phi with the values
843/// from a block-to-value map.
844///
845/// \param PN The phi we are replacing the undefs in.
846/// \param IncomingValues A map from block to value.
847static void replaceUndefValuesInPhi(PHINode *PN,
848 const IncomingValueMap &IncomingValues) {
849 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
850 Value *V = PN->getIncomingValue(i);
851
852 if (!isa<UndefValue>(V)) continue;
853
854 BasicBlock *BB = PN->getIncomingBlock(i);
855 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
856 if (It == IncomingValues.end()) continue;
857
858 PN->setIncomingValue(i, It->second);
859 }
860}
861
862/// \brief Replace a value flowing from a block to a phi with
863/// potentially multiple instances of that value flowing from the
864/// block's predecessors to the phi.
865///
866/// \param BB The block with the value flowing into the phi.
867/// \param BBPreds The predecessors of BB.
868/// \param PN The phi that we are updating.
869static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
870 const PredBlockVector &BBPreds,
871 PHINode *PN) {
872 Value *OldVal = PN->removeIncomingValue(BB, false);
873 assert(OldVal && "No entry in PHI for Pred BB!")(static_cast <bool> (OldVal && "No entry in PHI for Pred BB!"
) ? void (0) : __assert_fail ("OldVal && \"No entry in PHI for Pred BB!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 873, __extension__ __PRETTY_FUNCTION__))
;
874
875 IncomingValueMap IncomingValues;
876
877 // We are merging two blocks - BB, and the block containing PN - and
878 // as a result we need to redirect edges from the predecessors of BB
879 // to go to the block containing PN, and update PN
880 // accordingly. Since we allow merging blocks in the case where the
881 // predecessor and successor blocks both share some predecessors,
882 // and where some of those common predecessors might have undef
883 // values flowing into PN, we want to rewrite those values to be
884 // consistent with the non-undef values.
885
886 gatherIncomingValuesToPhi(PN, IncomingValues);
887
888 // If this incoming value is one of the PHI nodes in BB, the new entries
889 // in the PHI node are the entries from the old PHI.
890 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
891 PHINode *OldValPN = cast<PHINode>(OldVal);
892 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
893 // Note that, since we are merging phi nodes and BB and Succ might
894 // have common predecessors, we could end up with a phi node with
895 // identical incoming branches. This will be cleaned up later (and
896 // will trigger asserts if we try to clean it up now, without also
897 // simplifying the corresponding conditional branch).
898 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
899 Value *PredVal = OldValPN->getIncomingValue(i);
900 Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
901 IncomingValues);
902
903 // And add a new incoming value for this predecessor for the
904 // newly retargeted branch.
905 PN->addIncoming(Selected, PredBB);
906 }
907 } else {
908 for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
909 // Update existing incoming values in PN for this
910 // predecessor of BB.
911 BasicBlock *PredBB = BBPreds[i];
912 Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
913 IncomingValues);
914
915 // And add a new incoming value for this predecessor for the
916 // newly retargeted branch.
917 PN->addIncoming(Selected, PredBB);
918 }
919 }
920
921 replaceUndefValuesInPhi(PN, IncomingValues);
922}
923
924/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
925/// unconditional branch, and contains no instructions other than PHI nodes,
926/// potential side-effect free intrinsics and the branch. If possible,
927/// eliminate BB by rewriting all the predecessors to branch to the successor
928/// block and return true. If we can't transform, return false.
929bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
930 DeferredDominance *DDT) {
931 assert(BB != &BB->getParent()->getEntryBlock() &&(static_cast <bool> (BB != &BB->getParent()->
getEntryBlock() && "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!"
) ? void (0) : __assert_fail ("BB != &BB->getParent()->getEntryBlock() && \"TryToSimplifyUncondBranchFromEmptyBlock called on entry block!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 932, __extension__ __PRETTY_FUNCTION__))
932 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!")(static_cast <bool> (BB != &BB->getParent()->
getEntryBlock() && "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!"
) ? void (0) : __assert_fail ("BB != &BB->getParent()->getEntryBlock() && \"TryToSimplifyUncondBranchFromEmptyBlock called on entry block!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 932, __extension__ __PRETTY_FUNCTION__))
;
933
934 // We can't eliminate infinite loops.
935 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
936 if (BB == Succ) return false;
937
938 // Check to see if merging these blocks would cause conflicts for any of the
939 // phi nodes in BB or Succ. If not, we can safely merge.
940 if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
941
942 // Check for cases where Succ has multiple predecessors and a PHI node in BB
943 // has uses which will not disappear when the PHI nodes are merged. It is
944 // possible to handle such cases, but difficult: it requires checking whether
945 // BB dominates Succ, which is non-trivial to calculate in the case where
946 // Succ has multiple predecessors. Also, it requires checking whether
947 // constructing the necessary self-referential PHI node doesn't introduce any
948 // conflicts; this isn't too difficult, but the previous code for doing this
949 // was incorrect.
950 //
951 // Note that if this check finds a live use, BB dominates Succ, so BB is
952 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
953 // folding the branch isn't profitable in that case anyway.
954 if (!Succ->getSinglePredecessor()) {
955 BasicBlock::iterator BBI = BB->begin();
956 while (isa<PHINode>(*BBI)) {
957 for (Use &U : BBI->uses()) {
958 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
959 if (PN->getIncomingBlock(U) != BB)
960 return false;
961 } else {
962 return false;
963 }
964 }
965 ++BBI;
966 }
967 }
968
969 DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Killing Trivial BB: \n" <<
*BB; } } while (false)
;
970
971 std::vector<DominatorTree::UpdateType> Updates;
972 if (DDT) {
973 Updates.reserve(1 + (2 * std::distance(pred_begin(BB), pred_end(BB))));
974 Updates.push_back({DominatorTree::Delete, BB, Succ});
975 // All predecessors of BB will be moved to Succ.
976 for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
977 Updates.push_back({DominatorTree::Delete, *I, BB});
978 // This predecessor of BB may already have Succ as a successor.
979 if (llvm::find(successors(*I), Succ) == succ_end(*I))
980 Updates.push_back({DominatorTree::Insert, *I, Succ});
981 }
982 }
983
984 if (isa<PHINode>(Succ->begin())) {
985 // If there is more than one pred of succ, and there are PHI nodes in
986 // the successor, then we need to add incoming edges for the PHI nodes
987 //
988 const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
989
990 // Loop over all of the PHI nodes in the successor of BB.
991 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
992 PHINode *PN = cast<PHINode>(I);
993
994 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
995 }
996 }
997
998 if (Succ->getSinglePredecessor()) {
999 // BB is the only predecessor of Succ, so Succ will end up with exactly
1000 // the same predecessors BB had.
1001
1002 // Copy over any phi, debug or lifetime instruction.
1003 BB->getTerminator()->eraseFromParent();
1004 Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1005 BB->getInstList());
1006 } else {
1007 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1008 // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1009 assert(PN->use_empty() && "There shouldn't be any uses here!")(static_cast <bool> (PN->use_empty() && "There shouldn't be any uses here!"
) ? void (0) : __assert_fail ("PN->use_empty() && \"There shouldn't be any uses here!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 1009, __extension__ __PRETTY_FUNCTION__))
;
1010 PN->eraseFromParent();
1011 }
1012 }
1013
1014 // If the unconditional branch we replaced contains llvm.loop metadata, we
1015 // add the metadata to the branch instructions in the predecessors.
1016 unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
1017 Instruction *TI = BB->getTerminator();
1018 if (TI)
1019 if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1020 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1021 BasicBlock *Pred = *PI;
1022 Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1023 }
1024
1025 // Everything that jumped to BB now goes to Succ.
1026 BB->replaceAllUsesWith(Succ);
1027 if (!Succ->hasName()) Succ->takeName(BB);
1028
1029 if (DDT) {
1030 DDT->deleteBB(BB); // Deferred deletion of the old basic block.
1031 DDT->applyUpdates(Updates);
1032 } else {
1033 BB->eraseFromParent(); // Delete the old basic block.
1034 }
1035 return true;
1036}
1037
1038/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
1039/// nodes in this block. This doesn't try to be clever about PHI nodes
1040/// which differ only in the order of the incoming values, but instcombine
1041/// orders them so it usually won't matter.
1042bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1043 // This implementation doesn't currently consider undef operands
1044 // specially. Theoretically, two phis which are identical except for
1045 // one having an undef where the other doesn't could be collapsed.
1046
1047 struct PHIDenseMapInfo {
1048 static PHINode *getEmptyKey() {
1049 return DenseMapInfo<PHINode *>::getEmptyKey();
1050 }
1051
1052 static PHINode *getTombstoneKey() {
1053 return DenseMapInfo<PHINode *>::getTombstoneKey();
1054 }
1055
1056 static unsigned getHashValue(PHINode *PN) {
1057 // Compute a hash value on the operands. Instcombine will likely have
1058 // sorted them, which helps expose duplicates, but we have to check all
1059 // the operands to be safe in case instcombine hasn't run.
1060 return static_cast<unsigned>(hash_combine(
1061 hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1062 hash_combine_range(PN->block_begin(), PN->block_end())));
1063 }
1064
1065 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1066 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
1067 RHS == getEmptyKey() || RHS == getTombstoneKey())
1068 return LHS == RHS;
1069 return LHS->isIdenticalTo(RHS);
1070 }
1071 };
1072
1073 // Set of unique PHINodes.
1074 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1075
1076 // Examine each PHI.
1077 bool Changed = false;
1078 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1079 auto Inserted = PHISet.insert(PN);
1080 if (!Inserted.second) {
1081 // A duplicate. Replace this PHI with its duplicate.
1082 PN->replaceAllUsesWith(*Inserted.first);
1083 PN->eraseFromParent();
1084 Changed = true;
1085
1086 // The RAUW can change PHIs that we already visited. Start over from the
1087 // beginning.
1088 PHISet.clear();
1089 I = BB->begin();
1090 }
1091 }
1092
1093 return Changed;
1094}
1095
1096/// enforceKnownAlignment - If the specified pointer points to an object that
1097/// we control, modify the object's alignment to PrefAlign. This isn't
1098/// often possible though. If alignment is important, a more reliable approach
1099/// is to simply align all global variables and allocation instructions to
1100/// their preferred alignment from the beginning.
1101static unsigned enforceKnownAlignment(Value *V, unsigned Align,
1102 unsigned PrefAlign,
1103 const DataLayout &DL) {
1104 assert(PrefAlign > Align)(static_cast <bool> (PrefAlign > Align) ? void (0) :
__assert_fail ("PrefAlign > Align", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 1104, __extension__ __PRETTY_FUNCTION__))
;
1105
1106 V = V->stripPointerCasts();
1107
1108 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1109 // TODO: ideally, computeKnownBits ought to have used
1110 // AllocaInst::getAlignment() in its computation already, making
1111 // the below max redundant. But, as it turns out,
1112 // stripPointerCasts recurses through infinite layers of bitcasts,
1113 // while computeKnownBits is not allowed to traverse more than 6
1114 // levels.
1115 Align = std::max(AI->getAlignment(), Align);
1116 if (PrefAlign <= Align)
1117 return Align;
1118
1119 // If the preferred alignment is greater than the natural stack alignment
1120 // then don't round up. This avoids dynamic stack realignment.
1121 if (DL.exceedsNaturalStackAlignment(PrefAlign))
1122 return Align;
1123 AI->setAlignment(PrefAlign);
1124 return PrefAlign;
1125 }
1126
1127 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1128 // TODO: as above, this shouldn't be necessary.
1129 Align = std::max(GO->getAlignment(), Align);
1130 if (PrefAlign <= Align)
1131 return Align;
1132
1133 // If there is a large requested alignment and we can, bump up the alignment
1134 // of the global. If the memory we set aside for the global may not be the
1135 // memory used by the final program then it is impossible for us to reliably
1136 // enforce the preferred alignment.
1137 if (!GO->canIncreaseAlignment())
1138 return Align;
1139
1140 GO->setAlignment(PrefAlign);
1141 return PrefAlign;
1142 }
1143
1144 return Align;
1145}
1146
1147unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
1148 const DataLayout &DL,
1149 const Instruction *CxtI,
1150 AssumptionCache *AC,
1151 const DominatorTree *DT) {
1152 assert(V->getType()->isPointerTy() &&(static_cast <bool> (V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!") ? void (0) :
__assert_fail ("V->getType()->isPointerTy() && \"getOrEnforceKnownAlignment expects a pointer!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 1153, __extension__ __PRETTY_FUNCTION__))
1153 "getOrEnforceKnownAlignment expects a pointer!")(static_cast <bool> (V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!") ? void (0) :
__assert_fail ("V->getType()->isPointerTy() && \"getOrEnforceKnownAlignment expects a pointer!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 1153, __extension__ __PRETTY_FUNCTION__))
;
1154
1155 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1156 unsigned TrailZ = Known.countMinTrailingZeros();
1157
1158 // Avoid trouble with ridiculously large TrailZ values, such as
1159 // those computed from a null pointer.
1160 TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT8 - 1));
1161
1162 unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);
1163
1164 // LLVM doesn't support alignments larger than this currently.
1165 Align = std::min(Align, +Value::MaximumAlignment);
1166
1167 if (PrefAlign > Align)
1168 Align = enforceKnownAlignment(V, Align, PrefAlign, DL);
1169
1170 // We don't need to make any adjustment.
1171 return Align;
1172}
1173
1174///===---------------------------------------------------------------------===//
1175/// Dbg Intrinsic utilities
1176///
1177
1178/// See if there is a dbg.value intrinsic for DIVar before I.
1179static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
1180 Instruction *I) {
1181 // Since we can't guarantee that the original dbg.declare instrinsic
1182 // is removed by LowerDbgDeclare(), we need to make sure that we are
1183 // not inserting the same dbg.value intrinsic over and over.
1184 BasicBlock::InstListType::iterator PrevI(I);
1185 if (PrevI != I->getParent()->getInstList().begin()) {
1186 --PrevI;
1187 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
1188 if (DVI->getValue() == I->getOperand(0) &&
1189 DVI->getVariable() == DIVar &&
1190 DVI->getExpression() == DIExpr)
1191 return true;
1192 }
1193 return false;
1194}
1195
1196/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1197static bool PhiHasDebugValue(DILocalVariable *DIVar,
1198 DIExpression *DIExpr,
1199 PHINode *APN) {
1200 // Since we can't guarantee that the original dbg.declare instrinsic
1201 // is removed by LowerDbgDeclare(), we need to make sure that we are
1202 // not inserting the same dbg.value intrinsic over and over.
1203 SmallVector<DbgValueInst *, 1> DbgValues;
1204 findDbgValues(DbgValues, APN);
1205 for (auto *DVI : DbgValues) {
1206 assert(DVI->getValue() == APN)(static_cast <bool> (DVI->getValue() == APN) ? void (
0) : __assert_fail ("DVI->getValue() == APN", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 1206, __extension__ __PRETTY_FUNCTION__))
;
1207 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1208 return true;
1209 }
1210 return false;
1211}
1212
1213/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1214/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1215void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1216 StoreInst *SI, DIBuilder &Builder) {
1217 assert(DII->isAddressOfVariable())(static_cast <bool> (DII->isAddressOfVariable()) ? void
(0) : __assert_fail ("DII->isAddressOfVariable()", "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 1217, __extension__ __PRETTY_FUNCTION__))
;
1218 auto *DIVar = DII->getVariable();
1219 assert(DIVar && "Missing variable")(static_cast <bool> (DIVar && "Missing variable"
) ? void (0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 1219, __extension__ __PRETTY_FUNCTION__))
;
1220 auto *DIExpr = DII->getExpression();
1221 Value *DV = SI->getOperand(0);
1222
1223 // If an argument is zero extended then use argument directly. The ZExt
1224 // may be zapped by an optimization pass in future.
1225 Argument *ExtendedArg = nullptr;
1226 if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
1227 ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
1228 if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
1229 ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
1230 if (ExtendedArg) {
1231 // If this DII was already describing only a fragment of a variable, ensure
1232 // that fragment is appropriately narrowed here.
1233 // But if a fragment wasn't used, describe the value as the original
1234 // argument (rather than the zext or sext) so that it remains described even
1235 // if the sext/zext is optimized away. This widens the variable description,
1236 // leaving it up to the consumer to know how the smaller value may be
1237 // represented in a larger register.
1238 if (auto Fragment = DIExpr->getFragmentInfo()) {
1239 unsigned FragmentOffset = Fragment->OffsetInBits;
1240 SmallVector<uint64_t, 3> Ops(DIExpr->elements_begin(),
1241 DIExpr->elements_end() - 3);
1242 Ops.push_back(dwarf::DW_OP_LLVM_fragment);
1243 Ops.push_back(FragmentOffset);
1244 const DataLayout &DL = DII->getModule()->getDataLayout();
1245 Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType()));
1246 DIExpr = Builder.createExpression(Ops);
1247 }
1248 DV = ExtendedArg;
1249 }
1250 if (!LdStHasDebugValue(DIVar, DIExpr, SI))
1251 Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, DII->getDebugLoc(),
1252 SI);
1253}
1254
1255/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1256/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1257void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1258 LoadInst *LI, DIBuilder &Builder) {
1259 auto *DIVar = DII->getVariable();
1260 auto *DIExpr = DII->getExpression();
1261 assert(DIVar && "Missing variable")(static_cast <bool> (DIVar && "Missing variable"
) ? void (0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 1261, __extension__ __PRETTY_FUNCTION__))
;
1262
1263 if (LdStHasDebugValue(DIVar, DIExpr, LI))
1264 return;
1265
1266 // We are now tracking the loaded value instead of the address. In the
1267 // future if multi-location support is added to the IR, it might be
1268 // preferable to keep tracking both the loaded value and the original
1269 // address in case the alloca can not be elided.
1270 Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1271 LI, DIVar, DIExpr, DII->getDebugLoc(), (Instruction *)nullptr);
1272 DbgValue->insertAfter(LI);
1273}
1274
1275/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1276/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1277void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1278 PHINode *APN, DIBuilder &Builder) {
1279 auto *DIVar = DII->getVariable();
1280 auto *DIExpr = DII->getExpression();
1281 assert(DIVar && "Missing variable")(static_cast <bool> (DIVar && "Missing variable"
) ? void (0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 1281, __extension__ __PRETTY_FUNCTION__))
;
1282
1283 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1284 return;
1285
1286 BasicBlock *BB = APN->getParent();
1287 auto InsertionPt = BB->getFirstInsertionPt();
1288
1289 // The block may be a catchswitch block, which does not have a valid
1290 // insertion point.
1291 // FIXME: Insert dbg.value markers in the successors when appropriate.
1292 if (InsertionPt != BB->end())
1293 Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, DII->getDebugLoc(),
1294 &*InsertionPt);
1295}
1296
1297/// Determine whether this alloca is either a VLA or an array.
1298static bool isArray(AllocaInst *AI) {
1299 return AI->isArrayAllocation() ||
1300 AI->getType()->getElementType()->isArrayTy();
1301}
1302
1303/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1304/// of llvm.dbg.value intrinsics.
1305bool llvm::LowerDbgDeclare(Function &F) {
1306 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1307 SmallVector<DbgDeclareInst *, 4> Dbgs;
1308 for (auto &FI : F)
1309 for (Instruction &BI : FI)
1310 if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1311 Dbgs.push_back(DDI);
1312
1313 if (Dbgs.empty())
1314 return false;
1315
1316 for (auto &I : Dbgs) {
1317 DbgDeclareInst *DDI = I;
1318 AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1319 // If this is an alloca for a scalar variable, insert a dbg.value
1320 // at each load and store to the alloca and erase the dbg.declare.
1321 // The dbg.values allow tracking a variable even if it is not
1322 // stored on the stack, while the dbg.declare can only describe
1323 // the stack slot (and at a lexical-scope granularity). Later
1324 // passes will attempt to elide the stack slot.
1325 if (!AI || isArray(AI))
1326 continue;
1327
1328 // A volatile load/store means that the alloca can't be elided anyway.
1329 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1330 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1331 return LI->isVolatile();
1332 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1333 return SI->isVolatile();
1334 return false;
1335 }))
1336 continue;
1337
1338 for (auto &AIUse : AI->uses()) {
1339 User *U = AIUse.getUser();
1340 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1341 if (AIUse.getOperandNo() == 1)
1342 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1343 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1344 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1345 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1346 // This is a call by-value or some other instruction that
1347 // takes a pointer to the variable. Insert a *value*
1348 // intrinsic that describes the alloca.
1349 DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(),
1350 DDI->getExpression(), DDI->getDebugLoc(),
1351 CI);
1352 }
1353 }
1354 DDI->eraseFromParent();
1355 }
1356 return true;
1357}
1358
1359/// Propagate dbg.value intrinsics through the newly inserted PHIs.
void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
                                    SmallVectorImpl<PHINode *> &InsertedPHIs) {
  assert(BB && "No BasicBlock to clone dbg.value(s) from.");
  if (InsertedPHIs.size() == 0)
    return;

  // Map existing PHI nodes to their dbg.values.
  ValueToValueMapTy DbgValueMap;
  for (auto &I : *BB) {
    if (auto DbgII = dyn_cast<DbgInfoIntrinsic>(&I)) {
      if (auto *Loc = dyn_cast_or_null<PHINode>(DbgII->getVariableLocation()))
        DbgValueMap.insert({Loc, DbgII});
    }
  }
  if (DbgValueMap.size() == 0)
    return;

  // Then iterate through the new PHIs and look to see if they use one of the
  // previously mapped PHIs. If so, insert a new dbg.value intrinsic that will
  // propagate the info through the new PHI.
  LLVMContext &C = BB->getContext();
  for (auto PHI : InsertedPHIs) {
    BasicBlock *Parent = PHI->getParent();
    // Avoid inserting an intrinsic into an EH block.
    if (Parent->getFirstNonPHI()->isEHPad())
      continue;
    auto PhiMAV = MetadataAsValue::get(C, ValueAsMetadata::get(PHI));
    for (auto VI : PHI->operand_values()) {
      auto V = DbgValueMap.find(VI);
      if (V != DbgValueMap.end()) {
        auto *DbgII = cast<DbgInfoIntrinsic>(V->second);
        // Clone the original intrinsic and retarget it at the new PHI rather
        // than the incoming value it previously described.
        Instruction *NewDbgII = DbgII->clone();
        NewDbgII->setOperand(0, PhiMAV);
        // Place the clone at the first non-PHI position of the new PHI's
        // block so it is valid IR (intrinsics may not precede PHIs).
        auto InsertionPt = Parent->getFirstInsertionPt();
        assert(InsertionPt != Parent->end() && "Ill-formed basic block");
        NewDbgII->insertBefore(&*InsertionPt);
      }
    }
  }
}
1400
1401/// Finds all intrinsics declaring local variables as living in the memory that
1402/// 'V' points to. This may include a mix of dbg.declare and
1403/// dbg.addr intrinsics.
1404TinyPtrVector<DbgInfoIntrinsic *> llvm::FindDbgAddrUses(Value *V) {
1405 auto *L = LocalAsMetadata::getIfExists(V);
1406 if (!L)
1407 return {};
1408 auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L);
1409 if (!MDV)
1410 return {};
1411
1412 TinyPtrVector<DbgInfoIntrinsic *> Declares;
1413 for (User *U : MDV->users()) {
1414 if (auto *DII = dyn_cast<DbgInfoIntrinsic>(U))
1415 if (DII->isAddressOfVariable())
1416 Declares.push_back(DII);
1417 }
1418
1419 return Declares;
1420}
1421
1422void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
1423 if (auto *L = LocalAsMetadata::getIfExists(V))
1424 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1425 for (User *U : MDV->users())
1426 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
1427 DbgValues.push_back(DVI);
1428}
1429
1430void llvm::findDbgUsers(SmallVectorImpl<DbgInfoIntrinsic *> &DbgUsers,
1431 Value *V) {
1432 if (auto *L = LocalAsMetadata::getIfExists(V))
1433 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1434 for (User *U : MDV->users())
1435 if (DbgInfoIntrinsic *DII = dyn_cast<DbgInfoIntrinsic>(U))
1436 DbgUsers.push_back(DII);
1437}
1438
bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
                             Instruction *InsertBefore, DIBuilder &Builder,
                             bool DerefBefore, int Offset, bool DerefAfter) {
  // Rewrite every dbg.declare/dbg.addr describing Address so it describes
  // NewAddress instead, prepending the requested deref/offset operations to
  // its DIExpression. Returns true if at least one intrinsic was rewritten.
  auto DbgAddrs = FindDbgAddrUses(Address);
  for (DbgInfoIntrinsic *DII : DbgAddrs) {
    DebugLoc Loc = DII->getDebugLoc();
    auto *DIVar = DII->getVariable();
    auto *DIExpr = DII->getExpression();
    assert(DIVar && "Missing variable");
    DIExpr = DIExpression::prepend(DIExpr, DerefBefore, Offset, DerefAfter);
    // Insert llvm.dbg.declare immediately after InsertBefore, and remove old
    // llvm.dbg.declare.
    Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
    // If the intrinsic being erased *is* the insertion point, advance the
    // insertion point first so later iterations don't use a dangling pointer.
    if (DII == InsertBefore)
      InsertBefore = &*std::next(InsertBefore->getIterator());
    DII->eraseFromParent();
  }
  return !DbgAddrs.empty();
}
1458
1459bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1460 DIBuilder &Builder, bool DerefBefore,
1461 int Offset, bool DerefAfter) {
1462 return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
1463 DerefBefore, Offset, DerefAfter);
1464}
1465
// Rewrite a single alloca-based llvm.dbg.value so it describes NewAddress,
// folding Offset into the expression immediately after the leading
// DW_OP_deref. The old intrinsic is erased on success; unrecognized
// expressions are left untouched.
static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
                                        DIBuilder &Builder, int Offset) {
  DebugLoc Loc = DVI->getDebugLoc();
  auto *DIVar = DVI->getVariable();
  auto *DIExpr = DVI->getExpression();
  assert(DIVar && "Missing variable");

  // This is an alloca-based llvm.dbg.value. The first thing it should do with
  // the alloca pointer is dereference it. Otherwise we don't know how to handle
  // it and give up.
  if (!DIExpr || DIExpr->getNumElements() < 1 ||
      DIExpr->getElement(0) != dwarf::DW_OP_deref)
    return;

  // Insert the offset immediately after the first deref.
  // We could just change the offset argument of dbg.value, but it's unsigned...
  if (Offset) {
    SmallVector<uint64_t, 4> Ops;
    Ops.push_back(dwarf::DW_OP_deref);
    DIExpression::appendOffset(Ops, Offset);
    // Keep the rest of the original expression after the deref+offset.
    Ops.append(DIExpr->elements_begin() + 1, DIExpr->elements_end());
    DIExpr = Builder.createExpression(Ops);
  }

  Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
  DVI->eraseFromParent();
}
1493
1494void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1495 DIBuilder &Builder, int Offset) {
1496 if (auto *L = LocalAsMetadata::getIfExists(AI))
1497 if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1498 for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
1499 Use &U = *UI++;
1500 if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1501 replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1502 }
1503}
1504
// Try to preserve the debug info attached to I before I is deleted, by
// rewriting each dbg intrinsic that refers to I so it describes I's operand
// with an equivalent DIExpression. Handles no-op casts, constant GEPs,
// integer binops with a constant RHS, and loads.
void llvm::salvageDebugInfo(Instruction &I) {
  // This function is hot. An early check to determine whether the instruction
  // has any metadata to save allows it to return earlier on average.
  if (!I.isUsedByMetadata())
    return;

  SmallVector<DbgInfoIntrinsic *, 1> DbgUsers;
  findDbgUsers(DbgUsers, &I);
  if (DbgUsers.empty())
    return;

  auto &M = *I.getModule();
  auto &DL = M.getDataLayout();

  // Wrap a Value in the MetadataAsValue form debug intrinsics expect.
  auto wrapMD = [&](Value *V) {
    return MetadataAsValue::get(I.getContext(), ValueAsMetadata::get(V));
  };

  // Redirect DII at I's operand 0 and prepend Ops to its expression, marking
  // the result as a stack value since we now *compute* the variable's value.
  auto doSalvage = [&](DbgInfoIntrinsic *DII, SmallVectorImpl<uint64_t> &Ops) {
    auto *DIExpr = DII->getExpression();
    DIExpr = DIExpression::doPrepend(DIExpr, Ops,
                                     DIExpression::WithStackValue);
    DII->setOperand(0, wrapMD(I.getOperand(0)));
    DII->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
    DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
  };

  // Salvage by folding a constant byte offset into the expression.
  auto applyOffset = [&](DbgInfoIntrinsic *DII, uint64_t Offset) {
    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    doSalvage(DII, Ops);
  };

  // Salvage by folding an arbitrary DWARF opcode sequence into the expression.
  auto applyOps = [&](DbgInfoIntrinsic *DII,
                      std::initializer_list<uint64_t> Opcodes) {
    SmallVector<uint64_t, 8> Ops(Opcodes);
    doSalvage(DII, Ops);
  };

  if (auto *CI = dyn_cast<CastInst>(&I)) {
    if (!CI->isNoopCast(DL))
      return;

    // No-op casts are irrelevant for debug info.
    MetadataAsValue *CastSrc = wrapMD(I.getOperand(0));
    for (auto *DII : DbgUsers) {
      DII->setOperand(0, CastSrc);
      DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
    }
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    unsigned BitWidth =
        M.getDataLayout().getIndexSizeInBits(GEP->getPointerAddressSpace());
    // Rewrite a constant GEP into a DIExpression. Since we are performing
    // arithmetic to compute the variable's *value* in the DIExpression, we
    // need to mark the expression with a DW_OP_stack_value.
    APInt Offset(BitWidth, 0);
    if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset))
      for (auto *DII : DbgUsers)
        applyOffset(DII, Offset.getSExtValue());
  } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
    // Rewrite binary operations with constant integer operands.
    auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1));
    if (!ConstInt || ConstInt->getBitWidth() > 64)
      return;

    uint64_t Val = ConstInt->getSExtValue();
    for (auto *DII : DbgUsers) {
      switch (BI->getOpcode()) {
      case Instruction::Add:
        applyOffset(DII, Val);
        break;
      case Instruction::Sub:
        applyOffset(DII, -int64_t(Val));
        break;
      case Instruction::Mul:
        applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_mul});
        break;
      case Instruction::SDiv:
        applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_div});
        break;
      case Instruction::SRem:
        applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_mod});
        break;
      case Instruction::Or:
        applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_or});
        break;
      case Instruction::And:
        applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_and});
        break;
      case Instruction::Xor:
        applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_xor});
        break;
      case Instruction::Shl:
        applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_shl});
        break;
      case Instruction::LShr:
        applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_shr});
        break;
      case Instruction::AShr:
        applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_shra});
        break;
      default:
        // TODO: Salvage constants from each kind of binop we know about.
        continue;
      }
    }
  } else if (isa<LoadInst>(&I)) {
    MetadataAsValue *AddrMD = wrapMD(I.getOperand(0));
    for (auto *DII : DbgUsers) {
      // Rewrite the load into DW_OP_deref.
      auto *DIExpr = DII->getExpression();
      DIExpr = DIExpression::prepend(DIExpr, DIExpression::WithDeref);
      DII->setOperand(0, AddrMD);
      DII->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
      DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
    }
  }
}
1623
// Remove every instruction in BB except the terminator and any EH pads /
// token producers, replacing removed values with undef. Returns the number
// of deleted instructions, not counting debug intrinsics.
unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
  unsigned NumDeadInst = 0;
  // Delete the instructions backwards, as it has a reduced likelihood of
  // having to update as many def-use and use-def chains.
  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
  while (EndInst != &BB->front()) {
    // Delete the next to last instruction.
    Instruction *Inst = &*--EndInst->getIterator();
    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
      Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
      // Keep EH pads and token producers: token users cannot take undef.
      EndInst = Inst;
      continue;
    }
    if (!isa<DbgInfoIntrinsic>(Inst))
      ++NumDeadInst;
    Inst->eraseFromParent();
  }
  return NumDeadInst;
}
1644
// Replace I and everything after it in its block with an UnreachableInst
// (optionally preceded by a call to llvm.trap), detaching the block from its
// successors. Returns the number of instructions removed.
unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
                                   bool PreserveLCSSA, DeferredDominance *DDT) {
  BasicBlock *BB = I->getParent();
  std::vector <DominatorTree::UpdateType> Updates;

  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  if (DDT)
    Updates.reserve(BB->getTerminator()->getNumSuccessors());
  for (BasicBlock *Successor : successors(BB)) {
    Successor->removePredecessor(BB, PreserveLCSSA);
    if (DDT)
      Updates.push_back({DominatorTree::Delete, BB, Successor});
  }
  // Insert a call to llvm.trap right before this. This turns the undefined
  // behavior into a hard fail instead of falling through into random code.
  if (UseLLVMTrap) {
    Function *TrapFn =
      Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
    CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
    CallTrap->setDebugLoc(I->getDebugLoc());
  }
  new UnreachableInst(I->getContext(), I);

  // All instructions after this are dead.
  unsigned NumInstrsRemoved = 0;
  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
  while (BBI != BBE) {
    // RAUW with undef before erasing so no dangling uses remain.
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BB->getInstList().erase(BBI++);
    ++NumInstrsRemoved;
  }
  if (DDT)
    DDT->applyUpdates(Updates);
  return NumInstrsRemoved;
}
1682
1683/// changeToCall - Convert the specified invoke into a normal call.
1684static void changeToCall(InvokeInst *II, DeferredDominance *DDT = nullptr) {
1685 SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
1686 SmallVector<OperandBundleDef, 1> OpBundles;
1687 II->getOperandBundlesAsDefs(OpBundles);
1688 CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
1689 "", II);
1690 NewCall->takeName(II);
1691 NewCall->setCallingConv(II->getCallingConv());
1692 NewCall->setAttributes(II->getAttributes());
1693 NewCall->setDebugLoc(II->getDebugLoc());
1694 II->replaceAllUsesWith(NewCall);
1695
1696 // Follow the call by a branch to the normal destination.
1697 BasicBlock *NormalDestBB = II->getNormalDest();
1698 BranchInst::Create(NormalDestBB, II);
1699
1700 // Update PHI nodes in the unwind destination
1701 BasicBlock *BB = II->getParent();
1702 BasicBlock *UnwindDestBB = II->getUnwindDest();
1703 UnwindDestBB->removePredecessor(BB);
1704 II->eraseFromParent();
1705 if (DDT)
1706 DDT->deleteEdge(BB, UnwindDestBB);
1707}
1708
// Convert the call CI into an invoke whose unwind destination is UnwindEdge,
// splitting CI's block so the new invoke can terminate it. Returns the
// continuation ("normal destination") block that now holds the instructions
// which followed CI.
BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
                                                   BasicBlock *UnwindEdge) {
  BasicBlock *BB = CI->getParent();

  // Convert this function call into an invoke instruction. First, split the
  // basic block.
  BasicBlock *Split =
      BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

  // Delete the unconditional branch inserted by splitBasicBlock
  BB->getInstList().pop_back();

  // Create the new invoke instruction.
  SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
  SmallVector<OperandBundleDef, 1> OpBundles;

  CI->getOperandBundlesAsDefs(OpBundles);

  // Note: we're round tripping operand bundles through memory here, and that
  // can potentially be avoided with a cleverer API design that we do not have
  // as of this time.

  InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge,
                                      InvokeArgs, OpBundles, CI->getName(), BB);
  II->setDebugLoc(CI->getDebugLoc());
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());

  // Make sure that anything using the call now uses the invoke! This also
  // updates the CallGraph if present, because it uses a WeakTrackingVH.
  CI->replaceAllUsesWith(II);

  // Delete the original call
  Split->getInstList().pop_front();
  return Split;
}
1745
// Walk the CFG from the entry block, recording every reachable block in
// \p Reachable and simplifying obviously-dead code along the way (bogus
// assumes/guards, calls through null, no-return calls, stores to null/undef,
// nounwind invokes, duplicate catchpads, constant-foldable terminators).
// Returns true if any IR was changed.
static bool markAliveBlocks(Function &F,
                            SmallPtrSetImpl<BasicBlock*> &Reachable,
                            DeferredDominance *DDT = nullptr) {
  SmallVector<BasicBlock*, 128> Worklist;
  BasicBlock *BB = &F.front();
  Worklist.push_back(BB);
  Reachable.insert(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts. The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (Instruction &I : *BB) {
      // Assumptions that are known to be false are equivalent to unreachable.
      // Also, if the condition is undefined, then we make the choice most
      // beneficial to the optimizer, and choose that to also be unreachable.
      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() == Intrinsic::assume) {
          if (match(II->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(II, false, false, DDT);
            Changed = true;
            break;
          }
        }

        if (II->getIntrinsicID() == Intrinsic::experimental_guard) {
          // A call to the guard intrinsic bails out of the current compilation
          // unit if the predicate passed to it is false. If the predicate is a
          // constant false, then we know the guard will bail out of the current
          // compile unconditionally, so all code following it is dead.
          //
          // Note: unlike in llvm.assume, it is not "obviously profitable" for
          // guards to treat `undef` as `false` since a guard on `undef` can
          // still be useful for widening.
          if (match(II->getArgOperand(0), m_Zero()))
            if (!isa<UnreachableInst>(II->getNextNode())) {
              changeToUnreachable(II->getNextNode(), /*UseLLVMTrap=*/false,
                                  false, DDT);
              Changed = true;
              break;
            }
        }
      }

      if (auto *CI = dyn_cast<CallInst>(&I)) {
        Value *Callee = CI->getCalledValue();
        // Calling through null or undef is immediate UB.
        if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
          changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DDT);
          Changed = true;
          break;
        }
        if (CI->doesNotReturn()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it. Make sure there isn't *already* one there
          // though.
          if (!isa<UnreachableInst>(CI->getNextNode())) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(CI->getNextNode(), false, false, DDT);
            Changed = true;
          }
          break;
        }
      }

      // Store to undef and store to null are undefined and used to signal that
      // they should be changed to unreachable by passes that can't modify the
      // CFG.
      if (auto *SI = dyn_cast<StoreInst>(&I)) {
        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(1);

        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             SI->getPointerAddressSpace() == 0)) {
          changeToUnreachable(SI, true, false, DDT);
          Changed = true;
          break;
        }
      }
    }

    TerminatorInst *Terminator = BB->getTerminator();
    if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
      // Turn invokes that call 'nounwind' functions into ordinary calls.
      Value *Callee = II->getCalledValue();
      if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
        changeToUnreachable(II, true, false, DDT);
        Changed = true;
      } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
        if (II->use_empty() && II->onlyReadsMemory()) {
          // jump to the normal destination branch.
          BasicBlock *NormalDestBB = II->getNormalDest();
          BasicBlock *UnwindDestBB = II->getUnwindDest();
          BranchInst::Create(NormalDestBB, II);
          UnwindDestBB->removePredecessor(II->getParent());
          II->eraseFromParent();
          if (DDT)
            DDT->deleteEdge(BB, UnwindDestBB);
        } else
          changeToCall(II, DDT);
        Changed = true;
      }
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
      // Remove catchpads which cannot be reached.
      struct CatchPadDenseMapInfo {
        static CatchPadInst *getEmptyKey() {
          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
        }

        static CatchPadInst *getTombstoneKey() {
          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
        }

        static unsigned getHashValue(CatchPadInst *CatchPad) {
          return static_cast<unsigned>(hash_combine_range(
              CatchPad->value_op_begin(), CatchPad->value_op_end()));
        }

        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
          // Sentinel keys must compare by pointer identity only.
          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
              RHS == getEmptyKey() || RHS == getTombstoneKey())
            return LHS == RHS;
          return LHS->isIdenticalTo(RHS);
        }
      };

      // Set of unique CatchPads.
      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
          HandlerSet;
      detail::DenseSetEmpty Empty;
      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
                                             E = CatchSwitch->handler_end();
           I != E; ++I) {
        BasicBlock *HandlerBB = *I;
        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
        if (!HandlerSet.insert({CatchPad, Empty}).second) {
          // Duplicate handler: drop it and re-examine the shifted slot.
          CatchSwitch->removeHandler(I);
          --I;
          --E;
          Changed = true;
        }
      }
    }

    Changed |= ConstantFoldTerminator(BB, true, nullptr, DDT);
    for (BasicBlock *Successor : successors(BB))
      if (Reachable.insert(Successor).second)
        Worklist.push_back(Successor);
  } while (!Worklist.empty());
  return Changed;
}
1903
// Replace BB's terminator with an equivalent one that has no unwind
// successor, and detach BB from its former unwind destination.
void llvm::removeUnwindEdge(BasicBlock *BB, DeferredDominance *DDT) {
  TerminatorInst *TI = BB->getTerminator();

  if (auto *II = dyn_cast<InvokeInst>(TI)) {
    // An invoke without its unwind edge is just a call.
    changeToCall(II, DDT);
    return;
  }

  TerminatorInst *NewTI;
  BasicBlock *UnwindDest;

  if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
    // cleanupret -> cleanupret that unwinds to the caller (null destination).
    NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
    UnwindDest = CRI->getUnwindDest();
  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
    // Rebuild the catchswitch with the same handlers but no unwind edge.
    auto *NewCatchSwitch = CatchSwitchInst::Create(
        CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
        CatchSwitch->getName(), CatchSwitch);
    for (BasicBlock *PadBB : CatchSwitch->handlers())
      NewCatchSwitch->addHandler(PadBB);

    NewTI = NewCatchSwitch;
    UnwindDest = CatchSwitch->getUnwindDest();
  } else {
    llvm_unreachable("Could not find unwind successor");
  }

  NewTI->takeName(TI);
  NewTI->setDebugLoc(TI->getDebugLoc());
  UnwindDest->removePredecessor(BB);
  TI->replaceAllUsesWith(NewTI);
  TI->eraseFromParent();
  if (DDT)
    DDT->deleteEdge(BB, UnwindDest);
}
1939
1940/// removeUnreachableBlocks - Remove blocks that are not reachable, even
1941/// if they are in a dead cycle. Return true if a change was made, false
1942/// otherwise. If `LVI` is passed, this function preserves LazyValueInfo
1943/// after modifying the CFG.
bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI,
                                   DeferredDominance *DDT) {
  SmallPtrSet<BasicBlock*, 16> Reachable;
  bool Changed = markAliveBlocks(F, Reachable, DDT);

  // If there are unreachable blocks in the CFG...
  if (Reachable.size() == F.size())
    return Changed;

  assert(Reachable.size() < F.size());
  NumRemoved += F.size()-Reachable.size();

  // Loop over all of the basic blocks that are not reachable, dropping all of
  // their internal references. Update DDT and LVI if available.
  std::vector <DominatorTree::UpdateType> Updates;
  // The entry block (F.begin()) is always reachable, so both passes start at
  // the second block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ++I) {
    auto *BB = &*I;
    if (Reachable.count(BB))
      continue;
    for (BasicBlock *Successor : successors(BB)) {
      // Reachable successors must forget this dead predecessor's PHI entries.
      if (Reachable.count(Successor))
        Successor->removePredecessor(BB);
      if (DDT)
        Updates.push_back({DominatorTree::Delete, BB, Successor});
    }
    if (LVI)
      LVI->eraseBlock(BB);
    BB->dropAllReferences();
  }

  // Second pass: delete the dead blocks (or defer deletion through DDT).
  for (Function::iterator I = ++F.begin(); I != F.end();) {
    auto *BB = &*I;
    if (Reachable.count(BB)) {
      ++I;
      continue;
    }
    if (DDT) {
      DDT->deleteBB(BB); // deferred deletion of BB.
      ++I;
    } else {
      I = F.getBasicBlockList().erase(I);
    }
  }

  if (DDT)
    DDT->applyUpdates(Updates);
  return true;
}
1992
// Merge J's metadata into K, where K is replacing both instructions. Kinds
// not listed in KnownIDs are dropped from K; each known kind is combined
// with the most conservative merge available for it.
void llvm::combineMetadata(Instruction *K, const Instruction *J,
                           ArrayRef<unsigned> KnownIDs) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  K->dropUnknownNonDebugMetadata(KnownIDs);
  K->getAllMetadataOtherThanDebugLoc(Metadata);
  for (const auto &MD : Metadata) {
    unsigned Kind = MD.first;
    MDNode *JMD = J->getMetadata(Kind);
    MDNode *KMD = MD.second;

    switch (Kind) {
    default:
      K->setMetadata(Kind, nullptr); // Remove unknown metadata
      break;
    case LLVMContext::MD_dbg:
      llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
    case LLVMContext::MD_tbaa:
      K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
      break;
    case LLVMContext::MD_alias_scope:
      K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
      break;
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_mem_parallel_loop_access:
      K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
      break;
    case LLVMContext::MD_range:
      K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
      break;
    case LLVMContext::MD_fpmath:
      K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
      break;
    case LLVMContext::MD_invariant_load:
      // Only set the !invariant.load if it is present in both instructions.
      K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_nonnull:
      // Only set the !nonnull if it is present in both instructions.
      K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_invariant_group:
      // Preserve !invariant.group in K.
      break;
    case LLVMContext::MD_align:
      K->setMetadata(Kind,
        MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
      break;
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      K->setMetadata(Kind,
        MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
      break;
    }
  }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J - even when they are different.
  // Also make sure that K is load or store - f.e. combining bitcast with load
  // could produce bitcast with invariant.group metadata, which is invalid.
  // FIXME: we should try to preserve both invariant.group md if they are
  // different, but right now instruction can only have one invariant.group.
  if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(K) || isa<StoreInst>(K))
      K->setMetadata(LLVMContext::MD_invariant_group, JMD);
}
2057
2058void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J) {
2059 unsigned KnownIDs[] = {
2060 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
2061 LLVMContext::MD_noalias, LLVMContext::MD_range,
2062 LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
2063 LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2064 LLVMContext::MD_dereferenceable,
2065 LLVMContext::MD_dereferenceable_or_null};
2066 combineMetadata(K, J, KnownIDs);
2067}
2068
// Rewrite every use of From approved by Dominates(Root, Use) to use To
// instead. Returns the number of uses rewritten.
template <typename RootType, typename DominatesFn>
static unsigned replaceDominatedUsesWith(Value *From, Value *To,
                                         const RootType &Root,
                                         const DominatesFn &Dominates) {
  assert(From->getType() == To->getType());

  unsigned Count = 0;
  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE;) {
    // Advance before mutating: U.set() unlinks U from From's use list.
    Use &U = *UI++;
    if (!Dominates(Root, U))
      continue;
    U.set(To);
    DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as "
                 << *To << " in " << *U << "\n");
    ++Count;
  }
  return Count;
}
2088
2089unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
2090 assert(From->getType() == To->getType())(static_cast <bool> (From->getType() == To->getType
()) ? void (0) : __assert_fail ("From->getType() == To->getType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/lib/Transforms/Utils/Local.cpp"
, 2090, __extension__ __PRETTY_FUNCTION__))
;
2091 auto *BB = From->getParent();
2092 unsigned Count = 0;
2093
2094 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2095 UI != UE;) {
2096 Use &U = *UI++;
2097 auto *I = cast<Instruction>(U.getUser());
2098 if (I->getParent() == BB)
2099 continue;
2100 U.set(To);
2101 ++Count;
2102 }
2103 return Count;
2104}
2105
2106unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2107 DominatorTree &DT,
2108 const BasicBlockEdge &Root) {
2109 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2110 return DT.dominates(Root, U);
2111 };
2112 return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2113}
2114
2115unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2116 DominatorTree &DT,
2117 const BasicBlock *BB) {
2118 auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) {
2119 auto *I = cast<Instruction>(U.getUser())->getParent();
2120 return DT.properlyDominates(BB, I);
2121 };
2122 return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates);
2123}
2124
2125bool llvm::callsGCLeafFunction(ImmutableCallSite CS,
2126 const TargetLibraryInfo &TLI) {
2127 // Check if the function is specifically marked as a gc leaf function.
2128 if (CS.hasFnAttr("gc-leaf-function"))
2129 return true;
2130 if (const Function *F = CS.getCalledFunction()) {
2131 if (F->hasFnAttribute("gc-leaf-function"))
2132 return true;
2133
2134 if (auto IID = F->getIntrinsicID())
2135 // Most LLVM intrinsics do not take safepoints.
2136 return IID != Intrinsic::experimental_gc_statepoint &&
2137 IID != Intrinsic::experimental_deoptimize;
2138 }
2139
2140 // Lib calls can be materialized by some passes, and won't be
2141 // marked as 'gc-leaf-function.' All available Libcalls are
2142 // GC-leaf.
2143 LibFunc LF;
2144 if (TLI.getLibFunc(CS, LF)) {
2145 return TLI.has(LF);
2146 }
2147
2148 return false;
2149}
2150
/// Copy the !nonnull metadata node \p N from \p OldLI onto \p NewLI,
/// translating it into !range metadata when the new load produces an integer
/// instead of a pointer.
void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
                               LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();

  // This only directly applies if the new type is also a pointer.
  if (NewTy->isPointerTy()) {
    NewLI.setMetadata(LLVMContext::MD_nonnull, N);
    return;
  }

  // The only other translation we can do is to integral loads with !range
  // metadata.
  if (!NewTy->isIntegerTy())
    return;

  // Express "non-null" as the wrapping integer range [null+1, null), i.e.
  // every value of the integer type except the integer image of the null
  // pointer.
  MDBuilder MDB(NewLI.getContext());
  const Value *Ptr = OldLI.getPointerOperand();
  auto *ITy = cast<IntegerType>(NewTy);
  auto *NullInt = ConstantExpr::getPtrToInt(
      ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
  auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
  NewLI.setMetadata(LLVMContext::MD_range,
                    MDB.createRange(NonNullInt, NullInt));
}
2175
/// Propagate the !range metadata node \p N from \p OldLI onto \p NewLI when
/// the new load produces a pointer: if the old range excluded zero, the new
/// pointer result is known non-null.
void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
                             MDNode *N, LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();

  // Give up unless it is converted to a pointer where there is a single very
  // valuable mapping we can do reliably.
  // FIXME: It would be nice to propagate this in more ways, but the type
  // conversions make it hard.
  if (!NewTy->isPointerTy())
    return;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(NewTy);
  // A range that does not contain 0 means the loaded pointer cannot be null;
  // attach an (empty-operand) !nonnull node in that case.
  if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
    MDNode *NN = MDNode::get(OldLI.getContext(), None);
    NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
  }
}
2193
namespace {

/// A potential constituent of a bitreverse or bswap expression. See
/// collectBitParts for a fuller explanation.
struct BitPart {
  // A BitPart starts with every bit's provenance zero-initialized; callers
  // fill in Provenance entries (or leave them as 0) after construction.
  BitPart(Value *P, unsigned BW) : Provider(P) {
    Provenance.resize(BW);
  }

  /// The Value that this is a bitreverse/bswap of.
  Value *Provider;

  /// The "provenance" of each bit. Provenance[A] = B means that bit A
  /// in Provider becomes bit B in the result of this expression.
  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.

  /// Sentinel meaning "this result bit is not driven by any Provider bit"
  /// (e.g. it was shifted in or masked off).
  enum { Unset = -1 };
};

} // end anonymous namespace
2214
/// Analyze the specified subexpression and see if it is capable of providing
/// pieces of a bswap or bitreverse. The subexpression provides a potential
/// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
/// the output of the expression came from a corresponding bit in some other
/// value. This function is recursive, and the end result is a mapping of
/// bitnumber to bitnumber. It is the caller's responsibility to validate that
/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
///
/// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
/// that the expression deposits the low byte of %X into the high byte of the
/// result and that all other bits are zero. This expression is accepted and a
/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
/// [0-7].
///
/// To avoid revisiting values, the BitPart results are memoized into the
/// provided map. To avoid unnecessary copying of BitParts, BitParts are
/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
/// store BitParts objects, not pointers. As we need the concept of a nullptr
/// BitParts (Value has been analyzed and the analysis failed), we use an
/// Optional type instead to provide the same functionality.
///
/// Because we pass around references into \c BPS, we must use a container that
/// does not invalidate internal references (std::map instead of DenseMap).
static const Optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, Optional<BitPart>> &BPS) {
  // Return the memoized result if we have already analyzed this value
  // (a None entry records a previous analysis failure).
  auto I = BPS.find(V);
  if (I != BPS.end())
    return I->second;

  // Insert a None placeholder up front; recursive self-references will then
  // terminate by hitting the memoized failure above.
  auto &Result = BPS[V] = None;
  auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    if (I->getOpcode() == Instruction::Or) {
      auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
                                MatchBitReversals, BPS);
      auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
                                MatchBitReversals, BPS);
      if (!A || !B)
        return Result;

      // Try and merge the two together.
      if (!A->Provider || A->Provider != B->Provider)
        return Result;

      Result = BitPart(A->Provider, BitWidth);
      for (unsigned i = 0; i < A->Provenance.size(); ++i) {
        // Both sides claim to drive the same result bit from different
        // provider bits: the 'or' would combine live bits, so give up.
        if (A->Provenance[i] != BitPart::Unset &&
            B->Provenance[i] != BitPart::Unset &&
            A->Provenance[i] != B->Provenance[i])
          return Result = None;

        // Otherwise take whichever side actually drives the bit.
        if (A->Provenance[i] == BitPart::Unset)
          Result->Provenance[i] = B->Provenance[i];
        else
          Result->Provenance[i] = A->Provenance[i];
      }

      return Result;
    }

    // If this is a logical shift by a constant, recurse then shift the result.
    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
      unsigned BitShift =
          cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is defined.
      if (BitShift > BitWidth)
        return Result;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;
      Result = Res;

      // Perform the "shift" on BitProvenance: shl moves provenance toward the
      // high end (filling the low end with Unset), lshr the other way round.
      auto &P = Result->Provenance;
      if (I->getOpcode() == Instruction::Shl) {
        P.erase(std::prev(P.end(), BitShift), P.end());
        P.insert(P.begin(), BitShift, BitPart::Unset);
      } else {
        P.erase(P.begin(), std::next(P.begin(), BitShift));
        P.insert(P.end(), BitShift, BitPart::Unset);
      }

      return Result;
    }

    // If this is a logical 'and' with a mask that clears bits, recurse then
    // unset the appropriate bits.
    if (I->getOpcode() == Instruction::And &&
        isa<ConstantInt>(I->getOperand(1))) {
      APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();

      // Check that the mask allows a multiple of 8 bits for a bswap, for an
      // early exit.
      unsigned NumMaskedBits = AndMask.countPopulation();
      if (!MatchBitReversals && NumMaskedBits % 8 != 0)
        return Result;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;
      Result = Res;

      for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1)
        // If the AndMask is zero for this bit, clear the bit.
        if ((AndMask & Bit) == 0)
          Result->Provenance[i] = BitPart::Unset;
      return Result;
    }

    // If this is a zext instruction zero extend the result.
    if (I->getOpcode() == Instruction::ZExt) {
      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;

      // Keep the narrow source's provenance and mark the newly created high
      // bits as Unset (they are known zero).
      Result = BitPart(Res->Provider, BitWidth);
      auto NarrowBitWidth =
          cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth();
      for (unsigned i = 0; i < NarrowBitWidth; ++i)
        Result->Provenance[i] = Res->Provenance[i];
      for (unsigned i = NarrowBitWidth; i < BitWidth; ++i)
        Result->Provenance[i] = BitPart::Unset;
      return Result;
    }
  }

  // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
  // the input value to the bswap/bitreverse.
  Result = BitPart(V, BitWidth);
  for (unsigned i = 0; i < BitWidth; ++i)
    Result->Provenance[i] = i;
  return Result;
}
2356
/// Return true if moving bit \p From to bit \p To (in a \p BitWidth wide
/// value) is what a bswap would do: the bit keeps its position within its
/// byte while the bytes themselves are mirrored end-for-end.
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // The bit must occupy the same slot inside its byte before and after.
  if ((From & 7u) != (To & 7u))
    return false;
  // Compare byte positions: a byte swap maps byte i to byte (N - 1 - i).
  const unsigned FromByte = From / 8;
  const unsigned ToByte = To / 8;
  const unsigned NumBytes = BitWidth / 8;
  return FromByte == NumBytes - ToByte - 1;
}
2367
/// Return true if moving bit \p From to bit \p To (in a \p BitWidth wide
/// value) matches a bit reversal, which maps bit i to bit (BitWidth - 1 - i).
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  const unsigned MirroredTo = BitWidth - To - 1;
  return From == MirroredTo;
}
2372
/// Try to match the 'or'-rooted expression \p I against the bswap or
/// bitreverse idioms and, on success, materialize the corresponding intrinsic
/// call before \p I, recording every created instruction in \p InsertedInsts.
/// Returns true if instructions were inserted. Note: \p I itself is not
/// erased or RAUW'd here; that is left to the caller.
bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  if (Operator::getOpcode(I) != Instruction::Or)
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  IntegerType *ITy = dyn_cast<IntegerType>(I->getType());
  if (!ITy || ITy->getBitWidth() > 128)
    return false;  // Can't do vectors or integers > 128 bits.
  unsigned BW = ITy->getBitWidth();

  // If the sole user is a trunc, only the truncated-to width actually
  // matters; match against that narrower demanded width.
  unsigned DemandedBW = BW;
  IntegerType *DemandedTy = ITy;
  if (I->hasOneUse()) {
    if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) {
      DemandedTy = cast<IntegerType>(Trunc->getType());
      DemandedBW = DemandedTy->getBitWidth();
    }
  }

  // Try to find all the pieces corresponding to the bswap.
  std::map<Value *, Optional<BitPart>> BPS;
  auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS);
  if (!Res)
    return false;
  auto &BitProvenance = Res->Provenance;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true;
  for (unsigned i = 0; i < DemandedBW; ++i) {
    OKForBSwap &=
        bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW);
    OKForBitReverse &=
        bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW);
  }

  Intrinsic::ID Intrin;
  if (OKForBSwap && MatchBSwaps)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse && MatchBitReversals)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  if (ITy != DemandedTy) {
    // Narrow-width match: truncate the provider if needed, call the narrow
    // intrinsic, then zext back to the original type.
    Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
    Value *Provider = Res->Provider;
    IntegerType *ProviderTy = cast<IntegerType>(Provider->getType());
    // We may need to truncate the provider.
    if (DemandedTy != ProviderTy) {
      auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy,
                                     "trunc", I);
      InsertedInsts.push_back(Trunc);
      Provider = Trunc;
    }
    auto *CI = CallInst::Create(F, Provider, "rev", I);
    InsertedInsts.push_back(CI);
    auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I);
    InsertedInsts.push_back(ExtInst);
    return true;
  }

  // Full-width match: a single intrinsic call on the provider suffices.
  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy);
  InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I));
  return true;
}
2441
2442// CodeGen has special handling for some string functions that may replace
2443// them with target-specific intrinsics. Since that'd skip our interceptors
2444// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
2445// we mark affected calls as NoBuiltin, which will disable optimization
2446// in CodeGen.
2447void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
2448 CallInst *CI, const TargetLibraryInfo *TLI) {
2449 Function *F = CI->getCalledFunction();
2450 LibFunc Func;
2451 if (F && !F->hasLocalLinkage() && F->hasName() &&
2452 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
2453 !F->doesNotAccessMemory())
2454 CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
2455}
2456
2457bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
2458 // We can't have a PHI with a metadata type.
2459 if (I->getOperand(OpIdx)->getType()->isMetadataTy())
2460 return false;
2461
2462 // Early exit.
2463 if (!isa<Constant>(I->getOperand(OpIdx)))
2464 return true;
2465
2466 switch (I->getOpcode()) {
2467 default:
2468 return true;
2469 case Instruction::Call:
2470 case Instruction::Invoke:
2471 // Can't handle inline asm. Skip it.
2472 if (isa<InlineAsm>(ImmutableCallSite(I).getCalledValue()))
2473 return false;
2474 // Many arithmetic intrinsics have no issue taking a
2475 // variable, however it's hard to distingish these from
2476 // specials such as @llvm.frameaddress that require a constant.
2477 if (isa<IntrinsicInst>(I))
2478 return false;
2479
2480 // Constant bundle operands may need to retain their constant-ness for
2481 // correctness.
2482 if (ImmutableCallSite(I).isBundleOperand(OpIdx))
2483 return false;
2484 return true;
2485 case Instruction::ShuffleVector:
2486 // Shufflevector masks are constant.
2487 return OpIdx != 2;
2488 case Instruction::Switch:
2489 case Instruction::ExtractValue:
2490 // All operands apart from the first are constant.
2491 return OpIdx == 0;
2492 case Instruction::InsertValue:
2493 // All operands apart from the first and the second are constant.
2494 return OpIdx < 2;
2495 case Instruction::Alloca:
2496 // Static allocas (constant size in the entry block) are handled by
2497 // prologue/epilogue insertion so they're free anyway. We definitely don't
2498 // want to make them non-constant.
2499 return !dyn_cast<AllocaInst>(I)->isStaticAlloca();
2500 case Instruction::GetElementPtr:
2501 if (OpIdx == 0)
2502 return true;
2503 gep_type_iterator It = gep_type_begin(I);
2504 for (auto E = std::next(It, OpIdx); It != E; ++It)
2505 if (It.isStruct())
2506 return false;
2507 return true;
2508 }
2509}

/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes the class definitions of all of the subclasses of the
11// Instruction class. This is meant to be an easy way to get access to all
12// instruction subclasses.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_IR_INSTRUCTIONS_H
17#define LLVM_IR_INSTRUCTIONS_H
18
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/OperandTraits.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Use.h"
38#include "llvm/IR/User.h"
39#include "llvm/IR/Value.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/ErrorHandling.h"
43#include <cassert>
44#include <cstddef>
45#include <cstdint>
46#include <iterator>
47
48namespace llvm {
49
50class APInt;
51class ConstantInt;
52class DataLayout;
53class LLVMContext;
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
/// an instruction to allocate memory on the stack
class AllocaInst : public UnaryInstruction {
  Type *AllocatedType;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace,
                      Value *ArraySize = nullptr,
                      const Twine &Name = "",
                      Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  // The low 5 bits of SubclassData hold log2(alignment) + 1; an encoded 0
  // decodes to alignment 0, i.e. "no alignment specified".
  unsigned getAlignment() const {
    return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
  }
  void setAlignment(unsigned Align);

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  // Stored in SubclassData bit 5 (mask 32).
  bool isUsedWithInAlloca() const {
    return getSubclassDataFromInstruction() & 32;
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
                               (V ? 32 : 0));
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  // Stored in SubclassData bit 6 (mask 64).
  bool isSwiftError() const {
    return getSubclassDataFromInstruction() & 64;
  }

  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
                               (V ? 64 : 0));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }
};
157
158//===----------------------------------------------------------------------===//
159// LoadInst Class
160//===----------------------------------------------------------------------===//
161
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  // The Ptr-only constructors derive the loaded type from the pointer's
  // pointee type; the Ty-taking overloads allow it to be given explicitly.
  LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
  LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
           Instruction *InsertBefore = nullptr);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
           Instruction *InsertBefore = nullptr)
      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
                 NameStr, isVolatile, InsertBefore) {}
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
           Instruction *InsertBefore = nullptr)
      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
                 NameStr, isVolatile, Align, InsertBefore) {}
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, Instruction *InsertBefore = nullptr);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
           AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr)
      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
                 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
           unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);
  LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
  LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
           bool isVolatile = false, Instruction *InsertBefore = nullptr);
  explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
                    bool isVolatile = false,
                    Instruction *InsertBefore = nullptr)
      : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
                 NameStr, isVolatile, InsertBefore) {}
  LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  // Volatile flag lives in SubclassData bit 0.
  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (V ? 1 : 0));
  }

  /// Return the alignment of the access that is being performed.
  // Bits 1-5 of SubclassData hold log2(alignment) + 1; 0 means unspecified.
  unsigned getAlignment() const {
    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
  }

  void setAlignment(unsigned Align);

  /// Returns the ordering constraint of this load instruction.
  // Atomic ordering is stored in SubclassData bits 7-9.
  AtomicOrdering getOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
  }

  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
                               ((unsigned)Ordering << 7));
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
300
301//===----------------------------------------------------------------------===//
302// StoreInst Class
303//===----------------------------------------------------------------------===//
304
/// An instruction for storing to memory.
class StoreInst : public Instruction {
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, AtomicOrdering Order,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile,
            unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  /// Return true if this is a store to a volatile memory location.
  // Volatile flag lives in SubclassData bit 0.
  bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (V ? 1 : 0));
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Return the alignment of the access that is being performed
  // Bits 1-5 of SubclassData hold log2(alignment) + 1; 0 means unspecified.
  unsigned getAlignment() const {
    return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
  }

  void setAlignment(unsigned Align);

  /// Returns the ordering constraint of this store instruction.
  // Atomic ordering is stored in SubclassData bits 7-9.
  AtomicOrdering getOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
                               ((unsigned)Ordering << 7));
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  // Operand 0 is the stored value, operand 1 is the destination pointer.
  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }

  /// The synchronization scope ID of this store instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
428
// StoreInst always has exactly two operands: the stored value (operand 0)
// and the destination pointer (operand 1).
template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};
432
// Provide the out-of-line definitions for the operand accessors declared
// inside StoreInst by DECLARE_TRANSPARENT_OPERAND_ACCESSORS above.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
434
435//===----------------------------------------------------------------------===//
436// FenceInst Class
437//===----------------------------------------------------------------------===//
438
439/// An instruction for ordering other memory operations.
440class FenceInst : public Instruction {
441 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
442
443protected:
444 // Note: Instruction needs to be a friend here to call cloneImpl.
445 friend class Instruction;
446
447 FenceInst *cloneImpl() const;
448
449public:
450 // Ordering may only be Acquire, Release, AcquireRelease, or
451 // SequentiallyConsistent.
452 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
453 SyncScope::ID SSID = SyncScope::System,
454 Instruction *InsertBefore = nullptr);
455 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
456 BasicBlock *InsertAtEnd);
457
458 // allocate space for exactly zero operands
459 void *operator new(size_t s) {
460 return User::operator new(s, 0);
461 }
462
463 /// Returns the ordering constraint of this fence instruction.
464 AtomicOrdering getOrdering() const {
465 return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
466 }
467
468 /// Sets the ordering constraint of this fence instruction. May only be
469 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
470 void setOrdering(AtomicOrdering Ordering) {
471 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
472 ((unsigned)Ordering << 1));
473 }
474
475 /// Returns the synchronization scope ID of this fence instruction.
476 SyncScope::ID getSyncScopeID() const {
477 return SSID;
478 }
479
480 /// Sets the synchronization scope ID of this fence instruction.
481 void setSyncScopeID(SyncScope::ID SSID) {
482 this->SSID = SSID;
483 }
484
485 // Methods for support type inquiry through isa, cast, and dyn_cast:
486 static bool classof(const Instruction *I) {
487 return I->getOpcode() == Instruction::Fence;
488 }
489 static bool classof(const Value *V) {
490 return isa<Instruction>(V) && classof(cast<Instruction>(V));
491 }
492
493private:
494 // Shadow Instruction::setInstructionSubclassData with a private forwarding
495 // method so that subclasses cannot accidentally use it.
496 void setInstructionSubclassData(unsigned short D) {
497 Instruction::setInstructionSubclassData(D);
498 }
499
500 /// The synchronization scope ID of this fence instruction. Not quite enough
501 /// room in SubClassData for everything, so synchronization scope ID gets its
502 /// own field.
503 SyncScope::ID SSID;
504};
505
506//===----------------------------------------------------------------------===//
507// AtomicCmpXchgInst Class
508//===----------------------------------------------------------------------===//
509
510/// an instruction that atomically checks whether a
511/// specified value is in a memory location, and, if it is, stores a new value
512/// there. Returns the value that was loaded.
513///
514class AtomicCmpXchgInst : public Instruction {
515 void Init(Value *Ptr, Value *Cmp, Value *NewVal,
516 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
517 SyncScope::ID SSID);
518
519protected:
520 // Note: Instruction needs to be a friend here to call cloneImpl.
521 friend class Instruction;
522
523 AtomicCmpXchgInst *cloneImpl() const;
524
525public:
526 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
527 AtomicOrdering SuccessOrdering,
528 AtomicOrdering FailureOrdering,
529 SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering,
533 SyncScope::ID SSID, BasicBlock *InsertAtEnd);
534
535 // allocate space for exactly three operands
536 void *operator new(size_t s) {
537 return User::operator new(s, 3);
538 }
539
540 /// Return true if this is a cmpxchg from a volatile memory
541 /// location.
542 ///
543 bool isVolatile() const {
544 return getSubclassDataFromInstruction() & 1;
545 }
546
547 /// Specify whether this is a volatile cmpxchg.
548 ///
549 void setVolatile(bool V) {
550 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
551 (unsigned)V);
552 }
553
554 /// Return true if this cmpxchg may spuriously fail.
555 bool isWeak() const {
556 return getSubclassDataFromInstruction() & 0x100;
557 }
558
559 void setWeak(bool IsWeak) {
560 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
561 (IsWeak << 8));
562 }
563
564 /// Transparently provide more efficient getOperand methods.
565 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
566
567 /// Returns the success ordering constraint of this cmpxchg instruction.
568 AtomicOrdering getSuccessOrdering() const {
569 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
570 }
571
572 /// Sets the success ordering constraint of this cmpxchg instruction.
573 void setSuccessOrdering(AtomicOrdering Ordering) {
574 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 575, __extension__ __PRETTY_FUNCTION__))
575 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 575, __extension__ __PRETTY_FUNCTION__))
;
576 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
577 ((unsigned)Ordering << 2));
578 }
579
580 /// Returns the failure ordering constraint of this cmpxchg instruction.
581 AtomicOrdering getFailureOrdering() const {
582 return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
583 }
584
585 /// Sets the failure ordering constraint of this cmpxchg instruction.
586 void setFailureOrdering(AtomicOrdering Ordering) {
587 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 588, __extension__ __PRETTY_FUNCTION__))
588 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 588, __extension__ __PRETTY_FUNCTION__))
;
589 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
590 ((unsigned)Ordering << 5));
591 }
592
593 /// Returns the synchronization scope ID of this cmpxchg instruction.
594 SyncScope::ID getSyncScopeID() const {
595 return SSID;
596 }
597
598 /// Sets the synchronization scope ID of this cmpxchg instruction.
599 void setSyncScopeID(SyncScope::ID SSID) {
600 this->SSID = SSID;
601 }
602
603 Value *getPointerOperand() { return getOperand(0); }
604 const Value *getPointerOperand() const { return getOperand(0); }
605 static unsigned getPointerOperandIndex() { return 0U; }
606
607 Value *getCompareOperand() { return getOperand(1); }
608 const Value *getCompareOperand() const { return getOperand(1); }
609
610 Value *getNewValOperand() { return getOperand(2); }
611 const Value *getNewValOperand() const { return getOperand(2); }
612
613 /// Returns the address space of the pointer operand.
614 unsigned getPointerAddressSpace() const {
615 return getPointerOperand()->getType()->getPointerAddressSpace();
616 }
617
618 /// Returns the strongest permitted ordering on failure, given the
619 /// desired ordering on success.
620 ///
621 /// If the comparison in a cmpxchg operation fails, there is no atomic store
622 /// so release semantics cannot be provided. So this function drops explicit
623 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
624 /// operation would remain SequentiallyConsistent.
625 static AtomicOrdering
626 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
627 switch (SuccessOrdering) {
628 default:
629 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 629)
;
630 case AtomicOrdering::Release:
631 case AtomicOrdering::Monotonic:
632 return AtomicOrdering::Monotonic;
633 case AtomicOrdering::AcquireRelease:
634 case AtomicOrdering::Acquire:
635 return AtomicOrdering::Acquire;
636 case AtomicOrdering::SequentiallyConsistent:
637 return AtomicOrdering::SequentiallyConsistent;
638 }
639 }
640
641 // Methods for support type inquiry through isa, cast, and dyn_cast:
642 static bool classof(const Instruction *I) {
643 return I->getOpcode() == Instruction::AtomicCmpXchg;
644 }
645 static bool classof(const Value *V) {
646 return isa<Instruction>(V) && classof(cast<Instruction>(V));
647 }
648
649private:
650 // Shadow Instruction::setInstructionSubclassData with a private forwarding
651 // method so that subclasses cannot accidentally use it.
652 void setInstructionSubclassData(unsigned short D) {
653 Instruction::setInstructionSubclassData(D);
654 }
655
656 /// The synchronization scope ID of this cmpxchg instruction. Not quite
657 /// enough room in SubClassData for everything, so synchronization scope ID
658 /// gets its own field.
659 SyncScope::ID SSID;
660};
661
662template <>
663struct OperandTraits<AtomicCmpXchgInst> :
664 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
665};
666
667DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 667, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicCmpXchgInst>::op_begin
(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture].get
()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 667, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicCmpXchgInst::getNumOperands() const { return
OperandTraits<AtomicCmpXchgInst>::operands(this); } template
<int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &AtomicCmpXchgInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
668
669//===----------------------------------------------------------------------===//
670// AtomicRMWInst Class
671//===----------------------------------------------------------------------===//
672
673/// an instruction that atomically reads a memory location,
674/// combines it with another value, and then stores the result back. Returns
675/// the old value.
676///
677class AtomicRMWInst : public Instruction {
678protected:
679 // Note: Instruction needs to be a friend here to call cloneImpl.
680 friend class Instruction;
681
682 AtomicRMWInst *cloneImpl() const;
683
684public:
685 /// This enumeration lists the possible modifications atomicrmw can make. In
686 /// the descriptions, 'p' is the pointer to the instruction's memory location,
687 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
688 /// instruction. These instructions always return 'old'.
689 enum BinOp {
690 /// *p = v
691 Xchg,
692 /// *p = old + v
693 Add,
694 /// *p = old - v
695 Sub,
696 /// *p = old & v
697 And,
698 /// *p = ~(old & v)
699 Nand,
700 /// *p = old | v
701 Or,
702 /// *p = old ^ v
703 Xor,
704 /// *p = old >signed v ? old : v
705 Max,
706 /// *p = old <signed v ? old : v
707 Min,
708 /// *p = old >unsigned v ? old : v
709 UMax,
710 /// *p = old <unsigned v ? old : v
711 UMin,
712
713 FIRST_BINOP = Xchg,
714 LAST_BINOP = UMin,
715 BAD_BINOP
716 };
717
718 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
719 AtomicOrdering Ordering, SyncScope::ID SSID,
720 Instruction *InsertBefore = nullptr);
721 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
722 AtomicOrdering Ordering, SyncScope::ID SSID,
723 BasicBlock *InsertAtEnd);
724
725 // allocate space for exactly two operands
726 void *operator new(size_t s) {
727 return User::operator new(s, 2);
728 }
729
730 BinOp getOperation() const {
731 return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
732 }
733
734 void setOperation(BinOp Operation) {
735 unsigned short SubclassData = getSubclassDataFromInstruction();
736 setInstructionSubclassData((SubclassData & 31) |
737 (Operation << 5));
738 }
739
740 /// Return true if this is a RMW on a volatile memory location.
741 ///
742 bool isVolatile() const {
743 return getSubclassDataFromInstruction() & 1;
744 }
745
746 /// Specify whether this is a volatile RMW or not.
747 ///
748 void setVolatile(bool V) {
749 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
750 (unsigned)V);
751 }
752
753 /// Transparently provide more efficient getOperand methods.
754 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
755
756 /// Returns the ordering constraint of this rmw instruction.
757 AtomicOrdering getOrdering() const {
758 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
759 }
760
761 /// Sets the ordering constraint of this rmw instruction.
762 void setOrdering(AtomicOrdering Ordering) {
763 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 764, __extension__ __PRETTY_FUNCTION__))
764 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 764, __extension__ __PRETTY_FUNCTION__))
;
765 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
766 ((unsigned)Ordering << 2));
767 }
768
769 /// Returns the synchronization scope ID of this rmw instruction.
770 SyncScope::ID getSyncScopeID() const {
771 return SSID;
772 }
773
774 /// Sets the synchronization scope ID of this rmw instruction.
775 void setSyncScopeID(SyncScope::ID SSID) {
776 this->SSID = SSID;
777 }
778
779 Value *getPointerOperand() { return getOperand(0); }
780 const Value *getPointerOperand() const { return getOperand(0); }
781 static unsigned getPointerOperandIndex() { return 0U; }
782
783 Value *getValOperand() { return getOperand(1); }
784 const Value *getValOperand() const { return getOperand(1); }
785
786 /// Returns the address space of the pointer operand.
787 unsigned getPointerAddressSpace() const {
788 return getPointerOperand()->getType()->getPointerAddressSpace();
789 }
790
791 // Methods for support type inquiry through isa, cast, and dyn_cast:
792 static bool classof(const Instruction *I) {
793 return I->getOpcode() == Instruction::AtomicRMW;
794 }
795 static bool classof(const Value *V) {
796 return isa<Instruction>(V) && classof(cast<Instruction>(V));
797 }
798
799private:
800 void Init(BinOp Operation, Value *Ptr, Value *Val,
801 AtomicOrdering Ordering, SyncScope::ID SSID);
802
803 // Shadow Instruction::setInstructionSubclassData with a private forwarding
804 // method so that subclasses cannot accidentally use it.
805 void setInstructionSubclassData(unsigned short D) {
806 Instruction::setInstructionSubclassData(D);
807 }
808
809 /// The synchronization scope ID of this rmw instruction. Not quite enough
810 /// room in SubClassData for everything, so synchronization scope ID gets its
811 /// own field.
812 SyncScope::ID SSID;
813};
814
815template <>
816struct OperandTraits<AtomicRMWInst>
817 : public FixedNumOperandTraits<AtomicRMWInst,2> {
818};
819
820DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 820, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicRMWInst>::op_begin(const_cast
<AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<AtomicRMWInst
>::operands(this) && "setOperand() out of range!")
? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 820, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicRMWInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicRMWInst::getNumOperands() const { return OperandTraits
<AtomicRMWInst>::operands(this); } template <int Idx_nocapture
> Use &AtomicRMWInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &AtomicRMWInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
821
822//===----------------------------------------------------------------------===//
823// GetElementPtrInst Class
824//===----------------------------------------------------------------------===//
825
826// checkGEPType - Simple wrapper function to give a better assertion failure
827// message on bad indexes for a gep instruction.
828//
829inline Type *checkGEPType(Type *Ty) {
830 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 830, __extension__ __PRETTY_FUNCTION__))
;
831 return Ty;
832}
833
834/// an instruction for type-safe pointer arithmetic to
835/// access elements of arrays and structs
836///
837class GetElementPtrInst : public Instruction {
838 Type *SourceElementType;
839 Type *ResultElementType;
840
841 GetElementPtrInst(const GetElementPtrInst &GEPI);
842
843 /// Constructors - Create a getelementptr instruction with a base pointer an
844 /// list of indices. The first ctor can optionally insert before an existing
845 /// instruction, the second appends the new instruction to the specified
846 /// BasicBlock.
847 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
848 ArrayRef<Value *> IdxList, unsigned Values,
849 const Twine &NameStr, Instruction *InsertBefore);
850 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
851 ArrayRef<Value *> IdxList, unsigned Values,
852 const Twine &NameStr, BasicBlock *InsertAtEnd);
853
854 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
855
856protected:
857 // Note: Instruction needs to be a friend here to call cloneImpl.
858 friend class Instruction;
859
860 GetElementPtrInst *cloneImpl() const;
861
862public:
863 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
864 ArrayRef<Value *> IdxList,
865 const Twine &NameStr = "",
866 Instruction *InsertBefore = nullptr) {
867 unsigned Values = 1 + unsigned(IdxList.size());
868 if (!PointeeType)
869 PointeeType =
870 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
871 else
872 assert((static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 874, __extension__ __PRETTY_FUNCTION__))
873 PointeeType ==(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 874, __extension__ __PRETTY_FUNCTION__))
874 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 874, __extension__ __PRETTY_FUNCTION__))
;
875 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
876 NameStr, InsertBefore);
877 }
878
879 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
880 ArrayRef<Value *> IdxList,
881 const Twine &NameStr,
882 BasicBlock *InsertAtEnd) {
883 unsigned Values = 1 + unsigned(IdxList.size());
884 if (!PointeeType)
885 PointeeType =
886 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
887 else
888 assert((static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 890, __extension__ __PRETTY_FUNCTION__))
889 PointeeType ==(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 890, __extension__ __PRETTY_FUNCTION__))
890 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 890, __extension__ __PRETTY_FUNCTION__))
;
891 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
892 NameStr, InsertAtEnd);
893 }
894
895 /// Create an "inbounds" getelementptr. See the documentation for the
896 /// "inbounds" flag in LangRef.html for details.
897 static GetElementPtrInst *CreateInBounds(Value *Ptr,
898 ArrayRef<Value *> IdxList,
899 const Twine &NameStr = "",
900 Instruction *InsertBefore = nullptr){
901 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
902 }
903
904 static GetElementPtrInst *
905 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
906 const Twine &NameStr = "",
907 Instruction *InsertBefore = nullptr) {
908 GetElementPtrInst *GEP =
909 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
910 GEP->setIsInBounds(true);
911 return GEP;
912 }
913
914 static GetElementPtrInst *CreateInBounds(Value *Ptr,
915 ArrayRef<Value *> IdxList,
916 const Twine &NameStr,
917 BasicBlock *InsertAtEnd) {
918 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
919 }
920
921 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
922 ArrayRef<Value *> IdxList,
923 const Twine &NameStr,
924 BasicBlock *InsertAtEnd) {
925 GetElementPtrInst *GEP =
926 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
927 GEP->setIsInBounds(true);
928 return GEP;
929 }
930
931 /// Transparently provide more efficient getOperand methods.
932 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
933
934 Type *getSourceElementType() const { return SourceElementType; }
935
936 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
937 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
938
939 Type *getResultElementType() const {
940 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 941, __extension__ __PRETTY_FUNCTION__))
941 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 941, __extension__ __PRETTY_FUNCTION__))
;
942 return ResultElementType;
943 }
944
945 /// Returns the address space of this instruction's pointer type.
946 unsigned getAddressSpace() const {
947 // Note that this is always the same as the pointer operand's address space
948 // and that is cheaper to compute, so cheat here.
949 return getPointerAddressSpace();
950 }
951
952 /// Returns the type of the element that would be loaded with
953 /// a load instruction with the specified parameters.
954 ///
955 /// Null is returned if the indices are invalid for the specified
956 /// pointer type.
957 ///
958 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
959 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
960 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
961
962 inline op_iterator idx_begin() { return op_begin()+1; }
963 inline const_op_iterator idx_begin() const { return op_begin()+1; }
964 inline op_iterator idx_end() { return op_end(); }
965 inline const_op_iterator idx_end() const { return op_end(); }
966
967 inline iterator_range<op_iterator> indices() {
968 return make_range(idx_begin(), idx_end());
969 }
970
971 inline iterator_range<const_op_iterator> indices() const {
972 return make_range(idx_begin(), idx_end());
973 }
974
975 Value *getPointerOperand() {
976 return getOperand(0);
977 }
978 const Value *getPointerOperand() const {
979 return getOperand(0);
980 }
981 static unsigned getPointerOperandIndex() {
982 return 0U; // get index for modifying correct operand.
983 }
984
985 /// Method to return the pointer operand as a
986 /// PointerType.
987 Type *getPointerOperandType() const {
988 return getPointerOperand()->getType();
989 }
990
991 /// Returns the address space of the pointer operand.
992 unsigned getPointerAddressSpace() const {
993 return getPointerOperandType()->getPointerAddressSpace();
994 }
995
996 /// Returns the pointer type returned by the GEP
997 /// instruction, which may be a vector of pointers.
998 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
999 return getGEPReturnType(
1000 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
1001 Ptr, IdxList);
1002 }
1003 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1004 ArrayRef<Value *> IdxList) {
1005 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1006 Ptr->getType()->getPointerAddressSpace());
1007 // Vector GEP
1008 if (Ptr->getType()->isVectorTy()) {
1009 unsigned NumElem = Ptr->getType()->getVectorNumElements();
1010 return VectorType::get(PtrTy, NumElem);
1011 }
1012 for (Value *Index : IdxList)
1013 if (Index->getType()->isVectorTy()) {
1014 unsigned NumElem = Index->getType()->getVectorNumElements();
1015 return VectorType::get(PtrTy, NumElem);
1016 }
1017 // Scalar GEP
1018 return PtrTy;
1019 }
1020
1021 unsigned getNumIndices() const { // Note: always non-negative
1022 return getNumOperands() - 1;
1023 }
1024
1025 bool hasIndices() const {
1026 return getNumOperands() > 1;
1027 }
1028
1029 /// Return true if all of the indices of this GEP are
1030 /// zeros. If so, the result pointer and the first operand have the same
1031 /// value, just potentially different types.
1032 bool hasAllZeroIndices() const;
1033
1034 /// Return true if all of the indices of this GEP are
1035 /// constant integers. If so, the result pointer and the first operand have
1036 /// a constant offset between them.
1037 bool hasAllConstantIndices() const;
1038
1039 /// Set or clear the inbounds flag on this GEP instruction.
1040 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1041 void setIsInBounds(bool b = true);
1042
1043 /// Determine whether the GEP has the inbounds flag.
1044 bool isInBounds() const;
1045
1046 /// Accumulate the constant address offset of this GEP if possible.
1047 ///
1048 /// This routine accepts an APInt into which it will accumulate the constant
1049 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1050 /// all-constant, it returns false and the value of the offset APInt is
1051 /// undefined (it is *not* preserved!). The APInt passed into this routine
1052 /// must be at least as wide as the IntPtr type for the address space of
1053 /// the base GEP pointer.
1054 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1055
1056 // Methods for support type inquiry through isa, cast, and dyn_cast:
1057 static bool classof(const Instruction *I) {
1058 return (I->getOpcode() == Instruction::GetElementPtr);
1059 }
1060 static bool classof(const Value *V) {
1061 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1062 }
1063};
1064
1065template <>
1066struct OperandTraits<GetElementPtrInst> :
1067 public VariadicOperandTraits<GetElementPtrInst, 1> {
1068};
1069
1070GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1071 ArrayRef<Value *> IdxList, unsigned Values,
1072 const Twine &NameStr,
1073 Instruction *InsertBefore)
1074 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1075 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1076 Values, InsertBefore),
1077 SourceElementType(PointeeType),
1078 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1079 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1080, __extension__ __PRETTY_FUNCTION__))
1080 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1080, __extension__ __PRETTY_FUNCTION__))
;
1081 init(Ptr, IdxList, NameStr);
1082}
1083
1084GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1085 ArrayRef<Value *> IdxList, unsigned Values,
1086 const Twine &NameStr,
1087 BasicBlock *InsertAtEnd)
1088 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1089 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1090 Values, InsertAtEnd),
1091 SourceElementType(PointeeType),
1092 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1093 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1094, __extension__ __PRETTY_FUNCTION__))
1094 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1094, __extension__ __PRETTY_FUNCTION__))
;
1095 init(Ptr, IdxList, NameStr);
1096}
1097
1098DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1098, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<GetElementPtrInst>::op_begin
(const_cast<GetElementPtrInst*>(this))[i_nocapture].get
()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1098, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
GetElementPtrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned GetElementPtrInst::getNumOperands() const { return
OperandTraits<GetElementPtrInst>::operands(this); } template
<int Idx_nocapture> Use &GetElementPtrInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &GetElementPtrInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1099
1100//===----------------------------------------------------------------------===//
1101// ICmpInst Class
1102//===----------------------------------------------------------------------===//
1103
1104/// This instruction compares its operands according to the predicate given
1105/// to the constructor. It only operates on integers or pointers. The operands
1106/// must be identical types.
1107/// Represent an integer comparison operator.
1108class ICmpInst: public CmpInst {
1109 void AssertOK() {
1110 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1111, __extension__ __PRETTY_FUNCTION__))
1111 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1111, __extension__ __PRETTY_FUNCTION__))
;
1112 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1113, __extension__ __PRETTY_FUNCTION__))
1113 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1113, __extension__ __PRETTY_FUNCTION__))
;
1114 // Check that the operands are the right type
1115 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
1116 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
1117 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
;
1118 }
1119
1120protected:
1121 // Note: Instruction needs to be a friend here to call cloneImpl.
1122 friend class Instruction;
1123
1124 /// Clone an identical ICmpInst
1125 ICmpInst *cloneImpl() const;
1126
1127public:
1128 /// Constructor with insert-before-instruction semantics.
1129 ICmpInst(
1130 Instruction *InsertBefore, ///< Where to insert
1131 Predicate pred, ///< The predicate to use for the comparison
1132 Value *LHS, ///< The left-hand-side of the expression
1133 Value *RHS, ///< The right-hand-side of the expression
1134 const Twine &NameStr = "" ///< Name of the instruction
1135 ) : CmpInst(makeCmpResultType(LHS->getType()),
1136 Instruction::ICmp, pred, LHS, RHS, NameStr,
1137 InsertBefore) {
1138#ifndef NDEBUG
1139 AssertOK();
1140#endif
1141 }
1142
1143 /// Constructor with insert-at-end semantics.
1144 ICmpInst(
1145 BasicBlock &InsertAtEnd, ///< Block to insert into.
1146 Predicate pred, ///< The predicate to use for the comparison
1147 Value *LHS, ///< The left-hand-side of the expression
1148 Value *RHS, ///< The right-hand-side of the expression
1149 const Twine &NameStr = "" ///< Name of the instruction
1150 ) : CmpInst(makeCmpResultType(LHS->getType()),
1151 Instruction::ICmp, pred, LHS, RHS, NameStr,
1152 &InsertAtEnd) {
1153#ifndef NDEBUG
1154 AssertOK();
1155#endif
1156 }
1157
1158 /// Constructor with no-insertion semantics
1159 ICmpInst(
1160 Predicate pred, ///< The predicate to use for the comparison
1161 Value *LHS, ///< The left-hand-side of the expression
1162 Value *RHS, ///< The right-hand-side of the expression
1163 const Twine &NameStr = "" ///< Name of the instruction
1164 ) : CmpInst(makeCmpResultType(LHS->getType()),
1165 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1166#ifndef NDEBUG
1167 AssertOK();
1168#endif
1169 }
1170
1171 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1172 /// @returns the predicate that would be the result if the operand were
1173 /// regarded as signed.
1174 /// Return the signed version of the predicate
1175 Predicate getSignedPredicate() const {
1176 return getSignedPredicate(getPredicate());
1177 }
1178
1179 /// This is a static version that you can use without an instruction.
1180 /// Return the signed version of the predicate.
1181 static Predicate getSignedPredicate(Predicate pred);
1182
1183 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1184 /// @returns the predicate that would be the result if the operand were
1185 /// regarded as unsigned.
1186 /// Return the unsigned version of the predicate
1187 Predicate getUnsignedPredicate() const {
1188 return getUnsignedPredicate(getPredicate());
1189 }
1190
1191 /// This is a static version that you can use without an instruction.
1192 /// Return the unsigned version of the predicate.
1193 static Predicate getUnsignedPredicate(Predicate pred);
1194
1195 /// Return true if this predicate is either EQ or NE. This also
1196 /// tests for commutativity.
1197 static bool isEquality(Predicate P) {
1198 return P == ICMP_EQ || P == ICMP_NE;
1199 }
1200
1201 /// Return true if this predicate is either EQ or NE. This also
1202 /// tests for commutativity.
1203 bool isEquality() const {
1204 return isEquality(getPredicate());
1205 }
1206
1207 /// @returns true if the predicate of this ICmpInst is commutative
1208 /// Determine if this relation is commutative.
1209 bool isCommutative() const { return isEquality(); }
1210
1211 /// Return true if the predicate is relational (not EQ or NE).
1212 ///
1213 bool isRelational() const {
1214 return !isEquality();
1215 }
1216
1217 /// Return true if the predicate is relational (not EQ or NE).
1218 ///
1219 static bool isRelational(Predicate P) {
1220 return !isEquality(P);
1221 }
1222
1223 /// Exchange the two operands to this instruction in such a way that it does
1224 /// not modify the semantics of the instruction. The predicate value may be
1225 /// changed to retain the same result if the predicate is order dependent
1226 /// (e.g. ult).
1227 /// Swap operands and adjust predicate.
1228 void swapOperands() {
1229 setPredicate(getSwappedPredicate());
1230 Op<0>().swap(Op<1>());
1231 }
1232
1233 // Methods for support type inquiry through isa, cast, and dyn_cast:
1234 static bool classof(const Instruction *I) {
1235 return I->getOpcode() == Instruction::ICmp;
1236 }
1237 static bool classof(const Value *V) {
1238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1239 }
1240};
1241
1242//===----------------------------------------------------------------------===//
1243// FCmpInst Class
1244//===----------------------------------------------------------------------===//
1245
1246/// This instruction compares its operands according to the predicate given
1247/// to the constructor. It only operates on floating point values or packed
1248/// vectors of floating point values. The operands must be identical types.
1249/// Represents a floating point comparison operator.
1250class FCmpInst: public CmpInst {
1251 void AssertOK() {
1252 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1252, __extension__ __PRETTY_FUNCTION__))
;
1253 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1254, __extension__ __PRETTY_FUNCTION__))
1254 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1254, __extension__ __PRETTY_FUNCTION__))
;
1255 // Check that the operands are the right type
1256 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1257, __extension__ __PRETTY_FUNCTION__))
1257 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1257, __extension__ __PRETTY_FUNCTION__))
;
1258 }
1259
1260protected:
1261 // Note: Instruction needs to be a friend here to call cloneImpl.
1262 friend class Instruction;
1263
1264 /// Clone an identical FCmpInst
1265 FCmpInst *cloneImpl() const;
1266
1267public:
1268 /// Constructor with insert-before-instruction semantics.
1269 FCmpInst(
1270 Instruction *InsertBefore, ///< Where to insert
1271 Predicate pred, ///< The predicate to use for the comparison
1272 Value *LHS, ///< The left-hand-side of the expression
1273 Value *RHS, ///< The right-hand-side of the expression
1274 const Twine &NameStr = "" ///< Name of the instruction
1275 ) : CmpInst(makeCmpResultType(LHS->getType()),
1276 Instruction::FCmp, pred, LHS, RHS, NameStr,
1277 InsertBefore) {
1278 AssertOK();
1279 }
1280
1281 /// Constructor with insert-at-end semantics.
1282 FCmpInst(
1283 BasicBlock &InsertAtEnd, ///< Block to insert into.
1284 Predicate pred, ///< The predicate to use for the comparison
1285 Value *LHS, ///< The left-hand-side of the expression
1286 Value *RHS, ///< The right-hand-side of the expression
1287 const Twine &NameStr = "" ///< Name of the instruction
1288 ) : CmpInst(makeCmpResultType(LHS->getType()),
1289 Instruction::FCmp, pred, LHS, RHS, NameStr,
1290 &InsertAtEnd) {
1291 AssertOK();
1292 }
1293
1294 /// Constructor with no-insertion semantics
1295 FCmpInst(
1296 Predicate pred, ///< The predicate to use for the comparison
1297 Value *LHS, ///< The left-hand-side of the expression
1298 Value *RHS, ///< The right-hand-side of the expression
1299 const Twine &NameStr = "" ///< Name of the instruction
1300 ) : CmpInst(makeCmpResultType(LHS->getType()),
1301 Instruction::FCmp, pred, LHS, RHS, NameStr) {
1302 AssertOK();
1303 }
1304
1305 /// @returns true if the predicate of this instruction is EQ or NE.
1306 /// Determine if this is an equality predicate.
1307 static bool isEquality(Predicate Pred) {
1308 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1309 Pred == FCMP_UNE;
1310 }
1311
1312 /// @returns true if the predicate of this instruction is EQ or NE.
1313 /// Determine if this is an equality predicate.
1314 bool isEquality() const { return isEquality(getPredicate()); }
1315
1316 /// @returns true if the predicate of this instruction is commutative.
1317 /// Determine if this is a commutative predicate.
1318 bool isCommutative() const {
1319 return isEquality() ||
1320 getPredicate() == FCMP_FALSE ||
1321 getPredicate() == FCMP_TRUE ||
1322 getPredicate() == FCMP_ORD ||
1323 getPredicate() == FCMP_UNO;
1324 }
1325
1326 /// @returns true if the predicate is relational (not EQ or NE).
1327 /// Determine if this a relational predicate.
1328 bool isRelational() const { return !isEquality(); }
1329
1330 /// Exchange the two operands to this instruction in such a way that it does
1331 /// not modify the semantics of the instruction. The predicate value may be
1332 /// changed to retain the same result if the predicate is order dependent
1333 /// (e.g. ult).
1334 /// Swap operands and adjust predicate.
1335 void swapOperands() {
1336 setPredicate(getSwappedPredicate());
1337 Op<0>().swap(Op<1>());
1338 }
1339
1340 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1341 static bool classof(const Instruction *I) {
1342 return I->getOpcode() == Instruction::FCmp;
1343 }
1344 static bool classof(const Value *V) {
1345 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1346 }
1347};
1348
1349class CallInst;
1350class InvokeInst;
1351
1352template <class T> struct CallBaseParent { using type = Instruction; };
1353
1354template <> struct CallBaseParent<InvokeInst> { using type = TerminatorInst; };
1355
1356//===----------------------------------------------------------------------===//
1357/// Base class for all callable instructions (InvokeInst and CallInst)
1358/// Holds everything related to calling a function, abstracting from the base
1359/// type @p BaseInstTy and the concrete instruction @p InstTy
1360///
1361template <class InstTy>
1362class CallBase : public CallBaseParent<InstTy>::type,
1363 public OperandBundleUser<InstTy, User::op_iterator> {
1364protected:
1365 AttributeList Attrs; ///< parameter attributes for callable
1366 FunctionType *FTy;
1367 using BaseInstTy = typename CallBaseParent<InstTy>::type;
1368
1369 template <class... ArgsTy>
1370 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1371 : BaseInstTy(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1372 bool hasDescriptor() const { return Value::HasDescriptor; }
1373
1374 using BaseInstTy::BaseInstTy;
1375
1376 using OperandBundleUser<InstTy,
1377 User::op_iterator>::isFnAttrDisallowedByOpBundle;
1378 using OperandBundleUser<InstTy, User::op_iterator>::getNumTotalBundleOperands;
1379 using OperandBundleUser<InstTy, User::op_iterator>::bundleOperandHasAttr;
1380 using Instruction::getSubclassDataFromInstruction;
1381 using Instruction::setInstructionSubclassData;
1382
1383public:
1384 using Instruction::getContext;
1385 using OperandBundleUser<InstTy, User::op_iterator>::hasOperandBundles;
1386 using OperandBundleUser<InstTy,
1387 User::op_iterator>::getBundleOperandsStartIndex;
1388
1389 static bool classof(const Instruction *I) {
1390 llvm_unreachable(::llvm::llvm_unreachable_internal("CallBase is not meant to be used as part of the classof hierarchy"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1391)
1391 "CallBase is not meant to be used as part of the classof hierarchy")::llvm::llvm_unreachable_internal("CallBase is not meant to be used as part of the classof hierarchy"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1391)
;
1392 }
1393
1394public:
1395 /// Return the parameter attributes for this call.
1396 ///
1397 AttributeList getAttributes() const { return Attrs; }
1398
1399 /// Set the parameter attributes for this call.
1400 ///
1401 void setAttributes(AttributeList A) { Attrs = A; }
1402
1403 FunctionType *getFunctionType() const { return FTy; }
1404
1405 void mutateFunctionType(FunctionType *FTy) {
1406 Value::mutateType(FTy->getReturnType());
1407 this->FTy = FTy;
1408 }
1409
1410 /// Return the number of call arguments.
1411 ///
1412 unsigned getNumArgOperands() const {
1413 return getNumOperands() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1414 }
1415
1416 /// getArgOperand/setArgOperand - Return/set the i-th call argument.
1417 ///
1418 Value *getArgOperand(unsigned i) const {
1419 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1419, __extension__ __PRETTY_FUNCTION__))
;
1420 return getOperand(i);
1421 }
1422 void setArgOperand(unsigned i, Value *v) {
1423 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1423, __extension__ __PRETTY_FUNCTION__))
;
1424 setOperand(i, v);
1425 }
1426
1427 /// Return the iterator pointing to the beginning of the argument list.
1428 User::op_iterator arg_begin() { return op_begin(); }
1429
1430 /// Return the iterator pointing to the end of the argument list.
1431 User::op_iterator arg_end() {
1432 // [ call args ], [ operand bundles ], callee
1433 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1434 }
1435
1436 /// Iteration adapter for range-for loops.
1437 iterator_range<User::op_iterator> arg_operands() {
1438 return make_range(arg_begin(), arg_end());
1439 }
1440
1441 /// Return the iterator pointing to the beginning of the argument list.
1442 User::const_op_iterator arg_begin() const { return op_begin(); }
1443
1444 /// Return the iterator pointing to the end of the argument list.
1445 User::const_op_iterator arg_end() const {
1446 // [ call args ], [ operand bundles ], callee
1447 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1448 }
1449
1450 /// Iteration adapter for range-for loops.
1451 iterator_range<User::const_op_iterator> arg_operands() const {
1452 return make_range(arg_begin(), arg_end());
1453 }
1454
1455 /// Wrappers for getting the \c Use of a call argument.
1456 const Use &getArgOperandUse(unsigned i) const {
1457 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1457, __extension__ __PRETTY_FUNCTION__))
;
1458 return User::getOperandUse(i);
1459 }
1460 Use &getArgOperandUse(unsigned i) {
1461 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1461, __extension__ __PRETTY_FUNCTION__))
;
1462 return User::getOperandUse(i);
1463 }
1464
1465 /// If one of the arguments has the 'returned' attribute, return its
1466 /// operand value. Otherwise, return nullptr.
1467 Value *getReturnedArgOperand() const {
1468 unsigned Index;
1469
1470 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
1471 return getArgOperand(Index - AttributeList::FirstArgIndex);
1472 if (const Function *F = getCalledFunction())
1473 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
1474 Index)
1475 return getArgOperand(Index - AttributeList::FirstArgIndex);
1476
1477 return nullptr;
1478 }
1479
1480 User::op_iterator op_begin() {
1481 return OperandTraits<CallBase>::op_begin(this);
1482 }
1483
1484 User::const_op_iterator op_begin() const {
1485 return OperandTraits<CallBase>::op_begin(const_cast<CallBase *>(this));
1486 }
1487
1488 User::op_iterator op_end() { return OperandTraits<CallBase>::op_end(this); }
1489
1490 User::const_op_iterator op_end() const {
1491 return OperandTraits<CallBase>::op_end(const_cast<CallBase *>(this));
1492 }
1493
1494 Value *getOperand(unsigned i_nocapture) const {
1495 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1496, __extension__ __PRETTY_FUNCTION__))
1496 "getOperand() out of range!")(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1496, __extension__ __PRETTY_FUNCTION__))
;
1497 return cast_or_null<Value>(OperandTraits<CallBase>::op_begin(
1498 const_cast<CallBase *>(this))[i_nocapture]
1499 .get());
1500 }
1501
1502 void setOperand(unsigned i_nocapture, Value *Val_nocapture) {
1503 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1504, __extension__ __PRETTY_FUNCTION__))
1504 "setOperand() out of range!")(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1504, __extension__ __PRETTY_FUNCTION__))
;
1505 OperandTraits<CallBase>::op_begin(this)[i_nocapture] = Val_nocapture;
1506 }
1507
1508 unsigned getNumOperands() const {
1509 return OperandTraits<CallBase>::operands(this);
1510 }
1511 template <int Idx_nocapture> Use &Op() {
1512 return User::OpFrom<Idx_nocapture>(this);
1513 }
1514 template <int Idx_nocapture> const Use &Op() const {
1515 return User::OpFrom<Idx_nocapture>(this);
1516 }
1517
1518 /// Return the function called, or null if this is an
1519 /// indirect function invocation.
1520 ///
1521 Function *getCalledFunction() const {
1522 return dyn_cast<Function>(Op<-InstTy::ArgOffset>());
1523 }
1524
1525 /// Determine whether this call has the given attribute.
1526 bool hasFnAttr(Attribute::AttrKind Kind) const {
1527 assert(Kind != Attribute::NoBuiltin &&(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1528, __extension__ __PRETTY_FUNCTION__))
1528 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin")(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1528, __extension__ __PRETTY_FUNCTION__))
;
1529 return hasFnAttrImpl(Kind);
1530 }
1531
1532 /// Determine whether this call has the given attribute.
1533 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1534
1535 /// getCallingConv/setCallingConv - Get or set the calling convention of this
1536 /// function call.
1537 CallingConv::ID getCallingConv() const {
1538 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
1539 }
1540 void setCallingConv(CallingConv::ID CC) {
1541 auto ID = static_cast<unsigned>(CC);
1542 assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention")(static_cast <bool> (!(ID & ~CallingConv::MaxID) &&
"Unsupported calling convention") ? void (0) : __assert_fail
("!(ID & ~CallingConv::MaxID) && \"Unsupported calling convention\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1542, __extension__ __PRETTY_FUNCTION__))
;
1543 setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
1544 (ID << 2));
1545 }
1546
1547
1548 /// adds the attribute to the list of attributes.
1549 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1550 AttributeList PAL = getAttributes();
1551 PAL = PAL.addAttribute(getContext(), i, Kind);
1552 setAttributes(PAL);
1553 }
1554
1555 /// adds the attribute to the list of attributes.
1556 void addAttribute(unsigned i, Attribute Attr) {
1557 AttributeList PAL = getAttributes();
1558 PAL = PAL.addAttribute(getContext(), i, Attr);
1559 setAttributes(PAL);
1560 }
1561
1562 /// Adds the attribute to the indicated argument
1563 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1564 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1564, __extension__ __PRETTY_FUNCTION__))
;
1565 AttributeList PAL = getAttributes();
1566 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1567 setAttributes(PAL);
1568 }
1569
1570 /// Adds the attribute to the indicated argument
1571 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1572 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1572, __extension__ __PRETTY_FUNCTION__))
;
1573 AttributeList PAL = getAttributes();
1574 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1575 setAttributes(PAL);
1576 }
1577
1578 /// removes the attribute from the list of attributes.
1579 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1580 AttributeList PAL = getAttributes();
1581 PAL = PAL.removeAttribute(getContext(), i, Kind);
1582 setAttributes(PAL);
1583 }
1584
1585 /// removes the attribute from the list of attributes.
1586 void removeAttribute(unsigned i, StringRef Kind) {
1587 AttributeList PAL = getAttributes();
1588 PAL = PAL.removeAttribute(getContext(), i, Kind);
1589 setAttributes(PAL);
1590 }
1591
1592 /// Removes the attribute from the given argument
1593 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1594 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1594, __extension__ __PRETTY_FUNCTION__))
;
1595 AttributeList PAL = getAttributes();
1596 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1597 setAttributes(PAL);
1598 }
1599
1600 /// Removes the attribute from the given argument
1601 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1602 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1602, __extension__ __PRETTY_FUNCTION__))
;
1603 AttributeList PAL = getAttributes();
1604 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1605 setAttributes(PAL);
1606 }
1607
1608 /// adds the dereferenceable attribute to the list of attributes.
1609 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1610 AttributeList PAL = getAttributes();
1611 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1612 setAttributes(PAL);
1613 }
1614
1615 /// adds the dereferenceable_or_null attribute to the list of
1616 /// attributes.
1617 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1618 AttributeList PAL = getAttributes();
1619 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1620 setAttributes(PAL);
1621 }
1622
1623 /// Determine whether the return value has the given attribute.
1624 bool hasRetAttr(Attribute::AttrKind Kind) const {
1625 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
1626 return true;
1627
1628 // Look at the callee, if available.
1629 if (const Function *F = getCalledFunction())
1630 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
1631 return false;
1632 }
1633
1634 /// Determine whether the argument or parameter has the given attribute.
1635 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1636 assert(ArgNo < getNumArgOperands() && "Param index out of bounds!")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Param index out of bounds!") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Param index out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1636, __extension__ __PRETTY_FUNCTION__))
;
1637
1638 if (Attrs.hasParamAttribute(ArgNo, Kind))
1639 return true;
1640 if (const Function *F = getCalledFunction())
1641 return F->getAttributes().hasParamAttribute(ArgNo, Kind);
1642 return false;
1643 }
1644
1645 /// Get the attribute of a given kind at a position.
1646 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1647 return getAttributes().getAttribute(i, Kind);
1648 }
1649
1650 /// Get the attribute of a given kind at a position.
1651 Attribute getAttribute(unsigned i, StringRef Kind) const {
1652 return getAttributes().getAttribute(i, Kind);
1653 }
1654
1655 /// Get the attribute of a given kind from a given arg
1656 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1657 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1657, __extension__ __PRETTY_FUNCTION__))
;
1658 return getAttributes().getParamAttr(ArgNo, Kind);
1659 }
1660
1661 /// Get the attribute of a given kind from a given arg
1662 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1663 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1663, __extension__ __PRETTY_FUNCTION__))
;
1664 return getAttributes().getParamAttr(ArgNo, Kind);
1665 }
1666 /// Return true if the data operand at index \p i has the attribute \p
1667 /// A.
1668 ///
1669 /// Data operands include call arguments and values used in operand bundles,
1670 /// but does not include the callee operand. This routine dispatches to the
1671 /// underlying AttributeList or the OperandBundleUser as appropriate.
1672 ///
1673 /// The index \p i is interpreted as
1674 ///
1675 /// \p i == Attribute::ReturnIndex -> the return value
1676 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1677 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1678 /// (\p i - 1) in the operand list.
1679 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1680 // There are getNumOperands() - (InstTy::ArgOffset - 1) data operands.
1681 // The last operand is the callee.
1682 assert(i < (getNumOperands() - InstTy::ArgOffset + 1) &&(static_cast <bool> (i < (getNumOperands() - InstTy::
ArgOffset + 1) && "Data operand index out of bounds!"
) ? void (0) : __assert_fail ("i < (getNumOperands() - InstTy::ArgOffset + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1683, __extension__ __PRETTY_FUNCTION__))
1683 "Data operand index out of bounds!")(static_cast <bool> (i < (getNumOperands() - InstTy::
ArgOffset + 1) && "Data operand index out of bounds!"
) ? void (0) : __assert_fail ("i < (getNumOperands() - InstTy::ArgOffset + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1683, __extension__ __PRETTY_FUNCTION__))
;
1684
1685 // The attribute A can either be directly specified, if the operand in
1686 // question is a call argument; or be indirectly implied by the kind of its
1687 // containing operand bundle, if the operand is a bundle operand.
1688
1689 if (i == AttributeList::ReturnIndex)
1690 return hasRetAttr(Kind);
1691
1692 // FIXME: Avoid these i - 1 calculations and update the API to use
1693 // zero-based indices.
1694 if (i < (getNumArgOperands() + 1))
1695 return paramHasAttr(i - 1, Kind);
1696
1697 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&(static_cast <bool> (hasOperandBundles() && i >=
(getBundleOperandsStartIndex() + 1) && "Must be either a call argument or an operand bundle!"
) ? void (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1698, __extension__ __PRETTY_FUNCTION__))
1698 "Must be either a call argument or an operand bundle!")(static_cast <bool> (hasOperandBundles() && i >=
(getBundleOperandsStartIndex() + 1) && "Must be either a call argument or an operand bundle!"
) ? void (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1698, __extension__ __PRETTY_FUNCTION__))
;
1699 return bundleOperandHasAttr(i - 1, Kind);
1700 }
1701
1702 /// Extract the alignment of the return value.
1703 unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
1704
1705 /// Extract the alignment for a call or parameter (0=unknown).
1706 unsigned getParamAlignment(unsigned ArgNo) const {
1707 return Attrs.getParamAlignment(ArgNo);
1708 }
1709
1710 /// Extract the number of dereferenceable bytes for a call or
1711 /// parameter (0=unknown).
1712 uint64_t getDereferenceableBytes(unsigned i) const {
1713 return Attrs.getDereferenceableBytes(i);
1714 }
1715
1716 /// Extract the number of dereferenceable_or_null bytes for a call or
1717 /// parameter (0=unknown).
1718 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1719 return Attrs.getDereferenceableOrNullBytes(i);
1720 }
1721
1722 /// @brief Determine if the return value is marked with NoAlias attribute.
1723 bool returnDoesNotAlias() const {
1724 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1725 }
1726
1727 /// Return true if the call should not be treated as a call to a
1728 /// builtin.
1729 bool isNoBuiltin() const {
1730 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1731 !hasFnAttrImpl(Attribute::Builtin);
1732 }
1733
1734 /// Determine if the call requires strict floating point semantics.
1735 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1736
1737 /// Return true if the call should not be inlined.
1738 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1739 void setIsNoInline() {
1740 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1741 }
1742 /// Determine if the call does not access memory.
1743 bool doesNotAccessMemory() const {
1744 return hasFnAttr(Attribute::ReadNone);
1745 }
1746 void setDoesNotAccessMemory() {
1747 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1748 }
1749
1750 /// Determine if the call does not access or only reads memory.
1751 bool onlyReadsMemory() const {
1752 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1753 }
1754 void setOnlyReadsMemory() {
1755 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1756 }
1757
1758 /// Determine if the call does not access or only writes memory.
1759 bool doesNotReadMemory() const {
1760 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1761 }
1762 void setDoesNotReadMemory() {
1763 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1764 }
1765
1766 /// @brief Determine if the call can access memmory only using pointers based
1767 /// on its arguments.
1768 bool onlyAccessesArgMemory() const {
1769 return hasFnAttr(Attribute::ArgMemOnly);
1770 }
1771 void setOnlyAccessesArgMemory() {
1772 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1773 }
1774
1775 /// @brief Determine if the function may only access memory that is
1776 /// inaccessible from the IR.
1777 bool onlyAccessesInaccessibleMemory() const {
1778 return hasFnAttr(Attribute::InaccessibleMemOnly);
1779 }
1780 void setOnlyAccessesInaccessibleMemory() {
1781 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1782 }
1783
1784 /// @brief Determine if the function may only access memory that is
1785 /// either inaccessible from the IR or pointed to by its arguments.
1786 bool onlyAccessesInaccessibleMemOrArgMem() const {
1787 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1788 }
1789 void setOnlyAccessesInaccessibleMemOrArgMem() {
1790 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOrArgMemOnly);
1791 }
1792 /// Determine if the call cannot return.
1793 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1794 void setDoesNotReturn() {
1795 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1796 }
1797
1798 /// Determine if the call should not perform indirect branch tracking.
1799 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1800
1801 /// Determine if the call cannot unwind.
1802 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1803 void setDoesNotThrow() {
1804 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1805 }
1806
1807 /// Determine if the invoke cannot be duplicated.
1808 bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
1809 void setCannotDuplicate() {
1810 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1811 }
1812
1813 /// Determine if the invoke is convergent
1814 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1815 void setConvergent() {
1816 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1817 }
1818 void setNotConvergent() {
1819 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1820 }
1821
1822 /// Determine if the call returns a structure through first
1823 /// pointer argument.
1824 bool hasStructRetAttr() const {
1825 if (getNumArgOperands() == 0)
1826 return false;
1827
1828 // Be friendly and also check the callee.
1829 return paramHasAttr(0, Attribute::StructRet);
1830 }
1831
1832 /// Determine if any call argument is an aggregate passed by value.
1833 bool hasByValArgument() const {
1834 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1835 }
1836 /// Get a pointer to the function that is invoked by this
1837 /// instruction.
1838 const Value *getCalledValue() const { return Op<-InstTy::ArgOffset>(); }
1839 Value *getCalledValue() { return Op<-InstTy::ArgOffset>(); }
1840
1841 /// Set the function called.
1842 void setCalledFunction(Value* Fn) {
1843 setCalledFunction(
1844 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
1845 Fn);
1846 }
1847 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1848 this->FTy = FTy;
1849 assert(FTy == cast<FunctionType>((static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1850, __extension__ __PRETTY_FUNCTION__))
1850 cast<PointerType>(Fn->getType())->getElementType()))(static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 1850, __extension__ __PRETTY_FUNCTION__))
;
1851 Op<-InstTy::ArgOffset>() = Fn;
1852 }
1853
1854protected:
1855 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
1856 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
1857 return true;
1858
1859 // Operand bundles override attributes on the called function, but don't
1860 // override attributes directly present on the call instruction.
1861 if (isFnAttrDisallowedByOpBundle(Kind))
1862 return false;
1863
1864 if (const Function *F = getCalledFunction())
1865 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
1866 Kind);
1867 return false;
1868 }
1869};
1870
1871//===----------------------------------------------------------------------===//
1872/// This class represents a function call, abstracting a target
1873/// machine's calling convention. This class uses low bit of the SubClassData
1874/// field to indicate whether or not this is a tail call. The rest of the bits
1875/// hold the calling convention of the call.
1876///
1877class CallInst : public CallBase<CallInst> {
1878 friend class OperandBundleUser<CallInst, User::op_iterator>;
1879
1880 CallInst(const CallInst &CI);
1881
1882 /// Construct a CallInst given a range of arguments.
1883 /// Construct a CallInst from a range of arguments
1884 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1885 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1886 Instruction *InsertBefore);
1887
1888 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1889 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1890 Instruction *InsertBefore)
1891 : CallInst(cast<FunctionType>(
1892 cast<PointerType>(Func->getType())->getElementType()),
1893 Func, Args, Bundles, NameStr, InsertBefore) {}
1894
1895 inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
1896 Instruction *InsertBefore)
1897 : CallInst(Func, Args, None, NameStr, InsertBefore) {}
1898
1899 /// Construct a CallInst given a range of arguments.
1900 /// Construct a CallInst from a range of arguments
1901 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1902 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1903 BasicBlock *InsertAtEnd);
1904
1905 explicit CallInst(Value *F, const Twine &NameStr, Instruction *InsertBefore);
1906
1907 CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
1908
1909 void init(Value *Func, ArrayRef<Value *> Args,
1910 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
1911 init(cast<FunctionType>(
1912 cast<PointerType>(Func->getType())->getElementType()),
1913 Func, Args, Bundles, NameStr);
1914 }
1915 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1916 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1917 void init(Value *Func, const Twine &NameStr);
1918
1919protected:
1920 // Note: Instruction needs to be a friend here to call cloneImpl.
1921 friend class Instruction;
1922
1923 CallInst *cloneImpl() const;
1924
1925public:
1926 static constexpr int ArgOffset = 1;
1927
1928 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1929 ArrayRef<OperandBundleDef> Bundles = None,
1930 const Twine &NameStr = "",
1931 Instruction *InsertBefore = nullptr) {
1932 return Create(cast<FunctionType>(
1933 cast<PointerType>(Func->getType())->getElementType()),
1934 Func, Args, Bundles, NameStr, InsertBefore);
1935 }
1936
1937 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1938 const Twine &NameStr,
1939 Instruction *InsertBefore = nullptr) {
1940 return Create(cast<FunctionType>(
1941 cast<PointerType>(Func->getType())->getElementType()),
1942 Func, Args, None, NameStr, InsertBefore);
1943 }
1944
1945 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1946 const Twine &NameStr,
1947 Instruction *InsertBefore = nullptr) {
1948 return new (unsigned(Args.size() + 1))
1949 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1950 }
1951
1952 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1953 ArrayRef<OperandBundleDef> Bundles = None,
1954 const Twine &NameStr = "",
1955 Instruction *InsertBefore = nullptr) {
1956 const unsigned TotalOps =
1957 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1958 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1959
1960 return new (TotalOps, DescriptorBytes)
1961 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1962 }
1963
1964 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1965 ArrayRef<OperandBundleDef> Bundles,
1966 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1967 const unsigned TotalOps =
1968 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1969 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1970
1971 return new (TotalOps, DescriptorBytes)
1972 CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
1973 }
1974
1975 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1976 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1977 return new (unsigned(Args.size() + 1))
1978 CallInst(Func, Args, None, NameStr, InsertAtEnd);
1979 }
1980
1981 static CallInst *Create(Value *F, const Twine &NameStr = "",
1982 Instruction *InsertBefore = nullptr) {
1983 return new (1) CallInst(F, NameStr, InsertBefore);
1984 }
1985
1986 static CallInst *Create(Value *F, const Twine &NameStr,
1987 BasicBlock *InsertAtEnd) {
1988 return new (1) CallInst(F, NameStr, InsertAtEnd);
1989 }
1990
1991 /// Create a clone of \p CI with a different set of operand bundles and
1992 /// insert it before \p InsertPt.
1993 ///
1994 /// The returned call instruction is identical \p CI in every way except that
1995 /// the operand bundles for the new instruction are set to the operand bundles
1996 /// in \p Bundles.
1997 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1998 Instruction *InsertPt = nullptr);
1999
2000 /// Generate the IR for a call to malloc:
2001 /// 1. Compute the malloc call's argument as the specified type's size,
2002 /// possibly multiplied by the array size if the array size is not
2003 /// constant 1.
2004 /// 2. Call malloc with that argument.
2005 /// 3. Bitcast the result of the malloc call to the specified type.
2006 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2007 Type *AllocTy, Value *AllocSize,
2008 Value *ArraySize = nullptr,
2009 Function *MallocF = nullptr,
2010 const Twine &Name = "");
2011 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2012 Type *AllocTy, Value *AllocSize,
2013 Value *ArraySize = nullptr,
2014 Function *MallocF = nullptr,
2015 const Twine &Name = "");
2016 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2017 Type *AllocTy, Value *AllocSize,
2018 Value *ArraySize = nullptr,
2019 ArrayRef<OperandBundleDef> Bundles = None,
2020 Function *MallocF = nullptr,
2021 const Twine &Name = "");
2022 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2023 Type *AllocTy, Value *AllocSize,
2024 Value *ArraySize = nullptr,
2025 ArrayRef<OperandBundleDef> Bundles = None,
2026 Function *MallocF = nullptr,
2027 const Twine &Name = "");
2028 /// Generate the IR for a call to the builtin free function.
2029 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
2030 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
2031 static Instruction *CreateFree(Value *Source,
2032 ArrayRef<OperandBundleDef> Bundles,
2033 Instruction *InsertBefore);
2034 static Instruction *CreateFree(Value *Source,
2035 ArrayRef<OperandBundleDef> Bundles,
2036 BasicBlock *InsertAtEnd);
2037
2038 // Note that 'musttail' implies 'tail'.
2039 enum TailCallKind {
2040 TCK_None = 0,
2041 TCK_Tail = 1,
2042 TCK_MustTail = 2,
2043 TCK_NoTail = 3
2044 };
2045 TailCallKind getTailCallKind() const {
2046 return TailCallKind(getSubclassDataFromInstruction() & 3);
2047 }
2048
2049 bool isTailCall() const {
2050 unsigned Kind = getSubclassDataFromInstruction() & 3;
2051 return Kind == TCK_Tail || Kind == TCK_MustTail;
2052 }
2053
2054 bool isMustTailCall() const {
2055 return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
2056 }
2057
2058 bool isNoTailCall() const {
2059 return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
2060 }
2061
2062 void setTailCall(bool isTC = true) {
2063 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2064 unsigned(isTC ? TCK_Tail : TCK_None));
2065 }
2066
2067 void setTailCallKind(TailCallKind TCK) {
2068 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2069 unsigned(TCK));
2070 }
2071
2072 /// Return true if the call can return twice
2073 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
2074 void setCanReturnTwice() {
2075 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
2076 }
2077
2078 /// Check if this call is an inline asm statement.
2079 bool isInlineAsm() const { return isa<InlineAsm>(Op<-1>()); }
2080
2081 // Methods for support type inquiry through isa, cast, and dyn_cast:
2082 static bool classof(const Instruction *I) {
2083 return I->getOpcode() == Instruction::Call;
2084 }
2085 static bool classof(const Value *V) {
2086 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2087 }
2088
2089private:
2090 // Shadow Instruction::setInstructionSubclassData with a private forwarding
2091 // method so that subclasses cannot accidentally use it.
2092 void setInstructionSubclassData(unsigned short D) {
2093 Instruction::setInstructionSubclassData(D);
2094 }
2095};
2096
2097template <>
2098struct OperandTraits<CallBase<CallInst>>
2099 : public VariadicOperandTraits<CallBase<CallInst>, 1> {};
2100
2101CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
2102 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2103 BasicBlock *InsertAtEnd)
2104 : CallBase<CallInst>(
2105 cast<FunctionType>(
2106 cast<PointerType>(Func->getType())->getElementType())
2107 ->getReturnType(),
2108 Instruction::Call,
2109 OperandTraits<CallBase<CallInst>>::op_end(this) -
2110 (Args.size() + CountBundleInputs(Bundles) + 1),
2111 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
2112 init(Func, Args, Bundles, NameStr);
2113}
2114
2115CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
2116 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2117 Instruction *InsertBefore)
2118 : CallBase<CallInst>(Ty->getReturnType(), Instruction::Call,
2119 OperandTraits<CallBase<CallInst>>::op_end(this) -
2120 (Args.size() + CountBundleInputs(Bundles) + 1),
2121 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
2122 InsertBefore) {
2123 init(Ty, Func, Args, Bundles, NameStr);
2124}
2125
2126//===----------------------------------------------------------------------===//
2127// SelectInst Class
2128//===----------------------------------------------------------------------===//
2129
2130/// This class represents the LLVM 'select' instruction.
2131///
2132class SelectInst : public Instruction {
2133 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2134 Instruction *InsertBefore)
2135 : Instruction(S1->getType(), Instruction::Select,
2136 &Op<0>(), 3, InsertBefore) {
2137 init(C, S1, S2);
2138 setName(NameStr);
2139 }
2140
2141 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2142 BasicBlock *InsertAtEnd)
2143 : Instruction(S1->getType(), Instruction::Select,
2144 &Op<0>(), 3, InsertAtEnd) {
2145 init(C, S1, S2);
2146 setName(NameStr);
2147 }
2148
2149 void init(Value *C, Value *S1, Value *S2) {
2150 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 2150, __extension__ __PRETTY_FUNCTION__))
;
2151 Op<0>() = C;
2152 Op<1>() = S1;
2153 Op<2>() = S2;
2154 }
2155
2156protected:
2157 // Note: Instruction needs to be a friend here to call cloneImpl.
2158 friend class Instruction;
2159
2160 SelectInst *cloneImpl() const;
2161
2162public:
2163 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2164 const Twine &NameStr = "",
2165 Instruction *InsertBefore = nullptr,
2166 Instruction *MDFrom = nullptr) {
2167 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
2168 if (MDFrom)
2169 Sel->copyMetadata(*MDFrom);
2170 return Sel;
2171 }
2172
2173 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2174 const Twine &NameStr,
2175 BasicBlock *InsertAtEnd) {
2176 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
2177 }
2178
2179 const Value *getCondition() const { return Op<0>(); }
2180 const Value *getTrueValue() const { return Op<1>(); }
2181 const Value *getFalseValue() const { return Op<2>(); }
2182 Value *getCondition() { return Op<0>(); }
2183 Value *getTrueValue() { return Op<1>(); }
2184 Value *getFalseValue() { return Op<2>(); }
2185
2186 void setCondition(Value *V) { Op<0>() = V; }
2187 void setTrueValue(Value *V) { Op<1>() = V; }
2188 void setFalseValue(Value *V) { Op<2>() = V; }
2189
2190 /// Return a string if the specified operands are invalid
2191 /// for a select operation, otherwise return null.
2192 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
2193
2194 /// Transparently provide more efficient getOperand methods.
2195 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2196
2197 OtherOps getOpcode() const {
2198 return static_cast<OtherOps>(Instruction::getOpcode());
2199 }
2200
2201 // Methods for support type inquiry through isa, cast, and dyn_cast:
2202 static bool classof(const Instruction *I) {
2203 return I->getOpcode() == Instruction::Select;
2204 }
2205 static bool classof(const Value *V) {
2206 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2207 }
2208};
2209
2210template <>
2211struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
2212};
2213
// Emits the out-of-line definitions of op_begin()/op_end(),
// getOperand()/setOperand() (with range asserts), getNumOperands() and the
// Op<>() helpers declared by DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside
// SelectInst, all routed through OperandTraits<SelectInst>.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
2215
2216//===----------------------------------------------------------------------===//
2217// VAArgInst Class
2218//===----------------------------------------------------------------------===//
2219
/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  /// Construct a va_arg of result type \p Ty reading from the va_list
  /// operand \p List, optionally inserting before \p InsertBefore.
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
    : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  /// Same as above, but appends the new instruction to \p InsertAtEnd.
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  // The single operand (index 0) is the va_list pointer being read.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2255
2256//===----------------------------------------------------------------------===//
2257// ExtractElementInst Class
2258//===----------------------------------------------------------------------===//
2259
/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
  // Constructors are private; use the Create() factories below so the
  // two-operand placement allocation is always performed.
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
                     BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractElementInst *cloneImpl() const;

public:
  // new(2): allocate space for exactly two operands (vector, index).
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr = "",
                                    Instruction *InsertBefore = nullptr) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr,
                                    BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  // Operand 0 is the source vector, operand 1 is the element index.
  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2312
// ExtractElementInst always has exactly two operands (vector, index).
template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};
2317
// Out-of-line definitions of the operand accessors declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside ExtractElementInst.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
2319
2320//===----------------------------------------------------------------------===//
2321// InsertElementInst Class
2322//===----------------------------------------------------------------------===//
2323
/// This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
  // Constructors are private; use the Create() factories below so the
  // three-operand placement allocation is always performed.
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertElementInst *cloneImpl() const;

public:
  // new(3): allocate space for exactly three operands (vector, element, index).
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
  }

  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an insertelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *NewElt,
                              const Value *Idx);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2375
// InsertElementInst always has exactly three operands (vector, element, index).
template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};
2380
// Out-of-line definitions of the operand accessors declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside InsertElementInst.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
2382
2383//===----------------------------------------------------------------------===//
2384// ShuffleVectorInst Class
2385//===----------------------------------------------------------------------===//
2386
/// This instruction constructs a fixed permutation of two
/// input vectors.
///
class ShuffleVectorInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ShuffleVectorInst *cloneImpl() const;

public:
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr = "",
                    Instruction *InsertBefor = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t s) {
    return User::operator new(s, 3);
  }

  /// Return true if a shufflevector instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *V1, const Value *V2,
                              const Value *Mask);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Operand 2 is the constant shuffle mask (operands 0 and 1 are the two
  // input vectors).
  Constant *getMask() const {
    return cast<Constant>(getOperand(2));
  }

  /// Return the shuffle mask value for the specified element of the mask.
  /// Return -1 if the element is undef.
  static int getMaskValue(Constant *Mask, unsigned Elt);

  /// Return the shuffle mask value of this instruction for the given element
  /// index. Return -1 if the element is undef.
  int getMaskValue(unsigned Elt) const {
    return getMaskValue(getMask(), Elt);
  }

  /// Convert the input shuffle mask operand to a vector of integers. Undefined
  /// elements of the mask are returned as -1.
  static void getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result);

  /// Return the mask for this instruction as a vector of integers. Undefined
  /// elements of the mask are returned as -1.
  void getShuffleMask(SmallVectorImpl<int> &Result) const {
    return getShuffleMask(getMask(), Result);
  }

  SmallVector<int, 16> getShuffleMask() const {
    SmallVector<int, 16> Mask;
    getShuffleMask(Mask);
    return Mask;
  }

  /// Change values in a shuffle permute mask assuming the two vector operands
  /// of length InVecNumElts have swapped position.
  static void commuteShuffleMask(MutableArrayRef<int> Mask,
                                 unsigned InVecNumElts) {
    for (int &Idx : Mask) {
      // -1 denotes an undef mask element; it stays undef after commuting.
      if (Idx == -1)
        continue;
      // Elements that selected from the first vector (< InVecNumElts) now
      // select from the second, and vice versa, so shift by InVecNumElts.
      Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
             "shufflevector mask index out of range");
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2474
// ShuffleVectorInst always has exactly three operands (two vectors + mask).
template <>
struct OperandTraits<ShuffleVectorInst> :
  public FixedNumOperandTraits<ShuffleVectorInst, 3> {
};
2479
// Out-of-line definitions of the operand accessors declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside ShuffleVectorInst.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2481
2482//===----------------------------------------------------------------------===//
2483// ExtractValueInst Class
2484//===----------------------------------------------------------------------===//
2485
/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  // Constant index path selecting the extracted member.  Stored inline in
  // the instruction (indices are not operands).
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);

  /// Constructors - Create a extractvalue instruction with a base aggregate
  /// value and a list of indices. The first ctor can optionally insert before
  /// an existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr,
                          Instruction *InsertBefore);
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Copies Idxs into Indices and applies NameStr.
  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractValueInst *cloneImpl() const;

public:
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  Instruction *InsertBefore = nullptr) {
    return new
      ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock *InsertAtEnd) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
  }

  /// Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  // The single operand (index 0) is the aggregate being read.
  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // Unconditionally true — presumably extractvalue always carries at least
  // one index; NOTE(review): confirm against LangRef.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2574
// The result type is the element type reached by following Idxs into the
// aggregate's type; checkGEPType (defined earlier in this file) guards the
// computed type before it is handed to UnaryInstruction.
ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   Instruction *InsertBefore)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}
2583
// Variant of the constructor above that appends the new instruction to the
// end of \p InsertAtEnd instead of inserting before another instruction.
ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertAtEnd) {
  init(Idxs, NameStr);
}
2592
2593//===----------------------------------------------------------------------===//
2594// InsertValueInst Class
2595//===----------------------------------------------------------------------===//
2596
/// This instruction inserts a struct field of array element
/// value into an aggregate value.
///
class InsertValueInst : public Instruction {
  // Constant index path to the position being written.  Stored inline in
  // the instruction (indices are not operands).
  SmallVector<unsigned, 4> Indices;

  InsertValueInst(const InsertValueInst &IVI);

  /// Constructors - Create a insertvalue instruction with a base aggregate
  /// value, a value to insert, and a list of indices. The first ctor can
  /// optionally insert before an existing instruction, the second appends
  /// the new instruction to the specified BasicBlock.
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr,
                         Instruction *InsertBefore);
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Constructors - These two constructors are convenience methods because one
  /// and two index insertvalue instructions are so common.
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
                  const Twine &NameStr = "",
                  Instruction *InsertBefore = nullptr);
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  // Sets the two operands, copies Idxs into Indices and applies NameStr.
  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
            const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertValueInst *cloneImpl() const;

public:
  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
  }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  // Operand 0 is the aggregate being updated.
  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  // Operand 1 is the value written into the selected position.
  Value *getInsertedValueOperand() {
    return getOperand(1);
  }
  const Value *getInsertedValueOperand() const {
    return getOperand(1);
  }
  static unsigned getInsertedValueOperandIndex() {
    return 1U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // Unconditionally true — presumably insertvalue always carries at least
  // one index; NOTE(review): confirm against LangRef.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2705
// InsertValueInst always has exactly two operands (aggregate, inserted value);
// the index path is stored inline, not as operands.
template <>
struct OperandTraits<InsertValueInst> :
  public FixedNumOperandTraits<InsertValueInst, 2> {
};
2710
// The result has the same type as the aggregate operand; the two Use slots
// come from OperandTraits and init() fills them with Agg and Val.
InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertBefore) {
  init(Agg, Val, Idxs, NameStr);
}
2721
// Variant of the constructor above that appends the new instruction to the
// end of \p InsertAtEnd instead of inserting before another instruction.
InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertAtEnd) {
  init(Agg, Val, Idxs, NameStr);
}
2732
// Out-of-line definitions of the operand accessors declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside InsertValueInst.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2734
2735//===----------------------------------------------------------------------===//
2736// PHINode Class
2737//===----------------------------------------------------------------------===//
2738
2739// PHINode - The PHINode class is used to represent the magical mystical PHI
2740// node, that can not exist in nature, but can be synthesized in a computer
2741// scientist's overactive imagination.
2742//
2743class PHINode : public Instruction {
2744 /// The number of operands actually allocated. NumOperands is
2745 /// the number actually in use.
2746 unsigned ReservedSpace;
2747
2748 PHINode(const PHINode &PN);
2749
2750 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2751 const Twine &NameStr = "",
2752 Instruction *InsertBefore = nullptr)
2753 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2754 ReservedSpace(NumReservedValues) {
2755 setName(NameStr);
2756 allocHungoffUses(ReservedSpace);
2757 }
2758
2759 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2760 BasicBlock *InsertAtEnd)
2761 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2762 ReservedSpace(NumReservedValues) {
2763 setName(NameStr);
2764 allocHungoffUses(ReservedSpace);
2765 }
2766
2767protected:
2768 // Note: Instruction needs to be a friend here to call cloneImpl.
2769 friend class Instruction;
2770
2771 PHINode *cloneImpl() const;
2772
2773 // allocHungoffUses - this is more complicated than the generic
2774 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2775 // values and pointers to the incoming blocks, all in one allocation.
2776 void allocHungoffUses(unsigned N) {
2777 User::allocHungoffUses(N, /* IsPhi */ true);
2778 }
2779
2780public:
2781 /// Constructors - NumReservedValues is a hint for the number of incoming
2782 /// edges that this phi node will have (use 0 if you really have no idea).
2783 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2784 const Twine &NameStr = "",
2785 Instruction *InsertBefore = nullptr) {
2786 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2787 }
2788
2789 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2790 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2791 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2792 }
2793
2794 /// Provide fast operand accessors
2795 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2796
2797 // Block iterator interface. This provides access to the list of incoming
2798 // basic blocks, which parallels the list of incoming values.
2799
2800 using block_iterator = BasicBlock **;
2801 using const_block_iterator = BasicBlock * const *;
2802
2803 block_iterator block_begin() {
2804 Use::UserRef *ref =
2805 reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
2806 return reinterpret_cast<block_iterator>(ref + 1);
2807 }
2808
2809 const_block_iterator block_begin() const {
2810 const Use::UserRef *ref =
2811 reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
2812 return reinterpret_cast<const_block_iterator>(ref + 1);
2813 }
2814
2815 block_iterator block_end() {
2816 return block_begin() + getNumOperands();
2817 }
2818
2819 const_block_iterator block_end() const {
2820 return block_begin() + getNumOperands();
2821 }
2822
2823 iterator_range<block_iterator> blocks() {
2824 return make_range(block_begin(), block_end());
2825 }
2826
2827 iterator_range<const_block_iterator> blocks() const {
2828 return make_range(block_begin(), block_end());
2829 }
2830
2831 op_range incoming_values() { return operands(); }
2832
2833 const_op_range incoming_values() const { return operands(); }
2834
2835 /// Return the number of incoming edges
2836 ///
2837 unsigned getNumIncomingValues() const { return getNumOperands(); }
2838
2839 /// Return incoming value number x
2840 ///
2841 Value *getIncomingValue(unsigned i) const {
2842 return getOperand(i);
2843 }
2844 void setIncomingValue(unsigned i, Value *V) {
2845 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 2845, __extension__ __PRETTY_FUNCTION__))
;
2846 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 2847, __extension__ __PRETTY_FUNCTION__))
2847 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 2847, __extension__ __PRETTY_FUNCTION__))
;
2848 setOperand(i, V);
2849 }
2850
/// Map an incoming-value index to its operand index (identity mapping).
static unsigned getOperandNumForIncomingValue(unsigned i) { return i; }
2854
/// Map an operand index to its incoming-value index (identity mapping).
static unsigned getIncomingValueNumForOperand(unsigned i) { return i; }
2858
2859 /// Return incoming basic block number @p i.
2860 ///
2861 BasicBlock *getIncomingBlock(unsigned i) const {
2862 return block_begin()[i];
2863 }
2864
2865 /// Return incoming basic block corresponding
2866 /// to an operand of the PHI.
2867 ///
2868 BasicBlock *getIncomingBlock(const Use &U) const {
2869 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 2869, __extension__ __PRETTY_FUNCTION__))
;
2870 return getIncomingBlock(unsigned(&U - op_begin()));
2871 }
2872
2873 /// Return incoming basic block corresponding
2874 /// to value use iterator.
2875 ///
2876 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2877 return getIncomingBlock(I.getUse());
2878 }
2879
2880 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2881 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 2881, __extension__ __PRETTY_FUNCTION__))
;
2882 block_begin()[i] = BB;
2883 }
2884
2885 /// Add an incoming value to the end of the PHI list
2886 ///
2887 void addIncoming(Value *V, BasicBlock *BB) {
2888 if (getNumOperands() == ReservedSpace)
2889 growOperands(); // Get more space!
2890 // Initialize some new operands.
2891 setNumHungOffUseOperands(getNumOperands() + 1);
2892 setIncomingValue(getNumOperands() - 1, V);
2893 setIncomingBlock(getNumOperands() - 1, BB);
2894 }
2895
/// Remove an incoming value. This is useful if a
/// predecessor basic block is deleted. The value removed is returned.
///
/// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
/// is true), the PHI node is destroyed and any uses of it are replaced with
/// dummy values. The only time there should be zero incoming values to a PHI
/// node is when the block is dead, so this strategy is sound.
/// (Defined out-of-line; only the declaration is visible here.)
Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2905
2906 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2907 int Idx = getBasicBlockIndex(BB);
2908 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 2908, __extension__ __PRETTY_FUNCTION__))
;
2909 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2910 }
2911
2912 /// Return the first index of the specified basic
2913 /// block in the value list for this PHI. Returns -1 if no instance.
2914 ///
2915 int getBasicBlockIndex(const BasicBlock *BB) const {
2916 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2917 if (block_begin()[i] == BB)
2918 return i;
2919 return -1;
2920 }
2921
2922 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2923 int Idx = getBasicBlockIndex(BB);
2924 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 2924, __extension__ __PRETTY_FUNCTION__))
;
2925 return getIncomingValue(Idx);
2926 }
2927
/// If the specified PHI node always merges together the
/// same value, return the value, otherwise return null.
/// (Defined out-of-line.)
Value *hasConstantValue() const;

/// Whether the specified PHI node always merges
/// together the same value, assuming undefs are equal to a unique
/// non-undef value. (Defined out-of-line.)
bool hasConstantOrUndefValue() const;
2936
2937 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2938 static bool classof(const Instruction *I) {
2939 return I->getOpcode() == Instruction::PHI;
2940 }
2941 static bool classof(const Value *V) {
2942 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2943 }
2944
2945private:
2946 void growOperands();
2947};
2948
// PHINode stores its operands "hung off" the instruction; the <2> reserves
// two extra slots' worth of space (used for the parallel block array —
// TODO confirm against HungoffOperandTraits' definition).
template <>
struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
};
2952
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value):
// out-of-line definitions of the transparent operand accessors declared
// inside the class.
PHINode::op_iterator PHINode::op_begin() {
  return OperandTraits<PHINode>::op_begin(this);
}
PHINode::const_op_iterator PHINode::op_begin() const {
  return OperandTraits<PHINode>::op_begin(const_cast<PHINode *>(this));
}
PHINode::op_iterator PHINode::op_end() {
  return OperandTraits<PHINode>::op_end(this);
}
PHINode::const_op_iterator PHINode::op_end() const {
  return OperandTraits<PHINode>::op_end(const_cast<PHINode *>(this));
}
Value *PHINode::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<PHINode>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<PHINode>::op_begin(
          const_cast<PHINode *>(this))[i_nocapture].get());
}
void PHINode::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<PHINode>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<PHINode>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned PHINode::getNumOperands() const {
  return OperandTraits<PHINode>::operands(this);
}
template <int Idx_nocapture> Use &PHINode::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &PHINode::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
2954
2955//===----------------------------------------------------------------------===//
2956// LandingPadInst Class
2957//===----------------------------------------------------------------------===//
2958
2959//===---------------------------------------------------------------------------
2960/// The landingpad instruction holds all of the information
2961/// necessary to generate correct exception handling. The landingpad instruction
2962/// cannot be moved from the top of a landing pad block, which itself is
2963/// accessible only from the 'unwind' edge of an invoke. This uses the
2964/// SubclassData field in Value to store whether or not the landingpad is a
2965/// cleanup.
2966///
2967class LandingPadInst : public Instruction {
2968 /// The number of operands actually allocated. NumOperands is
2969 /// the number actually in use.
2970 unsigned ReservedSpace;
2971
2972 LandingPadInst(const LandingPadInst &LP);
2973
2974public:
2975 enum ClauseType { Catch, Filter };
2976
2977private:
2978 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2979 const Twine &NameStr, Instruction *InsertBefore);
2980 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2981 const Twine &NameStr, BasicBlock *InsertAtEnd);
2982
2983 // Allocate space for exactly zero operands.
2984 void *operator new(size_t s) {
2985 return User::operator new(s);
2986 }
2987
2988 void growOperands(unsigned Size);
2989 void init(unsigned NumReservedValues, const Twine &NameStr);
2990
2991protected:
2992 // Note: Instruction needs to be a friend here to call cloneImpl.
2993 friend class Instruction;
2994
2995 LandingPadInst *cloneImpl() const;
2996
2997public:
2998 /// Constructors - NumReservedClauses is a hint for the number of incoming
2999 /// clauses that this landingpad will have (use 0 if you really have no idea).
3000 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3001 const Twine &NameStr = "",
3002 Instruction *InsertBefore = nullptr);
3003 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3004 const Twine &NameStr, BasicBlock *InsertAtEnd);
3005
3006 /// Provide fast operand accessors
3007 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3008
3009 /// Return 'true' if this landingpad instruction is a
3010 /// cleanup. I.e., it should be run when unwinding even if its landing pad
3011 /// doesn't catch the exception.
3012 bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
3013
3014 /// Indicate that this landingpad instruction is a cleanup.
3015 void setCleanup(bool V) {
3016 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
3017 (V ? 1 : 0));
3018 }
3019
3020 /// Add a catch or filter clause to the landing pad.
3021 void addClause(Constant *ClauseVal);
3022
3023 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3024 /// determine what type of clause this is.
3025 Constant *getClause(unsigned Idx) const {
3026 return cast<Constant>(getOperandList()[Idx]);
3027 }
3028
3029 /// Return 'true' if the clause and index Idx is a catch clause.
3030 bool isCatch(unsigned Idx) const {
3031 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3032 }
3033
3034 /// Return 'true' if the clause and index Idx is a filter clause.
3035 bool isFilter(unsigned Idx) const {
3036 return isa<ArrayType>(getOperandList()[Idx]->getType());
3037 }
3038
3039 /// Get the number of clauses for this landing pad.
3040 unsigned getNumClauses() const { return getNumOperands(); }
3041
3042 /// Grow the size of the operand list to accommodate the new
3043 /// number of clauses.
3044 void reserveClauses(unsigned Size) { growOperands(Size); }
3045
3046 // Methods for support type inquiry through isa, cast, and dyn_cast:
3047 static bool classof(const Instruction *I) {
3048 return I->getOpcode() == Instruction::LandingPad;
3049 }
3050 static bool classof(const Value *V) {
3051 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3052 }
3053};
3054
// LandingPadInst also uses hung-off operand storage for its clause list;
// the <1> parameterizes HungoffOperandTraits (semantics defined elsewhere —
// TODO confirm against its definition).
template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};
3058
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value):
// out-of-line definitions of the transparent operand accessors.
LandingPadInst::op_iterator LandingPadInst::op_begin() {
  return OperandTraits<LandingPadInst>::op_begin(this);
}
LandingPadInst::const_op_iterator LandingPadInst::op_begin() const {
  return OperandTraits<LandingPadInst>::op_begin(
      const_cast<LandingPadInst *>(this));
}
LandingPadInst::op_iterator LandingPadInst::op_end() {
  return OperandTraits<LandingPadInst>::op_end(this);
}
LandingPadInst::const_op_iterator LandingPadInst::op_end() const {
  return OperandTraits<LandingPadInst>::op_end(
      const_cast<LandingPadInst *>(this));
}
Value *LandingPadInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<LandingPadInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<LandingPadInst>::op_begin(
          const_cast<LandingPadInst *>(this))[i_nocapture].get());
}
void LandingPadInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<LandingPadInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<LandingPadInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned LandingPadInst::getNumOperands() const {
  return OperandTraits<LandingPadInst>::operands(this);
}
template <int Idx_nocapture> Use &LandingPadInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &LandingPadInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
3060
3061//===----------------------------------------------------------------------===//
3062// ReturnInst Class
3063//===----------------------------------------------------------------------===//
3064
3065//===---------------------------------------------------------------------------
3066/// Return a value (possibly void), from a function. Execution
3067/// does not continue in this function any longer.
3068///
3069class ReturnInst : public TerminatorInst {
3070 ReturnInst(const ReturnInst &RI);
3071
3072private:
3073 // ReturnInst constructors:
3074 // ReturnInst() - 'ret void' instruction
3075 // ReturnInst( null) - 'ret void' instruction
3076 // ReturnInst(Value* X) - 'ret X' instruction
3077 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3078 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3079 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3080 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3081 //
3082 // NOTE: If the Value* passed is of type void then the constructor behaves as
3083 // if it was passed NULL.
3084 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3085 Instruction *InsertBefore = nullptr);
3086 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3087 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3088
3089protected:
3090 // Note: Instruction needs to be a friend here to call cloneImpl.
3091 friend class Instruction;
3092
3093 ReturnInst *cloneImpl() const;
3094
3095public:
3096 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3097 Instruction *InsertBefore = nullptr) {
3098 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3099 }
3100
3101 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3102 BasicBlock *InsertAtEnd) {
3103 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3104 }
3105
3106 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3107 return new(0) ReturnInst(C, InsertAtEnd);
3108 }
3109
3110 /// Provide fast operand accessors
3111 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3112
3113 /// Convenience accessor. Returns null if there is no return value.
3114 Value *getReturnValue() const {
3115 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3116 }
3117
3118 unsigned getNumSuccessors() const { return 0; }
3119
3120 // Methods for support type inquiry through isa, cast, and dyn_cast:
3121 static bool classof(const Instruction *I) {
3122 return (I->getOpcode() == Instruction::Ret);
3123 }
3124 static bool classof(const Value *V) {
3125 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3126 }
3127
3128private:
3129 friend TerminatorInst;
3130
3131 BasicBlock *getSuccessor(unsigned idx) const {
3132 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 3132)
;
3133 }
3134
3135 void setSuccessor(unsigned idx, BasicBlock *B) {
3136 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/IR/Instructions.h"
, 3136)
;
3137 }
3138};
3139
// ReturnInst has a variable (0 or 1) number of operands co-allocated with the
// instruction, hence VariadicOperandTraits rather than hung-off storage.
template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};
3143
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value):
// out-of-line definitions of the transparent operand accessors.
ReturnInst::op_iterator ReturnInst::op_begin() {
  return OperandTraits<ReturnInst>::op_begin(this);
}
ReturnInst::const_op_iterator ReturnInst::op_begin() const {
  return OperandTraits<ReturnInst>::op_begin(const_cast<ReturnInst *>(this));
}
ReturnInst::op_iterator ReturnInst::op_end() {
  return OperandTraits<ReturnInst>::op_end(this);
}
ReturnInst::const_op_iterator ReturnInst::op_end() const {
  return OperandTraits<ReturnInst>::op_end(const_cast<ReturnInst *>(this));
}
Value *ReturnInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<ReturnInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<ReturnInst>::op_begin(
          const_cast<ReturnInst *>(this))[i_nocapture].get());
}
void ReturnInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<ReturnInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<ReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned ReturnInst::getNumOperands() const {
  return OperandTraits<ReturnInst>::operands(this);
}
template <int Idx_nocapture> Use &ReturnInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &ReturnInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
3145
3146//===----------------------------------------------------------------------===//
3147// BranchInst Class
3148//===----------------------------------------------------------------------===//
3149
3150//===---------------------------------------------------------------------------
3151/// Conditional or Unconditional Branch instruction.
3152///
3153class BranchInst : public TerminatorInst {
3154 /// Ops list - Branches are strange. The operands are ordered:
3155 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3156 /// they don't have to check for cond/uncond branchness. These are mostly
3157 /// accessed relative from op_end().
3158 BranchInst(