Bug Summary

File: lib/Transforms/Utils/Local.cpp
Warning:line 140, column 7
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name Local.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn338205/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/lib/gcc/x86_64-linux-gnu/8/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Transforms/Utils -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-07-29-043837-17923-1 -x c++ /build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp -faddrsig

/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp

1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This family of functions perform various local transformations to the
11// program.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/Utils/Local.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/DenseSet.h"
20#include "llvm/ADT/Hashing.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/Optional.h"
23#include "llvm/ADT/STLExtras.h"
24#include "llvm/ADT/SetVector.h"
25#include "llvm/ADT/SmallPtrSet.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/ADT/Statistic.h"
28#include "llvm/ADT/TinyPtrVector.h"
29#include "llvm/Analysis/ConstantFolding.h"
30#include "llvm/Analysis/EHPersonalities.h"
31#include "llvm/Analysis/InstructionSimplify.h"
32#include "llvm/Analysis/LazyValueInfo.h"
33#include "llvm/Analysis/MemoryBuiltins.h"
34#include "llvm/Analysis/TargetLibraryInfo.h"
35#include "llvm/Analysis/ValueTracking.h"
36#include "llvm/BinaryFormat/Dwarf.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/Attributes.h"
39#include "llvm/IR/BasicBlock.h"
40#include "llvm/IR/CFG.h"
41#include "llvm/IR/CallSite.h"
42#include "llvm/IR/Constant.h"
43#include "llvm/IR/ConstantRange.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DIBuilder.h"
46#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/DebugInfoMetadata.h"
48#include "llvm/IR/DebugLoc.h"
49#include "llvm/IR/DerivedTypes.h"
50#include "llvm/IR/Dominators.h"
51#include "llvm/IR/Function.h"
52#include "llvm/IR/GetElementPtrTypeIterator.h"
53#include "llvm/IR/GlobalObject.h"
54#include "llvm/IR/IRBuilder.h"
55#include "llvm/IR/InstrTypes.h"
56#include "llvm/IR/Instruction.h"
57#include "llvm/IR/Instructions.h"
58#include "llvm/IR/IntrinsicInst.h"
59#include "llvm/IR/Intrinsics.h"
60#include "llvm/IR/LLVMContext.h"
61#include "llvm/IR/MDBuilder.h"
62#include "llvm/IR/Metadata.h"
63#include "llvm/IR/Module.h"
64#include "llvm/IR/Operator.h"
65#include "llvm/IR/PatternMatch.h"
66#include "llvm/IR/Type.h"
67#include "llvm/IR/Use.h"
68#include "llvm/IR/User.h"
69#include "llvm/IR/Value.h"
70#include "llvm/IR/ValueHandle.h"
71#include "llvm/Support/Casting.h"
72#include "llvm/Support/Debug.h"
73#include "llvm/Support/ErrorHandling.h"
74#include "llvm/Support/KnownBits.h"
75#include "llvm/Support/raw_ostream.h"
76#include "llvm/Transforms/Utils/ValueMapper.h"
77#include <algorithm>
78#include <cassert>
79#include <climits>
80#include <cstdint>
81#include <iterator>
82#include <map>
83#include <utility>
84
85using namespace llvm;
86using namespace llvm::PatternMatch;
87
88#define DEBUG_TYPE"local" "local"
89
90STATISTIC(NumRemoved, "Number of unreachable basic blocks removed")static llvm::Statistic NumRemoved = {"local", "NumRemoved", "Number of unreachable basic blocks removed"
, {0}, {false}}
;
91
92//===----------------------------------------------------------------------===//
93// Local constant propagation.
94//
95
96/// ConstantFoldTerminator - If a terminator instruction is predicated on a
97/// constant value, convert it into an unconditional branch to the constant
98/// destination. This is a nontrivial operation because the successors of this
99/// basic block must have their PHI nodes updated.
100/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
101/// conditions and indirectbr addresses this might make dead if
102/// DeleteDeadConditions is true.
103bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
104 const TargetLibraryInfo *TLI,
105 DeferredDominance *DDT) {
106 TerminatorInst *T = BB->getTerminator();
107 IRBuilder<> Builder(T);
108
109 // Branch - See if we are conditional jumping on constant
110 if (auto *BI = dyn_cast<BranchInst>(T)) {
5
Assuming 'BI' is non-null
6
Taking true branch
111 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
7
Taking false branch
112 BasicBlock *Dest1 = BI->getSuccessor(0);
8
Calling 'BranchInst::getSuccessor'
15
Returning from 'BranchInst::getSuccessor'
16
'Dest1' initialized here
113 BasicBlock *Dest2 = BI->getSuccessor(1);
114
115 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
17
Taking false branch
116 // Are we branching on constant?
117 // YES. Change to unconditional branch...
118 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
119 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
120
121 // Let the basic block know that we are letting go of it. Based on this,
 122 // it will adjust its PHI nodes.
123 OldDest->removePredecessor(BB);
124
125 // Replace the conditional branch with an unconditional one.
126 Builder.CreateBr(Destination);
127 BI->eraseFromParent();
128 if (DDT)
129 DDT->deleteEdge(BB, OldDest);
130 return true;
131 }
132
133 if (Dest2 == Dest1) { // Conditional branch to same location?
18
Assuming 'Dest2' is equal to 'Dest1'
19
Assuming pointer value is null
20
Taking true branch
134 // This branch matches something like this:
135 // br bool %cond, label %Dest, label %Dest
136 // and changes it into: br label %Dest
137
138 // Let the basic block know that we are letting go of one copy of it.
139 assert(BI->getParent() && "Terminator not inserted in block!")(static_cast <bool> (BI->getParent() && "Terminator not inserted in block!"
) ? void (0) : __assert_fail ("BI->getParent() && \"Terminator not inserted in block!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 139, __extension__ __PRETTY_FUNCTION__))
;
140 Dest1->removePredecessor(BI->getParent());
21
Called C++ object pointer is null
141
142 // Replace the conditional branch with an unconditional one.
143 Builder.CreateBr(Dest1);
144 Value *Cond = BI->getCondition();
145 BI->eraseFromParent();
146 if (DeleteDeadConditions)
147 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
148 return true;
149 }
150 return false;
151 }
152
153 if (auto *SI = dyn_cast<SwitchInst>(T)) {
154 // If we are switching on a constant, we can convert the switch to an
155 // unconditional branch.
156 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
157 BasicBlock *DefaultDest = SI->getDefaultDest();
158 BasicBlock *TheOnlyDest = DefaultDest;
159
160 // If the default is unreachable, ignore it when searching for TheOnlyDest.
161 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
162 SI->getNumCases() > 0) {
163 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
164 }
165
166 // Figure out which case it goes to.
167 for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
168 // Found case matching a constant operand?
169 if (i->getCaseValue() == CI) {
170 TheOnlyDest = i->getCaseSuccessor();
171 break;
172 }
173
174 // Check to see if this branch is going to the same place as the default
175 // dest. If so, eliminate it as an explicit compare.
176 if (i->getCaseSuccessor() == DefaultDest) {
177 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
178 unsigned NCases = SI->getNumCases();
179 // Fold the case metadata into the default if there will be any branches
180 // left, unless the metadata doesn't match the switch.
181 if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
182 // Collect branch weights into a vector.
183 SmallVector<uint32_t, 8> Weights;
184 for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
185 ++MD_i) {
186 auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
187 Weights.push_back(CI->getValue().getZExtValue());
188 }
189 // Merge weight of this case to the default weight.
190 unsigned idx = i->getCaseIndex();
191 Weights[0] += Weights[idx+1];
192 // Remove weight for this case.
193 std::swap(Weights[idx+1], Weights.back());
194 Weights.pop_back();
195 SI->setMetadata(LLVMContext::MD_prof,
196 MDBuilder(BB->getContext()).
197 createBranchWeights(Weights));
198 }
199 // Remove this entry.
200 BasicBlock *ParentBB = SI->getParent();
201 DefaultDest->removePredecessor(ParentBB);
202 i = SI->removeCase(i);
203 e = SI->case_end();
204 if (DDT)
205 DDT->deleteEdge(ParentBB, DefaultDest);
206 continue;
207 }
208
209 // Otherwise, check to see if the switch only branches to one destination.
 210 // We do this by resetting "TheOnlyDest" to null when we find two non-equal
211 // destinations.
212 if (i->getCaseSuccessor() != TheOnlyDest)
213 TheOnlyDest = nullptr;
214
215 // Increment this iterator as we haven't removed the case.
216 ++i;
217 }
218
219 if (CI && !TheOnlyDest) {
220 // Branching on a constant, but not any of the cases, go to the default
221 // successor.
222 TheOnlyDest = SI->getDefaultDest();
223 }
224
225 // If we found a single destination that we can fold the switch into, do so
226 // now.
227 if (TheOnlyDest) {
228 // Insert the new branch.
229 Builder.CreateBr(TheOnlyDest);
230 BasicBlock *BB = SI->getParent();
231 std::vector <DominatorTree::UpdateType> Updates;
232 if (DDT)
233 Updates.reserve(SI->getNumSuccessors() - 1);
234
235 // Remove entries from PHI nodes which we no longer branch to...
236 for (BasicBlock *Succ : SI->successors()) {
237 // Found case matching a constant operand?
238 if (Succ == TheOnlyDest) {
239 TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
240 } else {
241 Succ->removePredecessor(BB);
242 if (DDT)
243 Updates.push_back({DominatorTree::Delete, BB, Succ});
244 }
245 }
246
247 // Delete the old switch.
248 Value *Cond = SI->getCondition();
249 SI->eraseFromParent();
250 if (DeleteDeadConditions)
251 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
252 if (DDT)
253 DDT->applyUpdates(Updates);
254 return true;
255 }
256
257 if (SI->getNumCases() == 1) {
258 // Otherwise, we can fold this switch into a conditional branch
259 // instruction if it has only one non-default destination.
260 auto FirstCase = *SI->case_begin();
261 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
262 FirstCase.getCaseValue(), "cond");
263
264 // Insert the new branch.
265 BranchInst *NewBr = Builder.CreateCondBr(Cond,
266 FirstCase.getCaseSuccessor(),
267 SI->getDefaultDest());
268 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
269 if (MD && MD->getNumOperands() == 3) {
270 ConstantInt *SICase =
271 mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
272 ConstantInt *SIDef =
273 mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
274 assert(SICase && SIDef)(static_cast <bool> (SICase && SIDef) ? void (0
) : __assert_fail ("SICase && SIDef", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 274, __extension__ __PRETTY_FUNCTION__))
;
275 // The TrueWeight should be the weight for the single case of SI.
276 NewBr->setMetadata(LLVMContext::MD_prof,
277 MDBuilder(BB->getContext()).
278 createBranchWeights(SICase->getValue().getZExtValue(),
279 SIDef->getValue().getZExtValue()));
280 }
281
282 // Update make.implicit metadata to the newly-created conditional branch.
283 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
284 if (MakeImplicitMD)
285 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
286
287 // Delete the old switch.
288 SI->eraseFromParent();
289 return true;
290 }
291 return false;
292 }
293
294 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
295 // indirectbr blockaddress(@F, @BB) -> br label @BB
296 if (auto *BA =
297 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
298 BasicBlock *TheOnlyDest = BA->getBasicBlock();
299 std::vector <DominatorTree::UpdateType> Updates;
300 if (DDT)
301 Updates.reserve(IBI->getNumDestinations() - 1);
302
303 // Insert the new branch.
304 Builder.CreateBr(TheOnlyDest);
305
306 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
307 if (IBI->getDestination(i) == TheOnlyDest) {
308 TheOnlyDest = nullptr;
309 } else {
310 BasicBlock *ParentBB = IBI->getParent();
311 BasicBlock *DestBB = IBI->getDestination(i);
312 DestBB->removePredecessor(ParentBB);
313 if (DDT)
314 Updates.push_back({DominatorTree::Delete, ParentBB, DestBB});
315 }
316 }
317 Value *Address = IBI->getAddress();
318 IBI->eraseFromParent();
319 if (DeleteDeadConditions)
320 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
321
322 // If we didn't find our destination in the IBI successor list, then we
323 // have undefined behavior. Replace the unconditional branch with an
324 // 'unreachable' instruction.
325 if (TheOnlyDest) {
326 BB->getTerminator()->eraseFromParent();
327 new UnreachableInst(BB->getContext(), BB);
328 }
329
330 if (DDT)
331 DDT->applyUpdates(Updates);
332 return true;
333 }
334 }
335
336 return false;
337}
338
339//===----------------------------------------------------------------------===//
340// Local dead code elimination.
341//
342
343/// isInstructionTriviallyDead - Return true if the result produced by the
344/// instruction is not used, and the instruction has no side effects.
345///
346bool llvm::isInstructionTriviallyDead(Instruction *I,
347 const TargetLibraryInfo *TLI) {
348 if (!I->use_empty())
349 return false;
350 return wouldInstructionBeTriviallyDead(I, TLI);
351}
352
353bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
354 const TargetLibraryInfo *TLI) {
355 if (isa<TerminatorInst>(I))
356 return false;
357
358 // We don't want the landingpad-like instructions removed by anything this
359 // general.
360 if (I->isEHPad())
361 return false;
362
363 // We don't want debug info removed by anything this general, unless
364 // debug info is empty.
365 if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
366 if (DDI->getAddress())
367 return false;
368 return true;
369 }
370 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
371 if (DVI->getValue())
372 return false;
373 return true;
374 }
375 if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
376 if (DLI->getLabel())
377 return false;
378 return true;
379 }
380
381 if (!I->mayHaveSideEffects())
382 return true;
383
384 // Special case intrinsics that "may have side effects" but can be deleted
385 // when dead.
386 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
387 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
388 if (II->getIntrinsicID() == Intrinsic::stacksave ||
389 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
390 return true;
391
392 // Lifetime intrinsics are dead when their right-hand is undef.
393 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
394 II->getIntrinsicID() == Intrinsic::lifetime_end)
395 return isa<UndefValue>(II->getArgOperand(1));
396
397 // Assumptions are dead if their condition is trivially true. Guards on
398 // true are operationally no-ops. In the future we can consider more
399 // sophisticated tradeoffs for guards considering potential for check
400 // widening, but for now we keep things simple.
401 if (II->getIntrinsicID() == Intrinsic::assume ||
402 II->getIntrinsicID() == Intrinsic::experimental_guard) {
403 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
404 return !Cond->isZero();
405
406 return false;
407 }
408 }
409
410 if (isAllocLikeFn(I, TLI))
411 return true;
412
413 if (CallInst *CI = isFreeCall(I, TLI))
414 if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
415 return C->isNullValue() || isa<UndefValue>(C);
416
417 if (CallSite CS = CallSite(I))
418 if (isMathLibCallNoop(CS, TLI))
419 return true;
420
421 return false;
422}
423
424/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
425/// trivially dead instruction, delete it. If that makes any of its operands
426/// trivially dead, delete them too, recursively. Return true if any
427/// instructions were deleted.
428bool
429llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
430 const TargetLibraryInfo *TLI) {
431 Instruction *I = dyn_cast<Instruction>(V);
432 if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
433 return false;
434
435 SmallVector<Instruction*, 16> DeadInsts;
436 DeadInsts.push_back(I);
437 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI);
438
439 return true;
440}
441
442void llvm::RecursivelyDeleteTriviallyDeadInstructions(
443 SmallVectorImpl<Instruction *> &DeadInsts, const TargetLibraryInfo *TLI) {
444 // Process the dead instruction list until empty.
445 while (!DeadInsts.empty()) {
446 Instruction &I = *DeadInsts.pop_back_val();
447 assert(I.use_empty() && "Instructions with uses are not dead.")(static_cast <bool> (I.use_empty() && "Instructions with uses are not dead."
) ? void (0) : __assert_fail ("I.use_empty() && \"Instructions with uses are not dead.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 447, __extension__ __PRETTY_FUNCTION__))
;
448 assert(isInstructionTriviallyDead(&I, TLI) &&(static_cast <bool> (isInstructionTriviallyDead(&I,
TLI) && "Live instruction found in dead worklist!") ?
void (0) : __assert_fail ("isInstructionTriviallyDead(&I, TLI) && \"Live instruction found in dead worklist!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 449, __extension__ __PRETTY_FUNCTION__))
449 "Live instruction found in dead worklist!")(static_cast <bool> (isInstructionTriviallyDead(&I,
TLI) && "Live instruction found in dead worklist!") ?
void (0) : __assert_fail ("isInstructionTriviallyDead(&I, TLI) && \"Live instruction found in dead worklist!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 449, __extension__ __PRETTY_FUNCTION__))
;
450
451 // Don't lose the debug info while deleting the instructions.
452 salvageDebugInfo(I);
453
454 // Null out all of the instruction's operands to see if any operand becomes
455 // dead as we go.
456 for (Use &OpU : I.operands()) {
457 Value *OpV = OpU.get();
458 OpU.set(nullptr);
459
460 if (!OpV->use_empty())
461 continue;
462
463 // If the operand is an instruction that became dead as we nulled out the
464 // operand, and if it is 'trivially' dead, delete it in a future loop
465 // iteration.
466 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
467 if (isInstructionTriviallyDead(OpI, TLI))
468 DeadInsts.push_back(OpI);
469 }
470
471 I.eraseFromParent();
472 }
473}
474
475/// areAllUsesEqual - Check whether the uses of a value are all the same.
476/// This is similar to Instruction::hasOneUse() except this will also return
477/// true when there are no uses or multiple uses that all refer to the same
478/// value.
479static bool areAllUsesEqual(Instruction *I) {
480 Value::user_iterator UI = I->user_begin();
481 Value::user_iterator UE = I->user_end();
482 if (UI == UE)
483 return true;
484
485 User *TheUse = *UI;
486 for (++UI; UI != UE; ++UI) {
487 if (*UI != TheUse)
488 return false;
489 }
490 return true;
491}
492
493/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
494/// dead PHI node, due to being a def-use chain of single-use nodes that
495/// either forms a cycle or is terminated by a trivially dead instruction,
496/// delete it. If that makes any of its operands trivially dead, delete them
497/// too, recursively. Return true if a change was made.
498bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
499 const TargetLibraryInfo *TLI) {
500 SmallPtrSet<Instruction*, 4> Visited;
501 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
502 I = cast<Instruction>(*I->user_begin())) {
503 if (I->use_empty())
504 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
505
506 // If we find an instruction more than once, we're on a cycle that
507 // won't prove fruitful.
508 if (!Visited.insert(I).second) {
509 // Break the cycle and delete the instruction and its operands.
510 I->replaceAllUsesWith(UndefValue::get(I->getType()));
511 (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
512 return true;
513 }
514 }
515 return false;
516}
517
518static bool
519simplifyAndDCEInstruction(Instruction *I,
520 SmallSetVector<Instruction *, 16> &WorkList,
521 const DataLayout &DL,
522 const TargetLibraryInfo *TLI) {
523 if (isInstructionTriviallyDead(I, TLI)) {
524 salvageDebugInfo(*I);
525
526 // Null out all of the instruction's operands to see if any operand becomes
527 // dead as we go.
528 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
529 Value *OpV = I->getOperand(i);
530 I->setOperand(i, nullptr);
531
532 if (!OpV->use_empty() || I == OpV)
533 continue;
534
535 // If the operand is an instruction that became dead as we nulled out the
536 // operand, and if it is 'trivially' dead, delete it in a future loop
537 // iteration.
538 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
539 if (isInstructionTriviallyDead(OpI, TLI))
540 WorkList.insert(OpI);
541 }
542
543 I->eraseFromParent();
544
545 return true;
546 }
547
548 if (Value *SimpleV = SimplifyInstruction(I, DL)) {
549 // Add the users to the worklist. CAREFUL: an instruction can use itself,
550 // in the case of a phi node.
551 for (User *U : I->users()) {
552 if (U != I) {
553 WorkList.insert(cast<Instruction>(U));
554 }
555 }
556
557 // Replace the instruction with its simplified value.
558 bool Changed = false;
559 if (!I->use_empty()) {
560 I->replaceAllUsesWith(SimpleV);
561 Changed = true;
562 }
563 if (isInstructionTriviallyDead(I, TLI)) {
564 I->eraseFromParent();
565 Changed = true;
566 }
567 return Changed;
568 }
569 return false;
570}
571
572/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
573/// simplify any instructions in it and recursively delete dead instructions.
574///
575/// This returns true if it changed the code, note that it can delete
576/// instructions in other blocks as well in this block.
577bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
578 const TargetLibraryInfo *TLI) {
579 bool MadeChange = false;
580 const DataLayout &DL = BB->getModule()->getDataLayout();
581
582#ifndef NDEBUG
583 // In debug builds, ensure that the terminator of the block is never replaced
584 // or deleted by these simplifications. The idea of simplification is that it
585 // cannot introduce new instructions, and there is no way to replace the
586 // terminator of a block without introducing a new instruction.
587 AssertingVH<Instruction> TerminatorVH(&BB->back());
588#endif
589
590 SmallSetVector<Instruction *, 16> WorkList;
591 // Iterate over the original function, only adding insts to the worklist
592 // if they actually need to be revisited. This avoids having to pre-init
593 // the worklist with the entire function's worth of instructions.
594 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
595 BI != E;) {
596 assert(!BI->isTerminator())(static_cast <bool> (!BI->isTerminator()) ? void (0)
: __assert_fail ("!BI->isTerminator()", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 596, __extension__ __PRETTY_FUNCTION__))
;
597 Instruction *I = &*BI;
598 ++BI;
599
600 // We're visiting this instruction now, so make sure it's not in the
601 // worklist from an earlier visit.
602 if (!WorkList.count(I))
603 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
604 }
605
606 while (!WorkList.empty()) {
607 Instruction *I = WorkList.pop_back_val();
608 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
609 }
610 return MadeChange;
611}
612
613//===----------------------------------------------------------------------===//
614// Control Flow Graph Restructuring.
615//
616
617/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
618/// method is called when we're about to delete Pred as a predecessor of BB. If
619/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
620///
621/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
622/// nodes that collapse into identity values. For example, if we have:
623/// x = phi(1, 0, 0, 0)
624/// y = and x, z
625///
626/// .. and delete the predecessor corresponding to the '1', this will attempt to
627/// recursively fold the and to 0.
628void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
629 DeferredDominance *DDT) {
630 // This only adjusts blocks with PHI nodes.
631 if (!isa<PHINode>(BB->begin()))
632 return;
633
634 // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
635 // them down. This will leave us with single entry phi nodes and other phis
636 // that can be removed.
637 BB->removePredecessor(Pred, true);
638
639 WeakTrackingVH PhiIt = &BB->front();
640 while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
641 PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
642 Value *OldPhiIt = PhiIt;
643
644 if (!recursivelySimplifyInstruction(PN))
645 continue;
646
647 // If recursive simplification ended up deleting the next PHI node we would
648 // iterate to, then our iterator is invalid, restart scanning from the top
649 // of the block.
650 if (PhiIt != OldPhiIt) PhiIt = &BB->front();
651 }
652 if (DDT)
653 DDT->deleteEdge(Pred, BB);
654}
655
656/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
657/// predecessor is known to have one successor (DestBB!). Eliminate the edge
658/// between them, moving the instructions in the predecessor into DestBB and
659/// deleting the predecessor block.
660void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, DominatorTree *DT,
661 DeferredDominance *DDT) {
662 assert(!(DT && DDT) && "Cannot call with both DT and DDT.")(static_cast <bool> (!(DT && DDT) && "Cannot call with both DT and DDT."
) ? void (0) : __assert_fail ("!(DT && DDT) && \"Cannot call with both DT and DDT.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 662, __extension__ __PRETTY_FUNCTION__))
;
663
664 // If BB has single-entry PHI nodes, fold them.
665 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
666 Value *NewVal = PN->getIncomingValue(0);
667 // Replace self referencing PHI with undef, it must be dead.
668 if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
669 PN->replaceAllUsesWith(NewVal);
670 PN->eraseFromParent();
671 }
672
673 BasicBlock *PredBB = DestBB->getSinglePredecessor();
674 assert(PredBB && "Block doesn't have a single predecessor!")(static_cast <bool> (PredBB && "Block doesn't have a single predecessor!"
) ? void (0) : __assert_fail ("PredBB && \"Block doesn't have a single predecessor!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 674, __extension__ __PRETTY_FUNCTION__))
;
675
676 bool ReplaceEntryBB = false;
677 if (PredBB == &DestBB->getParent()->getEntryBlock())
678 ReplaceEntryBB = true;
679
680 // Deferred DT update: Collect all the edges that enter PredBB. These
681 // dominator edges will be redirected to DestBB.
682 std::vector <DominatorTree::UpdateType> Updates;
683 if (DDT && !ReplaceEntryBB) {
684 Updates.reserve(1 + (2 * pred_size(PredBB)));
685 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
686 for (auto I = pred_begin(PredBB), E = pred_end(PredBB); I != E; ++I) {
687 Updates.push_back({DominatorTree::Delete, *I, PredBB});
688 // This predecessor of PredBB may already have DestBB as a successor.
689 if (llvm::find(successors(*I), DestBB) == succ_end(*I))
690 Updates.push_back({DominatorTree::Insert, *I, DestBB});
691 }
692 }
693
694 // Zap anything that took the address of DestBB. Not doing this will give the
695 // address an invalid value.
696 if (DestBB->hasAddressTaken()) {
697 BlockAddress *BA = BlockAddress::get(DestBB);
698 Constant *Replacement =
699 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
700 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
701 BA->getType()));
702 BA->destroyConstant();
703 }
704
705 // Anything that branched to PredBB now branches to DestBB.
706 PredBB->replaceAllUsesWith(DestBB);
707
708 // Splice all the instructions from PredBB to DestBB.
709 PredBB->getTerminator()->eraseFromParent();
710 DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
711
712 // If the PredBB is the entry block of the function, move DestBB up to
713 // become the entry block after we erase PredBB.
714 if (ReplaceEntryBB)
715 DestBB->moveAfter(PredBB);
716
717 if (DT) {
718 // For some irreducible CFG we end up having forward-unreachable blocks
719 // so check if getNode returns a valid node before updating the domtree.
720 if (DomTreeNode *DTN = DT->getNode(PredBB)) {
721 BasicBlock *PredBBIDom = DTN->getIDom()->getBlock();
722 DT->changeImmediateDominator(DestBB, PredBBIDom);
723 DT->eraseNode(PredBB);
724 }
725 }
726
727 if (DDT) {
728 DDT->deleteBB(PredBB); // Deferred deletion of BB.
729 if (ReplaceEntryBB)
730 // The entry block was removed and there is no external interface for the
731 // dominator tree to be notified of this change. In this corner-case we
732 // recalculate the entire tree.
733 DDT->recalculate(*(DestBB->getParent()));
734 else
735 DDT->applyUpdates(Updates);
736 } else {
737 PredBB->eraseFromParent(); // Nuke BB.
738 }
739}
740
741/// CanMergeValues - Return true if we can choose one of these values to use
742/// in place of the other. Note that we will always choose the non-undef
743/// value to keep.
744static bool CanMergeValues(Value *First, Value *Second) {
745 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
746}
747
748/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
749/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
750///
751/// Assumption: Succ is the single successor for BB.
752static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
753 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!")(static_cast <bool> (*succ_begin(BB) == Succ &&
"Succ is not successor of BB!") ? void (0) : __assert_fail (
"*succ_begin(BB) == Succ && \"Succ is not successor of BB!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 753, __extension__ __PRETTY_FUNCTION__))
;
754
755 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Looking to fold " << BB->
getName() << " into " << Succ->getName() <<
"\n"; } } while (false)
756 << Succ->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Looking to fold " << BB->
getName() << " into " << Succ->getName() <<
"\n"; } } while (false)
;
757 // Shortcut, if there is only a single predecessor it must be BB and merging
758 // is always safe
759 if (Succ->getSinglePredecessor()) return true;
760
761 // Make a list of the predecessors of BB
762 SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
763
764 // Look at all the phi nodes in Succ, to see if they present a conflict when
765 // merging these blocks
766 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
767 PHINode *PN = cast<PHINode>(I);
768
769 // If the incoming value from BB is again a PHINode in
770 // BB which has the same incoming value for *PI as PN does, we can
771 // merge the phi nodes and then the blocks can still be merged
772 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
773 if (BBPN && BBPN->getParent() == BB) {
774 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
775 BasicBlock *IBB = PN->getIncomingBlock(PI);
776 if (BBPreds.count(IBB) &&
777 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
778 PN->getIncomingValue(PI))) {
779 LLVM_DEBUG(dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
780 << "Can't fold, phi node " << PN->getName() << " in "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
781 << Succ->getName() << " is conflicting with "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
782 << BBPN->getName() << " with regard to common predecessor "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
783 << IBB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
;
784 return false;
785 }
786 }
787 } else {
788 Value* Val = PN->getIncomingValueForBlock(BB);
789 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
790 // See if the incoming value for the common predecessor is equal to the
791 // one for BB, in which case this phi node will not prevent the merging
792 // of the block.
793 BasicBlock *IBB = PN->getIncomingBlock(PI);
794 if (BBPreds.count(IBB) &&
795 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
796 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
797 << " in " << Succ->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
798 << " is conflicting with regard to common "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
799 << "predecessor " << IBB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
;
800 return false;
801 }
802 }
803 }
804 }
805
806 return true;
807}
808
809using PredBlockVector = SmallVector<BasicBlock *, 16>;
810using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
811
812/// Determines the value to use as the phi node input for a block.
813///
814/// Select between \p OldVal any value that we know flows from \p BB
815/// to a particular phi on the basis of which one (if either) is not
816/// undef. Update IncomingValues based on the selected value.
817///
818/// \param OldVal The value we are considering selecting.
819/// \param BB The block that the value flows in from.
820/// \param IncomingValues A map from block-to-value for other phi inputs
821/// that we have examined.
822///
823/// \returns the selected value.
824static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
825 IncomingValueMap &IncomingValues) {
826 if (!isa<UndefValue>(OldVal)) {
827 assert((!IncomingValues.count(BB) ||(static_cast <bool> ((!IncomingValues.count(BB) || IncomingValues
.find(BB)->second == OldVal) && "Expected OldVal to match incoming value from BB!"
) ? void (0) : __assert_fail ("(!IncomingValues.count(BB) || IncomingValues.find(BB)->second == OldVal) && \"Expected OldVal to match incoming value from BB!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 829, __extension__ __PRETTY_FUNCTION__))
828 IncomingValues.find(BB)->second == OldVal) &&(static_cast <bool> ((!IncomingValues.count(BB) || IncomingValues
.find(BB)->second == OldVal) && "Expected OldVal to match incoming value from BB!"
) ? void (0) : __assert_fail ("(!IncomingValues.count(BB) || IncomingValues.find(BB)->second == OldVal) && \"Expected OldVal to match incoming value from BB!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 829, __extension__ __PRETTY_FUNCTION__))
829 "Expected OldVal to match incoming value from BB!")(static_cast <bool> ((!IncomingValues.count(BB) || IncomingValues
.find(BB)->second == OldVal) && "Expected OldVal to match incoming value from BB!"
) ? void (0) : __assert_fail ("(!IncomingValues.count(BB) || IncomingValues.find(BB)->second == OldVal) && \"Expected OldVal to match incoming value from BB!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 829, __extension__ __PRETTY_FUNCTION__))
;
830
831 IncomingValues.insert(std::make_pair(BB, OldVal));
832 return OldVal;
833 }
834
835 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
836 if (It != IncomingValues.end()) return It->second;
837
838 return OldVal;
839}
840
841/// Create a map from block to value for the operands of a
842/// given phi.
843///
844/// Create a map from block to value for each non-undef value flowing
845/// into \p PN.
846///
847/// \param PN The phi we are collecting the map for.
848/// \param IncomingValues [out] The map from block to value for this phi.
849static void gatherIncomingValuesToPhi(PHINode *PN,
850 IncomingValueMap &IncomingValues) {
851 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
852 BasicBlock *BB = PN->getIncomingBlock(i);
853 Value *V = PN->getIncomingValue(i);
854
855 if (!isa<UndefValue>(V))
856 IncomingValues.insert(std::make_pair(BB, V));
857 }
858}
859
860/// Replace the incoming undef values to a phi with the values
861/// from a block-to-value map.
862///
863/// \param PN The phi we are replacing the undefs in.
864/// \param IncomingValues A map from block to value.
865static void replaceUndefValuesInPhi(PHINode *PN,
866 const IncomingValueMap &IncomingValues) {
867 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
868 Value *V = PN->getIncomingValue(i);
869
870 if (!isa<UndefValue>(V)) continue;
871
872 BasicBlock *BB = PN->getIncomingBlock(i);
873 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
874 if (It == IncomingValues.end()) continue;
875
876 PN->setIncomingValue(i, It->second);
877 }
878}
879
880/// Replace a value flowing from a block to a phi with
881/// potentially multiple instances of that value flowing from the
882/// block's predecessors to the phi.
883///
884/// \param BB The block with the value flowing into the phi.
885/// \param BBPreds The predecessors of BB.
886/// \param PN The phi that we are updating.
887static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
888 const PredBlockVector &BBPreds,
889 PHINode *PN) {
890 Value *OldVal = PN->removeIncomingValue(BB, false);
891 assert(OldVal && "No entry in PHI for Pred BB!")(static_cast <bool> (OldVal && "No entry in PHI for Pred BB!"
) ? void (0) : __assert_fail ("OldVal && \"No entry in PHI for Pred BB!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 891, __extension__ __PRETTY_FUNCTION__))
;
892
893 IncomingValueMap IncomingValues;
894
895 // We are merging two blocks - BB, and the block containing PN - and
896 // as a result we need to redirect edges from the predecessors of BB
897 // to go to the block containing PN, and update PN
898 // accordingly. Since we allow merging blocks in the case where the
899 // predecessor and successor blocks both share some predecessors,
900 // and where some of those common predecessors might have undef
901 // values flowing into PN, we want to rewrite those values to be
902 // consistent with the non-undef values.
903
904 gatherIncomingValuesToPhi(PN, IncomingValues);
905
906 // If this incoming value is one of the PHI nodes in BB, the new entries
907 // in the PHI node are the entries from the old PHI.
908 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
909 PHINode *OldValPN = cast<PHINode>(OldVal);
910 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
911 // Note that, since we are merging phi nodes and BB and Succ might
912 // have common predecessors, we could end up with a phi node with
913 // identical incoming branches. This will be cleaned up later (and
914 // will trigger asserts if we try to clean it up now, without also
915 // simplifying the corresponding conditional branch).
916 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
917 Value *PredVal = OldValPN->getIncomingValue(i);
918 Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
919 IncomingValues);
920
921 // And add a new incoming value for this predecessor for the
922 // newly retargeted branch.
923 PN->addIncoming(Selected, PredBB);
924 }
925 } else {
926 for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
927 // Update existing incoming values in PN for this
928 // predecessor of BB.
929 BasicBlock *PredBB = BBPreds[i];
930 Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
931 IncomingValues);
932
933 // And add a new incoming value for this predecessor for the
934 // newly retargeted branch.
935 PN->addIncoming(Selected, PredBB);
936 }
937 }
938
939 replaceUndefValuesInPhi(PN, IncomingValues);
940}
941
942/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
943/// unconditional branch, and contains no instructions other than PHI nodes,
944/// potential side-effect free intrinsics and the branch. If possible,
945/// eliminate BB by rewriting all the predecessors to branch to the successor
946/// block and return true. If we can't transform, return false.
947bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
948 DeferredDominance *DDT) {
949 assert(BB != &BB->getParent()->getEntryBlock() &&(static_cast <bool> (BB != &BB->getParent()->
getEntryBlock() && "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!"
) ? void (0) : __assert_fail ("BB != &BB->getParent()->getEntryBlock() && \"TryToSimplifyUncondBranchFromEmptyBlock called on entry block!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 950, __extension__ __PRETTY_FUNCTION__))
950 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!")(static_cast <bool> (BB != &BB->getParent()->
getEntryBlock() && "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!"
) ? void (0) : __assert_fail ("BB != &BB->getParent()->getEntryBlock() && \"TryToSimplifyUncondBranchFromEmptyBlock called on entry block!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 950, __extension__ __PRETTY_FUNCTION__))
;
951
952 // We can't eliminate infinite loops.
953 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
954 if (BB == Succ) return false;
955
956 // Check to see if merging these blocks would cause conflicts for any of the
957 // phi nodes in BB or Succ. If not, we can safely merge.
958 if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
959
960 // Check for cases where Succ has multiple predecessors and a PHI node in BB
961 // has uses which will not disappear when the PHI nodes are merged. It is
962 // possible to handle such cases, but difficult: it requires checking whether
963 // BB dominates Succ, which is non-trivial to calculate in the case where
964 // Succ has multiple predecessors. Also, it requires checking whether
965 // constructing the necessary self-referential PHI node doesn't introduce any
966 // conflicts; this isn't too difficult, but the previous code for doing this
967 // was incorrect.
968 //
969 // Note that if this check finds a live use, BB dominates Succ, so BB is
970 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
971 // folding the branch isn't profitable in that case anyway.
972 if (!Succ->getSinglePredecessor()) {
973 BasicBlock::iterator BBI = BB->begin();
974 while (isa<PHINode>(*BBI)) {
975 for (Use &U : BBI->uses()) {
976 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
977 if (PN->getIncomingBlock(U) != BB)
978 return false;
979 } else {
980 return false;
981 }
982 }
983 ++BBI;
984 }
985 }
986
987 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Killing Trivial BB: \n" <<
*BB; } } while (false)
;
988
989 std::vector<DominatorTree::UpdateType> Updates;
990 if (DDT) {
991 Updates.reserve(1 + (2 * pred_size(BB)));
992 Updates.push_back({DominatorTree::Delete, BB, Succ});
993 // All predecessors of BB will be moved to Succ.
994 for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
995 Updates.push_back({DominatorTree::Delete, *I, BB});
996 // This predecessor of BB may already have Succ as a successor.
997 if (llvm::find(successors(*I), Succ) == succ_end(*I))
998 Updates.push_back({DominatorTree::Insert, *I, Succ});
999 }
1000 }
1001
1002 if (isa<PHINode>(Succ->begin())) {
1003 // If there is more than one pred of succ, and there are PHI nodes in
1004 // the successor, then we need to add incoming edges for the PHI nodes
1005 //
1006 const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
1007
1008 // Loop over all of the PHI nodes in the successor of BB.
1009 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1010 PHINode *PN = cast<PHINode>(I);
1011
1012 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1013 }
1014 }
1015
1016 if (Succ->getSinglePredecessor()) {
1017 // BB is the only predecessor of Succ, so Succ will end up with exactly
1018 // the same predecessors BB had.
1019
1020 // Copy over any phi, debug or lifetime instruction.
1021 BB->getTerminator()->eraseFromParent();
1022 Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1023 BB->getInstList());
1024 } else {
1025 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1026 // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1027 assert(PN->use_empty() && "There shouldn't be any uses here!")(static_cast <bool> (PN->use_empty() && "There shouldn't be any uses here!"
) ? void (0) : __assert_fail ("PN->use_empty() && \"There shouldn't be any uses here!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1027, __extension__ __PRETTY_FUNCTION__))
;
1028 PN->eraseFromParent();
1029 }
1030 }
1031
1032 // If the unconditional branch we replaced contains llvm.loop metadata, we
1033 // add the metadata to the branch instructions in the predecessors.
1034 unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
1035 Instruction *TI = BB->getTerminator();
1036 if (TI)
1037 if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1038 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1039 BasicBlock *Pred = *PI;
1040 Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1041 }
1042
1043 // Everything that jumped to BB now goes to Succ.
1044 BB->replaceAllUsesWith(Succ);
1045 if (!Succ->hasName()) Succ->takeName(BB);
1046
1047 if (DDT) {
1048 DDT->deleteBB(BB); // Deferred deletion of the old basic block.
1049 DDT->applyUpdates(Updates);
1050 } else {
1051 BB->eraseFromParent(); // Delete the old basic block.
1052 }
1053 return true;
1054}
1055
1056/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
1057/// nodes in this block. This doesn't try to be clever about PHI nodes
1058/// which differ only in the order of the incoming values, but instcombine
1059/// orders them so it usually won't matter.
1060bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1061 // This implementation doesn't currently consider undef operands
1062 // specially. Theoretically, two phis which are identical except for
1063 // one having an undef where the other doesn't could be collapsed.
1064
1065 struct PHIDenseMapInfo {
1066 static PHINode *getEmptyKey() {
1067 return DenseMapInfo<PHINode *>::getEmptyKey();
1068 }
1069
1070 static PHINode *getTombstoneKey() {
1071 return DenseMapInfo<PHINode *>::getTombstoneKey();
1072 }
1073
1074 static unsigned getHashValue(PHINode *PN) {
1075 // Compute a hash value on the operands. Instcombine will likely have
1076 // sorted them, which helps expose duplicates, but we have to check all
1077 // the operands to be safe in case instcombine hasn't run.
1078 return static_cast<unsigned>(hash_combine(
1079 hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1080 hash_combine_range(PN->block_begin(), PN->block_end())));
1081 }
1082
1083 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1084 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
1085 RHS == getEmptyKey() || RHS == getTombstoneKey())
1086 return LHS == RHS;
1087 return LHS->isIdenticalTo(RHS);
1088 }
1089 };
1090
1091 // Set of unique PHINodes.
1092 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1093
1094 // Examine each PHI.
1095 bool Changed = false;
1096 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1097 auto Inserted = PHISet.insert(PN);
1098 if (!Inserted.second) {
1099 // A duplicate. Replace this PHI with its duplicate.
1100 PN->replaceAllUsesWith(*Inserted.first);
1101 PN->eraseFromParent();
1102 Changed = true;
1103
1104 // The RAUW can change PHIs that we already visited. Start over from the
1105 // beginning.
1106 PHISet.clear();
1107 I = BB->begin();
1108 }
1109 }
1110
1111 return Changed;
1112}
1113
1114/// enforceKnownAlignment - If the specified pointer points to an object that
1115/// we control, modify the object's alignment to PrefAlign. This isn't
1116/// often possible though. If alignment is important, a more reliable approach
1117/// is to simply align all global variables and allocation instructions to
1118/// their preferred alignment from the beginning.
1119static unsigned enforceKnownAlignment(Value *V, unsigned Align,
1120 unsigned PrefAlign,
1121 const DataLayout &DL) {
1122 assert(PrefAlign > Align)(static_cast <bool> (PrefAlign > Align) ? void (0) :
__assert_fail ("PrefAlign > Align", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1122, __extension__ __PRETTY_FUNCTION__))
;
1123
1124 V = V->stripPointerCasts();
1125
1126 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1127 // TODO: ideally, computeKnownBits ought to have used
1128 // AllocaInst::getAlignment() in its computation already, making
1129 // the below max redundant. But, as it turns out,
1130 // stripPointerCasts recurses through infinite layers of bitcasts,
1131 // while computeKnownBits is not allowed to traverse more than 6
1132 // levels.
1133 Align = std::max(AI->getAlignment(), Align);
1134 if (PrefAlign <= Align)
1135 return Align;
1136
1137 // If the preferred alignment is greater than the natural stack alignment
1138 // then don't round up. This avoids dynamic stack realignment.
1139 if (DL.exceedsNaturalStackAlignment(PrefAlign))
1140 return Align;
1141 AI->setAlignment(PrefAlign);
1142 return PrefAlign;
1143 }
1144
1145 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1146 // TODO: as above, this shouldn't be necessary.
1147 Align = std::max(GO->getAlignment(), Align);
1148 if (PrefAlign <= Align)
1149 return Align;
1150
1151 // If there is a large requested alignment and we can, bump up the alignment
1152 // of the global. If the memory we set aside for the global may not be the
1153 // memory used by the final program then it is impossible for us to reliably
1154 // enforce the preferred alignment.
1155 if (!GO->canIncreaseAlignment())
1156 return Align;
1157
1158 GO->setAlignment(PrefAlign);
1159 return PrefAlign;
1160 }
1161
1162 return Align;
1163}
1164
1165unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
1166 const DataLayout &DL,
1167 const Instruction *CxtI,
1168 AssumptionCache *AC,
1169 const DominatorTree *DT) {
1170 assert(V->getType()->isPointerTy() &&(static_cast <bool> (V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!") ? void (0) :
__assert_fail ("V->getType()->isPointerTy() && \"getOrEnforceKnownAlignment expects a pointer!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1171, __extension__ __PRETTY_FUNCTION__))
1171 "getOrEnforceKnownAlignment expects a pointer!")(static_cast <bool> (V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!") ? void (0) :
__assert_fail ("V->getType()->isPointerTy() && \"getOrEnforceKnownAlignment expects a pointer!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1171, __extension__ __PRETTY_FUNCTION__))
;
1172
1173 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1174 unsigned TrailZ = Known.countMinTrailingZeros();
1175
1176 // Avoid trouble with ridiculously large TrailZ values, such as
1177 // those computed from a null pointer.
1178 TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT8 - 1));
1179
1180 unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);
1181
1182 // LLVM doesn't support alignments larger than this currently.
1183 Align = std::min(Align, +Value::MaximumAlignment);
1184
1185 if (PrefAlign > Align)
1186 Align = enforceKnownAlignment(V, Align, PrefAlign, DL);
1187
1188 // We don't need to make any adjustment.
1189 return Align;
1190}
1191
1192///===---------------------------------------------------------------------===//
1193/// Dbg Intrinsic utilities
1194///
1195
1196/// See if there is a dbg.value intrinsic for DIVar before I.
1197static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
1198 Instruction *I) {
1199 // Since we can't guarantee that the original dbg.declare instrinsic
1200 // is removed by LowerDbgDeclare(), we need to make sure that we are
1201 // not inserting the same dbg.value intrinsic over and over.
1202 BasicBlock::InstListType::iterator PrevI(I);
1203 if (PrevI != I->getParent()->getInstList().begin()) {
1204 --PrevI;
1205 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
1206 if (DVI->getValue() == I->getOperand(0) &&
1207 DVI->getVariable() == DIVar &&
1208 DVI->getExpression() == DIExpr)
1209 return true;
1210 }
1211 return false;
1212}
1213
1214/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1215static bool PhiHasDebugValue(DILocalVariable *DIVar,
1216 DIExpression *DIExpr,
1217 PHINode *APN) {
1218 // Since we can't guarantee that the original dbg.declare instrinsic
1219 // is removed by LowerDbgDeclare(), we need to make sure that we are
1220 // not inserting the same dbg.value intrinsic over and over.
1221 SmallVector<DbgValueInst *, 1> DbgValues;
1222 findDbgValues(DbgValues, APN);
1223 for (auto *DVI : DbgValues) {
1224 assert(DVI->getValue() == APN)(static_cast <bool> (DVI->getValue() == APN) ? void (
0) : __assert_fail ("DVI->getValue() == APN", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1224, __extension__ __PRETTY_FUNCTION__))
;
1225 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1226 return true;
1227 }
1228 return false;
1229}
1230
1231/// Check if the alloc size of \p ValTy is large enough to cover the variable
1232/// (or fragment of the variable) described by \p DII.
1233///
1234/// This is primarily intended as a helper for the different
1235/// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
1236/// converted describes an alloca'd variable, so we need to use the
1237/// alloc size of the value when doing the comparison. E.g. an i1 value will be
1238/// identified as covering an n-bit fragment, if the store size of i1 is at
1239/// least n bits.
1240static bool valueCoversEntireFragment(Type *ValTy, DbgInfoIntrinsic *DII) {
1241 const DataLayout &DL = DII->getModule()->getDataLayout();
1242 uint64_t ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1243 if (auto FragmentSize = DII->getFragmentSizeInBits())
1244 return ValueSize >= *FragmentSize;
1245 // We can't always calculate the size of the DI variable (e.g. if it is a
1246 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1247 // intead.
1248 if (DII->isAddressOfVariable())
1249 if (auto *AI = dyn_cast_or_null<AllocaInst>(DII->getVariableLocation()))
1250 if (auto FragmentSize = AI->getAllocationSizeInBits(DL))
1251 return ValueSize >= *FragmentSize;
1252 // Could not determine size of variable. Conservatively return false.
1253 return false;
1254}
1255
1256/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1257/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1258void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1259 StoreInst *SI, DIBuilder &Builder) {
1260 assert(DII->isAddressOfVariable())(static_cast <bool> (DII->isAddressOfVariable()) ? void
(0) : __assert_fail ("DII->isAddressOfVariable()", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1260, __extension__ __PRETTY_FUNCTION__))
;
1261 auto *DIVar = DII->getVariable();
1262 assert(DIVar && "Missing variable")(static_cast <bool> (DIVar && "Missing variable"
) ? void (0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1262, __extension__ __PRETTY_FUNCTION__))
;
1263 auto *DIExpr = DII->getExpression();
1264 Value *DV = SI->getOperand(0);
1265
1266 if (!valueCoversEntireFragment(SI->getValueOperand()->getType(), DII)) {
1267 // FIXME: If storing to a part of the variable described by the dbg.declare,
1268 // then we want to insert a dbg.value for the corresponding fragment.
1269 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
1270 << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
;
1271 // For now, when there is a store to parts of the variable (but we do not
1272 // know which part) we insert an dbg.value instrinsic to indicate that we
1273 // know nothing about the variable's content.
1274 DV = UndefValue::get(DV->getType());
1275 if (!LdStHasDebugValue(DIVar, DIExpr, SI))
1276 Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, DII->getDebugLoc(),
1277 SI);
1278 return;
1279 }
1280
1281 // If an argument is zero extended then use argument directly. The ZExt
1282 // may be zapped by an optimization pass in future.
1283 Argument *ExtendedArg = nullptr;
1284 if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
1285 ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
1286 if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
1287 ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
1288 if (ExtendedArg) {
1289 // If this DII was already describing only a fragment of a variable, ensure
1290 // that fragment is appropriately narrowed here.
1291 // But if a fragment wasn't used, describe the value as the original
1292 // argument (rather than the zext or sext) so that it remains described even
1293 // if the sext/zext is optimized away. This widens the variable description,
1294 // leaving it up to the consumer to know how the smaller value may be
1295 // represented in a larger register.
1296 if (auto Fragment = DIExpr->getFragmentInfo()) {
1297 unsigned FragmentOffset = Fragment->OffsetInBits;
1298 SmallVector<uint64_t, 3> Ops(DIExpr->elements_begin(),
1299 DIExpr->elements_end() - 3);
1300 Ops.push_back(dwarf::DW_OP_LLVM_fragment);
1301 Ops.push_back(FragmentOffset);
1302 const DataLayout &DL = DII->getModule()->getDataLayout();
1303 Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType()));
1304 DIExpr = Builder.createExpression(Ops);
1305 }
1306 DV = ExtendedArg;
1307 }
1308 if (!LdStHasDebugValue(DIVar, DIExpr, SI))
1309 Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, DII->getDebugLoc(),
1310 SI);
1311}
1312
1313/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1314/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1315void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1316 LoadInst *LI, DIBuilder &Builder) {
1317 auto *DIVar = DII->getVariable();
1318 auto *DIExpr = DII->getExpression();
1319 assert(DIVar && "Missing variable")(static_cast <bool> (DIVar && "Missing variable"
) ? void (0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1319, __extension__ __PRETTY_FUNCTION__))
;
1320
1321 if (LdStHasDebugValue(DIVar, DIExpr, LI))
1322 return;
1323
1324 if (!valueCoversEntireFragment(LI->getType(), DII)) {
1325 // FIXME: If only referring to a part of the variable described by the
1326 // dbg.declare, then we want to insert a dbg.value for the corresponding
1327 // fragment.
1328 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
1329 << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
;
1330 return;
1331 }
1332
1333 // We are now tracking the loaded value instead of the address. In the
1334 // future if multi-location support is added to the IR, it might be
1335 // preferable to keep tracking both the loaded value and the original
1336 // address in case the alloca can not be elided.
1337 Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1338 LI, DIVar, DIExpr, DII->getDebugLoc(), (Instruction *)nullptr);
1339 DbgValue->insertAfter(LI);
1340}
1341
1342/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1343/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1344void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1345 PHINode *APN, DIBuilder &Builder) {
1346 auto *DIVar = DII->getVariable();
1347 auto *DIExpr = DII->getExpression();
1348 assert(DIVar && "Missing variable")(static_cast <bool> (DIVar && "Missing variable"
) ? void (0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1348, __extension__ __PRETTY_FUNCTION__))
;
1349
1350 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1351 return;
1352
1353 if (!valueCoversEntireFragment(APN->getType(), DII)) {
1354 // FIXME: If only referring to a part of the variable described by the
1355 // dbg.declare, then we want to insert a dbg.value for the corresponding
1356 // fragment.
1357 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
1358 << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
;
1359 return;
1360 }
1361
1362 BasicBlock *BB = APN->getParent();
1363 auto InsertionPt = BB->getFirstInsertionPt();
1364
1365 // The block may be a catchswitch block, which does not have a valid
1366 // insertion point.
1367 // FIXME: Insert dbg.value markers in the successors when appropriate.
1368 if (InsertionPt != BB->end())
1369 Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, DII->getDebugLoc(),
1370 &*InsertionPt);
1371}
1372
1373/// Determine whether this alloca is either a VLA or an array.
1374static bool isArray(AllocaInst *AI) {
1375 return AI->isArrayAllocation() ||
1376 AI->getType()->getElementType()->isArrayTy();
1377}
1378
1379/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1380/// of llvm.dbg.value intrinsics.
1381bool llvm::LowerDbgDeclare(Function &F) {
1382 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1383 SmallVector<DbgDeclareInst *, 4> Dbgs;
1384 for (auto &FI : F)
1385 for (Instruction &BI : FI)
1386 if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1387 Dbgs.push_back(DDI);
1388
1389 if (Dbgs.empty())
1390 return false;
1391
1392 for (auto &I : Dbgs) {
1393 DbgDeclareInst *DDI = I;
1394 AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1395 // If this is an alloca for a scalar variable, insert a dbg.value
1396 // at each load and store to the alloca and erase the dbg.declare.
1397 // The dbg.values allow tracking a variable even if it is not
1398 // stored on the stack, while the dbg.declare can only describe
1399 // the stack slot (and at a lexical-scope granularity). Later
1400 // passes will attempt to elide the stack slot.
1401 if (!AI || isArray(AI))
1402 continue;
1403
1404 // A volatile load/store means that the alloca can't be elided anyway.
1405 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1406 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1407 return LI->isVolatile();
1408 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1409 return SI->isVolatile();
1410 return false;
1411 }))
1412 continue;
1413
1414 for (auto &AIUse : AI->uses()) {
1415 User *U = AIUse.getUser();
1416 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1417 if (AIUse.getOperandNo() == 1)
1418 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1419 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1420 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1421 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1422 // This is a call by-value or some other instruction that takes a
1423 // pointer to the variable. Insert a *value* intrinsic that describes
1424 // the variable by dereferencing the alloca.
1425 auto *DerefExpr =
1426 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1427 DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1428 DDI->getDebugLoc(), CI);
1429 }
1430 }
1431 DDI->eraseFromParent();
1432 }
1433 return true;
1434}
1435
1436/// Propagate dbg.value intrinsics through the newly inserted PHIs.
1437void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1438 SmallVectorImpl<PHINode *> &InsertedPHIs) {
1439 assert(BB && "No BasicBlock to clone dbg.value(s) from.")(static_cast <bool> (BB && "No BasicBlock to clone dbg.value(s) from."
) ? void (0) : __assert_fail ("BB && \"No BasicBlock to clone dbg.value(s) from.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1439, __extension__ __PRETTY_FUNCTION__))
;
1440 if (InsertedPHIs.size() == 0)
1441 return;
1442
1443 // Map existing PHI nodes to their dbg.values.
1444 ValueToValueMapTy DbgValueMap;
1445 for (auto &I : *BB) {
1446 if (auto DbgII = dyn_cast<DbgInfoIntrinsic>(&I)) {
1447 if (auto *Loc = dyn_cast_or_null<PHINode>(DbgII->getVariableLocation()))
1448 DbgValueMap.insert({Loc, DbgII});
1449 }
1450 }
1451 if (DbgValueMap.size() == 0)
1452 return;
1453
1454 // Then iterate through the new PHIs and look to see if they use one of the
1455 // previously mapped PHIs. If so, insert a new dbg.value intrinsic that will
1456 // propagate the info through the new PHI.
1457 LLVMContext &C = BB->getContext();
1458 for (auto PHI : InsertedPHIs) {
1459 BasicBlock *Parent = PHI->getParent();
1460 // Avoid inserting an intrinsic into an EH block.
1461 if (Parent->getFirstNonPHI()->isEHPad())
1462 continue;
1463 auto PhiMAV = MetadataAsValue::get(C, ValueAsMetadata::get(PHI));
1464 for (auto VI : PHI->operand_values()) {
1465 auto V = DbgValueMap.find(VI);
1466 if (V != DbgValueMap.end()) {
1467 auto *DbgII = cast<DbgInfoIntrinsic>(V->second);
1468 Instruction *NewDbgII = DbgII->clone();
1469 NewDbgII->setOperand(0, PhiMAV);
1470 auto InsertionPt = Parent->getFirstInsertionPt();
1471 assert(InsertionPt != Parent->end() && "Ill-formed basic block")(static_cast <bool> (InsertionPt != Parent->end() &&
"Ill-formed basic block") ? void (0) : __assert_fail ("InsertionPt != Parent->end() && \"Ill-formed basic block\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1471, __extension__ __PRETTY_FUNCTION__))
;
1472 NewDbgII->insertBefore(&*InsertionPt);
1473 }
1474 }
1475 }
1476}
1477
1478/// Finds all intrinsics declaring local variables as living in the memory that
1479/// 'V' points to. This may include a mix of dbg.declare and
1480/// dbg.addr intrinsics.
1481TinyPtrVector<DbgInfoIntrinsic *> llvm::FindDbgAddrUses(Value *V) {
1482 // This function is hot. Check whether the value has any metadata to avoid a
1483 // DenseMap lookup.
1484 if (!V->isUsedByMetadata())
1485 return {};
1486 auto *L = LocalAsMetadata::getIfExists(V);
1487 if (!L)
1488 return {};
1489 auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L);
1490 if (!MDV)
1491 return {};
1492
1493 TinyPtrVector<DbgInfoIntrinsic *> Declares;
1494 for (User *U : MDV->users()) {
1495 if (auto *DII = dyn_cast<DbgInfoIntrinsic>(U))
1496 if (DII->isAddressOfVariable())
1497 Declares.push_back(DII);
1498 }
1499
1500 return Declares;
1501}
1502
1503void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
1504 // This function is hot. Check whether the value has any metadata to avoid a
1505 // DenseMap lookup.
1506 if (!V->isUsedByMetadata())
1507 return;
1508 if (auto *L = LocalAsMetadata::getIfExists(V))
1509 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1510 for (User *U : MDV->users())
1511 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
1512 DbgValues.push_back(DVI);
1513}
1514
1515void llvm::findDbgUsers(SmallVectorImpl<DbgInfoIntrinsic *> &DbgUsers,
1516 Value *V) {
1517 // This function is hot. Check whether the value has any metadata to avoid a
1518 // DenseMap lookup.
1519 if (!V->isUsedByMetadata())
1520 return;
1521 if (auto *L = LocalAsMetadata::getIfExists(V))
1522 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1523 for (User *U : MDV->users())
1524 if (DbgInfoIntrinsic *DII = dyn_cast<DbgInfoIntrinsic>(U))
1525 DbgUsers.push_back(DII);
1526}
1527
1528bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1529 Instruction *InsertBefore, DIBuilder &Builder,
1530 bool DerefBefore, int Offset, bool DerefAfter) {
1531 auto DbgAddrs = FindDbgAddrUses(Address);
1532 for (DbgInfoIntrinsic *DII : DbgAddrs) {
1533 DebugLoc Loc = DII->getDebugLoc();
1534 auto *DIVar = DII->getVariable();
1535 auto *DIExpr = DII->getExpression();
1536 assert(DIVar && "Missing variable")(static_cast <bool> (DIVar && "Missing variable"
) ? void (0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1536, __extension__ __PRETTY_FUNCTION__))
;
1537 DIExpr = DIExpression::prepend(DIExpr, DerefBefore, Offset, DerefAfter);
1538 // Insert llvm.dbg.declare immediately before InsertBefore, and remove old
1539 // llvm.dbg.declare.
1540 Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
1541 if (DII == InsertBefore)
1542 InsertBefore = InsertBefore->getNextNode();
1543 DII->eraseFromParent();
1544 }
1545 return !DbgAddrs.empty();
1546}
1547
1548bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1549 DIBuilder &Builder, bool DerefBefore,
1550 int Offset, bool DerefAfter) {
1551 return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
1552 DerefBefore, Offset, DerefAfter);
1553}
1554
1555static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1556 DIBuilder &Builder, int Offset) {
1557 DebugLoc Loc = DVI->getDebugLoc();
1558 auto *DIVar = DVI->getVariable();
1559 auto *DIExpr = DVI->getExpression();
1560 assert(DIVar && "Missing variable")(static_cast <bool> (DIVar && "Missing variable"
) ? void (0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1560, __extension__ __PRETTY_FUNCTION__))
;
1561
1562 // This is an alloca-based llvm.dbg.value. The first thing it should do with
1563 // the alloca pointer is dereference it. Otherwise we don't know how to handle
1564 // it and give up.
1565 if (!DIExpr || DIExpr->getNumElements() < 1 ||
1566 DIExpr->getElement(0) != dwarf::DW_OP_deref)
1567 return;
1568
1569 // Insert the offset immediately after the first deref.
1570 // We could just change the offset argument of dbg.value, but it's unsigned...
1571 if (Offset) {
1572 SmallVector<uint64_t, 4> Ops;
1573 Ops.push_back(dwarf::DW_OP_deref);
1574 DIExpression::appendOffset(Ops, Offset);
1575 Ops.append(DIExpr->elements_begin() + 1, DIExpr->elements_end());
1576 DIExpr = Builder.createExpression(Ops);
1577 }
1578
1579 Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1580 DVI->eraseFromParent();
1581}
1582
1583void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1584 DIBuilder &Builder, int Offset) {
1585 if (auto *L = LocalAsMetadata::getIfExists(AI))
1586 if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1587 for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
1588 Use &U = *UI++;
1589 if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1590 replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1591 }
1592}
1593
1594/// Wrap \p V in a ValueAsMetadata instance.
1595static MetadataAsValue *wrapValueInMetadata(LLVMContext &C, Value *V) {
1596 return MetadataAsValue::get(C, ValueAsMetadata::get(V));
1597}
1598
1599bool llvm::salvageDebugInfo(Instruction &I) {
1600 SmallVector<DbgInfoIntrinsic *, 1> DbgUsers;
1601 findDbgUsers(DbgUsers, &I);
1602 if (DbgUsers.empty())
1603 return false;
1604
1605 auto &M = *I.getModule();
1606 auto &DL = M.getDataLayout();
1607 auto &Ctx = I.getContext();
1608 auto wrapMD = [&](Value *V) { return wrapValueInMetadata(Ctx, V); };
1609
1610 auto doSalvage = [&](DbgInfoIntrinsic *DII, SmallVectorImpl<uint64_t> &Ops) {
1611 auto *DIExpr = DII->getExpression();
1612 if (!Ops.empty()) {
1613 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1614 // are implicitly pointing out the value as a DWARF memory location
1615 // description.
1616 bool WithStackValue = isa<DbgValueInst>(DII);
1617 DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
1618 }
1619 DII->setOperand(0, wrapMD(I.getOperand(0)));
1620 DII->setOperand(2, MetadataAsValue::get(Ctx, DIExpr));
1621 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "SALVAGE: " << *DII <<
'\n'; } } while (false)
;
1622 };
1623
1624 auto applyOffset = [&](DbgInfoIntrinsic *DII, uint64_t Offset) {
1625 SmallVector<uint64_t, 8> Ops;
1626 DIExpression::appendOffset(Ops, Offset);
1627 doSalvage(DII, Ops);
1628 };
1629
1630 auto applyOps = [&](DbgInfoIntrinsic *DII,
1631 std::initializer_list<uint64_t> Opcodes) {
1632 SmallVector<uint64_t, 8> Ops(Opcodes);
1633 doSalvage(DII, Ops);
1634 };
1635
1636 if (auto *CI = dyn_cast<CastInst>(&I)) {
1637 if (!CI->isNoopCast(DL))
1638 return false;
1639
1640 // No-op casts are irrelevant for debug info.
1641 MetadataAsValue *CastSrc = wrapMD(I.getOperand(0));
1642 for (auto *DII : DbgUsers) {
1643 DII->setOperand(0, CastSrc);
1644 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "SALVAGE: " << *DII <<
'\n'; } } while (false)
;
1645 }
1646 return true;
1647 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1648 unsigned BitWidth =
1649 M.getDataLayout().getIndexSizeInBits(GEP->getPointerAddressSpace());
1650 // Rewrite a constant GEP into a DIExpression. Since we are performing
1651 // arithmetic to compute the variable's *value* in the DIExpression, we
1652 // need to mark the expression with a DW_OP_stack_value.
1653 APInt Offset(BitWidth, 0);
1654 if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset))
1655 for (auto *DII : DbgUsers)
1656 applyOffset(DII, Offset.getSExtValue());
1657 return true;
1658 } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1659 // Rewrite binary operations with constant integer operands.
1660 auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1));
1661 if (!ConstInt || ConstInt->getBitWidth() > 64)
1662 return false;
1663
1664 uint64_t Val = ConstInt->getSExtValue();
1665 for (auto *DII : DbgUsers) {
1666 switch (BI->getOpcode()) {
1667 case Instruction::Add:
1668 applyOffset(DII, Val);
1669 break;
1670 case Instruction::Sub:
1671 applyOffset(DII, -int64_t(Val));
1672 break;
1673 case Instruction::Mul:
1674 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_mul});
1675 break;
1676 case Instruction::SDiv:
1677 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_div});
1678 break;
1679 case Instruction::SRem:
1680 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_mod});
1681 break;
1682 case Instruction::Or:
1683 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_or});
1684 break;
1685 case Instruction::And:
1686 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_and});
1687 break;
1688 case Instruction::Xor:
1689 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_xor});
1690 break;
1691 case Instruction::Shl:
1692 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_shl});
1693 break;
1694 case Instruction::LShr:
1695 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_shr});
1696 break;
1697 case Instruction::AShr:
1698 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_shra});
1699 break;
1700 default:
1701 // TODO: Salvage constants from each kind of binop we know about.
1702 return false;
1703 }
1704 }
1705 return true;
1706 } else if (isa<LoadInst>(&I)) {
1707 MetadataAsValue *AddrMD = wrapMD(I.getOperand(0));
1708 for (auto *DII : DbgUsers) {
1709 // Rewrite the load into DW_OP_deref.
1710 auto *DIExpr = DII->getExpression();
1711 DIExpr = DIExpression::prepend(DIExpr, DIExpression::WithDeref);
1712 DII->setOperand(0, AddrMD);
1713 DII->setOperand(2, MetadataAsValue::get(Ctx, DIExpr));
1714 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "SALVAGE: " << *DII <<
'\n'; } } while (false)
;
1715 }
1716 return true;
1717 }
1718 return false;
1719}
1720
1721/// A replacement for a dbg.value expression.
1722using DbgValReplacement = Optional<DIExpression *>;
1723
1724/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
1725/// possibly moving/deleting users to prevent use-before-def. Returns true if
1726/// changes are made.
1727static bool rewriteDebugUsers(
1728 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
1729 function_ref<DbgValReplacement(DbgInfoIntrinsic &DII)> RewriteExpr) {
1730 // Find debug users of From.
1731 SmallVector<DbgInfoIntrinsic *, 1> Users;
1732 findDbgUsers(Users, &From);
1733 if (Users.empty())
1734 return false;
1735
1736 // Prevent use-before-def of To.
1737 bool Changed = false;
1738 SmallPtrSet<DbgInfoIntrinsic *, 1> DeleteOrSalvage;
1739 if (isa<Instruction>(&To)) {
1740 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
1741
1742 for (auto *DII : Users) {
1743 // It's common to see a debug user between From and DomPoint. Move it
1744 // after DomPoint to preserve the variable update without any reordering.
1745 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
1746 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "MOVE: " << *DII <<
'\n'; } } while (false)
;
1747 DII->moveAfter(&DomPoint);
1748 Changed = true;
1749
1750 // Users which otherwise aren't dominated by the replacement value must
1751 // be salvaged or deleted.
1752 } else if (!DT.dominates(&DomPoint, DII)) {
1753 DeleteOrSalvage.insert(DII);
1754 }
1755 }
1756 }
1757
1758 // Update debug users without use-before-def risk.
1759 for (auto *DII : Users) {
1760 if (DeleteOrSalvage.count(DII))
1761 continue;
1762
1763 LLVMContext &Ctx = DII->getContext();
1764 DbgValReplacement DVR = RewriteExpr(*DII);
1765 if (!DVR)
1766 continue;
1767
1768 DII->setOperand(0, wrapValueInMetadata(Ctx, &To));
1769 DII->setOperand(2, MetadataAsValue::get(Ctx, *DVR));
1770 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "REWRITE: " << *DII <<
'\n'; } } while (false)
;
1771 Changed = true;
1772 }
1773
1774 if (!DeleteOrSalvage.empty()) {
1775 // Try to salvage the remaining debug users.
1776 Changed |= salvageDebugInfo(From);
1777
1778 // Delete the debug users which weren't salvaged.
1779 for (auto *DII : DeleteOrSalvage) {
1780 if (DII->getVariableLocation() == &From) {
1781 LLVM_DEBUG(dbgs() << "Erased UseBeforeDef: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Erased UseBeforeDef: " <<
*DII << '\n'; } } while (false)
;
1782 DII->eraseFromParent();
1783 Changed = true;
1784 }
1785 }
1786 }
1787
1788 return Changed;
1789}
1790
1791/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
1792/// losslessly preserve the bits and semantics of the value. This predicate is
1793/// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
1794///
1795/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
1796/// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
1797/// and also does not allow lossless pointer <-> integer conversions.
1798static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
1799 Type *ToTy) {
1800 // Trivially compatible types.
1801 if (FromTy == ToTy)
1802 return true;
1803
1804 // Handle compatible pointer <-> integer conversions.
1805 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
1806 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
1807 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
1808 !DL.isNonIntegralPointerType(ToTy);
1809 return SameSize && LosslessConversion;
1810 }
1811
1812 // TODO: This is not exhaustive.
1813 return false;
1814}
1815
1816bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
1817 Instruction &DomPoint, DominatorTree &DT) {
1818 // Exit early if From has no debug users.
1819 if (!From.isUsedByMetadata())
1820 return false;
1821
1822 assert(&From != &To && "Can't replace something with itself")(static_cast <bool> (&From != &To && "Can't replace something with itself"
) ? void (0) : __assert_fail ("&From != &To && \"Can't replace something with itself\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1822, __extension__ __PRETTY_FUNCTION__))
;
1823
1824 Type *FromTy = From.getType();
1825 Type *ToTy = To.getType();
1826
1827 auto Identity = [&](DbgInfoIntrinsic &DII) -> DbgValReplacement {
1828 return DII.getExpression();
1829 };
1830
1831 // Handle no-op conversions.
1832 Module &M = *From.getModule();
1833 const DataLayout &DL = M.getDataLayout();
1834 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
1835 return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1836
1837 // Handle integer-to-integer widening and narrowing.
1838 // FIXME: Use DW_OP_convert when it's available everywhere.
1839 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
1840 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
1841 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
1842 assert(FromBits != ToBits && "Unexpected no-op conversion")(static_cast <bool> (FromBits != ToBits && "Unexpected no-op conversion"
) ? void (0) : __assert_fail ("FromBits != ToBits && \"Unexpected no-op conversion\""
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 1842, __extension__ __PRETTY_FUNCTION__))
;
1843
1844 // When the width of the result grows, assume that a debugger will only
1845 // access the low `FromBits` bits when inspecting the source variable.
1846 if (FromBits < ToBits)
1847 return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1848
1849 // The width of the result has shrunk. Use sign/zero extension to describe
1850 // the source variable's high bits.
1851 auto SignOrZeroExt = [&](DbgInfoIntrinsic &DII) -> DbgValReplacement {
1852 DILocalVariable *Var = DII.getVariable();
1853
1854 // Without knowing signedness, sign/zero extension isn't possible.
1855 auto Signedness = Var->getSignedness();
1856 if (!Signedness)
1857 return None;
1858
1859 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
1860
1861 if (!Signed) {
1862 // In the unsigned case, assume that a debugger will initialize the
1863 // high bits to 0 and do a no-op conversion.
1864 return Identity(DII);
1865 } else {
1866 // In the signed case, the high bits are given by sign extension, i.e:
1867 // (To >> (ToBits - 1)) * ((2 ^ FromBits) - 1)
1868 // Calculate the high bits and OR them together with the low bits.
1869 SmallVector<uint64_t, 8> Ops({dwarf::DW_OP_dup, dwarf::DW_OP_constu,
1870 (ToBits - 1), dwarf::DW_OP_shr,
1871 dwarf::DW_OP_lit0, dwarf::DW_OP_not,
1872 dwarf::DW_OP_mul, dwarf::DW_OP_or});
1873 return DIExpression::appendToStack(DII.getExpression(), Ops);
1874 }
1875 };
1876 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
1877 }
1878
1879 // TODO: Floating-point conversions, vectors.
1880 return false;
1881}
1882
1883unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
1884 unsigned NumDeadInst = 0;
1885 // Delete the instructions backwards, as it has a reduced likelihood of
1886 // having to update as many def-use and use-def chains.
1887 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
1888 while (EndInst != &BB->front()) {
1889 // Delete the next to last instruction.
1890 Instruction *Inst = &*--EndInst->getIterator();
1891 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
1892 Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
1893 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
1894 EndInst = Inst;
1895 continue;
1896 }
1897 if (!isa<DbgInfoIntrinsic>(Inst))
1898 ++NumDeadInst;
1899 Inst->eraseFromParent();
1900 }
1901 return NumDeadInst;
1902}
1903
1904unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
1905 bool PreserveLCSSA, DeferredDominance *DDT) {
1906 BasicBlock *BB = I->getParent();
1907 std::vector <DominatorTree::UpdateType> Updates;
1908
1909 // Loop over all of the successors, removing BB's entry from any PHI
1910 // nodes.
1911 if (DDT)
1912 Updates.reserve(BB->getTerminator()->getNumSuccessors());
1913 for (BasicBlock *Successor : successors(BB)) {
1914 Successor->removePredecessor(BB, PreserveLCSSA);
1915 if (DDT)
1916 Updates.push_back({DominatorTree::Delete, BB, Successor});
1917 }
1918 // Insert a call to llvm.trap right before this. This turns the undefined
1919 // behavior into a hard fail instead of falling through into random code.
1920 if (UseLLVMTrap) {
1921 Function *TrapFn =
1922 Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
1923 CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
1924 CallTrap->setDebugLoc(I->getDebugLoc());
1925 }
1926 new UnreachableInst(I->getContext(), I);
1927
1928 // All instructions after this are dead.
1929 unsigned NumInstrsRemoved = 0;
1930 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
1931 while (BBI != BBE) {
1932 if (!BBI->use_empty())
1933 BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
1934 BB->getInstList().erase(BBI++);
1935 ++NumInstrsRemoved;
1936 }
1937 if (DDT)
1938 DDT->applyUpdates(Updates);
1939 return NumInstrsRemoved;
1940}
1941
1942/// changeToCall - Convert the specified invoke into a normal call.
1943static void changeToCall(InvokeInst *II, DeferredDominance *DDT = nullptr) {
1944 SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
1945 SmallVector<OperandBundleDef, 1> OpBundles;
1946 II->getOperandBundlesAsDefs(OpBundles);
1947 CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
1948 "", II);
1949 NewCall->takeName(II);
1950 NewCall->setCallingConv(II->getCallingConv());
1951 NewCall->setAttributes(II->getAttributes());
1952 NewCall->setDebugLoc(II->getDebugLoc());
1953 II->replaceAllUsesWith(NewCall);
1954
1955 // Follow the call by a branch to the normal destination.
1956 BasicBlock *NormalDestBB = II->getNormalDest();
1957 BranchInst::Create(NormalDestBB, II);
1958
1959 // Update PHI nodes in the unwind destination
1960 BasicBlock *BB = II->getParent();
1961 BasicBlock *UnwindDestBB = II->getUnwindDest();
1962 UnwindDestBB->removePredecessor(BB);
1963 II->eraseFromParent();
1964 if (DDT)
1965 DDT->deleteEdge(BB, UnwindDestBB);
1966}
1967
1968BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
1969 BasicBlock *UnwindEdge) {
1970 BasicBlock *BB = CI->getParent();
1971
1972 // Convert this function call into an invoke instruction. First, split the
1973 // basic block.
1974 BasicBlock *Split =
1975 BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");
1976
1977 // Delete the unconditional branch inserted by splitBasicBlock
1978 BB->getInstList().pop_back();
1979
1980 // Create the new invoke instruction.
1981 SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
1982 SmallVector<OperandBundleDef, 1> OpBundles;
1983
1984 CI->getOperandBundlesAsDefs(OpBundles);
1985
1986 // Note: we're round tripping operand bundles through memory here, and that
1987 // can potentially be avoided with a cleverer API design that we do not have
1988 // as of this time.
1989
1990 InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge,
1991 InvokeArgs, OpBundles, CI->getName(), BB);
1992 II->setDebugLoc(CI->getDebugLoc());
1993 II->setCallingConv(CI->getCallingConv());
1994 II->setAttributes(CI->getAttributes());
1995
1996 // Make sure that anything using the call now uses the invoke! This also
1997 // updates the CallGraph if present, because it uses a WeakTrackingVH.
1998 CI->replaceAllUsesWith(II);
1999
2000 // Delete the original call
2001 Split->getInstList().pop_front();
2002 return Split;
2003}
2004
2005static bool markAliveBlocks(Function &F,
2006 SmallPtrSetImpl<BasicBlock*> &Reachable,
2007 DeferredDominance *DDT = nullptr) {
2008 SmallVector<BasicBlock*, 128> Worklist;
2009 BasicBlock *BB = &F.front();
2010 Worklist.push_back(BB);
2011 Reachable.insert(BB);
2012 bool Changed = false;
2013 do {
2014 BB = Worklist.pop_back_val();
2015
2016 // Do a quick scan of the basic block, turning any obviously unreachable
2017 // instructions into LLVM unreachable insts. The instruction combining pass
2018 // canonicalizes unreachable insts into stores to null or undef.
2019 for (Instruction &I : *BB) {
2020 if (auto *CI = dyn_cast<CallInst>(&I)) {
2021 Value *Callee = CI->getCalledValue();
2022 // Handle intrinsic calls.
2023 if (Function *F = dyn_cast<Function>(Callee)) {
2024 auto IntrinsicID = F->getIntrinsicID();
2025 // Assumptions that are known to be false are equivalent to
2026 // unreachable. Also, if the condition is undefined, then we make the
2027 // choice most beneficial to the optimizer, and choose that to also be
2028 // unreachable.
2029 if (IntrinsicID == Intrinsic::assume) {
2030 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2031 // Don't insert a call to llvm.trap right before the unreachable.
2032 changeToUnreachable(CI, false, false, DDT);
2033 Changed = true;
2034 break;
2035 }
2036 } else if (IntrinsicID == Intrinsic::experimental_guard) {
2037 // A call to the guard intrinsic bails out of the current
2038 // compilation unit if the predicate passed to it is false. If the
2039 // predicate is a constant false, then we know the guard will bail
2040 // out of the current compile unconditionally, so all code following
2041 // it is dead.
2042 //
2043 // Note: unlike in llvm.assume, it is not "obviously profitable" for
2044 // guards to treat `undef` as `false` since a guard on `undef` can
2045 // still be useful for widening.
2046 if (match(CI->getArgOperand(0), m_Zero()))
2047 if (!isa<UnreachableInst>(CI->getNextNode())) {
2048 changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false,
2049 false, DDT);
2050 Changed = true;
2051 break;
2052 }
2053 }
2054 } else if ((isa<ConstantPointerNull>(Callee) &&
2055 !NullPointerIsDefined(CI->getFunction())) ||
2056 isa<UndefValue>(Callee)) {
2057 changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DDT);
2058 Changed = true;
2059 break;
2060 }
2061 if (CI->doesNotReturn()) {
2062 // If we found a call to a no-return function, insert an unreachable
2063 // instruction after it. Make sure there isn't *already* one there
2064 // though.
2065 if (!isa<UnreachableInst>(CI->getNextNode())) {
2066 // Don't insert a call to llvm.trap right before the unreachable.
2067 changeToUnreachable(CI->getNextNode(), false, false, DDT);
2068 Changed = true;
2069 }
2070 break;
2071 }
2072 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2073 // Store to undef and store to null are undefined and used to signal
2074 // that they should be changed to unreachable by passes that can't
2075 // modify the CFG.
2076
2077 // Don't touch volatile stores.
2078 if (SI->isVolatile()) continue;
2079
2080 Value *Ptr = SI->getOperand(1);
2081
2082 if (isa<UndefValue>(Ptr) ||
2083 (isa<ConstantPointerNull>(Ptr) &&
2084 !NullPointerIsDefined(SI->getFunction(),
2085 SI->getPointerAddressSpace()))) {
2086 changeToUnreachable(SI, true, false, DDT);
2087 Changed = true;
2088 break;
2089 }
2090 }
2091 }
2092
2093 TerminatorInst *Terminator = BB->getTerminator();
2094 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2
Taking false branch
2095 // Turn invokes that call 'nounwind' functions into ordinary calls.
2096 Value *Callee = II->getCalledValue();
2097 if ((isa<ConstantPointerNull>(Callee) &&
2098 !NullPointerIsDefined(BB->getParent())) ||
2099 isa<UndefValue>(Callee)) {
2100 changeToUnreachable(II, true, false, DDT);
2101 Changed = true;
2102 } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2103 if (II->use_empty() && II->onlyReadsMemory()) {
2104 // jump to the normal destination branch.
2105 BasicBlock *NormalDestBB = II->getNormalDest();
2106 BasicBlock *UnwindDestBB = II->getUnwindDest();
2107 BranchInst::Create(NormalDestBB, II);
2108 UnwindDestBB->removePredecessor(II->getParent());
2109 II->eraseFromParent();
2110 if (DDT)
2111 DDT->deleteEdge(BB, UnwindDestBB);
2112 } else
2113 changeToCall(II, DDT);
2114 Changed = true;
2115 }
2116 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
3
Taking false branch
2117 // Remove catchpads which cannot be reached.
2118 struct CatchPadDenseMapInfo {
2119 static CatchPadInst *getEmptyKey() {
2120 return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2121 }
2122
2123 static CatchPadInst *getTombstoneKey() {
2124 return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2125 }
2126
2127 static unsigned getHashValue(CatchPadInst *CatchPad) {
2128 return static_cast<unsigned>(hash_combine_range(
2129 CatchPad->value_op_begin(), CatchPad->value_op_end()));
2130 }
2131
2132 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2133 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2134 RHS == getEmptyKey() || RHS == getTombstoneKey())
2135 return LHS == RHS;
2136 return LHS->isIdenticalTo(RHS);
2137 }
2138 };
2139
2140 // Set of unique CatchPads.
2141 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2142 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2143 HandlerSet;
2144 detail::DenseSetEmpty Empty;
2145 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2146 E = CatchSwitch->handler_end();
2147 I != E; ++I) {
2148 BasicBlock *HandlerBB = *I;
2149 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
2150 if (!HandlerSet.insert({CatchPad, Empty}).second) {
2151 CatchSwitch->removeHandler(I);
2152 --I;
2153 --E;
2154 Changed = true;
2155 }
2156 }
2157 }
2158
2159 Changed |= ConstantFoldTerminator(BB, true, nullptr, DDT);
4
Calling 'ConstantFoldTerminator'
2160 for (BasicBlock *Successor : successors(BB))
2161 if (Reachable.insert(Successor).second)
2162 Worklist.push_back(Successor);
2163 } while (!Worklist.empty());
2164 return Changed;
2165}
2166
2167void llvm::removeUnwindEdge(BasicBlock *BB, DeferredDominance *DDT) {
2168 TerminatorInst *TI = BB->getTerminator();
2169
2170 if (auto *II = dyn_cast<InvokeInst>(TI)) {
2171 changeToCall(II, DDT);
2172 return;
2173 }
2174
2175 TerminatorInst *NewTI;
2176 BasicBlock *UnwindDest;
2177
2178 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2179 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
2180 UnwindDest = CRI->getUnwindDest();
2181 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2182 auto *NewCatchSwitch = CatchSwitchInst::Create(
2183 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2184 CatchSwitch->getName(), CatchSwitch);
2185 for (BasicBlock *PadBB : CatchSwitch->handlers())
2186 NewCatchSwitch->addHandler(PadBB);
2187
2188 NewTI = NewCatchSwitch;
2189 UnwindDest = CatchSwitch->getUnwindDest();
2190 } else {
2191 llvm_unreachable("Could not find unwind successor")::llvm::llvm_unreachable_internal("Could not find unwind successor"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 2191)
;
2192 }
2193
2194 NewTI->takeName(TI);
2195 NewTI->setDebugLoc(TI->getDebugLoc());
2196 UnwindDest->removePredecessor(BB);
2197 TI->replaceAllUsesWith(NewTI);
2198 TI->eraseFromParent();
2199 if (DDT)
2200 DDT->deleteEdge(BB, UnwindDest);
2201}
2202
2203/// removeUnreachableBlocks - Remove blocks that are not reachable, even
2204/// if they are in a dead cycle. Return true if a change was made, false
2205/// otherwise. If `LVI` is passed, this function preserves LazyValueInfo
2206/// after modifying the CFG.
2207bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI,
2208 DeferredDominance *DDT) {
2209 SmallPtrSet<BasicBlock*, 16> Reachable;
2210 bool Changed = markAliveBlocks(F, Reachable, DDT);
1
Calling 'markAliveBlocks'
2211
2212 // If there are unreachable blocks in the CFG...
2213 if (Reachable.size() == F.size())
2214 return Changed;
2215
2216 assert(Reachable.size() < F.size())(static_cast <bool> (Reachable.size() < F.size()) ? void
(0) : __assert_fail ("Reachable.size() < F.size()", "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 2216, __extension__ __PRETTY_FUNCTION__))
;
2217 NumRemoved += F.size()-Reachable.size();
2218
2219 // Loop over all of the basic blocks that are not reachable, dropping all of
2220 // their internal references. Update DDT and LVI if available.
2221 std::vector <DominatorTree::UpdateType> Updates;
2222 for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ++I) {
2223 auto *BB = &*I;
2224 if (Reachable.count(BB))
2225 continue;
2226 for (BasicBlock *Successor : successors(BB)) {
2227 if (Reachable.count(Successor))
2228 Successor->removePredecessor(BB);
2229 if (DDT)
2230 Updates.push_back({DominatorTree::Delete, BB, Successor});
2231 }
2232 if (LVI)
2233 LVI->eraseBlock(BB);
2234 BB->dropAllReferences();
2235 }
2236
2237 for (Function::iterator I = ++F.begin(); I != F.end();) {
2238 auto *BB = &*I;
2239 if (Reachable.count(BB)) {
2240 ++I;
2241 continue;
2242 }
2243 if (DDT) {
2244 DDT->deleteBB(BB); // deferred deletion of BB.
2245 ++I;
2246 } else {
2247 I = F.getBasicBlockList().erase(I);
2248 }
2249 }
2250
2251 if (DDT)
2252 DDT->applyUpdates(Updates);
2253 return true;
2254}
2255
2256void llvm::combineMetadata(Instruction *K, const Instruction *J,
2257 ArrayRef<unsigned> KnownIDs) {
2258 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2259 K->dropUnknownNonDebugMetadata(KnownIDs);
2260 K->getAllMetadataOtherThanDebugLoc(Metadata);
2261 for (const auto &MD : Metadata) {
2262 unsigned Kind = MD.first;
2263 MDNode *JMD = J->getMetadata(Kind);
2264 MDNode *KMD = MD.second;
2265
2266 switch (Kind) {
2267 default:
2268 K->setMetadata(Kind, nullptr); // Remove unknown metadata
2269 break;
2270 case LLVMContext::MD_dbg:
2271 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg")::llvm::llvm_unreachable_internal("getAllMetadataOtherThanDebugLoc returned a MD_dbg"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 2271)
;
2272 case LLVMContext::MD_tbaa:
2273 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2274 break;
2275 case LLVMContext::MD_alias_scope:
2276 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2277 break;
2278 case LLVMContext::MD_noalias:
2279 case LLVMContext::MD_mem_parallel_loop_access:
2280 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2281 break;
2282 case LLVMContext::MD_range:
2283 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2284 break;
2285 case LLVMContext::MD_fpmath:
2286 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2287 break;
2288 case LLVMContext::MD_invariant_load:
2289 // Only set the !invariant.load if it is present in both instructions.
2290 K->setMetadata(Kind, JMD);
2291 break;
2292 case LLVMContext::MD_nonnull:
2293 // Only set the !nonnull if it is present in both instructions.
2294 K->setMetadata(Kind, JMD);
2295 break;
2296 case LLVMContext::MD_invariant_group:
2297 // Preserve !invariant.group in K.
2298 break;
2299 case LLVMContext::MD_align:
2300 K->setMetadata(Kind,
2301 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2302 break;
2303 case LLVMContext::MD_dereferenceable:
2304 case LLVMContext::MD_dereferenceable_or_null:
2305 K->setMetadata(Kind,
2306 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2307 break;
2308 }
2309 }
2310 // Set !invariant.group from J if J has it. If both instructions have it
2311 // then we will just pick it from J - even when they are different.
2312 // Also make sure that K is load or store - f.e. combining bitcast with load
2313 // could produce bitcast with invariant.group metadata, which is invalid.
2314 // FIXME: we should try to preserve both invariant.group md if they are
2315 // different, but right now instruction can only have one invariant.group.
2316 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2317 if (isa<LoadInst>(K) || isa<StoreInst>(K))
2318 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2319}
2320
2321void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J) {
2322 unsigned KnownIDs[] = {
2323 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
2324 LLVMContext::MD_noalias, LLVMContext::MD_range,
2325 LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
2326 LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2327 LLVMContext::MD_dereferenceable,
2328 LLVMContext::MD_dereferenceable_or_null};
2329 combineMetadata(K, J, KnownIDs);
2330}
2331
2332template <typename RootType, typename DominatesFn>
2333static unsigned replaceDominatedUsesWith(Value *From, Value *To,
2334 const RootType &Root,
2335 const DominatesFn &Dominates) {
2336 assert(From->getType() == To->getType())(static_cast <bool> (From->getType() == To->getType
()) ? void (0) : __assert_fail ("From->getType() == To->getType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 2336, __extension__ __PRETTY_FUNCTION__))
;
2337
2338 unsigned Count = 0;
2339 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2340 UI != UE;) {
2341 Use &U = *UI++;
2342 if (!Dominates(Root, U))
2343 continue;
2344 U.set(To);
2345 LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Replace dominated use of '" <<
From->getName() << "' as " << *To << " in "
<< *U << "\n"; } } while (false)
2346 << "' as " << *To << " in " << *U << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Replace dominated use of '" <<
From->getName() << "' as " << *To << " in "
<< *U << "\n"; } } while (false)
;
2347 ++Count;
2348 }
2349 return Count;
2350}
2351
2352unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
2353 assert(From->getType() == To->getType())(static_cast <bool> (From->getType() == To->getType
()) ? void (0) : __assert_fail ("From->getType() == To->getType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Transforms/Utils/Local.cpp"
, 2353, __extension__ __PRETTY_FUNCTION__))
;
2354 auto *BB = From->getParent();
2355 unsigned Count = 0;
2356
2357 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2358 UI != UE;) {
2359 Use &U = *UI++;
2360 auto *I = cast<Instruction>(U.getUser());
2361 if (I->getParent() == BB)
2362 continue;
2363 U.set(To);
2364 ++Count;
2365 }
2366 return Count;
2367}
2368
2369unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2370 DominatorTree &DT,
2371 const BasicBlockEdge &Root) {
2372 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2373 return DT.dominates(Root, U);
2374 };
2375 return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2376}
2377
2378unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2379 DominatorTree &DT,
2380 const BasicBlock *BB) {
2381 auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) {
2382 auto *I = cast<Instruction>(U.getUser())->getParent();
2383 return DT.properlyDominates(BB, I);
2384 };
2385 return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates);
2386}
2387
2388bool llvm::callsGCLeafFunction(ImmutableCallSite CS,
2389 const TargetLibraryInfo &TLI) {
2390 // Check if the function is specifically marked as a gc leaf function.
2391 if (CS.hasFnAttr("gc-leaf-function"))
2392 return true;
2393 if (const Function *F = CS.getCalledFunction()) {
2394 if (F->hasFnAttribute("gc-leaf-function"))
2395 return true;
2396
2397 if (auto IID = F->getIntrinsicID())
2398 // Most LLVM intrinsics do not take safepoints.
2399 return IID != Intrinsic::experimental_gc_statepoint &&
2400 IID != Intrinsic::experimental_deoptimize;
2401 }
2402
2403 // Lib calls can be materialized by some passes, and won't be
2404 // marked as 'gc-leaf-function.' All available Libcalls are
2405 // GC-leaf.
2406 LibFunc LF;
2407 if (TLI.getLibFunc(CS, LF)) {
2408 return TLI.has(LF);
2409 }
2410
2411 return false;
2412}
2413
2414void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
2415 LoadInst &NewLI) {
2416 auto *NewTy = NewLI.getType();
2417
2418 // This only directly applies if the new type is also a pointer.
2419 if (NewTy->isPointerTy()) {
2420 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
2421 return;
2422 }
2423
2424 // The only other translation we can do is to integral loads with !range
2425 // metadata.
2426 if (!NewTy->isIntegerTy())
2427 return;
2428
2429 MDBuilder MDB(NewLI.getContext());
2430 const Value *Ptr = OldLI.getPointerOperand();
2431 auto *ITy = cast<IntegerType>(NewTy);
2432 auto *NullInt = ConstantExpr::getPtrToInt(
2433 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2434 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
2435 NewLI.setMetadata(LLVMContext::MD_range,
2436 MDB.createRange(NonNullInt, NullInt));
2437}
2438
2439void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2440 MDNode *N, LoadInst &NewLI) {
2441 auto *NewTy = NewLI.getType();
2442
2443 // Give up unless it is converted to a pointer where there is a single very
2444 // valuable mapping we can do reliably.
2445 // FIXME: It would be nice to propagate this in more ways, but the type
2446 // conversions make it hard.
2447 if (!NewTy->isPointerTy())
2448 return;
2449
2450 unsigned BitWidth = DL.getIndexTypeSizeInBits(NewTy);
2451 if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2452 MDNode *NN = MDNode::get(OldLI.getContext(), None);
2453 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2454 }
2455}
2456
2457namespace {
2458
2459/// A potential constituent of a bitreverse or bswap expression. See
2460/// collectBitParts for a fuller explanation.
2461struct BitPart {
2462 BitPart(Value *P, unsigned BW) : Provider(P) {
2463 Provenance.resize(BW);
2464 }
2465
2466 /// The Value that this is a bitreverse/bswap of.
2467 Value *Provider;
2468
2469 /// The "provenance" of each bit. Provenance[A] = B means that bit A
2470 /// in Provider becomes bit B in the result of this expression.
2471 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2472
2473 enum { Unset = -1 };
2474};
2475
2476} // end anonymous namespace
2477
2478/// Analyze the specified subexpression and see if it is capable of providing
2479/// pieces of a bswap or bitreverse. The subexpression provides a potential
2480/// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
2481/// the output of the expression came from a corresponding bit in some other
2482/// value. This function is recursive, and the end result is a mapping of
2483/// bitnumber to bitnumber. It is the caller's responsibility to validate that
2484/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
2485///
2486/// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
2487/// that the expression deposits the low byte of %X into the high byte of the
2488/// result and that all other bits are zero. This expression is accepted and a
2489/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
2490/// [0-7].
2491///
2492/// To avoid revisiting values, the BitPart results are memoized into the
2493/// provided map. To avoid unnecessary copying of BitParts, BitParts are
2494/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
2495/// store BitParts objects, not pointers. As we need the concept of a nullptr
2496/// BitParts (Value has been analyzed and the analysis failed), we an Optional
2497/// type instead to provide the same functionality.
2498///
2499/// Because we pass around references into \c BPS, we must use a container that
2500/// does not invalidate internal references (std::map instead of DenseMap).
2501static const Optional<BitPart> &
2502collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2503 std::map<Value *, Optional<BitPart>> &BPS) {
2504 auto I = BPS.find(V);
2505 if (I != BPS.end())
2506 return I->second;
2507
2508 auto &Result = BPS[V] = None;
2509 auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2510
2511 if (Instruction *I = dyn_cast<Instruction>(V)) {
2512 // If this is an or instruction, it may be an inner node of the bswap.
2513 if (I->getOpcode() == Instruction::Or) {
2514 auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
2515 MatchBitReversals, BPS);
2516 auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
2517 MatchBitReversals, BPS);
2518 if (!A || !B)
2519 return Result;
2520
2521 // Try and merge the two together.
2522 if (!A->Provider || A->Provider != B->Provider)
2523 return Result;
2524
2525 Result = BitPart(A->Provider, BitWidth);
2526 for (unsigned i = 0; i < A->Provenance.size(); ++i) {
2527 if (A->Provenance[i] != BitPart::Unset &&
2528 B->Provenance[i] != BitPart::Unset &&
2529 A->Provenance[i] != B->Provenance[i])
2530 return Result = None;
2531
2532 if (A->Provenance[i] == BitPart::Unset)
2533 Result->Provenance[i] = B->Provenance[i];
2534 else
2535 Result->Provenance[i] = A->Provenance[i];
2536 }
2537
2538 return Result;
2539 }
2540
2541 // If this is a logical shift by a constant, recurse then shift the result.
2542 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
2543 unsigned BitShift =
2544 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
2545 // Ensure the shift amount is defined.
2546 if (BitShift > BitWidth)
2547 return Result;
2548
2549 auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
2550 MatchBitReversals, BPS);
2551 if (!Res)
2552 return Result;
2553 Result = Res;
2554
2555 // Perform the "shift" on BitProvenance.
2556 auto &P = Result->Provenance;
2557 if (I->getOpcode() == Instruction::Shl) {
2558 P.erase(std::prev(P.end(), BitShift), P.end());
2559 P.insert(P.begin(), BitShift, BitPart::Unset);
2560 } else {
2561 P.erase(P.begin(), std::next(P.begin(), BitShift));
2562 P.insert(P.end(), BitShift, BitPart::Unset);
2563 }
2564
2565 return Result;
2566 }
2567
2568 // If this is a logical 'and' with a mask that clears bits, recurse then
2569 // unset the appropriate bits.
2570 if (I->getOpcode() == Instruction::And &&
2571 isa<ConstantInt>(I->getOperand(1))) {
2572 APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
2573 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
2574
2575 // Check that the mask allows a multiple of 8 bits for a bswap, for an
2576 // early exit.
2577 unsigned NumMaskedBits = AndMask.countPopulation();
2578 if (!MatchBitReversals && NumMaskedBits % 8 != 0)
2579 return Result;
2580
2581 auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
2582 MatchBitReversals, BPS);
2583 if (!Res)
2584 return Result;
2585 Result = Res;
2586
2587 for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1)
2588 // If the AndMask is zero for this bit, clear the bit.
2589 if ((AndMask & Bit) == 0)
2590 Result->Provenance[i] = BitPart::Unset;
2591 return Result;
2592 }
2593
2594 // If this is a zext instruction zero extend the result.
2595 if (I->getOpcode() == Instruction::ZExt) {
2596 auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
2597 MatchBitReversals, BPS);
2598 if (!Res)
2599 return Result;
2600
2601 Result = BitPart(Res->Provider, BitWidth);
2602 auto NarrowBitWidth =
2603 cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth();
2604 for (unsigned i = 0; i < NarrowBitWidth; ++i)
2605 Result->Provenance[i] = Res->Provenance[i];
2606 for (unsigned i = NarrowBitWidth; i < BitWidth; ++i)
2607 Result->Provenance[i] = BitPart::Unset;
2608 return Result;
2609 }
2610 }
2611
2612 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
2613 // the input value to the bswap/bitreverse.
2614 Result = BitPart(V, BitWidth);
2615 for (unsigned i = 0; i < BitWidth; ++i)
2616 Result->Provenance[i] = i;
2617 return Result;
2618}
2619
/// Return true when moving bit \p From to bit \p To (in a \p BitWidth-bit
/// value) is exactly what a bswap would do.
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // A byte swap never moves a bit within its byte, so the bit offset inside
  // the byte must be preserved.
  if ((From & 7U) != (To & 7U))
    return false;
  // Work in byte units: the source byte must mirror the destination byte
  // across the whole value.
  const unsigned FromByte = From / 8;
  const unsigned ToByte = To / 8;
  const unsigned NumBytes = BitWidth / 8;
  return FromByte == NumBytes - ToByte - 1;
}
2630
/// Return true when moving bit \p From to bit \p To is exactly what a
/// bitreverse would do: positions mirror around the middle of the value.
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  // Equivalent to From == BitWidth - To - 1 (both forms wrap identically
  // modulo 2^N in unsigned arithmetic).
  return From + To + 1 == BitWidth;
}
2635
2636bool llvm::recognizeBSwapOrBitReverseIdiom(
2637 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
2638 SmallVectorImpl<Instruction *> &InsertedInsts) {
2639 if (Operator::getOpcode(I) != Instruction::Or)
2640 return false;
2641 if (!MatchBSwaps && !MatchBitReversals)
2642 return false;
2643 IntegerType *ITy = dyn_cast<IntegerType>(I->getType());
2644 if (!ITy || ITy->getBitWidth() > 128)
2645 return false; // Can't do vectors or integers > 128 bits.
2646 unsigned BW = ITy->getBitWidth();
2647
2648 unsigned DemandedBW = BW;
2649 IntegerType *DemandedTy = ITy;
2650 if (I->hasOneUse()) {
2651 if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) {
2652 DemandedTy = cast<IntegerType>(Trunc->getType());
2653 DemandedBW = DemandedTy->getBitWidth();
2654 }
2655 }
2656
2657 // Try to find all the pieces corresponding to the bswap.
2658 std::map<Value *, Optional<BitPart>> BPS;
2659 auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS);
2660 if (!Res)
2661 return false;
2662 auto &BitProvenance = Res->Provenance;
2663
2664 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
2665 // only byteswap values with an even number of bytes.
2666 bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true;
2667 for (unsigned i = 0; i < DemandedBW; ++i) {
2668 OKForBSwap &=
2669 bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW);
2670 OKForBitReverse &=
2671 bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW);
2672 }
2673
2674 Intrinsic::ID Intrin;
2675 if (OKForBSwap && MatchBSwaps)
2676 Intrin = Intrinsic::bswap;
2677 else if (OKForBitReverse && MatchBitReversals)
2678 Intrin = Intrinsic::bitreverse;
2679 else
2680 return false;
2681
2682 if (ITy != DemandedTy) {
2683 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
2684 Value *Provider = Res->Provider;
2685 IntegerType *ProviderTy = cast<IntegerType>(Provider->getType());
2686 // We may need to truncate the provider.
2687 if (DemandedTy != ProviderTy) {
2688 auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy,
2689 "trunc", I);
2690 InsertedInsts.push_back(Trunc);
2691 Provider = Trunc;
2692 }
2693 auto *CI = CallInst::Create(F, Provider, "rev", I);
2694 InsertedInsts.push_back(CI);
2695 auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I);
2696 InsertedInsts.push_back(ExtInst);
2697 return true;
2698 }
2699
2700 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy);
2701 InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I));
2702 return true;
2703}
2704
2705// CodeGen has special handling for some string functions that may replace
2706// them with target-specific intrinsics. Since that'd skip our interceptors
2707// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
2708// we mark affected calls as NoBuiltin, which will disable optimization
2709// in CodeGen.
2710void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
2711 CallInst *CI, const TargetLibraryInfo *TLI) {
2712 Function *F = CI->getCalledFunction();
2713 LibFunc Func;
2714 if (F && !F->hasLocalLinkage() && F->hasName() &&
2715 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
2716 !F->doesNotAccessMemory())
2717 CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
2718}
2719
2720bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
2721 // We can't have a PHI with a metadata type.
2722 if (I->getOperand(OpIdx)->getType()->isMetadataTy())
2723 return false;
2724
2725 // Early exit.
2726 if (!isa<Constant>(I->getOperand(OpIdx)))
2727 return true;
2728
2729 switch (I->getOpcode()) {
2730 default:
2731 return true;
2732 case Instruction::Call:
2733 case Instruction::Invoke:
2734 // Can't handle inline asm. Skip it.
2735 if (isa<InlineAsm>(ImmutableCallSite(I).getCalledValue()))
2736 return false;
2737 // Many arithmetic intrinsics have no issue taking a
2738 // variable, however it's hard to distingish these from
2739 // specials such as @llvm.frameaddress that require a constant.
2740 if (isa<IntrinsicInst>(I))
2741 return false;
2742
2743 // Constant bundle operands may need to retain their constant-ness for
2744 // correctness.
2745 if (ImmutableCallSite(I).isBundleOperand(OpIdx))
2746 return false;
2747 return true;
2748 case Instruction::ShuffleVector:
2749 // Shufflevector masks are constant.
2750 return OpIdx != 2;
2751 case Instruction::Switch:
2752 case Instruction::ExtractValue:
2753 // All operands apart from the first are constant.
2754 return OpIdx == 0;
2755 case Instruction::InsertValue:
2756 // All operands apart from the first and the second are constant.
2757 return OpIdx < 2;
2758 case Instruction::Alloca:
2759 // Static allocas (constant size in the entry block) are handled by
2760 // prologue/epilogue insertion so they're free anyway. We definitely don't
2761 // want to make them non-constant.
2762 return !cast<AllocaInst>(I)->isStaticAlloca();
2763 case Instruction::GetElementPtr:
2764 if (OpIdx == 0)
2765 return true;
2766 gep_type_iterator It = gep_type_begin(I);
2767 for (auto E = std::next(It, OpIdx); It != E; ++It)
2768 if (It.isStruct())
2769 return false;
2770 return true;
2771 }
2772}

/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes the class definitions of all of the subclasses of the
11// Instruction class. This is meant to be an easy way to get access to all
12// instruction subclasses.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_IR_INSTRUCTIONS_H
17#define LLVM_IR_INSTRUCTIONS_H
18
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/OperandTraits.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Use.h"
38#include "llvm/IR/User.h"
39#include "llvm/IR/Value.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/ErrorHandling.h"
43#include <cassert>
44#include <cstddef>
45#include <cstdint>
46#include <iterator>
47
48namespace llvm {
49
50class APInt;
51class ConstantInt;
52class DataLayout;
53class LLVMContext;
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59/// an instruction to allocate memory on the stack
60class AllocaInst : public UnaryInstruction {
61 Type *AllocatedType;
62
63protected:
64 // Note: Instruction needs to be a friend here to call cloneImpl.
65 friend class Instruction;
66
67 AllocaInst *cloneImpl() const;
68
69public:
70 explicit AllocaInst(Type *Ty, unsigned AddrSpace,
71 Value *ArraySize = nullptr,
72 const Twine &Name = "",
73 Instruction *InsertBefore = nullptr);
74 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
75 const Twine &Name, BasicBlock *InsertAtEnd);
76
77 AllocaInst(Type *Ty, unsigned AddrSpace,
78 const Twine &Name, Instruction *InsertBefore = nullptr);
79 AllocaInst(Type *Ty, unsigned AddrSpace,
80 const Twine &Name, BasicBlock *InsertAtEnd);
81
82 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
83 const Twine &Name = "", Instruction *InsertBefore = nullptr);
84 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
85 const Twine &Name, BasicBlock *InsertAtEnd);
86
87 /// Return true if there is an allocation size parameter to the allocation
88 /// instruction that is not 1.
89 bool isArrayAllocation() const;
90
91 /// Get the number of elements allocated. For a simple allocation of a single
92 /// element, this will return a constant 1 value.
93 const Value *getArraySize() const { return getOperand(0); }
94 Value *getArraySize() { return getOperand(0); }
95
96 /// Overload to return most specific pointer type.
97 PointerType *getType() const {
98 return cast<PointerType>(Instruction::getType());
99 }
100
101 /// Get allocation size in bits. Returns None if size can't be determined,
102 /// e.g. in case of a VLA.
103 Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const;
104
105 /// Return the type that is being allocated by the instruction.
106 Type *getAllocatedType() const { return AllocatedType; }
107 /// for use only in special circumstances that need to generically
108 /// transform a whole instruction (eg: IR linking and vectorization).
109 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
110
111 /// Return the alignment of the memory that is being allocated by the
112 /// instruction.
113 unsigned getAlignment() const {
114 return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
115 }
116 void setAlignment(unsigned Align);
117
118 /// Return true if this alloca is in the entry block of the function and is a
119 /// constant size. If so, the code generator will fold it into the
120 /// prolog/epilog code, so it is basically free.
121 bool isStaticAlloca() const;
122
123 /// Return true if this alloca is used as an inalloca argument to a call. Such
124 /// allocas are never considered static even if they are in the entry block.
// The inalloca flag lives in bit 5 (mask 32) of the subclass data.
125 bool isUsedWithInAlloca() const {
126 return getSubclassDataFromInstruction() & 32;
127 }
128
129 /// Specify whether this alloca is used to represent the arguments to a call.
// Clears then conditionally re-sets bit 5, leaving all other bits untouched.
130 void setUsedWithInAlloca(bool V) {
131 setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
132 (V ? 32 : 0));
133 }
134
135 /// Return true if this alloca is used as a swifterror argument to a call.
// The swifterror flag lives in bit 6 (mask 64) of the subclass data.
136 bool isSwiftError() const {
137 return getSubclassDataFromInstruction() & 64;
138 }
139
140 /// Specify whether this alloca is used to represent a swifterror.
// Clears then conditionally re-sets bit 6, leaving all other bits untouched.
141 void setSwiftError(bool V) {
142 setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
143 (V ? 64 : 0));
144 }
145
146 // Methods for support type inquiry through isa, cast, and dyn_cast:
// An Instruction is an AllocaInst iff its opcode is Alloca.
147 static bool classof(const Instruction *I) {
148 return (I->getOpcode() == Instruction::Alloca);
149 }
// Value overload: must first be an Instruction, then defer to the check above.
150 static bool classof(const Value *V) {
151 return isa<Instruction>(V) && classof(cast<Instruction>(V));
152 }
153
154private:
155 // Shadow Instruction::setInstructionSubclassData with a private forwarding
156 // method so that subclasses cannot accidentally use it.
// Raw write of the packed flag/alignment bits; callers above are responsible
// for preserving the bits they do not own.
157 void setInstructionSubclassData(unsigned short D) {
158 Instruction::setInstructionSubclassData(D);
159 }
160};
161
162//===----------------------------------------------------------------------===//
163// LoadInst Class
164//===----------------------------------------------------------------------===//
165
166/// An instruction for reading from memory. This uses the SubclassData field in
167/// Value to store whether or not the load is volatile.
// NOTE(review): subclass-data bit layout as used by the accessors below:
//   bit 0    = volatile flag                 (isVolatile / setVolatile)
//   bits 1-5 = encoded alignment, 2^(E-1)    (getAlignment; E==0 -> 0)
//   bits 7-9 = AtomicOrdering                (getOrdering / setOrdering)
// Bit 6 is not referenced by any accessor visible in this class.
168class LoadInst : public UnaryInstruction {
169 void AssertOK();
170
171protected:
172 // Note: Instruction needs to be a friend here to call cloneImpl.
173 friend class Instruction;
174
175 LoadInst *cloneImpl() const;
176
177public:
// The delegating constructors below derive the loaded type from the pointer
// operand via cast<PointerType>, so Ptr must already have pointer type.
178 LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
179 LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
180 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
181 Instruction *InsertBefore = nullptr);
182 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
183 Instruction *InsertBefore = nullptr)
184 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
185 NameStr, isVolatile, InsertBefore) {}
186 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
187 BasicBlock *InsertAtEnd);
188 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
189 Instruction *InsertBefore = nullptr)
190 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
191 NameStr, isVolatile, Align, InsertBefore) {}
192 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
193 unsigned Align, Instruction *InsertBefore = nullptr);
194 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
195 unsigned Align, BasicBlock *InsertAtEnd);
196 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
197 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
198 Instruction *InsertBefore = nullptr)
199 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
200 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 unsigned Align, AtomicOrdering Order,
203 SyncScope::ID SSID = SyncScope::System,
204 Instruction *InsertBefore = nullptr);
205 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
206 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
207 BasicBlock *InsertAtEnd);
208 LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
209 LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
210 LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
211 bool isVolatile = false, Instruction *InsertBefore = nullptr);
212 explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
213 bool isVolatile = false,
214 Instruction *InsertBefore = nullptr)
215 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
216 NameStr, isVolatile, InsertBefore) {}
217 LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
218 BasicBlock *InsertAtEnd);
219
220 /// Return true if this is a load from a volatile memory location.
221 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
222
223 /// Specify whether this is a volatile load or not.
224 void setVolatile(bool V) {
225 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
226 (V ? 1 : 0));
227 }
228
229 /// Return the alignment of the access that is being performed.
// Decodes bits 1-5: encoded E yields 2^(E-1); E==0 decodes to 0 (no align).
230 unsigned getAlignment() const {
231 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
232 }
233
234 void setAlignment(unsigned Align);
235
236 /// Returns the ordering constraint of this load instruction.
237 AtomicOrdering getOrdering() const {
238 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
239 }
240
241 /// Sets the ordering constraint of this load instruction. May not be Release
242 /// or AcquireRelease.
243 void setOrdering(AtomicOrdering Ordering) {
244 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
245 ((unsigned)Ordering << 7));
246 }
247
248 /// Returns the synchronization scope ID of this load instruction.
249 SyncScope::ID getSyncScopeID() const {
250 return SSID;
251 }
252
253 /// Sets the synchronization scope ID of this load instruction.
254 void setSyncScopeID(SyncScope::ID SSID) {
255 this->SSID = SSID;
256 }
257
258 /// Sets the ordering constraint and the synchronization scope ID of this load
259 /// instruction.
260 void setAtomic(AtomicOrdering Ordering,
261 SyncScope::ID SSID = SyncScope::System) {
262 setOrdering(Ordering);
263 setSyncScopeID(SSID);
264 }
265
// "Simple" = neither atomic nor volatile; "unordered" additionally admits
// the Unordered atomic ordering.
266 bool isSimple() const { return !isAtomic() && !isVolatile(); }
267
268 bool isUnordered() const {
269 return (getOrdering() == AtomicOrdering::NotAtomic ||
270 getOrdering() == AtomicOrdering::Unordered) &&
271 !isVolatile();
272 }
273
// A load's single operand (index 0) is the pointer loaded from.
274 Value *getPointerOperand() { return getOperand(0); }
275 const Value *getPointerOperand() const { return getOperand(0); }
276 static unsigned getPointerOperandIndex() { return 0U; }
277 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
278
279 /// Returns the address space of the pointer operand.
280 unsigned getPointerAddressSpace() const {
281 return getPointerOperandType()->getPointerAddressSpace();
282 }
283
284 // Methods for support type inquiry through isa, cast, and dyn_cast:
285 static bool classof(const Instruction *I) {
286 return I->getOpcode() == Instruction::Load;
287 }
288 static bool classof(const Value *V) {
289 return isa<Instruction>(V) && classof(cast<Instruction>(V));
290 }
291
292private:
293 // Shadow Instruction::setInstructionSubclassData with a private forwarding
294 // method so that subclasses cannot accidentally use it.
295 void setInstructionSubclassData(unsigned short D) {
296 Instruction::setInstructionSubclassData(D);
297 }
298
299 /// The synchronization scope ID of this load instruction. Not quite enough
300 /// room in SubClassData for everything, so synchronization scope ID gets its
301 /// own field.
302 SyncScope::ID SSID;
303};
304
305//===----------------------------------------------------------------------===//
306// StoreInst Class
307//===----------------------------------------------------------------------===//
308
309/// An instruction for storing to memory.
// NOTE(review): subclass-data bit layout, mirroring LoadInst:
//   bit 0    = volatile flag                 (isVolatile / setVolatile)
//   bits 1-5 = encoded alignment, 2^(E-1)    (getAlignment; E==0 -> 0)
//   bits 7-9 = AtomicOrdering                (getOrdering / setOrdering)
// Operands: 0 = stored value, 1 = destination pointer (see accessors below).
310class StoreInst : public Instruction {
311 void AssertOK();
312
313protected:
314 // Note: Instruction needs to be a friend here to call cloneImpl.
315 friend class Instruction;
316
317 StoreInst *cloneImpl() const;
318
319public:
320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
323 Instruction *InsertBefore = nullptr);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
326 unsigned Align, Instruction *InsertBefore = nullptr);
327 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
328 unsigned Align, BasicBlock *InsertAtEnd);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
330 unsigned Align, AtomicOrdering Order,
331 SyncScope::ID SSID = SyncScope::System,
332 Instruction *InsertBefore = nullptr);
333 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
334 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
335 BasicBlock *InsertAtEnd);
336
337 // allocate space for exactly two operands
338 void *operator new(size_t s) {
339 return User::operator new(s, 2);
340 }
341
342 /// Return true if this is a store to a volatile memory location.
343 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
344
345 /// Specify whether this is a volatile store or not.
346 void setVolatile(bool V) {
347 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
348 (V ? 1 : 0));
349 }
350
351 /// Transparently provide more efficient getOperand methods.
352 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
353
354 /// Return the alignment of the access that is being performed
// Decodes bits 1-5: encoded E yields 2^(E-1); E==0 decodes to 0 (no align).
355 unsigned getAlignment() const {
356 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
357 }
358
359 void setAlignment(unsigned Align);
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
370 ((unsigned)Ordering << 7));
371 }
372
373 /// Returns the synchronization scope ID of this store instruction.
374 SyncScope::ID getSyncScopeID() const {
375 return SSID;
376 }
377
378 /// Sets the synchronization scope ID of this store instruction.
379 void setSyncScopeID(SyncScope::ID SSID) {
380 this->SSID = SSID;
381 }
382
383 /// Sets the ordering constraint and the synchronization scope ID of this
384 /// store instruction.
385 void setAtomic(AtomicOrdering Ordering,
386 SyncScope::ID SSID = SyncScope::System) {
387 setOrdering(Ordering);
388 setSyncScopeID(SSID);
389 }
390
// "Simple" = neither atomic nor volatile; "unordered" additionally admits
// the Unordered atomic ordering.
391 bool isSimple() const { return !isAtomic() && !isVolatile(); }
392
393 bool isUnordered() const {
394 return (getOrdering() == AtomicOrdering::NotAtomic ||
395 getOrdering() == AtomicOrdering::Unordered) &&
396 !isVolatile();
397 }
398
399 Value *getValueOperand() { return getOperand(0); }
400 const Value *getValueOperand() const { return getOperand(0); }
401
402 Value *getPointerOperand() { return getOperand(1); }
403 const Value *getPointerOperand() const { return getOperand(1); }
404 static unsigned getPointerOperandIndex() { return 1U; }
405 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
406
407 /// Returns the address space of the pointer operand.
408 unsigned getPointerAddressSpace() const {
409 return getPointerOperandType()->getPointerAddressSpace();
410 }
411
412 // Methods for support type inquiry through isa, cast, and dyn_cast:
413 static bool classof(const Instruction *I) {
414 return I->getOpcode() == Instruction::Store;
415 }
416 static bool classof(const Value *V) {
417 return isa<Instruction>(V) && classof(cast<Instruction>(V));
418 }
419
420private:
421 // Shadow Instruction::setInstructionSubclassData with a private forwarding
422 // method so that subclasses cannot accidentally use it.
423 void setInstructionSubclassData(unsigned short D) {
424 Instruction::setInstructionSubclassData(D);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
// Fixed operand count (2: value, pointer) for StoreInst, plus the
// out-of-line definitions of its operand accessors. The text below is the
// analyzer's inline expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS.
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<StoreInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 437, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<StoreInst>::op_begin(const_cast
<StoreInst*>(this))[i_nocapture].get()); } void StoreInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<StoreInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 437, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
StoreInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned StoreInst::getNumOperands() const { return OperandTraits
<StoreInst>::operands(this); } template <int Idx_nocapture
> Use &StoreInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
StoreInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
// NOTE(review): ordering is kept in bits 1+ of the subclass data (no mask on
// decode). Bit 0 is preserved by setOrdering but no accessor here reads it —
// presumably reserved; confirm against the constructors' out-of-line bodies.
444class FenceInst : public Instruction {
445 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
446
447protected:
448 // Note: Instruction needs to be a friend here to call cloneImpl.
449 friend class Instruction;
450
451 FenceInst *cloneImpl() const;
452
453public:
454 // Ordering may only be Acquire, Release, AcquireRelease, or
455 // SequentiallyConsistent.
456 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
457 SyncScope::ID SSID = SyncScope::System,
458 Instruction *InsertBefore = nullptr);
459 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
460 BasicBlock *InsertAtEnd);
461
462 // allocate space for exactly zero operands
463 void *operator new(size_t s) {
464 return User::operator new(s, 0);
465 }
466
467 /// Returns the ordering constraint of this fence instruction.
468 AtomicOrdering getOrdering() const {
469 return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
470 }
471
472 /// Sets the ordering constraint of this fence instruction. May only be
473 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
474 void setOrdering(AtomicOrdering Ordering) {
475 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
476 ((unsigned)Ordering << 1));
477 }
478
479 /// Returns the synchronization scope ID of this fence instruction.
480 SyncScope::ID getSyncScopeID() const {
481 return SSID;
482 }
483
484 /// Sets the synchronization scope ID of this fence instruction.
485 void setSyncScopeID(SyncScope::ID SSID) {
486 this->SSID = SSID;
487 }
488
489 // Methods for support type inquiry through isa, cast, and dyn_cast:
490 static bool classof(const Instruction *I) {
491 return I->getOpcode() == Instruction::Fence;
492 }
493 static bool classof(const Value *V) {
494 return isa<Instruction>(V) && classof(cast<Instruction>(V));
495 }
496
497private:
498 // Shadow Instruction::setInstructionSubclassData with a private forwarding
499 // method so that subclasses cannot accidentally use it.
500 void setInstructionSubclassData(unsigned short D) {
501 Instruction::setInstructionSubclassData(D);
502 }
503
504 /// The synchronization scope ID of this fence instruction. Not quite enough
505 /// room in SubClassData for everything, so synchronization scope ID gets its
506 /// own field.
507 SyncScope::ID SSID;
508};
509
510//===----------------------------------------------------------------------===//
511// AtomicCmpXchgInst Class
512//===----------------------------------------------------------------------===//
513
514/// an instruction that atomically checks whether a
515/// specified value is in a memory location, and, if it is, stores a new value
516/// there. Returns the value that was loaded.
517///
// NOTE(review): subclass-data bit layout as used by the accessors below:
//   bit 0    = volatile flag        (isVolatile / setVolatile)
//   bits 2-4 = success ordering     (mask 0x1c)
//   bits 5-7 = failure ordering     (mask 0xe0)
//   bit 8    = weak flag            (mask 0x100)
// Operands: 0 = pointer, 1 = compare value, 2 = new value.
518class AtomicCmpXchgInst : public Instruction {
519 void Init(Value *Ptr, Value *Cmp, Value *NewVal,
520 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
521 SyncScope::ID SSID);
522
523protected:
524 // Note: Instruction needs to be a friend here to call cloneImpl.
525 friend class Instruction;
526
527 AtomicCmpXchgInst *cloneImpl() const;
528
529public:
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering,
533 SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
534 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
535 AtomicOrdering SuccessOrdering,
536 AtomicOrdering FailureOrdering,
537 SyncScope::ID SSID, BasicBlock *InsertAtEnd);
538
539 // allocate space for exactly three operands
540 void *operator new(size_t s) {
541 return User::operator new(s, 3);
542 }
543
544 /// Return true if this is a cmpxchg from a volatile memory
545 /// location.
546 ///
547 bool isVolatile() const {
548 return getSubclassDataFromInstruction() & 1;
549 }
550
551 /// Specify whether this is a volatile cmpxchg.
552 ///
553 void setVolatile(bool V) {
554 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
555 (unsigned)V);
556 }
557
558 /// Return true if this cmpxchg may spuriously fail.
559 bool isWeak() const {
560 return getSubclassDataFromInstruction() & 0x100;
561 }
562
563 void setWeak(bool IsWeak) {
564 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
565 (IsWeak << 8));
566 }
567
568 /// Transparently provide more efficient getOperand methods.
569 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
570
571 /// Returns the success ordering constraint of this cmpxchg instruction.
572 AtomicOrdering getSuccessOrdering() const {
573 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
574 }
575
576 /// Sets the success ordering constraint of this cmpxchg instruction.
577 void setSuccessOrdering(AtomicOrdering Ordering) {
578 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 579, __extension__ __PRETTY_FUNCTION__))
579 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 579, __extension__ __PRETTY_FUNCTION__))
;
580 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
581 ((unsigned)Ordering << 2));
582 }
583
584 /// Returns the failure ordering constraint of this cmpxchg instruction.
585 AtomicOrdering getFailureOrdering() const {
586 return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
587 }
588
589 /// Sets the failure ordering constraint of this cmpxchg instruction.
590 void setFailureOrdering(AtomicOrdering Ordering) {
591 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 592, __extension__ __PRETTY_FUNCTION__))
592 "CmpXchg instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "CmpXchg instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 592, __extension__ __PRETTY_FUNCTION__))
;
593 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
594 ((unsigned)Ordering << 5));
595 }
596
597 /// Returns the synchronization scope ID of this cmpxchg instruction.
598 SyncScope::ID getSyncScopeID() const {
599 return SSID;
600 }
601
602 /// Sets the synchronization scope ID of this cmpxchg instruction.
603 void setSyncScopeID(SyncScope::ID SSID) {
604 this->SSID = SSID;
605 }
606
607 Value *getPointerOperand() { return getOperand(0); }
608 const Value *getPointerOperand() const { return getOperand(0); }
609 static unsigned getPointerOperandIndex() { return 0U; }
610
611 Value *getCompareOperand() { return getOperand(1); }
612 const Value *getCompareOperand() const { return getOperand(1); }
613
614 Value *getNewValOperand() { return getOperand(2); }
615 const Value *getNewValOperand() const { return getOperand(2); }
616
617 /// Returns the address space of the pointer operand.
618 unsigned getPointerAddressSpace() const {
619 return getPointerOperand()->getType()->getPointerAddressSpace();
620 }
621
622 /// Returns the strongest permitted ordering on failure, given the
623 /// desired ordering on success.
624 ///
625 /// If the comparison in a cmpxchg operation fails, there is no atomic store
626 /// so release semantics cannot be provided. So this function drops explicit
627 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
628 /// operation would remain SequentiallyConsistent.
629 static AtomicOrdering
630 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
631 switch (SuccessOrdering) {
632 default:
// NotAtomic/Unordered success orderings are invalid for cmpxchg.
633 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 633)
;
634 case AtomicOrdering::Release:
635 case AtomicOrdering::Monotonic:
636 return AtomicOrdering::Monotonic;
637 case AtomicOrdering::AcquireRelease:
638 case AtomicOrdering::Acquire:
639 return AtomicOrdering::Acquire;
640 case AtomicOrdering::SequentiallyConsistent:
641 return AtomicOrdering::SequentiallyConsistent;
642 }
643 }
644
645 // Methods for support type inquiry through isa, cast, and dyn_cast:
646 static bool classof(const Instruction *I) {
647 return I->getOpcode() == Instruction::AtomicCmpXchg;
648 }
649 static bool classof(const Value *V) {
650 return isa<Instruction>(V) && classof(cast<Instruction>(V));
651 }
652
653private:
654 // Shadow Instruction::setInstructionSubclassData with a private forwarding
655 // method so that subclasses cannot accidentally use it.
656 void setInstructionSubclassData(unsigned short D) {
657 Instruction::setInstructionSubclassData(D);
658 }
659
660 /// The synchronization scope ID of this cmpxchg instruction. Not quite
661 /// enough room in SubClassData for everything, so synchronization scope ID
662 /// gets its own field.
663 SyncScope::ID SSID;
664};
665
// Fixed operand count (3: pointer, compare, newval) for AtomicCmpXchgInst,
// plus the out-of-line definitions of its operand accessors. The text below
// is the analyzer's inline expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS.
666template <>
667struct OperandTraits<AtomicCmpXchgInst> :
668 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
669};
670
671DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 671, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicCmpXchgInst>::op_begin
(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture].get
()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 671, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicCmpXchgInst::getNumOperands() const { return
OperandTraits<AtomicCmpXchgInst>::operands(this); } template
<int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &AtomicCmpXchgInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
672
673//===----------------------------------------------------------------------===//
674// AtomicRMWInst Class
675//===----------------------------------------------------------------------===//
676
677/// an instruction that atomically reads a memory location,
678/// combines it with another value, and then stores the result back. Returns
679/// the old value.
680///
// NOTE(review): subclass-data bit layout as used by the accessors below:
//   bit 0    = volatile flag   (isVolatile / setVolatile)
//   bits 2-4 = AtomicOrdering  (getOrdering / setOrdering)
//   bits 5+  = BinOp operation (getOperation / setOperation)
// Operands: 0 = pointer, 1 = value operand.
681class AtomicRMWInst : public Instruction {
682protected:
683 // Note: Instruction needs to be a friend here to call cloneImpl.
684 friend class Instruction;
685
686 AtomicRMWInst *cloneImpl() const;
687
688public:
689 /// This enumeration lists the possible modifications atomicrmw can make. In
690 /// the descriptions, 'p' is the pointer to the instruction's memory location,
691 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
692 /// instruction. These instructions always return 'old'.
693 enum BinOp {
694 /// *p = v
695 Xchg,
696 /// *p = old + v
697 Add,
698 /// *p = old - v
699 Sub,
700 /// *p = old & v
701 And,
702 /// *p = ~(old & v)
703 Nand,
704 /// *p = old | v
705 Or,
706 /// *p = old ^ v
707 Xor,
708 /// *p = old >signed v ? old : v
709 Max,
710 /// *p = old <signed v ? old : v
711 Min,
712 /// *p = old >unsigned v ? old : v
713 UMax,
714 /// *p = old <unsigned v ? old : v
715 UMin,
716
717 FIRST_BINOP = Xchg,
718 LAST_BINOP = UMin,
719 BAD_BINOP
720 };
721
722 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
723 AtomicOrdering Ordering, SyncScope::ID SSID,
724 Instruction *InsertBefore = nullptr);
725 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
726 AtomicOrdering Ordering, SyncScope::ID SSID,
727 BasicBlock *InsertAtEnd);
728
729 // allocate space for exactly two operands
730 void *operator new(size_t s) {
731 return User::operator new(s, 2);
732 }
733
// The operation is stored in the bits above the flag/ordering fields.
734 BinOp getOperation() const {
735 return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
736 }
737
// Keeps the low 5 bits (volatile + ordering) and replaces the operation bits.
738 void setOperation(BinOp Operation) {
739 unsigned short SubclassData = getSubclassDataFromInstruction();
740 setInstructionSubclassData((SubclassData & 31) |
741 (Operation << 5));
742 }
743
744 /// Return true if this is a RMW on a volatile memory location.
745 ///
746 bool isVolatile() const {
747 return getSubclassDataFromInstruction() & 1;
748 }
749
750 /// Specify whether this is a volatile RMW or not.
751 ///
752 void setVolatile(bool V) {
753 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
754 (unsigned)V);
755 }
756
757 /// Transparently provide more efficient getOperand methods.
758 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
759
760 /// Returns the ordering constraint of this rmw instruction.
761 AtomicOrdering getOrdering() const {
762 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
763 }
764
765 /// Sets the ordering constraint of this rmw instruction.
766 void setOrdering(AtomicOrdering Ordering) {
767 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 768, __extension__ __PRETTY_FUNCTION__))
768 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 768, __extension__ __PRETTY_FUNCTION__))
;
769 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
770 ((unsigned)Ordering << 2));
771 }
772
773 /// Returns the synchronization scope ID of this rmw instruction.
774 SyncScope::ID getSyncScopeID() const {
775 return SSID;
776 }
777
778 /// Sets the synchronization scope ID of this rmw instruction.
779 void setSyncScopeID(SyncScope::ID SSID) {
780 this->SSID = SSID;
781 }
782
783 Value *getPointerOperand() { return getOperand(0); }
784 const Value *getPointerOperand() const { return getOperand(0); }
785 static unsigned getPointerOperandIndex() { return 0U; }
786
787 Value *getValOperand() { return getOperand(1); }
788 const Value *getValOperand() const { return getOperand(1); }
789
790 /// Returns the address space of the pointer operand.
791 unsigned getPointerAddressSpace() const {
792 return getPointerOperand()->getType()->getPointerAddressSpace();
793 }
794
795 // Methods for support type inquiry through isa, cast, and dyn_cast:
796 static bool classof(const Instruction *I) {
797 return I->getOpcode() == Instruction::AtomicRMW;
798 }
799 static bool classof(const Value *V) {
800 return isa<Instruction>(V) && classof(cast<Instruction>(V));
801 }
802
803private:
804 void Init(BinOp Operation, Value *Ptr, Value *Val,
805 AtomicOrdering Ordering, SyncScope::ID SSID);
806
807 // Shadow Instruction::setInstructionSubclassData with a private forwarding
808 // method so that subclasses cannot accidentally use it.
809 void setInstructionSubclassData(unsigned short D) {
810 Instruction::setInstructionSubclassData(D);
811 }
812
813 /// The synchronization scope ID of this rmw instruction. Not quite enough
814 /// room in SubClassData for everything, so synchronization scope ID gets its
815 /// own field.
816 SyncScope::ID SSID;
817};
818
// Fixed operand count (2: pointer, value) for AtomicRMWInst, plus the
// out-of-line definitions of its operand accessors. The text below is the
// analyzer's inline expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS.
819template <>
820struct OperandTraits<AtomicRMWInst>
821 : public FixedNumOperandTraits<AtomicRMWInst,2> {
822};
823
824DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 824, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicRMWInst>::op_begin(const_cast
<AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<AtomicRMWInst
>::operands(this) && "setOperand() out of range!")
? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 824, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicRMWInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicRMWInst::getNumOperands() const { return OperandTraits
<AtomicRMWInst>::operands(this); } template <int Idx_nocapture
> Use &AtomicRMWInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &AtomicRMWInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
825
826//===----------------------------------------------------------------------===//
827// GetElementPtrInst Class
828//===----------------------------------------------------------------------===//
829
830// checkGEPType - Simple wrapper function to give a better assertion failure
831// message on bad indexes for a gep instruction.
832//
// Identity pass-through used on the result of getIndexedType() in
// getGEPReturnType(): returns Ty unchanged, but the (analyzer-expanded)
// assert below fires in debug builds when Ty is null, i.e. when the GEP
// indices were invalid for the source element type.
833inline Type *checkGEPType(Type *Ty) {
834 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 834, __extension__ __PRETTY_FUNCTION__))
;
835 return Ty;
836}
837
838/// An instruction for type-safe pointer arithmetic used to
839/// access elements of arrays and structs.
840///
841class GetElementPtrInst : public Instruction {
842 Type *SourceElementType;
843 Type *ResultElementType;
844
  // Copy construction is private; cloning goes through cloneImpl().
845 GetElementPtrInst(const GetElementPtrInst &GEPI);
846
847 /// Constructors - Create a getelementptr instruction with a base pointer and
848 /// list of indices. The first ctor can optionally insert before an existing
849 /// instruction, the second appends the new instruction to the specified
850 /// BasicBlock.
851 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
852 ArrayRef<Value *> IdxList, unsigned Values,
853 const Twine &NameStr, Instruction *InsertBefore);
854 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
855 ArrayRef<Value *> IdxList, unsigned Values,
856 const Twine &NameStr, BasicBlock *InsertAtEnd);
857
858 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
859
860protected:
861 // Note: Instruction needs to be a friend here to call cloneImpl.
862 friend class Instruction;
863
864 GetElementPtrInst *cloneImpl() const;
865
866public:
  // Factory: allocates Values (= 1 pointer operand + number of indices)
  // operand slots via placement new. If PointeeType is null it is derived
  // from Ptr's scalar pointer element type; otherwise the (analyzer-expanded)
  // assert checks the caller-supplied type matches.
867 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
868 ArrayRef<Value *> IdxList,
869 const Twine &NameStr = "",
870 Instruction *InsertBefore = nullptr) {
871 unsigned Values = 1 + unsigned(IdxList.size());
872 if (!PointeeType)
873 PointeeType =
874 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
875 else
876 assert((static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 878, __extension__ __PRETTY_FUNCTION__))
877 PointeeType ==(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 878, __extension__ __PRETTY_FUNCTION__))
878 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 878, __extension__ __PRETTY_FUNCTION__))
;
879 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
880 NameStr, InsertBefore);
881 }
882
  // Same as above, but appends to the given basic block.
883 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
884 ArrayRef<Value *> IdxList,
885 const Twine &NameStr,
886 BasicBlock *InsertAtEnd) {
887 unsigned Values = 1 + unsigned(IdxList.size());
888 if (!PointeeType)
889 PointeeType =
890 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
891 else
892 assert((static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 894, __extension__ __PRETTY_FUNCTION__))
893 PointeeType ==(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 894, __extension__ __PRETTY_FUNCTION__))
894 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())(static_cast <bool> (PointeeType == cast<PointerType
>(Ptr->getType()->getScalarType())->getElementType
()) ? void (0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 894, __extension__ __PRETTY_FUNCTION__))
;
895 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
896 NameStr, InsertAtEnd);
897 }
898
899 /// Create an "inbounds" getelementptr. See the documentation for the
900 /// "inbounds" flag in LangRef.html for details.
901 static GetElementPtrInst *CreateInBounds(Value *Ptr,
902 ArrayRef<Value *> IdxList,
903 const Twine &NameStr = "",
904 Instruction *InsertBefore = nullptr){
905 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
906 }
907
  // Convenience: Create() followed by setting the inbounds flag.
908 static GetElementPtrInst *
909 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
910 const Twine &NameStr = "",
911 Instruction *InsertBefore = nullptr) {
912 GetElementPtrInst *GEP =
913 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
914 GEP->setIsInBounds(true);
915 return GEP;
916 }
917
918 static GetElementPtrInst *CreateInBounds(Value *Ptr,
919 ArrayRef<Value *> IdxList,
920 const Twine &NameStr,
921 BasicBlock *InsertAtEnd) {
922 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
923 }
924
925 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
926 ArrayRef<Value *> IdxList,
927 const Twine &NameStr,
928 BasicBlock *InsertAtEnd) {
929 GetElementPtrInst *GEP =
930 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
931 GEP->setIsInBounds(true);
932 return GEP;
933 }
934
935 /// Transparently provide more efficient getOperand methods.
936 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
937
938 Type *getSourceElementType() const { return SourceElementType; }
939
940 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
941 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
942
  // The cached result element type; the (analyzer-expanded) assert
  // cross-checks it against the instruction's pointer result type.
943 Type *getResultElementType() const {
944 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 945, __extension__ __PRETTY_FUNCTION__))
945 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 945, __extension__ __PRETTY_FUNCTION__))
;
946 return ResultElementType;
947 }
948
949 /// Returns the address space of this instruction's pointer type.
950 unsigned getAddressSpace() const {
951 // Note that this is always the same as the pointer operand's address space
952 // and that is cheaper to compute, so cheat here.
953 return getPointerAddressSpace();
954 }
955
956 /// Returns the type of the element that would be loaded with
957 /// a load instruction with the specified parameters.
958 ///
959 /// Null is returned if the indices are invalid for the specified
960 /// pointer type.
961 ///
962 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
963 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
964 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
965
  // Index operands start at operand 1; operand 0 is the pointer operand.
966 inline op_iterator idx_begin() { return op_begin()+1; }
967 inline const_op_iterator idx_begin() const { return op_begin()+1; }
968 inline op_iterator idx_end() { return op_end(); }
969 inline const_op_iterator idx_end() const { return op_end(); }
970
971 inline iterator_range<op_iterator> indices() {
972 return make_range(idx_begin(), idx_end());
973 }
974
975 inline iterator_range<const_op_iterator> indices() const {
976 return make_range(idx_begin(), idx_end());
977 }
978
979 Value *getPointerOperand() {
980 return getOperand(0);
981 }
982 const Value *getPointerOperand() const {
983 return getOperand(0);
984 }
985 static unsigned getPointerOperandIndex() {
986 return 0U; // get index for modifying correct operand.
987 }
988
989 /// Method to return the pointer operand as a
990 /// PointerType.
991 Type *getPointerOperandType() const {
992 return getPointerOperand()->getType();
993 }
994
995 /// Returns the address space of the pointer operand.
996 unsigned getPointerAddressSpace() const {
997 return getPointerOperandType()->getPointerAddressSpace();
998 }
999
1000 /// Returns the pointer type returned by the GEP
1001 /// instruction, which may be a vector of pointers.
1002 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
1003 return getGEPReturnType(
1004 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
1005 Ptr, IdxList);
1006 }
  // Scalar result is a pointer to the indexed type in Ptr's address space;
  // if the pointer operand or any index is a vector, the result is widened
  // to a vector of that many pointers.
1007 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1008 ArrayRef<Value *> IdxList) {
1009 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1010 Ptr->getType()->getPointerAddressSpace());
1011 // Vector GEP
1012 if (Ptr->getType()->isVectorTy()) {
1013 unsigned NumElem = Ptr->getType()->getVectorNumElements();
1014 return VectorType::get(PtrTy, NumElem);
1015 }
1016 for (Value *Index : IdxList)
1017 if (Index->getType()->isVectorTy()) {
1018 unsigned NumElem = Index->getType()->getVectorNumElements();
1019 return VectorType::get(PtrTy, NumElem);
1020 }
1021 // Scalar GEP
1022 return PtrTy;
1023 }
1024
1025 unsigned getNumIndices() const { // Note: always non-negative
1026 return getNumOperands() - 1;
1027 }
1028
1029 bool hasIndices() const {
1030 return getNumOperands() > 1;
1031 }
1032
1033 /// Return true if all of the indices of this GEP are
1034 /// zeros. If so, the result pointer and the first operand have the same
1035 /// value, just potentially different types.
1036 bool hasAllZeroIndices() const;
1037
1038 /// Return true if all of the indices of this GEP are
1039 /// constant integers. If so, the result pointer and the first operand have
1040 /// a constant offset between them.
1041 bool hasAllConstantIndices() const;
1042
1043 /// Set or clear the inbounds flag on this GEP instruction.
1044 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1045 void setIsInBounds(bool b = true);
1046
1047 /// Determine whether the GEP has the inbounds flag.
1048 bool isInBounds() const;
1049
1050 /// Accumulate the constant address offset of this GEP if possible.
1051 ///
1052 /// This routine accepts an APInt into which it will accumulate the constant
1053 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1054 /// all-constant, it returns false and the value of the offset APInt is
1055 /// undefined (it is *not* preserved!). The APInt passed into this routine
1056 /// must be at least as wide as the IntPtr type for the address space of
1057 /// the base GEP pointer.
1058 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1059
1060 // Methods for support type inquiry through isa, cast, and dyn_cast:
1061 static bool classof(const Instruction *I) {
1062 return (I->getOpcode() == Instruction::GetElementPtr);
1063 }
1064 static bool classof(const Value *V) {
1065 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1066 }
1067};
1068
// Operand layout for GetElementPtrInst: variadic, with a minimum of 1
// operand (per the VariadicOperandTraits<.., 1> parameter) — the pointer
// operand plus any number of index operands.
1069template <>
1070struct OperandTraits<GetElementPtrInst> :
1071 public VariadicOperandTraits<GetElementPtrInst, 1> {
1072};
1073
// Insert-before constructor: places the Values operand slots immediately
// before `this` (co-allocated hung-off uses), caches source/result element
// types, and sanity-checks the cached result type via the (analyzer-
// expanded) assert before wiring up operands in init().
1074GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1075 ArrayRef<Value *> IdxList, unsigned Values,
1076 const Twine &NameStr,
1077 Instruction *InsertBefore)
1078 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1079 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1080 Values, InsertBefore),
1081 SourceElementType(PointeeType),
1082 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1083 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1084, __extension__ __PRETTY_FUNCTION__))
1084 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1084, __extension__ __PRETTY_FUNCTION__))
;
1085 init(Ptr, IdxList, NameStr);
1086}
1087
// Insert-at-end constructor: identical to the insert-before form except the
// new instruction is appended to the given basic block.
1088GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1089 ArrayRef<Value *> IdxList, unsigned Values,
1090 const Twine &NameStr,
1091 BasicBlock *InsertAtEnd)
1092 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1093 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1094 Values, InsertAtEnd),
1095 SourceElementType(PointeeType),
1096 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1097 assert(ResultElementType ==(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1098, __extension__ __PRETTY_FUNCTION__))
1098 cast<PointerType>(getType()->getScalarType())->getElementType())(static_cast <bool> (ResultElementType == cast<PointerType
>(getType()->getScalarType())->getElementType()) ? void
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1098, __extension__ __PRETTY_FUNCTION__))
;
1099 init(Ptr, IdxList, NameStr);
1100}
1101
// Macro-generated operand accessors for GetElementPtrInst (shown here
// expanded by the analyzer): op_begin()/op_end(), range-checked
// getOperand()/setOperand(), getNumOperands(), and Op<Idx>(), all routed
// through OperandTraits<GetElementPtrInst>.
1102DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1102, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<GetElementPtrInst>::op_begin
(const_cast<GetElementPtrInst*>(this))[i_nocapture].get
()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1102, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
GetElementPtrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned GetElementPtrInst::getNumOperands() const { return
OperandTraits<GetElementPtrInst>::operands(this); } template
<int Idx_nocapture> Use &GetElementPtrInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &GetElementPtrInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1103
1104//===----------------------------------------------------------------------===//
1105// ICmpInst Class
1106//===----------------------------------------------------------------------===//
1107
1108/// This instruction compares its operands according to the predicate given
1109/// to the constructor. It only operates on integers or pointers. The operands
1110/// must be identical types.
1111/// Represent an integer comparison operator.
1112class ICmpInst: public CmpInst {
  // Debug-only invariant checks (called from the ctors under !NDEBUG):
  // integer predicate, identical operand types, and int-or-pointer
  // (possibly vector) operands. The asserts appear macro-expanded below.
1113 void AssertOK() {
1114 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1115, __extension__ __PRETTY_FUNCTION__))
1115 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1115, __extension__ __PRETTY_FUNCTION__))
;
1116 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
1117 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1117, __extension__ __PRETTY_FUNCTION__))
;
1118 // Check that the operands are the right type
1119 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1121, __extension__ __PRETTY_FUNCTION__))
1120 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1121, __extension__ __PRETTY_FUNCTION__))
1121 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1121, __extension__ __PRETTY_FUNCTION__))
;
1122 }
1123
1124protected:
1125 // Note: Instruction needs to be a friend here to call cloneImpl.
1126 friend class Instruction;
1127
1128 /// Clone an identical ICmpInst
1129 ICmpInst *cloneImpl() const;
1130
1131public:
1132 /// Constructor with insert-before-instruction semantics.
1133 ICmpInst(
1134 Instruction *InsertBefore, ///< Where to insert
1135 Predicate pred, ///< The predicate to use for the comparison
1136 Value *LHS, ///< The left-hand-side of the expression
1137 Value *RHS, ///< The right-hand-side of the expression
1138 const Twine &NameStr = "" ///< Name of the instruction
1139 ) : CmpInst(makeCmpResultType(LHS->getType()),
1140 Instruction::ICmp, pred, LHS, RHS, NameStr,
1141 InsertBefore) {
1142#ifndef NDEBUG
1143 AssertOK();
1144#endif
1145 }
1146
1147 /// Constructor with insert-at-end semantics.
1148 ICmpInst(
1149 BasicBlock &InsertAtEnd, ///< Block to insert into.
1150 Predicate pred, ///< The predicate to use for the comparison
1151 Value *LHS, ///< The left-hand-side of the expression
1152 Value *RHS, ///< The right-hand-side of the expression
1153 const Twine &NameStr = "" ///< Name of the instruction
1154 ) : CmpInst(makeCmpResultType(LHS->getType()),
1155 Instruction::ICmp, pred, LHS, RHS, NameStr,
1156 &InsertAtEnd) {
1157#ifndef NDEBUG
1158 AssertOK();
1159#endif
1160 }
1161
1162 /// Constructor with no-insertion semantics
1163 ICmpInst(
1164 Predicate pred, ///< The predicate to use for the comparison
1165 Value *LHS, ///< The left-hand-side of the expression
1166 Value *RHS, ///< The right-hand-side of the expression
1167 const Twine &NameStr = "" ///< Name of the instruction
1168 ) : CmpInst(makeCmpResultType(LHS->getType()),
1169 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1170#ifndef NDEBUG
1171 AssertOK();
1172#endif
1173 }
1174
1175 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1176 /// @returns the predicate that would be the result if the operand were
1177 /// regarded as signed.
1178 /// Return the signed version of the predicate
1179 Predicate getSignedPredicate() const {
1180 return getSignedPredicate(getPredicate());
1181 }
1182
1183 /// This is a static version that you can use without an instruction.
1184 /// Return the signed version of the predicate.
1185 static Predicate getSignedPredicate(Predicate pred);
1186
1187 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1188 /// @returns the predicate that would be the result if the operand were
1189 /// regarded as unsigned.
1190 /// Return the unsigned version of the predicate
1191 Predicate getUnsignedPredicate() const {
1192 return getUnsignedPredicate(getPredicate());
1193 }
1194
1195 /// This is a static version that you can use without an instruction.
1196 /// Return the unsigned version of the predicate.
1197 static Predicate getUnsignedPredicate(Predicate pred);
1198
1199 /// Return true if this predicate is either EQ or NE. This also
1200 /// tests for commutativity.
1201 static bool isEquality(Predicate P) {
1202 return P == ICMP_EQ || P == ICMP_NE;
1203 }
1204
1205 /// Return true if this predicate is either EQ or NE. This also
1206 /// tests for commutativity.
1207 bool isEquality() const {
1208 return isEquality(getPredicate());
1209 }
1210
1211 /// @returns true if the predicate of this ICmpInst is commutative
1212 /// Determine if this relation is commutative.
1213 bool isCommutative() const { return isEquality(); }
1214
1215 /// Return true if the predicate is relational (not EQ or NE).
1216 ///
1217 bool isRelational() const {
1218 return !isEquality();
1219 }
1220
1221 /// Return true if the predicate is relational (not EQ or NE).
1222 ///
1223 static bool isRelational(Predicate P) {
1224 return !isEquality(P);
1225 }
1226
1227 /// Exchange the two operands to this instruction in such a way that it does
1228 /// not modify the semantics of the instruction. The predicate value may be
1229 /// changed to retain the same result if the predicate is order dependent
1230 /// (e.g. ult).
1231 /// Swap operands and adjust predicate.
1232 void swapOperands() {
1233 setPredicate(getSwappedPredicate());
1234 Op<0>().swap(Op<1>());
1235 }
1236
1237 // Methods for support type inquiry through isa, cast, and dyn_cast:
1238 static bool classof(const Instruction *I) {
1239 return I->getOpcode() == Instruction::ICmp;
1240 }
1241 static bool classof(const Value *V) {
1242 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1243 }
1244};
1245
1246//===----------------------------------------------------------------------===//
1247// FCmpInst Class
1248//===----------------------------------------------------------------------===//
1249
1250/// This instruction compares its operands according to the predicate given
1251/// to the constructor. It only operates on floating point values or packed
1252/// vectors of floating point values. The operands must be identical types.
1253/// Represents a floating point comparison operator.
1254class FCmpInst: public CmpInst {
1255 void AssertOK() {
1256 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1256, __extension__ __PRETTY_FUNCTION__))
;
1257 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1258, __extension__ __PRETTY_FUNCTION__))
1258 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1258, __extension__ __PRETTY_FUNCTION__))
;
1259 // Check that the operands are the right type
1260 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1261, __extension__ __PRETTY_FUNCTION__))
1261 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1261, __extension__ __PRETTY_FUNCTION__))
;
1262 }
1263
1264protected:
1265 // Note: Instruction needs to be a friend here to call cloneImpl.
1266 friend class Instruction;
1267
1268 /// Clone an identical FCmpInst
1269 FCmpInst *cloneImpl() const;
1270
1271public:
1272 /// Constructor with insert-before-instruction semantics.
1273 FCmpInst(
1274 Instruction *InsertBefore, ///< Where to insert
1275 Predicate pred, ///< The predicate to use for the comparison
1276 Value *LHS, ///< The left-hand-side of the expression
1277 Value *RHS, ///< The right-hand-side of the expression
1278 const Twine &NameStr = "" ///< Name of the instruction
1279 ) : CmpInst(makeCmpResultType(LHS->getType()),
1280 Instruction::FCmp, pred, LHS, RHS, NameStr,
1281 InsertBefore) {
1282 AssertOK();
1283 }
1284
1285 /// Constructor with insert-at-end semantics.
1286 FCmpInst(
1287 BasicBlock &InsertAtEnd, ///< Block to insert into.
1288 Predicate pred, ///< The predicate to use for the comparison
1289 Value *LHS, ///< The left-hand-side of the expression
1290 Value *RHS, ///< The right-hand-side of the expression
1291 const Twine &NameStr = "" ///< Name of the instruction
1292 ) : CmpInst(makeCmpResultType(LHS->getType()),
1293 Instruction::FCmp, pred, LHS, RHS, NameStr,
1294 &InsertAtEnd) {
1295 AssertOK();
1296 }
1297
1298 /// Constructor with no-insertion semantics
1299 FCmpInst(
1300 Predicate pred, ///< The predicate to use for the comparison
1301 Value *LHS, ///< The left-hand-side of the expression
1302 Value *RHS, ///< The right-hand-side of the expression
1303 const Twine &NameStr = "" ///< Name of the instruction
1304 ) : CmpInst(makeCmpResultType(LHS->getType()),
1305 Instruction::FCmp, pred, LHS, RHS, NameStr) {
1306 AssertOK();
1307 }
1308
1309 /// @returns true if the predicate of this instruction is EQ or NE.
1310 /// Determine if this is an equality predicate.
1311 static bool isEquality(Predicate Pred) {
1312 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1313 Pred == FCMP_UNE;
1314 }
1315
1316 /// @returns true if the predicate of this instruction is EQ or NE.
1317 /// Determine if this is an equality predicate.
1318 bool isEquality() const { return isEquality(getPredicate()); }
1319
1320 /// @returns true if the predicate of this instruction is commutative.
1321 /// Determine if this is a commutative predicate.
1322 bool isCommutative() const {
1323 return isEquality() ||
1324 getPredicate() == FCMP_FALSE ||
1325 getPredicate() == FCMP_TRUE ||
1326 getPredicate() == FCMP_ORD ||
1327 getPredicate() == FCMP_UNO;
1328 }
1329
1330 /// @returns true if the predicate is relational (not EQ or NE).
1331 /// Determine if this a relational predicate.
1332 bool isRelational() const { return !isEquality(); }
1333
1334 /// Exchange the two operands to this instruction in such a way that it does
1335 /// not modify the semantics of the instruction. The predicate value may be
1336 /// changed to retain the same result if the predicate is order dependent
1337 /// (e.g. ult).
1338 /// Swap operands and adjust predicate.
1339 void swapOperands() {
1340 setPredicate(getSwappedPredicate());
1341 Op<0>().swap(Op<1>());
1342 }
1343
1344 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1345 static bool classof(const Instruction *I) {
1346 return I->getOpcode() == Instruction::FCmp;
1347 }
1348 static bool classof(const Value *V) {
1349 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1350 }
1351};
1352
1353class CallInst;
1354class InvokeInst;
1355
1356template <class T> struct CallBaseParent { using type = Instruction; };
1357
1358template <> struct CallBaseParent<InvokeInst> { using type = TerminatorInst; };
1359
1360//===----------------------------------------------------------------------===//
1361/// Base class for all callable instructions (InvokeInst and CallInst)
1362/// Holds everything related to calling a function, abstracting from the base
1363/// type @p BaseInstTy and the concrete instruction @p InstTy
1364///
1365template <class InstTy>
1366class CallBase : public CallBaseParent<InstTy>::type,
1367 public OperandBundleUser<InstTy, User::op_iterator> {
1368protected:
1369 AttributeList Attrs; ///< parameter attributes for callable
1370 FunctionType *FTy;
1371 using BaseInstTy = typename CallBaseParent<InstTy>::type;
1372
1373 template <class... ArgsTy>
1374 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1375 : BaseInstTy(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1376 bool hasDescriptor() const { return Value::HasDescriptor; }
1377
1378 using BaseInstTy::BaseInstTy;
1379
1380 using OperandBundleUser<InstTy,
1381 User::op_iterator>::isFnAttrDisallowedByOpBundle;
1382 using OperandBundleUser<InstTy, User::op_iterator>::getNumTotalBundleOperands;
1383 using OperandBundleUser<InstTy, User::op_iterator>::bundleOperandHasAttr;
1384 using Instruction::getSubclassDataFromInstruction;
1385 using Instruction::setInstructionSubclassData;
1386
1387public:
1388 using Instruction::getContext;
1389 using OperandBundleUser<InstTy, User::op_iterator>::hasOperandBundles;
1390 using OperandBundleUser<InstTy,
1391 User::op_iterator>::getBundleOperandsStartIndex;
1392
1393 static bool classof(const Instruction *I) {
1394 llvm_unreachable(::llvm::llvm_unreachable_internal("CallBase is not meant to be used as part of the classof hierarchy"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1395)
1395 "CallBase is not meant to be used as part of the classof hierarchy")::llvm::llvm_unreachable_internal("CallBase is not meant to be used as part of the classof hierarchy"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1395)
;
1396 }
1397
1398public:
1399 /// Return the parameter attributes for this call.
1400 ///
1401 AttributeList getAttributes() const { return Attrs; }
1402
1403 /// Set the parameter attributes for this call.
1404 ///
1405 void setAttributes(AttributeList A) { Attrs = A; }
1406
1407 FunctionType *getFunctionType() const { return FTy; }
1408
1409 void mutateFunctionType(FunctionType *FTy) {
1410 Value::mutateType(FTy->getReturnType());
1411 this->FTy = FTy;
1412 }
1413
1414 /// Return the number of call arguments.
1415 ///
1416 unsigned getNumArgOperands() const {
1417 return getNumOperands() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1418 }
1419
1420 /// getArgOperand/setArgOperand - Return/set the i-th call argument.
1421 ///
1422 Value *getArgOperand(unsigned i) const {
1423 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1423, __extension__ __PRETTY_FUNCTION__))
;
1424 return getOperand(i);
1425 }
1426 void setArgOperand(unsigned i, Value *v) {
1427 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1427, __extension__ __PRETTY_FUNCTION__))
;
1428 setOperand(i, v);
1429 }
1430
1431 /// Return the iterator pointing to the beginning of the argument list.
1432 User::op_iterator arg_begin() { return op_begin(); }
1433
1434 /// Return the iterator pointing to the end of the argument list.
1435 User::op_iterator arg_end() {
1436 // [ call args ], [ operand bundles ], callee
1437 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1438 }
1439
1440 /// Iteration adapter for range-for loops.
1441 iterator_range<User::op_iterator> arg_operands() {
1442 return make_range(arg_begin(), arg_end());
1443 }
1444
1445 /// Return the iterator pointing to the beginning of the argument list.
1446 User::const_op_iterator arg_begin() const { return op_begin(); }
1447
1448 /// Return the iterator pointing to the end of the argument list.
1449 User::const_op_iterator arg_end() const {
1450 // [ call args ], [ operand bundles ], callee
1451 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1452 }
1453
1454 /// Iteration adapter for range-for loops.
1455 iterator_range<User::const_op_iterator> arg_operands() const {
1456 return make_range(arg_begin(), arg_end());
1457 }
1458
1459 /// Wrappers for getting the \c Use of a call argument.
1460 const Use &getArgOperandUse(unsigned i) const {
1461 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1461, __extension__ __PRETTY_FUNCTION__))
;
1462 return User::getOperandUse(i);
1463 }
1464 Use &getArgOperandUse(unsigned i) {
1465 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1465, __extension__ __PRETTY_FUNCTION__))
;
1466 return User::getOperandUse(i);
1467 }
1468
1469 /// If one of the arguments has the 'returned' attribute, return its
1470 /// operand value. Otherwise, return nullptr.
1471 Value *getReturnedArgOperand() const {
1472 unsigned Index;
1473
1474 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
1475 return getArgOperand(Index - AttributeList::FirstArgIndex);
1476 if (const Function *F = getCalledFunction())
1477 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
1478 Index)
1479 return getArgOperand(Index - AttributeList::FirstArgIndex);
1480
1481 return nullptr;
1482 }
1483
1484 User::op_iterator op_begin() {
1485 return OperandTraits<CallBase>::op_begin(this);
1486 }
1487
1488 User::const_op_iterator op_begin() const {
1489 return OperandTraits<CallBase>::op_begin(const_cast<CallBase *>(this));
1490 }
1491
1492 User::op_iterator op_end() { return OperandTraits<CallBase>::op_end(this); }
1493
1494 User::const_op_iterator op_end() const {
1495 return OperandTraits<CallBase>::op_end(const_cast<CallBase *>(this));
1496 }
1497
1498 Value *getOperand(unsigned i_nocapture) const {
1499 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1500, __extension__ __PRETTY_FUNCTION__))
1500 "getOperand() out of range!")(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1500, __extension__ __PRETTY_FUNCTION__))
;
1501 return cast_or_null<Value>(OperandTraits<CallBase>::op_begin(
1502 const_cast<CallBase *>(this))[i_nocapture]
1503 .get());
1504 }
1505
1506 void setOperand(unsigned i_nocapture, Value *Val_nocapture) {
1507 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1508, __extension__ __PRETTY_FUNCTION__))
1508 "setOperand() out of range!")(static_cast <bool> (i_nocapture < OperandTraits<
CallBase>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1508, __extension__ __PRETTY_FUNCTION__))
;
1509 OperandTraits<CallBase>::op_begin(this)[i_nocapture] = Val_nocapture;
1510 }
1511
1512 unsigned getNumOperands() const {
1513 return OperandTraits<CallBase>::operands(this);
1514 }
1515 template <int Idx_nocapture> Use &Op() {
1516 return User::OpFrom<Idx_nocapture>(this);
1517 }
1518 template <int Idx_nocapture> const Use &Op() const {
1519 return User::OpFrom<Idx_nocapture>(this);
1520 }
1521
1522 /// Return the function called, or null if this is an
1523 /// indirect function invocation.
1524 ///
1525 Function *getCalledFunction() const {
1526 return dyn_cast<Function>(Op<-InstTy::ArgOffset>());
1527 }
1528
1529 /// Determine whether this call has the given attribute.
1530 bool hasFnAttr(Attribute::AttrKind Kind) const {
1531 assert(Kind != Attribute::NoBuiltin &&(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1532, __extension__ __PRETTY_FUNCTION__))
1532 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin")(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1532, __extension__ __PRETTY_FUNCTION__))
;
1533 return hasFnAttrImpl(Kind);
1534 }
1535
1536 /// Determine whether this call has the given attribute.
1537 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1538
1539 /// getCallingConv/setCallingConv - Get or set the calling convention of this
1540 /// function call.
1541 CallingConv::ID getCallingConv() const {
1542 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
1543 }
1544 void setCallingConv(CallingConv::ID CC) {
1545 auto ID = static_cast<unsigned>(CC);
1546 assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention")(static_cast <bool> (!(ID & ~CallingConv::MaxID) &&
"Unsupported calling convention") ? void (0) : __assert_fail
("!(ID & ~CallingConv::MaxID) && \"Unsupported calling convention\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1546, __extension__ __PRETTY_FUNCTION__))
;
1547 setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
1548 (ID << 2));
1549 }
1550
1551
1552 /// adds the attribute to the list of attributes.
1553 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1554 AttributeList PAL = getAttributes();
1555 PAL = PAL.addAttribute(getContext(), i, Kind);
1556 setAttributes(PAL);
1557 }
1558
1559 /// adds the attribute to the list of attributes.
1560 void addAttribute(unsigned i, Attribute Attr) {
1561 AttributeList PAL = getAttributes();
1562 PAL = PAL.addAttribute(getContext(), i, Attr);
1563 setAttributes(PAL);
1564 }
1565
1566 /// Adds the attribute to the indicated argument
1567 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1568 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1568, __extension__ __PRETTY_FUNCTION__))
;
1569 AttributeList PAL = getAttributes();
1570 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1571 setAttributes(PAL);
1572 }
1573
1574 /// Adds the attribute to the indicated argument
1575 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1576 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1576, __extension__ __PRETTY_FUNCTION__))
;
1577 AttributeList PAL = getAttributes();
1578 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1579 setAttributes(PAL);
1580 }
1581
1582 /// removes the attribute from the list of attributes.
1583 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1584 AttributeList PAL = getAttributes();
1585 PAL = PAL.removeAttribute(getContext(), i, Kind);
1586 setAttributes(PAL);
1587 }
1588
1589 /// removes the attribute from the list of attributes.
1590 void removeAttribute(unsigned i, StringRef Kind) {
1591 AttributeList PAL = getAttributes();
1592 PAL = PAL.removeAttribute(getContext(), i, Kind);
1593 setAttributes(PAL);
1594 }
1595
1596 /// Removes the attribute from the given argument
1597 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1598 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1598, __extension__ __PRETTY_FUNCTION__))
;
1599 AttributeList PAL = getAttributes();
1600 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1601 setAttributes(PAL);
1602 }
1603
1604 /// Removes the attribute from the given argument
1605 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1606 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1606, __extension__ __PRETTY_FUNCTION__))
;
1607 AttributeList PAL = getAttributes();
1608 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1609 setAttributes(PAL);
1610 }
1611
1612 /// adds the dereferenceable attribute to the list of attributes.
1613 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1614 AttributeList PAL = getAttributes();
1615 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1616 setAttributes(PAL);
1617 }
1618
1619 /// adds the dereferenceable_or_null attribute to the list of
1620 /// attributes.
1621 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1622 AttributeList PAL = getAttributes();
1623 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1624 setAttributes(PAL);
1625 }
1626
1627 /// Determine whether the return value has the given attribute.
1628 bool hasRetAttr(Attribute::AttrKind Kind) const {
1629 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
1630 return true;
1631
1632 // Look at the callee, if available.
1633 if (const Function *F = getCalledFunction())
1634 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
1635 return false;
1636 }
1637
1638 /// Determine whether the argument or parameter has the given attribute.
1639 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1640 assert(ArgNo < getNumArgOperands() && "Param index out of bounds!")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Param index out of bounds!") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Param index out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1640, __extension__ __PRETTY_FUNCTION__))
;
1641
1642 if (Attrs.hasParamAttribute(ArgNo, Kind))
1643 return true;
1644 if (const Function *F = getCalledFunction())
1645 return F->getAttributes().hasParamAttribute(ArgNo, Kind);
1646 return false;
1647 }
1648
1649 /// Get the attribute of a given kind at a position.
1650 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1651 return getAttributes().getAttribute(i, Kind);
1652 }
1653
1654 /// Get the attribute of a given kind at a position.
1655 Attribute getAttribute(unsigned i, StringRef Kind) const {
1656 return getAttributes().getAttribute(i, Kind);
1657 }
1658
1659 /// Get the attribute of a given kind from a given arg
1660 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1661 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1661, __extension__ __PRETTY_FUNCTION__))
;
1662 return getAttributes().getParamAttr(ArgNo, Kind);
1663 }
1664
1665 /// Get the attribute of a given kind from a given arg
1666 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1667 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1667, __extension__ __PRETTY_FUNCTION__))
;
1668 return getAttributes().getParamAttr(ArgNo, Kind);
1669 }
1670 /// Return true if the data operand at index \p i has the attribute \p
1671 /// A.
1672 ///
1673 /// Data operands include call arguments and values used in operand bundles,
1674 /// but does not include the callee operand. This routine dispatches to the
1675 /// underlying AttributeList or the OperandBundleUser as appropriate.
1676 ///
1677 /// The index \p i is interpreted as
1678 ///
1679 /// \p i == Attribute::ReturnIndex -> the return value
1680 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1681 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1682 /// (\p i - 1) in the operand list.
1683 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1684 // There are getNumOperands() - (InstTy::ArgOffset - 1) data operands.
1685 // The last operand is the callee.
1686 assert(i < (getNumOperands() - InstTy::ArgOffset + 1) &&(static_cast <bool> (i < (getNumOperands() - InstTy::
ArgOffset + 1) && "Data operand index out of bounds!"
) ? void (0) : __assert_fail ("i < (getNumOperands() - InstTy::ArgOffset + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1687, __extension__ __PRETTY_FUNCTION__))
1687 "Data operand index out of bounds!")(static_cast <bool> (i < (getNumOperands() - InstTy::
ArgOffset + 1) && "Data operand index out of bounds!"
) ? void (0) : __assert_fail ("i < (getNumOperands() - InstTy::ArgOffset + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1687, __extension__ __PRETTY_FUNCTION__))
;
1688
1689 // The attribute A can either be directly specified, if the operand in
1690 // question is a call argument; or be indirectly implied by the kind of its
1691 // containing operand bundle, if the operand is a bundle operand.
1692
1693 if (i == AttributeList::ReturnIndex)
1694 return hasRetAttr(Kind);
1695
1696 // FIXME: Avoid these i - 1 calculations and update the API to use
1697 // zero-based indices.
1698 if (i < (getNumArgOperands() + 1))
1699 return paramHasAttr(i - 1, Kind);
1700
1701 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&(static_cast <bool> (hasOperandBundles() && i >=
(getBundleOperandsStartIndex() + 1) && "Must be either a call argument or an operand bundle!"
) ? void (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1702, __extension__ __PRETTY_FUNCTION__))
1702 "Must be either a call argument or an operand bundle!")(static_cast <bool> (hasOperandBundles() && i >=
(getBundleOperandsStartIndex() + 1) && "Must be either a call argument or an operand bundle!"
) ? void (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1702, __extension__ __PRETTY_FUNCTION__))
;
1703 return bundleOperandHasAttr(i - 1, Kind);
1704 }
1705
1706 /// Extract the alignment of the return value.
1707 unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
1708
1709 /// Extract the alignment for a call or parameter (0=unknown).
1710 unsigned getParamAlignment(unsigned ArgNo) const {
1711 return Attrs.getParamAlignment(ArgNo);
1712 }
1713
1714 /// Extract the number of dereferenceable bytes for a call or
1715 /// parameter (0=unknown).
1716 uint64_t getDereferenceableBytes(unsigned i) const {
1717 return Attrs.getDereferenceableBytes(i);
1718 }
1719
1720 /// Extract the number of dereferenceable_or_null bytes for a call or
1721 /// parameter (0=unknown).
1722 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1723 return Attrs.getDereferenceableOrNullBytes(i);
1724 }
1725
1726 /// Determine if the return value is marked with NoAlias attribute.
1727 bool returnDoesNotAlias() const {
1728 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1729 }
1730
1731 /// Return true if the call should not be treated as a call to a
1732 /// builtin.
1733 bool isNoBuiltin() const {
1734 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1735 !hasFnAttrImpl(Attribute::Builtin);
1736 }
1737
1738 /// Determine if the call requires strict floating point semantics.
1739 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1740
1741 /// Return true if the call should not be inlined.
1742 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1743 void setIsNoInline() {
1744 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1745 }
1746 /// Determine if the call does not access memory.
1747 bool doesNotAccessMemory() const {
1748 return hasFnAttr(Attribute::ReadNone);
1749 }
1750 void setDoesNotAccessMemory() {
1751 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1752 }
1753
1754 /// Determine if the call does not access or only reads memory.
1755 bool onlyReadsMemory() const {
1756 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1757 }
1758 void setOnlyReadsMemory() {
1759 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1760 }
1761
1762 /// Determine if the call does not access or only writes memory.
1763 bool doesNotReadMemory() const {
1764 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1765 }
1766 void setDoesNotReadMemory() {
1767 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1768 }
1769
1770 /// Determine if the call can access memmory only using pointers based
1771 /// on its arguments.
1772 bool onlyAccessesArgMemory() const {
1773 return hasFnAttr(Attribute::ArgMemOnly);
1774 }
1775 void setOnlyAccessesArgMemory() {
1776 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1777 }
1778
1779 /// Determine if the function may only access memory that is
1780 /// inaccessible from the IR.
1781 bool onlyAccessesInaccessibleMemory() const {
1782 return hasFnAttr(Attribute::InaccessibleMemOnly);
1783 }
1784 void setOnlyAccessesInaccessibleMemory() {
1785 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1786 }
1787
1788 /// Determine if the function may only access memory that is
1789 /// either inaccessible from the IR or pointed to by its arguments.
1790 bool onlyAccessesInaccessibleMemOrArgMem() const {
1791 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1792 }
1793 void setOnlyAccessesInaccessibleMemOrArgMem() {
1794 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOrArgMemOnly);
1795 }
1796 /// Determine if the call cannot return.
1797 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1798 void setDoesNotReturn() {
1799 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1800 }
1801
1802 /// Determine if the call should not perform indirect branch tracking.
1803 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1804
1805 /// Determine if the call cannot unwind.
1806 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1807 void setDoesNotThrow() {
1808 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1809 }
1810
1811 /// Determine if the invoke cannot be duplicated.
1812 bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
1813 void setCannotDuplicate() {
1814 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1815 }
1816
1817 /// Determine if the invoke is convergent
1818 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1819 void setConvergent() {
1820 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1821 }
1822 void setNotConvergent() {
1823 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1824 }
1825
1826 /// Determine if the call returns a structure through first
1827 /// pointer argument.
1828 bool hasStructRetAttr() const {
1829 if (getNumArgOperands() == 0)
1830 return false;
1831
1832 // Be friendly and also check the callee.
1833 return paramHasAttr(0, Attribute::StructRet);
1834 }
1835
1836 /// Determine if any call argument is an aggregate passed by value.
1837 bool hasByValArgument() const {
1838 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1839 }
1840 /// Get a pointer to the function that is invoked by this
1841 /// instruction.
1842 const Value *getCalledValue() const { return Op<-InstTy::ArgOffset>(); }
1843 Value *getCalledValue() { return Op<-InstTy::ArgOffset>(); }
1844
1845 /// Set the function called.
1846 void setCalledFunction(Value* Fn) {
1847 setCalledFunction(
1848 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
1849 Fn);
1850 }
 /// Set the callee and record its FunctionType. Asserts (expanded below by
 /// the analyzer) that FTy matches the pointee type of Fn.
1851 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1852 this->FTy = FTy;
1853 assert(FTy == cast<FunctionType>((static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1854, __extension__ __PRETTY_FUNCTION__))
1854 cast<PointerType>(Fn->getType())->getElementType()))(static_cast <bool> (FTy == cast<FunctionType>( cast
<PointerType>(Fn->getType())->getElementType())) ?
void (0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 1854, __extension__ __PRETTY_FUNCTION__))
;
 // Store the new callee in the callee operand slot.
1855 Op<-InstTy::ArgOffset>() = Fn;
1856 }
1857
1858protected:
 /// Shared implementation for hasFnAttr: an attribute counts as present if
 /// it is (1) on the call instruction itself, or (2) on the called function,
 /// unless an operand bundle disallows inferring it from the callee.
1859 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
1860 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
1861 return true;
1862
1863 // Operand bundles override attributes on the called function, but don't
1864 // override attributes directly present on the call instruction.
1865 if (isFnAttrDisallowedByOpBundle(Kind))
1866 return false;
1867
 // Fall back to the callee's attributes when the callee is a Function
 // (indirect calls return null here and contribute nothing).
1868 if (const Function *F = getCalledFunction())
1869 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
1870 Kind);
1871 return false;
1872 }
1873};
1874
1875//===----------------------------------------------------------------------===//
1876/// This class represents a function call, abstracting a target
1877/// machine's calling convention. This class uses low bit of the SubClassData
1878/// field to indicate whether or not this is a tail call. The rest of the bits
1879/// hold the calling convention of the call.
1880///
1881class CallInst : public CallBase<CallInst> {
1882 friend class OperandBundleUser<CallInst, User::op_iterator>;
1883
1884 CallInst(const CallInst &CI);
1885
1886 /// Construct a CallInst given a range of arguments.
1887 /// Construct a CallInst from a range of arguments
1888 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1889 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1890 Instruction *InsertBefore);
1891
 // Convenience ctor: derives the FunctionType from Func's pointee type.
1892 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1893 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1894 Instruction *InsertBefore)
1895 : CallInst(cast<FunctionType>(
1896 cast<PointerType>(Func->getType())->getElementType()),
1897 Func, Args, Bundles, NameStr, InsertBefore) {}
1898
1899 inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
1900 Instruction *InsertBefore)
1901 : CallInst(Func, Args, None, NameStr, InsertBefore) {}
1902
1903 /// Construct a CallInst given a range of arguments.
1904 /// Construct a CallInst from a range of arguments
1905 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1906 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1907 BasicBlock *InsertAtEnd);
1908
1909 explicit CallInst(Value *F, const Twine &NameStr, Instruction *InsertBefore);
1910
1911 CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
1912
 // init() overloads fill in operands/attributes after placement-new; the
 // Value*-only form derives the FunctionType from Func's pointee type.
1913 void init(Value *Func, ArrayRef<Value *> Args,
1914 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
1915 init(cast<FunctionType>(
1916 cast<PointerType>(Func->getType())->getElementType()),
1917 Func, Args, Bundles, NameStr);
1918 }
1919 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1920 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1921 void init(Value *Func, const Twine &NameStr);
1922
1923protected:
1924 // Note: Instruction needs to be a friend here to call cloneImpl.
1925 friend class Instruction;
1926
1927 CallInst *cloneImpl() const;
1928
1929public:
 // Distance from the end of the operand list back to the callee operand
 // (CallBase reads it via Op<-ArgOffset>()).
1930 static constexpr int ArgOffset = 1;
1931
1932 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1933 ArrayRef<OperandBundleDef> Bundles = None,
1934 const Twine &NameStr = "",
1935 Instruction *InsertBefore = nullptr) {
1936 return Create(cast<FunctionType>(
1937 cast<PointerType>(Func->getType())->getElementType()),
1938 Func, Args, Bundles, NameStr, InsertBefore);
1939 }
1940
1941 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1942 const Twine &NameStr,
1943 Instruction *InsertBefore = nullptr) {
1944 return Create(cast<FunctionType>(
1945 cast<PointerType>(Func->getType())->getElementType()),
1946 Func, Args, None, NameStr, InsertBefore);
1947 }
1948
 // The "+ 1" in the placement-new operand counts below reserves the slot
 // for the callee operand itself.
1949 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1950 const Twine &NameStr,
1951 Instruction *InsertBefore = nullptr) {
1952 return new (unsigned(Args.size() + 1))
1953 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1954 }
1955
1956 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1957 ArrayRef<OperandBundleDef> Bundles = None,
1958 const Twine &NameStr = "",
1959 Instruction *InsertBefore = nullptr) {
1960 const unsigned TotalOps =
1961 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1962 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1963
1964 return new (TotalOps, DescriptorBytes)
1965 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1966 }
1967
1968 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1969 ArrayRef<OperandBundleDef> Bundles,
1970 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1971 const unsigned TotalOps =
1972 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1973 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1974
1975 return new (TotalOps, DescriptorBytes)
1976 CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
1977 }
1978
1979 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1980 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1981 return new (unsigned(Args.size() + 1))
1982 CallInst(Func, Args, None, NameStr, InsertAtEnd);
1983 }
1984
1985 static CallInst *Create(Value *F, const Twine &NameStr = "",
1986 Instruction *InsertBefore = nullptr) {
1987 return new (1) CallInst(F, NameStr, InsertBefore);
1988 }
1989
1990 static CallInst *Create(Value *F, const Twine &NameStr,
1991 BasicBlock *InsertAtEnd) {
1992 return new (1) CallInst(F, NameStr, InsertAtEnd);
1993 }
1994
1995 /// Create a clone of \p CI with a different set of operand bundles and
1996 /// insert it before \p InsertPt.
1997 ///
1998 /// The returned call instruction is identical \p CI in every way except that
1999 /// the operand bundles for the new instruction are set to the operand bundles
2000 /// in \p Bundles.
2001 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
2002 Instruction *InsertPt = nullptr);
2003
2004 /// Generate the IR for a call to malloc:
2005 /// 1. Compute the malloc call's argument as the specified type's size,
2006 /// possibly multiplied by the array size if the array size is not
2007 /// constant 1.
2008 /// 2. Call malloc with that argument.
2009 /// 3. Bitcast the result of the malloc call to the specified type.
2010 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2011 Type *AllocTy, Value *AllocSize,
2012 Value *ArraySize = nullptr,
2013 Function *MallocF = nullptr,
2014 const Twine &Name = "");
2015 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2016 Type *AllocTy, Value *AllocSize,
2017 Value *ArraySize = nullptr,
2018 Function *MallocF = nullptr,
2019 const Twine &Name = "");
2020 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2021 Type *AllocTy, Value *AllocSize,
2022 Value *ArraySize = nullptr,
2023 ArrayRef<OperandBundleDef> Bundles = None,
2024 Function *MallocF = nullptr,
2025 const Twine &Name = "");
2026 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2027 Type *AllocTy, Value *AllocSize,
2028 Value *ArraySize = nullptr,
2029 ArrayRef<OperandBundleDef> Bundles = None,
2030 Function *MallocF = nullptr,
2031 const Twine &Name = "");
2032 /// Generate the IR for a call to the builtin free function.
2033 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
2034 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
2035 static Instruction *CreateFree(Value *Source,
2036 ArrayRef<OperandBundleDef> Bundles,
2037 Instruction *InsertBefore);
2038 static Instruction *CreateFree(Value *Source,
2039 ArrayRef<OperandBundleDef> Bundles,
2040 BasicBlock *InsertAtEnd);
2041
 // Tail-call kind is packed into the low two bits of the instruction's
 // subclass data (masked with 3 in the accessors below).
2042 // Note that 'musttail' implies 'tail'.
2043 enum TailCallKind {
2044 TCK_None = 0,
2045 TCK_Tail = 1,
2046 TCK_MustTail = 2,
2047 TCK_NoTail = 3
2048 };
2049 TailCallKind getTailCallKind() const {
2050 return TailCallKind(getSubclassDataFromInstruction() & 3);
2051 }
2052
2053 bool isTailCall() const {
2054 unsigned Kind = getSubclassDataFromInstruction() & 3;
2055 return Kind == TCK_Tail || Kind == TCK_MustTail;
2056 }
2057
2058 bool isMustTailCall() const {
2059 return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
2060 }
2061
2062 bool isNoTailCall() const {
2063 return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
2064 }
2065
 // Clears, then rewrites, only the two tail-call bits; the remaining
 // subclass-data bits (calling convention etc.) are preserved.
2066 void setTailCall(bool isTC = true) {
2067 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2068 unsigned(isTC ? TCK_Tail : TCK_None));
2069 }
2070
2071 void setTailCallKind(TailCallKind TCK) {
2072 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2073 unsigned(TCK));
2074 }
2075
2076 /// Return true if the call can return twice
2077 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
2078 void setCanReturnTwice() {
2079 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
2080 }
2081
2082 /// Check if this call is an inline asm statement.
2083 bool isInlineAsm() const { return isa<InlineAsm>(Op<-1>()); }
2084
2085 // Methods for support type inquiry through isa, cast, and dyn_cast:
2086 static bool classof(const Instruction *I) {
2087 return I->getOpcode() == Instruction::Call;
2088 }
2089 static bool classof(const Value *V) {
2090 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2091 }
2092
2093private:
2094 // Shadow Instruction::setInstructionSubclassData with a private forwarding
2095 // method so that subclasses cannot accidentally use it.
2096 void setInstructionSubclassData(unsigned short D) {
2097 Instruction::setInstructionSubclassData(D);
2098 }
2099};
2100
// CallInst has a variable number of operands; the trailing 1 reserves the
// fixed callee operand at the end of the list.
2101template <>
2102struct OperandTraits<CallBase<CallInst>>
2103 : public VariadicOperandTraits<CallBase<CallInst>, 1> {};
2104
// Out-of-line ctor (insert-at-end-of-block form): the result type is taken
// from Func's pointee FunctionType; operand storage is co-allocated, so the
// operand list begins op_end() minus (args + bundle inputs + callee).
2105CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
2106 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2107 BasicBlock *InsertAtEnd)
2108 : CallBase<CallInst>(
2109 cast<FunctionType>(
2110 cast<PointerType>(Func->getType())->getElementType())
2111 ->getReturnType(),
2112 Instruction::Call,
2113 OperandTraits<CallBase<CallInst>>::op_end(this) -
2114 (Args.size() + CountBundleInputs(Bundles) + 1),
2115 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
2116 init(Func, Args, Bundles, NameStr);
2117}
2118
// Out-of-line ctor (insert-before form) with an explicit FunctionType; same
// operand-layout computation as the block-insertion ctor above it in the file.
2119CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
2120 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2121 Instruction *InsertBefore)
2122 : CallBase<CallInst>(Ty->getReturnType(), Instruction::Call,
2123 OperandTraits<CallBase<CallInst>>::op_end(this) -
2124 (Args.size() + CountBundleInputs(Bundles) + 1),
2125 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
2126 InsertBefore) {
2127 init(Ty, Func, Args, Bundles, NameStr);
2128}
2129
2130//===----------------------------------------------------------------------===//
2131// SelectInst Class
2132//===----------------------------------------------------------------------===//
2133
2134/// This class represents the LLVM 'select' instruction.
2135///
 /// Operands: Op<0> = condition, Op<1> = true value, Op<2> = false value.
 /// The instruction's type is taken from the true value (S1).
2136class SelectInst : public Instruction {
2137 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2138 Instruction *InsertBefore)
2139 : Instruction(S1->getType(), Instruction::Select,
2140 &Op<0>(), 3, InsertBefore) {
2141 init(C, S1, S2);
2142 setName(NameStr);
2143 }
2144
2145 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2146 BasicBlock *InsertAtEnd)
2147 : Instruction(S1->getType(), Instruction::Select,
2148 &Op<0>(), 3, InsertAtEnd) {
2149 init(C, S1, S2);
2150 setName(NameStr);
2151 }
2152
 // Validates the operands (assert expanded by the analyzer below) and
 // stores them into the three operand slots.
2153 void init(Value *C, Value *S1, Value *S2) {
2154 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2154, __extension__ __PRETTY_FUNCTION__))
;
2155 Op<0>() = C;
2156 Op<1>() = S1;
2157 Op<2>() = S2;
2158 }
2159
2160protected:
2161 // Note: Instruction needs to be a friend here to call cloneImpl.
2162 friend class Instruction;
2163
2164 SelectInst *cloneImpl() const;
2165
2166public:
 // If MDFrom is given, the new select inherits its metadata.
2167 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2168 const Twine &NameStr = "",
2169 Instruction *InsertBefore = nullptr,
2170 Instruction *MDFrom = nullptr) {
2171 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
2172 if (MDFrom)
2173 Sel->copyMetadata(*MDFrom);
2174 return Sel;
2175 }
2176
2177 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2178 const Twine &NameStr,
2179 BasicBlock *InsertAtEnd) {
2180 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
2181 }
2182
2183 const Value *getCondition() const { return Op<0>(); }
2184 const Value *getTrueValue() const { return Op<1>(); }
2185 const Value *getFalseValue() const { return Op<2>(); }
2186 Value *getCondition() { return Op<0>(); }
2187 Value *getTrueValue() { return Op<1>(); }
2188 Value *getFalseValue() { return Op<2>(); }
2189
2190 void setCondition(Value *V) { Op<0>() = V; }
2191 void setTrueValue(Value *V) { Op<1>() = V; }
2192 void setFalseValue(Value *V) { Op<2>() = V; }
2193
2194 /// Return a string if the specified operands are invalid
2195 /// for a select operation, otherwise return null.
2196 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
2197
2198 /// Transparently provide more efficient getOperand methods.
2199 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2200
2201 OtherOps getOpcode() const {
2202 return static_cast<OtherOps>(Instruction::getOpcode());
2203 }
2204
2205 // Methods for support type inquiry through isa, cast, and dyn_cast:
2206 static bool classof(const Instruction *I) {
2207 return I->getOpcode() == Instruction::Select;
2208 }
2209 static bool classof(const Value *V) {
2210 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2211 }
2212};
2213
// SelectInst always has exactly 3 operands (cond, true, false).
2214template <>
2215struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
2216};
2217
// Macro expansion (shown inline by the analyzer): out-of-line definitions of
// SelectInst's op_begin/op_end/getOperand/setOperand/getNumOperands/Op<>,
// with range-checking asserts expanded to __assert_fail calls.
2218DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SelectInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2218, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SelectInst>::op_begin(const_cast
<SelectInst*>(this))[i_nocapture].get()); } void SelectInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SelectInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2218, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SelectInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SelectInst::getNumOperands() const { return OperandTraits
<SelectInst>::operands(this); } template <int Idx_nocapture
> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2219
2220//===----------------------------------------------------------------------===//
2221// VAArgInst Class
2222//===----------------------------------------------------------------------===//
2223
2224/// This class represents the va_arg llvm instruction, which returns
2225/// an argument of the specified type given a va_list and increments that list
2226///
 /// Single operand: the va_list pointer (getPointerOperand, index 0).
2227class VAArgInst : public UnaryInstruction {
2228protected:
2229 // Note: Instruction needs to be a friend here to call cloneImpl.
2230 friend class Instruction;
2231
2232 VAArgInst *cloneImpl() const;
2233
2234public:
2235 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
2236 Instruction *InsertBefore = nullptr)
2237 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
2238 setName(NameStr);
2239 }
2240
2241 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
2242 BasicBlock *InsertAtEnd)
2243 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
2244 setName(NameStr);
2245 }
2246
2247 Value *getPointerOperand() { return getOperand(0); }
2248 const Value *getPointerOperand() const { return getOperand(0); }
2249 static unsigned getPointerOperandIndex() { return 0U; }
2250
2251 // Methods for support type inquiry through isa, cast, and dyn_cast:
2252 static bool classof(const Instruction *I) {
2253 return I->getOpcode() == VAArg;
2254 }
2255 static bool classof(const Value *V) {
2256 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2257 }
2258};
2259
2260//===----------------------------------------------------------------------===//
2261// ExtractElementInst Class
2262//===----------------------------------------------------------------------===//
2263
2264/// This instruction extracts a single (scalar)
2265/// element from a VectorType value
2266///
 /// Operands: Op<0> = vector, Op<1> = index.
2267class ExtractElementInst : public Instruction {
2268 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
2269 Instruction *InsertBefore = nullptr);
2270 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
2271 BasicBlock *InsertAtEnd);
2272
2273protected:
2274 // Note: Instruction needs to be a friend here to call cloneImpl.
2275 friend class Instruction;
2276
2277 ExtractElementInst *cloneImpl() const;
2278
2279public:
2280 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2281 const Twine &NameStr = "",
2282 Instruction *InsertBefore = nullptr) {
2283 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
2284 }
2285
2286 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2287 const Twine &NameStr,
2288 BasicBlock *InsertAtEnd) {
2289 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
2290 }
2291
2292 /// Return true if an extractelement instruction can be
2293 /// formed with the specified operands.
2294 static bool isValidOperands(const Value *Vec, const Value *Idx);
2295
2296 Value *getVectorOperand() { return Op<0>(); }
2297 Value *getIndexOperand() { return Op<1>(); }
2298 const Value *getVectorOperand() const { return Op<0>(); }
2299 const Value *getIndexOperand() const { return Op<1>(); }
2300
2301 VectorType *getVectorOperandType() const {
2302 return cast<VectorType>(getVectorOperand()->getType());
2303 }
2304
2305 /// Transparently provide more efficient getOperand methods.
2306 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2307
2308 // Methods for support type inquiry through isa, cast, and dyn_cast:
2309 static bool classof(const Instruction *I) {
2310 return I->getOpcode() == Instruction::ExtractElement;
2311 }
2312 static bool classof(const Value *V) {
2313 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2314 }
2315};
2316
// ExtractElementInst always has exactly 2 operands (vector, index).
2317template <>
2318struct OperandTraits<ExtractElementInst> :
2319 public FixedNumOperandTraits<ExtractElementInst, 2> {
2320};
2321
// Macro expansion (shown inline by the analyzer): out-of-line operand
// accessor definitions for ExtractElementInst, with expanded range asserts.
2322DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast <bool> (i_nocapture < OperandTraits<
ExtractElementInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2322, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ExtractElementInst>::op_begin
(const_cast<ExtractElementInst*>(this))[i_nocapture].get
()); } void ExtractElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ExtractElementInst>::operands(this)
&& "setOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2322, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ExtractElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ExtractElementInst::getNumOperands() const { return
OperandTraits<ExtractElementInst>::operands(this); } template
<int Idx_nocapture> Use &ExtractElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ExtractElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2323
2324//===----------------------------------------------------------------------===//
2325// InsertElementInst Class
2326//===----------------------------------------------------------------------===//
2327
2328/// This instruction inserts a single (scalar)
2329/// element into a VectorType value
2330///
 /// Operands: Op<0> = vector, Op<1> = new element, Op<2> = index.
2331class InsertElementInst : public Instruction {
2332 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
2333 const Twine &NameStr = "",
2334 Instruction *InsertBefore = nullptr);
2335 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
2336 BasicBlock *InsertAtEnd);
2337
2338protected:
2339 // Note: Instruction needs to be a friend here to call cloneImpl.
2340 friend class Instruction;
2341
2342 InsertElementInst *cloneImpl() const;
2343
2344public:
2345 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2346 const Twine &NameStr = "",
2347 Instruction *InsertBefore = nullptr) {
2348 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
2349 }
2350
2351 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2352 const Twine &NameStr,
2353 BasicBlock *InsertAtEnd) {
2354 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
2355 }
2356
2357 /// Return true if an insertelement instruction can be
2358 /// formed with the specified operands.
2359 static bool isValidOperands(const Value *Vec, const Value *NewElt,
2360 const Value *Idx);
2361
2362 /// Overload to return most specific vector type.
2363 ///
2364 VectorType *getType() const {
2365 return cast<VectorType>(Instruction::getType());
2366 }
2367
2368 /// Transparently provide more efficient getOperand methods.
2369 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2370
2371 // Methods for support type inquiry through isa, cast, and dyn_cast:
2372 static bool classof(const Instruction *I) {
2373 return I->getOpcode() == Instruction::InsertElement;
2374 }
2375 static bool classof(const Value *V) {
2376 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2377 }
2378};
2379
// InsertElementInst always has exactly 3 operands (vector, element, index).
2380template <>
2381struct OperandTraits<InsertElementInst> :
2382 public FixedNumOperandTraits<InsertElementInst, 3> {
2383};
2384
// Macro expansion (shown inline by the analyzer): out-of-line operand
// accessor definitions for InsertElementInst, with expanded range asserts.
2385DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2385, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertElementInst>::op_begin
(const_cast<InsertElementInst*>(this))[i_nocapture].get
()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2385, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertElementInst::getNumOperands() const { return
OperandTraits<InsertElementInst>::operands(this); } template
<int Idx_nocapture> Use &InsertElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &InsertElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2386
2387//===----------------------------------------------------------------------===//
2388// ShuffleVectorInst Class
2389//===----------------------------------------------------------------------===//
2390
2391/// This instruction constructs a fixed permutation of two
2392/// input vectors.
2393///
2394class ShuffleVectorInst : public Instruction {
2395protected:
2396 // Note: Instruction needs to be a friend here to call cloneImpl.
2397 friend class Instruction;
2398
2399 ShuffleVectorInst *cloneImpl() const;
2400
2401public:
2402 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2403 const Twine &NameStr = "",
2404 Instruction *InsertBefor = nullptr);
2405 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2406 const Twine &NameStr, BasicBlock *InsertAtEnd);
2407
2408 // allocate space for exactly three operands
2409 void *operator new(size_t s) {
2410 return User::operator new(s, 3);
2411 }
2412
2413 /// Return true if a shufflevector instruction can be
2414 /// formed with the specified operands.
2415 static bool isValidOperands(const Value *V1, const Value *V2,
2416 const Value *Mask);
2417
2418 /// Overload to return most specific vector type.
2419 ///
2420 VectorType *getType() const {
2421 return cast<VectorType>(Instruction::getType());
2422 }
2423
2424 /// Transparently provide more efficient getOperand methods.
2425 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2426
2427 Constant *getMask() const {
2428 return cast<Constant>(getOperand(2));
2429 }
2430
2431 /// Return the shuffle mask value for the specified element of the mask.
2432 /// Return -1 if the element is undef.
2433 static int getMaskValue(const Constant *Mask, unsigned Elt);
2434
2435 /// Return the shuffle mask value of this instruction for the given element
2436 /// index. Return -1 if the element is undef.
2437 int getMaskValue(unsigned Elt) const {
2438 return getMaskValue(getMask(), Elt);
2439 }
2440
2441 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2442 /// elements of the mask are returned as -1.
2443 static void getShuffleMask(const Constant *Mask,
2444 SmallVectorImpl<int> &Result);
2445
2446 /// Return the mask for this instruction as a vector of integers. Undefined
2447 /// elements of the mask are returned as -1.
2448 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2449 return getShuffleMask(getMask(), Result);
2450 }
2451
2452 SmallVector<int, 16> getShuffleMask() const {
2453 SmallVector<int, 16> Mask;
2454 getShuffleMask(Mask);
2455 return Mask;
2456 }
2457
2458 /// Return true if this shuffle returns a vector with a different number of
2459 /// elements than its source elements.
2460 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2>
2461 bool changesLength() const {
2462 unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
2463 unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
2464 return NumSourceElts != NumMaskElts;
2465 }
2466
2467 /// Return true if this shuffle mask chooses elements from exactly one source
2468 /// vector.
2469 /// Example: <7,5,undef,7>
2470 /// This assumes that vector operands are the same length as the mask.
2471 static bool isSingleSourceMask(ArrayRef<int> Mask);
2472 static bool isSingleSourceMask(const Constant *Mask) {
2473 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2473, __extension__ __PRETTY_FUNCTION__))
;
2474 SmallVector<int, 16> MaskAsInts;
2475 getShuffleMask(Mask, MaskAsInts);
2476 return isSingleSourceMask(MaskAsInts);
2477 }
2478
2479 /// Return true if this shuffle chooses elements from exactly one source
2480 /// vector without changing the length of that vector.
2481 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2482 /// TODO: Optionally allow length-changing shuffles.
2483 bool isSingleSource() const {
2484 return !changesLength() && isSingleSourceMask(getMask());
2485 }
2486
2487 /// Return true if this shuffle mask chooses elements from exactly one source
2488 /// vector without lane crossings. A shuffle using this mask is not
2489 /// necessarily a no-op because it may change the number of elements from its
2490 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2491 /// Example: <undef,undef,2,3>
2492 static bool isIdentityMask(ArrayRef<int> Mask);
2493 static bool isIdentityMask(const Constant *Mask) {
2494 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2494, __extension__ __PRETTY_FUNCTION__))
;
2495 SmallVector<int, 16> MaskAsInts;
2496 getShuffleMask(Mask, MaskAsInts);
2497 return isIdentityMask(MaskAsInts);
2498 }
2499
2500 /// Return true if this shuffle mask chooses elements from exactly one source
2501 /// vector without lane crossings and does not change the number of elements
2502 /// from its input vectors.
2503 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2504 /// TODO: Optionally allow length-changing shuffles.
2505 bool isIdentity() const {
2506 return !changesLength() && isIdentityMask(getShuffleMask());
2507 }
2508
2509 /// Return true if this shuffle mask chooses elements from its source vectors
2510 /// without lane crossings. A shuffle using this mask would be
2511 /// equivalent to a vector select with a constant condition operand.
2512 /// Example: <4,1,6,undef>
2513 /// This returns false if the mask does not choose from both input vectors.
2514 /// In that case, the shuffle is better classified as an identity shuffle.
2515 /// This assumes that vector operands are the same length as the mask
2516 /// (a length-changing shuffle can never be equivalent to a vector select).
2517 static bool isSelectMask(ArrayRef<int> Mask);
2518 static bool isSelectMask(const Constant *Mask) {
2519 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2519, __extension__ __PRETTY_FUNCTION__))
;
2520 SmallVector<int, 16> MaskAsInts;
2521 getShuffleMask(Mask, MaskAsInts);
2522 return isSelectMask(MaskAsInts);
2523 }
2524
2525 /// Return true if this shuffle chooses elements from its source vectors
2526 /// without lane crossings and all operands have the same number of elements.
2527 /// In other words, this shuffle is equivalent to a vector select with a
2528 /// constant condition operand.
2529 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2530 /// This returns false if the mask does not choose from both input vectors.
2531 /// In that case, the shuffle is better classified as an identity shuffle.
2532 /// TODO: Optionally allow length-changing shuffles.
2533 bool isSelect() const {
2534 return !changesLength() && isSelectMask(getMask());
2535 }
2536
2537 /// Return true if this shuffle mask swaps the order of elements from exactly
2538 /// one source vector.
2539 /// Example: <7,6,undef,4>
2540 /// This assumes that vector operands are the same length as the mask.
2541 static bool isReverseMask(ArrayRef<int> Mask);
2542 static bool isReverseMask(const Constant *Mask) {
2543 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2543, __extension__ __PRETTY_FUNCTION__))
;
2544 SmallVector<int, 16> MaskAsInts;
2545 getShuffleMask(Mask, MaskAsInts);
2546 return isReverseMask(MaskAsInts);
2547 }
2548
2549 /// Return true if this shuffle swaps the order of elements from exactly
2550 /// one source vector.
2551 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2552 /// TODO: Optionally allow length-changing shuffles.
2553 bool isReverse() const {
2554 return !changesLength() && isReverseMask(getMask());
2555 }
2556
2557 /// Return true if this shuffle mask chooses all elements with the same value
2558 /// as the first element of exactly one source vector.
2559 /// Example: <4,undef,undef,4>
2560 /// This assumes that vector operands are the same length as the mask.
2561 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2562 static bool isZeroEltSplatMask(const Constant *Mask) {
2563 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2563, __extension__ __PRETTY_FUNCTION__))
;
2564 SmallVector<int, 16> MaskAsInts;
2565 getShuffleMask(Mask, MaskAsInts);
2566 return isZeroEltSplatMask(MaskAsInts);
2567 }
2568
2569 /// Return true if all elements of this shuffle are the same value as the
2570 /// first element of exactly one source vector without changing the length
2571 /// of that vector.
2572 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2573 /// TODO: Optionally allow length-changing shuffles.
2574 /// TODO: Optionally allow splats from other elements.
2575 bool isZeroEltSplat() const {
2576 return !changesLength() && isZeroEltSplatMask(getMask());
2577 }
2578
2579 /// Return true if this shuffle mask is a transpose mask.
2580 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2581 /// even- or odd-numbered vector elements from two n-dimensional source
2582 /// vectors and write each result into consecutive elements of an
2583 /// n-dimensional destination vector. Two shuffles are necessary to complete
2584 /// the transpose, one for the even elements and another for the odd elements.
2585 /// This description closely follows how the TRN1 and TRN2 AArch64
2586 /// instructions operate.
2587 ///
2588 /// For example, a simple 2x2 matrix can be transposed with:
2589 ///
2590 /// ; Original matrix
2591 /// m0 = < a, b >
2592 /// m1 = < c, d >
2593 ///
2594 /// ; Transposed matrix
2595 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2596 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2597 ///
2598 /// For matrices having greater than n columns, the resulting nx2 transposed
2599 /// matrix is stored in two result vectors such that one vector contains
2600 /// interleaved elements from all the even-numbered rows and the other vector
2601 /// contains interleaved elements from all the odd-numbered rows. For example,
2602 /// a 2x4 matrix can be transposed with:
2603 ///
2604 /// ; Original matrix
2605 /// m0 = < a, b, c, d >
2606 /// m1 = < e, f, g, h >
2607 ///
2608 /// ; Transposed matrix
2609 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2610 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2611 static bool isTransposeMask(ArrayRef<int> Mask);
2612 static bool isTransposeMask(const Constant *Mask) {
2613 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2613, __extension__ __PRETTY_FUNCTION__))
;
2614 SmallVector<int, 16> MaskAsInts;
2615 getShuffleMask(Mask, MaskAsInts);
2616 return isTransposeMask(MaskAsInts);
2617 }
2618
2619 /// Return true if this shuffle transposes the elements of its inputs without
2620 /// changing the length of the vectors. This operation may also be known as a
2621 /// merge or interleave. See the description for isTransposeMask() for the
2622 /// exact specification.
2623 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2624 bool isTranspose() const {
2625 return !changesLength() && isTransposeMask(getMask());
2626 }
2627
2628 /// Change values in a shuffle permute mask assuming the two vector operands
2629 /// of length InVecNumElts have swapped position.
2630 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2631 unsigned InVecNumElts) {
2632 for (int &Idx : Mask) {
2633 if (Idx == -1)
2634 continue;
2635 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2636 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2637, __extension__ __PRETTY_FUNCTION__))
2637 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2637, __extension__ __PRETTY_FUNCTION__))
;
2638 }
2639 }
2640
2641 // Methods for support type inquiry through isa, cast, and dyn_cast:
2642 static bool classof(const Instruction *I) {
2643 return I->getOpcode() == Instruction::ShuffleVector;
2644 }
2645 static bool classof(const Value *V) {
2646 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2647 }
2648};
2649
2650template <>
2651struct OperandTraits<ShuffleVectorInst> :
2652 public FixedNumOperandTraits<ShuffleVectorInst, 3> {
2653};
2654
2655DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2655, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ShuffleVectorInst>::op_begin
(const_cast<ShuffleVectorInst*>(this))[i_nocapture].get
()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h"
, 2655, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ShuffleVectorInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ShuffleVectorInst::getNumOperands() const { return
OperandTraits<ShuffleVectorInst>::operands(this); } template
<int Idx_nocapture> Use &ShuffleVectorInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ShuffleVectorInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2656
2657//===----------------------------------------------------------------------===//
2658// ExtractValueInst Class
2659//===----------------------------------------------------------------------===//
2660
2661/// This instruction extracts a struct member or array
2662/// element value from an aggregate value.
2663///
2664class ExtractValueInst : public UnaryInstruction {
2665 SmallVector<unsigned, 4> Indices;
2666
2667 ExtractValueInst(const ExtractValueInst &EVI);
2668
2669 /// Constructors - Create a extractvalue instruction with a base aggregate
2670 /// value and a list of indices. The first ctor can optionally insert before
2671 /// an existing instruction, the second appends the new instruction to the
2672 /// specified BasicBlock.
2673 inline ExtractValueInst(Value *Agg,
2674 ArrayRef<unsigned> Idxs,
2675 const Twine &NameStr,
2676 Instruction *InsertBefore);
2677 inline ExtractValueInst(Value *Agg,
2678 ArrayRef<unsigned> Idxs,
2679 const Twine &NameStr, BasicBlock *InsertAtEnd);
2680
2681 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2682
2683protected:
2684 // Note: Instruction needs to be a friend here to call cloneImpl.
2685 friend class Instruction;
2686
2687 ExtractValueInst *cloneImpl() const;
2688
2689public:
2690 static ExtractValueInst *Create(Value *Agg,
2691 ArrayRef<unsigned> Idxs,
2692 const Twine &NameStr = "",
2693 Instruction *InsertBefore = nullptr) {
2694 return new
2695 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2696 }
2697
2698 static ExtractValueInst *Create(Value *Agg,
2699 ArrayRef<unsigned> Idxs,
2700 const Twine &NameStr,
2701 BasicBlock *InsertAtEnd) {
2702 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2703 }
2704
2705 /// Returns the type of the element that would be extracted
2706 /// with an extractvalue instruction with the specified parameters.
2707 ///
2708 /// Null is returned if the indices are invalid for the specified type.
2709 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2710
2711 using idx_iterator = const unsigned*;
2712
2713 inline idx_iterator idx_begin() const { return Indices.begin(); }
2714 inline idx_iterator idx_end() const { return Indices.end(); }
2715 inline iterator_range<idx_iterator> indices() const {
2716 return make_range(idx_begin(), idx_end());
2717 }
2718
2719 Value *getAggregateOperand() {
2720 return getOperand(0);
2721 }
2722 const Value *getAggregateOperand() const {
2723 return getOperand(0);
2724 }
2725 static unsigned getAggregateOperandIndex() {
2726 return 0U; // get index for modifying correct operand
2727 }
2728
2729 ArrayRef<unsigned> getIndices() const {
2730 return Indices;
2731 }
2732
2733 unsigned getNumIndices() const {
2734 return (unsigned)Indices.size();
2735 }
2736
2737 bool hasIndices() const {
2738 return true;
2739 }
2740
2741 // Methods for support type inquiry through isa, cast, and dyn_cast:
2742 static bool classof(const Instruction *I) {
2743 return I->getOpcode() == Instruction::ExtractValue;
2744 }
2745 static bool classof(const Value *V) {
2746 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2747 }
2748};
2749
2750ExtractValueInst::ExtractValueInst(Value *Agg,
2751 ArrayRef<unsigned> Idxs,
2752 const Twine &NameStr,
2753 Instruction *InsertBefore)
2754 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2755 ExtractValue, Agg, InsertBefore) {
2756 init(Idxs, NameStr);
2757}
2758
2759ExtractValueInst::ExtractValueInst(Value *Agg,
2760 ArrayRef<unsigned> Idxs,
2761 const Twine &NameStr,
2762 BasicBlock *InsertAtEnd)
2763 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2764 ExtractValue, Agg, InsertAtEnd) {
2765 init(Idxs, NameStr);
2766}
2767
2768//===----------------------------------------------------------------------===//
2769// InsertValueInst Class
2770//===----------------------------------------------------------------------===//
2771
2772/// This instruction inserts a struct field of array element
2773/// value into an aggregate value.
2774///
2775class InsertValueInst : public Instruction {
2776 SmallVector<unsigned, 4> Indices;
2777
2778 InsertValueInst(const InsertValueInst &IVI);
2779
2780 /// Constructors - Create a insertvalue instruction with a base aggregate
2781 /// value, a value to insert, and a list of indices. The first ctor can
2782 /// optionally insert before an existing instruction, the second appends
2783 /// the new instruction to the specified BasicBlock.
2784 inline InsertValueInst(Value *Agg, Value *Val,
2785 ArrayRef<unsigned> Idxs,
2786 const Twine &NameStr,
2787 Instruction *InsertBefore);
2788 inline InsertValueInst(Value *Agg, Value *Val,
2789 ArrayRef<unsigned> Idxs,
2790 const Twine &NameStr, BasicBlock *InsertAtEnd);
2791
2792 /// Constructors - These two constructors are convenience methods because one
2793 /// and two index insertvalue instructions are so common.
2794 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2795 const Twine &NameStr = "",
2796 Instruction *InsertBefore = nullptr);
2797 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2798 BasicBlock *InsertAtEnd);
2799
2800 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2801 const Twine &NameStr);
2802
2803protected:
2804 // Note: Instruction needs to be a friend here to call cloneImpl.
2805 friend class Instruction;
2806
2807 InsertValueInst *cloneImpl() const;
2808
2809public:
2810 // allocate space for exactly two operands
2811 void *operator new(size_t s) {
2812 return User::operator new(s, 2);
2813 }
2814
2815 static InsertValueInst *Create(Value *Agg, Value *Val,
2816 ArrayRef<unsigned> Idxs,
2817 const Twine &NameStr = "",
2818 Instruction *InsertBefore = nullptr) {
2819 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2820 }
2821
2822 static InsertValueInst *Create(Value *Agg, Value *Val,
2823 ArrayRef<unsigned> Idxs,
2824 const Twine &NameStr,
2825 BasicBlock *InsertAtEnd) {
2826 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2827 }
2828
2829 /// Transparently provide more efficient getOperand methods.
2830 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2831
2832 using idx_iterator = const unsigned*;
2833
2834 inline idx_iterator idx_begin() const { return Indices.begin(); }
2835 inline idx_iterator idx_end() const { return Indices.end(); }
2836 inline iterator_range<idx_iterator> indices() const {
2837 return make_range(idx_begin(), idx_end());
2838 }
2839
2840 Value *getAggregateOperand() {
2841 return getOperand(0);
2842 }
2843 const Value *getAggregateOperand() const {
2844 return getOperand(0);
2845 }
2846 static unsigned getAggregateOperandIndex() {
2847 return 0U; // get index for modifying correct operand
2848 }
2849
2850 Value *getInsertedValueOperand() {
2851 return getOperand(1);
2852 }
2853 const Value *getInsertedValueOperand() const {
2854 return getOperand(1);
2855 }
2856 static unsigned getInsertedValueOperandIndex() {
2857 return 1U; // get index for modifying correct operand
2858 }
2859
2860 ArrayRef<unsigned> getIndices() const {
2861 return Indices;
2862 }
2863
2864 unsigned getNumIndices() const {
2865 return (unsigned)Indices.size();
2866 }
2867
2868 bool hasIndices() const {
2869 return true;
2870 }
2871
2872 // Methods for support type inquiry through isa, cast, and dyn_cast:
2873 static bool classof(const Instruction *I) {
2874 return I->getOpcode() == Instruction::InsertValue;
2875 }
2876 static bool classof(const Value *V) {
2877 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2878 }
2879};
2880
2881template <>
2882struct OperandTraits<InsertValueInst> :
2883 public FixedNumOperandTraits<InsertValueInst, 2> {
2884};
2885
2886InsertValueInst::InsertValueInst(Value *Agg,
2887 Value *Val,
2888 ArrayRef<unsigned> Idxs,
2889 const Twine &NameStr,
2890 Instruction *InsertBefore)
2891 : Instruction(Agg->getType(), InsertValue,
2892 OperandTraits<InsertValueInst>::op_begin(this),
2893 2, InsertBefore) {
2894 init(Agg, Val, Idxs, NameStr);
2895}
2896
2897InsertValueInst::InsertValueInst(Value *Agg,
2898 Value *Val,
2899 ArrayRef<unsigned> Idxs,
2900 const Twine &NameStr,
2901 BasicBlock *InsertAtEnd)
2902 : Instruction(Agg->getType(), InsertValue,
2903 OperandTraits<InsertValueInst>::op_begin(this),
2904 2, InsertAtEnd) {
2905 init(Agg, Val, Idxs, NameStr);
2906}