Bug Summary

File: lib/Transforms/Utils/Local.cpp
Warning: line 142, column 7
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name Local.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu 
-internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Transforms/Utils -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp -faddrsig

/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp

1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This family of functions perform various local transformations to the
11// program.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/Utils/Local.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/DenseSet.h"
20#include "llvm/ADT/Hashing.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/Optional.h"
23#include "llvm/ADT/STLExtras.h"
24#include "llvm/ADT/SetVector.h"
25#include "llvm/ADT/SmallPtrSet.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/ADT/Statistic.h"
28#include "llvm/ADT/TinyPtrVector.h"
29#include "llvm/Analysis/ConstantFolding.h"
30#include "llvm/Analysis/EHPersonalities.h"
31#include "llvm/Analysis/InstructionSimplify.h"
32#include "llvm/Analysis/LazyValueInfo.h"
33#include "llvm/Analysis/MemoryBuiltins.h"
34#include "llvm/Analysis/MemorySSAUpdater.h"
35#include "llvm/Analysis/TargetLibraryInfo.h"
36#include "llvm/Analysis/ValueTracking.h"
37#include "llvm/BinaryFormat/Dwarf.h"
38#include "llvm/IR/Argument.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/BasicBlock.h"
41#include "llvm/IR/CFG.h"
42#include "llvm/IR/CallSite.h"
43#include "llvm/IR/Constant.h"
44#include "llvm/IR/ConstantRange.h"
45#include "llvm/IR/Constants.h"
46#include "llvm/IR/DIBuilder.h"
47#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/DebugInfoMetadata.h"
49#include "llvm/IR/DebugLoc.h"
50#include "llvm/IR/DerivedTypes.h"
51#include "llvm/IR/DomTreeUpdater.h"
52#include "llvm/IR/Dominators.h"
53#include "llvm/IR/Function.h"
54#include "llvm/IR/GetElementPtrTypeIterator.h"
55#include "llvm/IR/GlobalObject.h"
56#include "llvm/IR/IRBuilder.h"
57#include "llvm/IR/InstrTypes.h"
58#include "llvm/IR/Instruction.h"
59#include "llvm/IR/Instructions.h"
60#include "llvm/IR/IntrinsicInst.h"
61#include "llvm/IR/Intrinsics.h"
62#include "llvm/IR/LLVMContext.h"
63#include "llvm/IR/MDBuilder.h"
64#include "llvm/IR/Metadata.h"
65#include "llvm/IR/Module.h"
66#include "llvm/IR/Operator.h"
67#include "llvm/IR/PatternMatch.h"
68#include "llvm/IR/Type.h"
69#include "llvm/IR/Use.h"
70#include "llvm/IR/User.h"
71#include "llvm/IR/Value.h"
72#include "llvm/IR/ValueHandle.h"
73#include "llvm/Support/Casting.h"
74#include "llvm/Support/Debug.h"
75#include "llvm/Support/ErrorHandling.h"
76#include "llvm/Support/KnownBits.h"
77#include "llvm/Support/raw_ostream.h"
78#include "llvm/Transforms/Utils/ValueMapper.h"
79#include <algorithm>
80#include <cassert>
81#include <climits>
82#include <cstdint>
83#include <iterator>
84#include <map>
85#include <utility>
86
87using namespace llvm;
88using namespace llvm::PatternMatch;
89
90#define DEBUG_TYPE"local" "local"
91
92STATISTIC(NumRemoved, "Number of unreachable basic blocks removed")static llvm::Statistic NumRemoved = {"local", "NumRemoved", "Number of unreachable basic blocks removed"
, {0}, {false}}
;
93
94//===----------------------------------------------------------------------===//
95// Local constant propagation.
96//
97
98/// ConstantFoldTerminator - If a terminator instruction is predicated on a
99/// constant value, convert it into an unconditional branch to the constant
100/// destination. This is a nontrivial operation because the successors of this
101/// basic block must have their PHI nodes updated.
102/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
103/// conditions and indirectbr addresses this might make dead if
104/// DeleteDeadConditions is true.
105bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
106 const TargetLibraryInfo *TLI,
107 DomTreeUpdater *DTU) {
108 Instruction *T = BB->getTerminator();
109 IRBuilder<> Builder(T);
110
111 // Branch - See if we are conditional jumping on constant
112 if (auto *BI = dyn_cast<BranchInst>(T)) {
5
Taking true branch
113 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
6
Taking false branch
114 BasicBlock *Dest1 = BI->getSuccessor(0);
7
Calling 'BranchInst::getSuccessor'
17
Returning from 'BranchInst::getSuccessor'
18
'Dest1' initialized here
115 BasicBlock *Dest2 = BI->getSuccessor(1);
116
117 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
19
Taking false branch
118 // Are we branching on constant?
119 // YES. Change to unconditional branch...
120 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
121 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
122
123 // Let the basic block know that we are letting go of it. Based on this,
124 // it will adjust it's PHI nodes.
125 OldDest->removePredecessor(BB);
126
127 // Replace the conditional branch with an unconditional one.
128 Builder.CreateBr(Destination);
129 BI->eraseFromParent();
130 if (DTU)
131 DTU->deleteEdgeRelaxed(BB, OldDest);
132 return true;
133 }
134
135 if (Dest2 == Dest1) { // Conditional branch to same location?
20
Assuming 'Dest2' is equal to 'Dest1'
21
Assuming pointer value is null
22
Taking true branch
136 // This branch matches something like this:
137 // br bool %cond, label %Dest, label %Dest
138 // and changes it into: br label %Dest
139
140 // Let the basic block know that we are letting go of one copy of it.
141 assert(BI->getParent() && "Terminator not inserted in block!")((BI->getParent() && "Terminator not inserted in block!"
) ? static_cast<void> (0) : __assert_fail ("BI->getParent() && \"Terminator not inserted in block!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 141, __PRETTY_FUNCTION__))
;
142 Dest1->removePredecessor(BI->getParent());
23
Called C++ object pointer is null
143
144 // Replace the conditional branch with an unconditional one.
145 Builder.CreateBr(Dest1);
146 Value *Cond = BI->getCondition();
147 BI->eraseFromParent();
148 if (DeleteDeadConditions)
149 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
150 return true;
151 }
152 return false;
153 }
154
155 if (auto *SI = dyn_cast<SwitchInst>(T)) {
156 // If we are switching on a constant, we can convert the switch to an
157 // unconditional branch.
158 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
159 BasicBlock *DefaultDest = SI->getDefaultDest();
160 BasicBlock *TheOnlyDest = DefaultDest;
161
162 // If the default is unreachable, ignore it when searching for TheOnlyDest.
163 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
164 SI->getNumCases() > 0) {
165 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
166 }
167
168 // Figure out which case it goes to.
169 for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
170 // Found case matching a constant operand?
171 if (i->getCaseValue() == CI) {
172 TheOnlyDest = i->getCaseSuccessor();
173 break;
174 }
175
176 // Check to see if this branch is going to the same place as the default
177 // dest. If so, eliminate it as an explicit compare.
178 if (i->getCaseSuccessor() == DefaultDest) {
179 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
180 unsigned NCases = SI->getNumCases();
181 // Fold the case metadata into the default if there will be any branches
182 // left, unless the metadata doesn't match the switch.
183 if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
184 // Collect branch weights into a vector.
185 SmallVector<uint32_t, 8> Weights;
186 for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
187 ++MD_i) {
188 auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
189 Weights.push_back(CI->getValue().getZExtValue());
190 }
191 // Merge weight of this case to the default weight.
192 unsigned idx = i->getCaseIndex();
193 Weights[0] += Weights[idx+1];
194 // Remove weight for this case.
195 std::swap(Weights[idx+1], Weights.back());
196 Weights.pop_back();
197 SI->setMetadata(LLVMContext::MD_prof,
198 MDBuilder(BB->getContext()).
199 createBranchWeights(Weights));
200 }
201 // Remove this entry.
202 BasicBlock *ParentBB = SI->getParent();
203 DefaultDest->removePredecessor(ParentBB);
204 i = SI->removeCase(i);
205 e = SI->case_end();
206 if (DTU)
207 DTU->deleteEdgeRelaxed(ParentBB, DefaultDest);
208 continue;
209 }
210
211 // Otherwise, check to see if the switch only branches to one destination.
212 // We do this by reseting "TheOnlyDest" to null when we find two non-equal
213 // destinations.
214 if (i->getCaseSuccessor() != TheOnlyDest)
215 TheOnlyDest = nullptr;
216
217 // Increment this iterator as we haven't removed the case.
218 ++i;
219 }
220
221 if (CI && !TheOnlyDest) {
222 // Branching on a constant, but not any of the cases, go to the default
223 // successor.
224 TheOnlyDest = SI->getDefaultDest();
225 }
226
227 // If we found a single destination that we can fold the switch into, do so
228 // now.
229 if (TheOnlyDest) {
230 // Insert the new branch.
231 Builder.CreateBr(TheOnlyDest);
232 BasicBlock *BB = SI->getParent();
233 std::vector <DominatorTree::UpdateType> Updates;
234 if (DTU)
235 Updates.reserve(SI->getNumSuccessors() - 1);
236
237 // Remove entries from PHI nodes which we no longer branch to...
238 for (BasicBlock *Succ : successors(SI)) {
239 // Found case matching a constant operand?
240 if (Succ == TheOnlyDest) {
241 TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
242 } else {
243 Succ->removePredecessor(BB);
244 if (DTU)
245 Updates.push_back({DominatorTree::Delete, BB, Succ});
246 }
247 }
248
249 // Delete the old switch.
250 Value *Cond = SI->getCondition();
251 SI->eraseFromParent();
252 if (DeleteDeadConditions)
253 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
254 if (DTU)
255 DTU->applyUpdates(Updates, /*ForceRemoveDuplicates*/ true);
256 return true;
257 }
258
259 if (SI->getNumCases() == 1) {
260 // Otherwise, we can fold this switch into a conditional branch
261 // instruction if it has only one non-default destination.
262 auto FirstCase = *SI->case_begin();
263 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
264 FirstCase.getCaseValue(), "cond");
265
266 // Insert the new branch.
267 BranchInst *NewBr = Builder.CreateCondBr(Cond,
268 FirstCase.getCaseSuccessor(),
269 SI->getDefaultDest());
270 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
271 if (MD && MD->getNumOperands() == 3) {
272 ConstantInt *SICase =
273 mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
274 ConstantInt *SIDef =
275 mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
276 assert(SICase && SIDef)((SICase && SIDef) ? static_cast<void> (0) : __assert_fail
("SICase && SIDef", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 276, __PRETTY_FUNCTION__))
;
277 // The TrueWeight should be the weight for the single case of SI.
278 NewBr->setMetadata(LLVMContext::MD_prof,
279 MDBuilder(BB->getContext()).
280 createBranchWeights(SICase->getValue().getZExtValue(),
281 SIDef->getValue().getZExtValue()));
282 }
283
284 // Update make.implicit metadata to the newly-created conditional branch.
285 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
286 if (MakeImplicitMD)
287 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
288
289 // Delete the old switch.
290 SI->eraseFromParent();
291 return true;
292 }
293 return false;
294 }
295
296 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
297 // indirectbr blockaddress(@F, @BB) -> br label @BB
298 if (auto *BA =
299 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
300 BasicBlock *TheOnlyDest = BA->getBasicBlock();
301 std::vector <DominatorTree::UpdateType> Updates;
302 if (DTU)
303 Updates.reserve(IBI->getNumDestinations() - 1);
304
305 // Insert the new branch.
306 Builder.CreateBr(TheOnlyDest);
307
308 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
309 if (IBI->getDestination(i) == TheOnlyDest) {
310 TheOnlyDest = nullptr;
311 } else {
312 BasicBlock *ParentBB = IBI->getParent();
313 BasicBlock *DestBB = IBI->getDestination(i);
314 DestBB->removePredecessor(ParentBB);
315 if (DTU)
316 Updates.push_back({DominatorTree::Delete, ParentBB, DestBB});
317 }
318 }
319 Value *Address = IBI->getAddress();
320 IBI->eraseFromParent();
321 if (DeleteDeadConditions)
322 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
323
324 // If we didn't find our destination in the IBI successor list, then we
325 // have undefined behavior. Replace the unconditional branch with an
326 // 'unreachable' instruction.
327 if (TheOnlyDest) {
328 BB->getTerminator()->eraseFromParent();
329 new UnreachableInst(BB->getContext(), BB);
330 }
331
332 if (DTU)
333 DTU->applyUpdates(Updates, /*ForceRemoveDuplicates*/ true);
334 return true;
335 }
336 }
337
338 return false;
339}
340
341//===----------------------------------------------------------------------===//
342// Local dead code elimination.
343//
344
345/// isInstructionTriviallyDead - Return true if the result produced by the
346/// instruction is not used, and the instruction has no side effects.
347///
348bool llvm::isInstructionTriviallyDead(Instruction *I,
349 const TargetLibraryInfo *TLI) {
350 if (!I->use_empty())
351 return false;
352 return wouldInstructionBeTriviallyDead(I, TLI);
353}
354
355bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
356 const TargetLibraryInfo *TLI) {
357 if (I->isTerminator())
358 return false;
359
360 // We don't want the landingpad-like instructions removed by anything this
361 // general.
362 if (I->isEHPad())
363 return false;
364
365 // We don't want debug info removed by anything this general, unless
366 // debug info is empty.
367 if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
368 if (DDI->getAddress())
369 return false;
370 return true;
371 }
372 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
373 if (DVI->getValue())
374 return false;
375 return true;
376 }
377 if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
378 if (DLI->getLabel())
379 return false;
380 return true;
381 }
382
383 if (!I->mayHaveSideEffects())
384 return true;
385
386 // Special case intrinsics that "may have side effects" but can be deleted
387 // when dead.
388 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
389 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
390 if (II->getIntrinsicID() == Intrinsic::stacksave ||
391 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
392 return true;
393
394 // Lifetime intrinsics are dead when their right-hand is undef.
395 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
396 II->getIntrinsicID() == Intrinsic::lifetime_end)
397 return isa<UndefValue>(II->getArgOperand(1));
398
399 // Assumptions are dead if their condition is trivially true. Guards on
400 // true are operationally no-ops. In the future we can consider more
401 // sophisticated tradeoffs for guards considering potential for check
402 // widening, but for now we keep things simple.
403 if (II->getIntrinsicID() == Intrinsic::assume ||
404 II->getIntrinsicID() == Intrinsic::experimental_guard) {
405 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
406 return !Cond->isZero();
407
408 return false;
409 }
410 }
411
412 if (isAllocLikeFn(I, TLI))
413 return true;
414
415 if (CallInst *CI = isFreeCall(I, TLI))
416 if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
417 return C->isNullValue() || isa<UndefValue>(C);
418
419 if (CallSite CS = CallSite(I))
420 if (isMathLibCallNoop(CS, TLI))
421 return true;
422
423 return false;
424}
425
426/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
427/// trivially dead instruction, delete it. If that makes any of its operands
428/// trivially dead, delete them too, recursively. Return true if any
429/// instructions were deleted.
430bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
431 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU) {
432 Instruction *I = dyn_cast<Instruction>(V);
433 if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
434 return false;
435
436 SmallVector<Instruction*, 16> DeadInsts;
437 DeadInsts.push_back(I);
438 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU);
439
440 return true;
441}
442
443void llvm::RecursivelyDeleteTriviallyDeadInstructions(
444 SmallVectorImpl<Instruction *> &DeadInsts, const TargetLibraryInfo *TLI,
445 MemorySSAUpdater *MSSAU) {
446 // Process the dead instruction list until empty.
447 while (!DeadInsts.empty()) {
448 Instruction &I = *DeadInsts.pop_back_val();
449 assert(I.use_empty() && "Instructions with uses are not dead.")((I.use_empty() && "Instructions with uses are not dead."
) ? static_cast<void> (0) : __assert_fail ("I.use_empty() && \"Instructions with uses are not dead.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 449, __PRETTY_FUNCTION__))
;
450 assert(isInstructionTriviallyDead(&I, TLI) &&((isInstructionTriviallyDead(&I, TLI) && "Live instruction found in dead worklist!"
) ? static_cast<void> (0) : __assert_fail ("isInstructionTriviallyDead(&I, TLI) && \"Live instruction found in dead worklist!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 451, __PRETTY_FUNCTION__))
451 "Live instruction found in dead worklist!")((isInstructionTriviallyDead(&I, TLI) && "Live instruction found in dead worklist!"
) ? static_cast<void> (0) : __assert_fail ("isInstructionTriviallyDead(&I, TLI) && \"Live instruction found in dead worklist!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 451, __PRETTY_FUNCTION__))
;
452
453 // Don't lose the debug info while deleting the instructions.
454 salvageDebugInfo(I);
455
456 // Null out all of the instruction's operands to see if any operand becomes
457 // dead as we go.
458 for (Use &OpU : I.operands()) {
459 Value *OpV = OpU.get();
460 OpU.set(nullptr);
461
462 if (!OpV->use_empty())
463 continue;
464
465 // If the operand is an instruction that became dead as we nulled out the
466 // operand, and if it is 'trivially' dead, delete it in a future loop
467 // iteration.
468 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
469 if (isInstructionTriviallyDead(OpI, TLI))
470 DeadInsts.push_back(OpI);
471 }
472 if (MSSAU)
473 MSSAU->removeMemoryAccess(&I);
474
475 I.eraseFromParent();
476 }
477}
478
479/// areAllUsesEqual - Check whether the uses of a value are all the same.
480/// This is similar to Instruction::hasOneUse() except this will also return
481/// true when there are no uses or multiple uses that all refer to the same
482/// value.
483static bool areAllUsesEqual(Instruction *I) {
484 Value::user_iterator UI = I->user_begin();
485 Value::user_iterator UE = I->user_end();
486 if (UI == UE)
487 return true;
488
489 User *TheUse = *UI;
490 for (++UI; UI != UE; ++UI) {
491 if (*UI != TheUse)
492 return false;
493 }
494 return true;
495}
496
497/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
498/// dead PHI node, due to being a def-use chain of single-use nodes that
499/// either forms a cycle or is terminated by a trivially dead instruction,
500/// delete it. If that makes any of its operands trivially dead, delete them
501/// too, recursively. Return true if a change was made.
502bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
503 const TargetLibraryInfo *TLI) {
504 SmallPtrSet<Instruction*, 4> Visited;
505 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
506 I = cast<Instruction>(*I->user_begin())) {
507 if (I->use_empty())
508 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
509
510 // If we find an instruction more than once, we're on a cycle that
511 // won't prove fruitful.
512 if (!Visited.insert(I).second) {
513 // Break the cycle and delete the instruction and its operands.
514 I->replaceAllUsesWith(UndefValue::get(I->getType()));
515 (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
516 return true;
517 }
518 }
519 return false;
520}
521
522static bool
523simplifyAndDCEInstruction(Instruction *I,
524 SmallSetVector<Instruction *, 16> &WorkList,
525 const DataLayout &DL,
526 const TargetLibraryInfo *TLI) {
527 if (isInstructionTriviallyDead(I, TLI)) {
528 salvageDebugInfo(*I);
529
530 // Null out all of the instruction's operands to see if any operand becomes
531 // dead as we go.
532 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
533 Value *OpV = I->getOperand(i);
534 I->setOperand(i, nullptr);
535
536 if (!OpV->use_empty() || I == OpV)
537 continue;
538
539 // If the operand is an instruction that became dead as we nulled out the
540 // operand, and if it is 'trivially' dead, delete it in a future loop
541 // iteration.
542 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
543 if (isInstructionTriviallyDead(OpI, TLI))
544 WorkList.insert(OpI);
545 }
546
547 I->eraseFromParent();
548
549 return true;
550 }
551
552 if (Value *SimpleV = SimplifyInstruction(I, DL)) {
553 // Add the users to the worklist. CAREFUL: an instruction can use itself,
554 // in the case of a phi node.
555 for (User *U : I->users()) {
556 if (U != I) {
557 WorkList.insert(cast<Instruction>(U));
558 }
559 }
560
561 // Replace the instruction with its simplified value.
562 bool Changed = false;
563 if (!I->use_empty()) {
564 I->replaceAllUsesWith(SimpleV);
565 Changed = true;
566 }
567 if (isInstructionTriviallyDead(I, TLI)) {
568 I->eraseFromParent();
569 Changed = true;
570 }
571 return Changed;
572 }
573 return false;
574}
575
576/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
577/// simplify any instructions in it and recursively delete dead instructions.
578///
579/// This returns true if it changed the code, note that it can delete
580/// instructions in other blocks as well in this block.
581bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
582 const TargetLibraryInfo *TLI) {
583 bool MadeChange = false;
584 const DataLayout &DL = BB->getModule()->getDataLayout();
585
586#ifndef NDEBUG
587 // In debug builds, ensure that the terminator of the block is never replaced
588 // or deleted by these simplifications. The idea of simplification is that it
589 // cannot introduce new instructions, and there is no way to replace the
590 // terminator of a block without introducing a new instruction.
591 AssertingVH<Instruction> TerminatorVH(&BB->back());
592#endif
593
594 SmallSetVector<Instruction *, 16> WorkList;
595 // Iterate over the original function, only adding insts to the worklist
596 // if they actually need to be revisited. This avoids having to pre-init
597 // the worklist with the entire function's worth of instructions.
598 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
599 BI != E;) {
600 assert(!BI->isTerminator())((!BI->isTerminator()) ? static_cast<void> (0) : __assert_fail
("!BI->isTerminator()", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 600, __PRETTY_FUNCTION__))
;
601 Instruction *I = &*BI;
602 ++BI;
603
604 // We're visiting this instruction now, so make sure it's not in the
605 // worklist from an earlier visit.
606 if (!WorkList.count(I))
607 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
608 }
609
610 while (!WorkList.empty()) {
611 Instruction *I = WorkList.pop_back_val();
612 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
613 }
614 return MadeChange;
615}
616
617//===----------------------------------------------------------------------===//
618// Control Flow Graph Restructuring.
619//
620
621/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
622/// method is called when we're about to delete Pred as a predecessor of BB. If
623/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
624///
625/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
626/// nodes that collapse into identity values. For example, if we have:
627/// x = phi(1, 0, 0, 0)
628/// y = and x, z
629///
630/// .. and delete the predecessor corresponding to the '1', this will attempt to
631/// recursively fold the and to 0.
632void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
633 DomTreeUpdater *DTU) {
634 // This only adjusts blocks with PHI nodes.
635 if (!isa<PHINode>(BB->begin()))
636 return;
637
638 // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
639 // them down. This will leave us with single entry phi nodes and other phis
640 // that can be removed.
641 BB->removePredecessor(Pred, true);
642
643 WeakTrackingVH PhiIt = &BB->front();
644 while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
645 PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
646 Value *OldPhiIt = PhiIt;
647
648 if (!recursivelySimplifyInstruction(PN))
649 continue;
650
651 // If recursive simplification ended up deleting the next PHI node we would
652 // iterate to, then our iterator is invalid, restart scanning from the top
653 // of the block.
654 if (PhiIt != OldPhiIt) PhiIt = &BB->front();
655 }
656 if (DTU)
657 DTU->deleteEdgeRelaxed(Pred, BB);
658}
659
660/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
661/// predecessor is known to have one successor (DestBB!). Eliminate the edge
662/// between them, moving the instructions in the predecessor into DestBB and
663/// deleting the predecessor block.
664void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
665 DomTreeUpdater *DTU) {
666
667 // If BB has single-entry PHI nodes, fold them.
668 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
669 Value *NewVal = PN->getIncomingValue(0);
670 // Replace self referencing PHI with undef, it must be dead.
671 if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
672 PN->replaceAllUsesWith(NewVal);
673 PN->eraseFromParent();
674 }
675
676 BasicBlock *PredBB = DestBB->getSinglePredecessor();
677 assert(PredBB && "Block doesn't have a single predecessor!")((PredBB && "Block doesn't have a single predecessor!"
) ? static_cast<void> (0) : __assert_fail ("PredBB && \"Block doesn't have a single predecessor!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 677, __PRETTY_FUNCTION__))
;
678
679 bool ReplaceEntryBB = false;
680 if (PredBB == &DestBB->getParent()->getEntryBlock())
681 ReplaceEntryBB = true;
682
683 // DTU updates: Collect all the edges that enter
684 // PredBB. These dominator edges will be redirected to DestBB.
685 std::vector <DominatorTree::UpdateType> Updates;
686
687 if (DTU) {
688 Updates.reserve(1 + (2 * pred_size(PredBB)));
689 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
690 for (auto I = pred_begin(PredBB), E = pred_end(PredBB); I != E; ++I) {
691 Updates.push_back({DominatorTree::Delete, *I, PredBB});
692 // This predecessor of PredBB may already have DestBB as a successor.
693 if (llvm::find(successors(*I), DestBB) == succ_end(*I))
694 Updates.push_back({DominatorTree::Insert, *I, DestBB});
695 }
696 }
697
698 // Zap anything that took the address of DestBB. Not doing this will give the
699 // address an invalid value.
700 if (DestBB->hasAddressTaken()) {
701 BlockAddress *BA = BlockAddress::get(DestBB);
702 Constant *Replacement =
703 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
704 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
705 BA->getType()));
706 BA->destroyConstant();
707 }
708
709 // Anything that branched to PredBB now branches to DestBB.
710 PredBB->replaceAllUsesWith(DestBB);
711
712 // Splice all the instructions from PredBB to DestBB.
713 PredBB->getTerminator()->eraseFromParent();
714 DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
715 new UnreachableInst(PredBB->getContext(), PredBB);
716
717 // If the PredBB is the entry block of the function, move DestBB up to
718 // become the entry block after we erase PredBB.
719 if (ReplaceEntryBB)
720 DestBB->moveAfter(PredBB);
721
722 if (DTU) {
723 assert(PredBB->getInstList().size() == 1 &&((PredBB->getInstList().size() == 1 && isa<UnreachableInst
>(PredBB->getTerminator()) && "The successor list of PredBB isn't empty before "
"applying corresponding DTU updates.") ? static_cast<void
> (0) : __assert_fail ("PredBB->getInstList().size() == 1 && isa<UnreachableInst>(PredBB->getTerminator()) && \"The successor list of PredBB isn't empty before \" \"applying corresponding DTU updates.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 726, __PRETTY_FUNCTION__))
724 isa<UnreachableInst>(PredBB->getTerminator()) &&((PredBB->getInstList().size() == 1 && isa<UnreachableInst
>(PredBB->getTerminator()) && "The successor list of PredBB isn't empty before "
"applying corresponding DTU updates.") ? static_cast<void
> (0) : __assert_fail ("PredBB->getInstList().size() == 1 && isa<UnreachableInst>(PredBB->getTerminator()) && \"The successor list of PredBB isn't empty before \" \"applying corresponding DTU updates.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 726, __PRETTY_FUNCTION__))
725 "The successor list of PredBB isn't empty before "((PredBB->getInstList().size() == 1 && isa<UnreachableInst
>(PredBB->getTerminator()) && "The successor list of PredBB isn't empty before "
"applying corresponding DTU updates.") ? static_cast<void
> (0) : __assert_fail ("PredBB->getInstList().size() == 1 && isa<UnreachableInst>(PredBB->getTerminator()) && \"The successor list of PredBB isn't empty before \" \"applying corresponding DTU updates.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 726, __PRETTY_FUNCTION__))
726 "applying corresponding DTU updates.")((PredBB->getInstList().size() == 1 && isa<UnreachableInst
>(PredBB->getTerminator()) && "The successor list of PredBB isn't empty before "
"applying corresponding DTU updates.") ? static_cast<void
> (0) : __assert_fail ("PredBB->getInstList().size() == 1 && isa<UnreachableInst>(PredBB->getTerminator()) && \"The successor list of PredBB isn't empty before \" \"applying corresponding DTU updates.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 726, __PRETTY_FUNCTION__))
;
727 DTU->applyUpdates(Updates, /*ForceRemoveDuplicates*/ true);
728 DTU->deleteBB(PredBB);
729 // Recalculation of DomTree is needed when updating a forward DomTree and
730 // the Entry BB is replaced.
731 if (ReplaceEntryBB && DTU->hasDomTree()) {
732 // The entry block was removed and there is no external interface for
733 // the dominator tree to be notified of this change. In this corner-case
734 // we recalculate the entire tree.
735 DTU->recalculate(*(DestBB->getParent()));
736 }
737 }
738
739 else {
740 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
741 }
742}
743
744/// CanMergeValues - Return true if we can choose one of these values to use
745/// in place of the other. Note that we will always choose the non-undef
746/// value to keep.
747static bool CanMergeValues(Value *First, Value *Second) {
748 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
749}
750
751/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
752/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
753///
754/// Assumption: Succ is the single successor for BB.
755 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
756 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!")((*succ_begin(BB) == Succ && "Succ is not successor of BB!"
) ? static_cast<void> (0) : __assert_fail ("*succ_begin(BB) == Succ && \"Succ is not successor of BB!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 756, __PRETTY_FUNCTION__))
;
757
758 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Looking to fold " << BB->
getName() << " into " << Succ->getName() <<
"\n"; } } while (false)
759 << Succ->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Looking to fold " << BB->
getName() << " into " << Succ->getName() <<
"\n"; } } while (false)
;
760 // Shortcut, if there is only a single predecessor it must be BB and merging
761 // is always safe
762 if (Succ->getSinglePredecessor()) return true;
763
764 // Make a list of the predecessors of BB
765 SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
766
// A conflict exists when some block is a predecessor of both BB and Succ
// and would feed a phi in Succ two different non-undef values along its
// two paths (directly, and through BB).
767 // Look at all the phi nodes in Succ, to see if they present a conflict when
768 // merging these blocks
769 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
770 PHINode *PN = cast<PHINode>(I);
771
772 // If the incoming value from BB is again a PHINode in
773 // BB which has the same incoming value for *PI as PN does, we can
774 // merge the phi nodes and then the blocks can still be merged
775 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
776 if (BBPN && BBPN->getParent() == BB) {
777 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
778 BasicBlock *IBB = PN->getIncomingBlock(PI);
779 if (BBPreds.count(IBB) &&
780 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
781 PN->getIncomingValue(PI))) {
782 LLVM_DEBUG(dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
783 << "Can't fold, phi node " << PN->getName() << " in "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
784 << Succ->getName() << " is conflicting with "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
785 << BBPN->getName() << " with regard to common predecessor "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
786 << IBB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with " << BBPN->getName()
<< " with regard to common predecessor " << IBB->
getName() << "\n"; } } while (false)
;
787 return false;
788 }
789 }
790 } else {
// The value from BB is a single non-phi value; every predecessor common
// to BB and Succ must already feed PN that same value (or undef).
791 Value* Val = PN->getIncomingValueForBlock(BB);
792 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
793 // See if the incoming value for the common predecessor is equal to the
794 // one for BB, in which case this phi node will not prevent the merging
795 // of the block.
796 BasicBlock *IBB = PN->getIncomingBlock(PI);
797 if (BBPreds.count(IBB) &&
798 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
799 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
800 << " in " << Succ->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
801 << " is conflicting with regard to common "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
802 << "predecessor " << IBB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Can't fold, phi node " <<
PN->getName() << " in " << Succ->getName()
<< " is conflicting with regard to common " << "predecessor "
<< IBB->getName() << "\n"; } } while (false)
;
803 return false;
804 }
805 }
806 }
807 }
808
// No phi in Succ conflicts; the blocks can be folded together.
809 return true;
810}
811
812using PredBlockVector = SmallVector<BasicBlock *, 16>;
813using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
814
815/// Determines the value to use as the phi node input for a block.
816///
817/// Select between \p OldVal any value that we know flows from \p BB
818/// to a particular phi on the basis of which one (if either) is not
819/// undef. Update IncomingValues based on the selected value.
820///
821/// \param OldVal The value we are considering selecting.
822/// \param BB The block that the value flows in from.
823/// \param IncomingValues A map from block-to-value for other phi inputs
824/// that we have examined.
825///
826/// \returns the selected value.
827 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
828 IncomingValueMap &IncomingValues) {
// A concrete (non-undef) value always wins: record it for BB and keep it.
829 if (!isa<UndefValue>(OldVal)) {
830 assert((!IncomingValues.count(BB) ||(((!IncomingValues.count(BB) || IncomingValues.find(BB)->second
== OldVal) && "Expected OldVal to match incoming value from BB!"
) ? static_cast<void> (0) : __assert_fail ("(!IncomingValues.count(BB) || IncomingValues.find(BB)->second == OldVal) && \"Expected OldVal to match incoming value from BB!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 832, __PRETTY_FUNCTION__))
831 IncomingValues.find(BB)->second == OldVal) &&(((!IncomingValues.count(BB) || IncomingValues.find(BB)->second
== OldVal) && "Expected OldVal to match incoming value from BB!"
) ? static_cast<void> (0) : __assert_fail ("(!IncomingValues.count(BB) || IncomingValues.find(BB)->second == OldVal) && \"Expected OldVal to match incoming value from BB!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 832, __PRETTY_FUNCTION__))
832 "Expected OldVal to match incoming value from BB!")(((!IncomingValues.count(BB) || IncomingValues.find(BB)->second
== OldVal) && "Expected OldVal to match incoming value from BB!"
) ? static_cast<void> (0) : __assert_fail ("(!IncomingValues.count(BB) || IncomingValues.find(BB)->second == OldVal) && \"Expected OldVal to match incoming value from BB!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 832, __PRETTY_FUNCTION__))
;
833
834 IncomingValues.insert(std::make_pair(BB, OldVal));
835 return OldVal;
836 }
837
// OldVal is undef: prefer a previously recorded non-undef value for BB.
838 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
839 if (It != IncomingValues.end()) return It->second;
840
// Nothing recorded for BB; keep the undef.
841 return OldVal;
842}
843
844/// Create a map from block to value for the operands of a
845/// given phi.
846///
847/// Create a map from block to value for each non-undef value flowing
848/// into \p PN.
849///
850/// \param PN The phi we are collecting the map for.
851/// \param IncomingValues [out] The map from block to value for this phi.
852static void gatherIncomingValuesToPhi(PHINode *PN,
853 IncomingValueMap &IncomingValues) {
854 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
855 BasicBlock *BB = PN->getIncomingBlock(i);
856 Value *V = PN->getIncomingValue(i);
857
858 if (!isa<UndefValue>(V))
859 IncomingValues.insert(std::make_pair(BB, V));
860 }
861}
862
863/// Replace the incoming undef values to a phi with the values
864/// from a block-to-value map.
865///
866/// \param PN The phi we are replacing the undefs in.
867/// \param IncomingValues A map from block to value.
868static void replaceUndefValuesInPhi(PHINode *PN,
869 const IncomingValueMap &IncomingValues) {
870 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
871 Value *V = PN->getIncomingValue(i);
872
873 if (!isa<UndefValue>(V)) continue;
874
875 BasicBlock *BB = PN->getIncomingBlock(i);
876 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
877 if (It == IncomingValues.end()) continue;
878
879 PN->setIncomingValue(i, It->second);
880 }
881}
882
883/// Replace a value flowing from a block to a phi with
884/// potentially multiple instances of that value flowing from the
885/// block's predecessors to the phi.
886///
887/// \param BB The block with the value flowing into the phi.
888/// \param BBPreds The predecessors of BB.
889/// \param PN The phi that we are updating.
890 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
891 const PredBlockVector &BBPreds,
892 PHINode *PN) {
// Remove BB's entry from PN; its value is re-distributed to BB's
// predecessors below.
893 Value *OldVal = PN->removeIncomingValue(BB, false);
894 assert(OldVal && "No entry in PHI for Pred BB!")((OldVal && "No entry in PHI for Pred BB!") ? static_cast
<void> (0) : __assert_fail ("OldVal && \"No entry in PHI for Pred BB!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 894, __PRETTY_FUNCTION__))
;
895
896 IncomingValueMap IncomingValues;
897
898 // We are merging two blocks - BB, and the block containing PN - and
899 // as a result we need to redirect edges from the predecessors of BB
900 // to go to the block containing PN, and update PN
901 // accordingly. Since we allow merging blocks in the case where the
902 // predecessor and successor blocks both share some predecessors,
903 // and where some of those common predecessors might have undef
904 // values flowing into PN, we want to rewrite those values to be
905 // consistent with the non-undef values.
906
907 gatherIncomingValuesToPhi(PN, IncomingValues);
908
909 // If this incoming value is one of the PHI nodes in BB, the new entries
910 // in the PHI node are the entries from the old PHI.
911 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
912 PHINode *OldValPN = cast<PHINode>(OldVal);
913 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
914 // Note that, since we are merging phi nodes and BB and Succ might
915 // have common predecessors, we could end up with a phi node with
916 // identical incoming branches. This will be cleaned up later (and
917 // will trigger asserts if we try to clean it up now, without also
918 // simplifying the corresponding conditional branch).
919 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
920 Value *PredVal = OldValPN->getIncomingValue(i);
921 Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
922 IncomingValues);
923
924 // And add a new incoming value for this predecessor for the
925 // newly retargeted branch.
926 PN->addIncoming(Selected, PredBB);
927 }
928 } else {
// OldVal is a single value: forward it to PN from each of BB's
// predecessors (modulo undef reconciliation).
929 for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
930 // Update existing incoming values in PN for this
931 // predecessor of BB.
932 BasicBlock *PredBB = BBPreds[i];
933 Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
934 IncomingValues);
935
936 // And add a new incoming value for this predecessor for the
937 // newly retargeted branch.
938 PN->addIncoming(Selected, PredBB);
939 }
940 }
941
// Finally, patch any remaining undef operands with the known values
// collected above so PN stays internally consistent.
942 replaceUndefValuesInPhi(PN, IncomingValues);
943}
944
945/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
946/// unconditional branch, and contains no instructions other than PHI nodes,
947/// potential side-effect free intrinsics and the branch. If possible,
948/// eliminate BB by rewriting all the predecessors to branch to the successor
949/// block and return true. If we can't transform, return false.
950 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
951 DomTreeUpdater *DTU) {
952 assert(BB != &BB->getParent()->getEntryBlock() &&((BB != &BB->getParent()->getEntryBlock() &&
"TryToSimplifyUncondBranchFromEmptyBlock called on entry block!"
) ? static_cast<void> (0) : __assert_fail ("BB != &BB->getParent()->getEntryBlock() && \"TryToSimplifyUncondBranchFromEmptyBlock called on entry block!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 953, __PRETTY_FUNCTION__))
953 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!")((BB != &BB->getParent()->getEntryBlock() &&
"TryToSimplifyUncondBranchFromEmptyBlock called on entry block!"
) ? static_cast<void> (0) : __assert_fail ("BB != &BB->getParent()->getEntryBlock() && \"TryToSimplifyUncondBranchFromEmptyBlock called on entry block!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 953, __PRETTY_FUNCTION__))
;
954
955 // We can't eliminate infinite loops.
956 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
957 if (BB == Succ) return false;
958
959 // Check to see if merging these blocks would cause conflicts for any of the
960 // phi nodes in BB or Succ. If not, we can safely merge.
961 if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
962
963 // Check for cases where Succ has multiple predecessors and a PHI node in BB
964 // has uses which will not disappear when the PHI nodes are merged. It is
965 // possible to handle such cases, but difficult: it requires checking whether
966 // BB dominates Succ, which is non-trivial to calculate in the case where
967 // Succ has multiple predecessors. Also, it requires checking whether
968 // constructing the necessary self-referential PHI node doesn't introduce any
969 // conflicts; this isn't too difficult, but the previous code for doing this
970 // was incorrect.
971 //
972 // Note that if this check finds a live use, BB dominates Succ, so BB is
973 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
974 // folding the branch isn't profitable in that case anyway.
975 if (!Succ->getSinglePredecessor()) {
976 BasicBlock::iterator BBI = BB->begin();
977 while (isa<PHINode>(*BBI)) {
978 for (Use &U : BBI->uses()) {
979 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
980 if (PN->getIncomingBlock(U) != BB)
981 return false;
982 } else {
983 return false;
984 }
985 }
986 ++BBI;
987 }
988 }
989
990 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Killing Trivial BB: \n" <<
*BB; } } while (false)
;
991
// Collect dominator-tree edge updates up front; they are only applied to
// DTU after the CFG surgery below is complete.
992 std::vector<DominatorTree::UpdateType> Updates;
993 if (DTU) {
994 Updates.reserve(1 + (2 * pred_size(BB)));
995 Updates.push_back({DominatorTree::Delete, BB, Succ});
996 // All predecessors of BB will be moved to Succ.
997 for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
998 Updates.push_back({DominatorTree::Delete, *I, BB});
999 // This predecessor of BB may already have Succ as a successor.
1000 if (llvm::find(successors(*I), Succ) == succ_end(*I))
1001 Updates.push_back({DominatorTree::Insert, *I, Succ});
1002 }
1003 }
1004
1005 if (isa<PHINode>(Succ->begin())) {
1006 // If there is more than one pred of succ, and there are PHI nodes in
1007 // the successor, then we need to add incoming edges for the PHI nodes
1008 //
1009 const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
1010
1011 // Loop over all of the PHI nodes in the successor of BB.
1012 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1013 PHINode *PN = cast<PHINode>(I);
1014
1015 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1016 }
1017 }
1018
1019 if (Succ->getSinglePredecessor()) {
1020 // BB is the only predecessor of Succ, so Succ will end up with exactly
1021 // the same predecessors BB had.
1022
1023 // Copy over any phi, debug or lifetime instruction.
1024 BB->getTerminator()->eraseFromParent();
1025 Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1026 BB->getInstList());
1027 } else {
1028 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1029 // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1030 assert(PN->use_empty() && "There shouldn't be any uses here!")((PN->use_empty() && "There shouldn't be any uses here!"
) ? static_cast<void> (0) : __assert_fail ("PN->use_empty() && \"There shouldn't be any uses here!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1030, __PRETTY_FUNCTION__))
;
1031 PN->eraseFromParent();
1032 }
1033 }
1034
1035 // If the unconditional branch we replaced contains llvm.loop metadata, we
1036 // add the metadata to the branch instructions in the predecessors.
1037 unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
// NOTE: in the single-predecessor path above BB's terminator was erased and
// BB's instructions spliced into Succ, so getTerminator() may legitimately
// return null here — hence the explicit guard.
1038 Instruction *TI = BB->getTerminator();
1039 if (TI)
1040 if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1041 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1042 BasicBlock *Pred = *PI;
1043 Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1044 }
1045
1046 // Everything that jumped to BB now goes to Succ.
1047 BB->replaceAllUsesWith(Succ);
1048 if (!Succ->hasName()) Succ->takeName(BB);
1049
1050 // Clear the successor list of BB to match updates applying to DTU later.
1051 if (BB->getTerminator())
1052 BB->getInstList().pop_back();
1053 new UnreachableInst(BB->getContext(), BB);
1054 assert(succ_empty(BB) && "The successor list of BB isn't empty before "((succ_empty(BB) && "The successor list of BB isn't empty before "
"applying corresponding DTU updates.") ? static_cast<void
> (0) : __assert_fail ("succ_empty(BB) && \"The successor list of BB isn't empty before \" \"applying corresponding DTU updates.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1055, __PRETTY_FUNCTION__))
1055 "applying corresponding DTU updates.")((succ_empty(BB) && "The successor list of BB isn't empty before "
"applying corresponding DTU updates.") ? static_cast<void
> (0) : __assert_fail ("succ_empty(BB) && \"The successor list of BB isn't empty before \" \"applying corresponding DTU updates.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1055, __PRETTY_FUNCTION__))
;
1056
1057 if (DTU) {
1058 DTU->applyUpdates(Updates, /*ForceRemoveDuplicates*/ true);
1059 DTU->deleteBB(BB);
1060 } else {
1061 BB->eraseFromParent(); // Delete the old basic block.
1062 }
1063 return true;
1064}
1065
1066/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
1067/// nodes in this block. This doesn't try to be clever about PHI nodes
1068/// which differ only in the order of the incoming values, but instcombine
1069/// orders them so it usually won't matter.
1070bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1071 // This implementation doesn't currently consider undef operands
1072 // specially. Theoretically, two phis which are identical except for
1073 // one having an undef where the other doesn't could be collapsed.
1074
1075 struct PHIDenseMapInfo {
1076 static PHINode *getEmptyKey() {
1077 return DenseMapInfo<PHINode *>::getEmptyKey();
1078 }
1079
1080 static PHINode *getTombstoneKey() {
1081 return DenseMapInfo<PHINode *>::getTombstoneKey();
1082 }
1083
1084 static unsigned getHashValue(PHINode *PN) {
1085 // Compute a hash value on the operands. Instcombine will likely have
1086 // sorted them, which helps expose duplicates, but we have to check all
1087 // the operands to be safe in case instcombine hasn't run.
1088 return static_cast<unsigned>(hash_combine(
1089 hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1090 hash_combine_range(PN->block_begin(), PN->block_end())));
1091 }
1092
1093 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1094 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
1095 RHS == getEmptyKey() || RHS == getTombstoneKey())
1096 return LHS == RHS;
1097 return LHS->isIdenticalTo(RHS);
1098 }
1099 };
1100
1101 // Set of unique PHINodes.
1102 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1103
1104 // Examine each PHI.
1105 bool Changed = false;
1106 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1107 auto Inserted = PHISet.insert(PN);
1108 if (!Inserted.second) {
1109 // A duplicate. Replace this PHI with its duplicate.
1110 PN->replaceAllUsesWith(*Inserted.first);
1111 PN->eraseFromParent();
1112 Changed = true;
1113
1114 // The RAUW can change PHIs that we already visited. Start over from the
1115 // beginning.
1116 PHISet.clear();
1117 I = BB->begin();
1118 }
1119 }
1120
1121 return Changed;
1122}
1123
1124/// enforceKnownAlignment - If the specified pointer points to an object that
1125/// we control, modify the object's alignment to PrefAlign. This isn't
1126/// often possible though. If alignment is important, a more reliable approach
1127/// is to simply align all global variables and allocation instructions to
1128/// their preferred alignment from the beginning.
1129 static unsigned enforceKnownAlignment(Value *V, unsigned Align,
1130 unsigned PrefAlign,
1131 const DataLayout &DL) {
1132 assert(PrefAlign > Align)((PrefAlign > Align) ? static_cast<void> (0) : __assert_fail
("PrefAlign > Align", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1132, __PRETTY_FUNCTION__))
;
1133
// Look through pointer casts so any alignment change is applied to the
// underlying object itself.
1134 V = V->stripPointerCasts();
1135
1136 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1137 // TODO: ideally, computeKnownBits ought to have used
1138 // AllocaInst::getAlignment() in its computation already, making
1139 // the below max redundant. But, as it turns out,
1140 // stripPointerCasts recurses through infinite layers of bitcasts,
1141 // while computeKnownBits is not allowed to traverse more than 6
1142 // levels.
1143 Align = std::max(AI->getAlignment(), Align);
1144 if (PrefAlign <= Align)
1145 return Align;
1146
1147 // If the preferred alignment is greater than the natural stack alignment
1148 // then don't round up. This avoids dynamic stack realignment.
1149 if (DL.exceedsNaturalStackAlignment(PrefAlign))
1150 return Align;
1151 AI->setAlignment(PrefAlign);
1152 return PrefAlign;
1153 }
1154
1155 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1156 // TODO: as above, this shouldn't be necessary.
1157 Align = std::max(GO->getAlignment(), Align);
1158 if (PrefAlign <= Align)
1159 return Align;
1160
1161 // If there is a large requested alignment and we can, bump up the alignment
1162 // of the global. If the memory we set aside for the global may not be the
1163 // memory used by the final program then it is impossible for us to reliably
1164 // enforce the preferred alignment.
1165 if (!GO->canIncreaseAlignment())
1166 return Align;
1167
1168 GO->setAlignment(PrefAlign);
1169 return PrefAlign;
1170 }
1171
// Neither an alloca nor a global object we control: alignment cannot be
// enforced, so report what we already knew.
1172 return Align;
1173}
1174
1175 unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
1176 const DataLayout &DL,
1177 const Instruction *CxtI,
1178 AssumptionCache *AC,
1179 const DominatorTree *DT) {
1180 assert(V->getType()->isPointerTy() &&((V->getType()->isPointerTy() && "getOrEnforceKnownAlignment expects a pointer!"
) ? static_cast<void> (0) : __assert_fail ("V->getType()->isPointerTy() && \"getOrEnforceKnownAlignment expects a pointer!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1181, __PRETTY_FUNCTION__))
1181 "getOrEnforceKnownAlignment expects a pointer!")((V->getType()->isPointerTy() && "getOrEnforceKnownAlignment expects a pointer!"
) ? static_cast<void> (0) : __assert_fail ("V->getType()->isPointerTy() && \"getOrEnforceKnownAlignment expects a pointer!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1181, __PRETTY_FUNCTION__))
;
1182
// The provable alignment of V is 2^(number of known trailing zero bits).
1183 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1184 unsigned TrailZ = Known.countMinTrailingZeros();
1185
1186 // Avoid trouble with ridiculously large TrailZ values, such as
1187 // those computed from a null pointer.
1188 TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT8 - 1));
1189
1190 unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);
1191
1192 // LLVM doesn't support alignments larger than this currently.
1193 Align = std::min(Align, +Value::MaximumAlignment);
1194
// If the caller wants more alignment than can be proven, try to raise the
// alignment of the underlying alloca/global via enforceKnownAlignment.
1195 if (PrefAlign > Align)
1196 Align = enforceKnownAlignment(V, Align, PrefAlign, DL);
1197
1198 // We don't need to make any adjustment.
1199 return Align;
1200}
1201
1202///===---------------------------------------------------------------------===//
1203/// Dbg Intrinsic utilities
1204///
1205
1206/// See if there is a dbg.value intrinsic for DIVar before I.
1207static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
1208 Instruction *I) {
1209 // Since we can't guarantee that the original dbg.declare instrinsic
1210 // is removed by LowerDbgDeclare(), we need to make sure that we are
1211 // not inserting the same dbg.value intrinsic over and over.
1212 BasicBlock::InstListType::iterator PrevI(I);
1213 if (PrevI != I->getParent()->getInstList().begin()) {
1214 --PrevI;
1215 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
1216 if (DVI->getValue() == I->getOperand(0) &&
1217 DVI->getVariable() == DIVar &&
1218 DVI->getExpression() == DIExpr)
1219 return true;
1220 }
1221 return false;
1222}
1223
1224/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1225 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1226 DIExpression *DIExpr,
1227 PHINode *APN) {
1228 // Since we can't guarantee that the original dbg.declare intrinsic
1229 // is removed by LowerDbgDeclare(), we need to make sure that we are
1230 // not inserting the same dbg.value intrinsic over and over.
1231 SmallVector<DbgValueInst *, 1> DbgValues;
1232 findDbgValues(DbgValues, APN);
1233 for (auto *DVI : DbgValues) {
1234 assert(DVI->getValue() == APN)((DVI->getValue() == APN) ? static_cast<void> (0) : __assert_fail
("DVI->getValue() == APN", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1234, __PRETTY_FUNCTION__))
;
// A match requires both the same variable and the same expression.
1235 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1236 return true;
1237 }
1238 return false;
1239}
1240
1241/// Check if the alloc size of \p ValTy is large enough to cover the variable
1242/// (or fragment of the variable) described by \p DII.
1243///
1244/// This is primarily intended as a helper for the different
1245/// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
1246/// converted describes an alloca'd variable, so we need to use the
1247/// alloc size of the value when doing the comparison. E.g. an i1 value will be
1248/// identified as covering an n-bit fragment, if the store size of i1 is at
1249/// least n bits.
1250static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1251 const DataLayout &DL = DII->getModule()->getDataLayout();
1252 uint64_t ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1253 if (auto FragmentSize = DII->getFragmentSizeInBits())
1254 return ValueSize >= *FragmentSize;
1255 // We can't always calculate the size of the DI variable (e.g. if it is a
1256 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1257 // intead.
1258 if (DII->isAddressOfVariable())
1259 if (auto *AI = dyn_cast_or_null<AllocaInst>(DII->getVariableLocation()))
1260 if (auto FragmentSize = AI->getAllocationSizeInBits(DL))
1261 return ValueSize >= *FragmentSize;
1262 // Could not determine size of variable. Conservatively return false.
1263 return false;
1264}
1265
1266/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1267/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1268 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1269 StoreInst *SI, DIBuilder &Builder) {
1270 assert(DII->isAddressOfVariable())((DII->isAddressOfVariable()) ? static_cast<void> (0
) : __assert_fail ("DII->isAddressOfVariable()", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1270, __PRETTY_FUNCTION__))
;
1271 auto *DIVar = DII->getVariable();
1272 assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void>
(0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1272, __PRETTY_FUNCTION__))
;
1273 auto *DIExpr = DII->getExpression();
// DV is the value being stored; it becomes the dbg.value operand below.
1274 Value *DV = SI->getOperand(0);
1275
1276 if (!valueCoversEntireFragment(SI->getValueOperand()->getType(), DII)) {
1277 // FIXME: If storing to a part of the variable described by the dbg.declare,
1278 // then we want to insert a dbg.value for the corresponding fragment.
1279 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
1280 << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
;
1281 // For now, when there is a store to parts of the variable (but we do not
1282 // know which part) we insert a dbg.value intrinsic to indicate that we
1283 // know nothing about the variable's content.
1284 DV = UndefValue::get(DV->getType());
1285 if (!LdStHasDebugValue(DIVar, DIExpr, SI))
1286 Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, DII->getDebugLoc(),
1287 SI);
1288 return;
1289 }
1290
1291 // If an argument is zero extended then use argument directly. The ZExt
1292 // may be zapped by an optimization pass in future.
1293 Argument *ExtendedArg = nullptr;
1294 if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
1295 ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
1296 if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
1297 ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
1298 if (ExtendedArg) {
1299 // If this DII was already describing only a fragment of a variable, ensure
1300 // that fragment is appropriately narrowed here.
1301 // But if a fragment wasn't used, describe the value as the original
1302 // argument (rather than the zext or sext) so that it remains described even
1303 // if the sext/zext is optimized away. This widens the variable description,
1304 // leaving it up to the consumer to know how the smaller value may be
1305 // represented in a larger register.
// Rebuild the expression: drop the trailing fragment triple (the last 3
// elements) and append a fragment sized to the original argument's type.
1306 if (auto Fragment = DIExpr->getFragmentInfo()) {
1307 unsigned FragmentOffset = Fragment->OffsetInBits;
1308 SmallVector<uint64_t, 3> Ops(DIExpr->elements_begin(),
1309 DIExpr->elements_end() - 3);
1310 Ops.push_back(dwarf::DW_OP_LLVM_fragment);
1311 Ops.push_back(FragmentOffset);
1312 const DataLayout &DL = DII->getModule()->getDataLayout();
1313 Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType()));
1314 DIExpr = Builder.createExpression(Ops);
1315 }
1316 DV = ExtendedArg;
1317 }
// Emit the dbg.value unless an identical one already precedes the store.
1318 if (!LdStHasDebugValue(DIVar, DIExpr, SI))
1319 Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, DII->getDebugLoc(),
1320 SI);
1321}
1322
1323/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1324/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1325void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1326 LoadInst *LI, DIBuilder &Builder) {
1327 auto *DIVar = DII->getVariable();
1328 auto *DIExpr = DII->getExpression();
1329 assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void>
(0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1329, __PRETTY_FUNCTION__))
;
1330
1331 if (LdStHasDebugValue(DIVar, DIExpr, LI))
1332 return;
1333
1334 if (!valueCoversEntireFragment(LI->getType(), DII)) {
1335 // FIXME: If only referring to a part of the variable described by the
1336 // dbg.declare, then we want to insert a dbg.value for the corresponding
1337 // fragment.
1338 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
1339 << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
;
1340 return;
1341 }
1342
1343 // We are now tracking the loaded value instead of the address. In the
1344 // future if multi-location support is added to the IR, it might be
1345 // preferable to keep tracking both the loaded value and the original
1346 // address in case the alloca can not be elided.
1347 Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1348 LI, DIVar, DIExpr, DII->getDebugLoc(), (Instruction *)nullptr);
1349 DbgValue->insertAfter(LI);
1350}
1351
1352/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1353/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1354void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1355 PHINode *APN, DIBuilder &Builder) {
1356 auto *DIVar = DII->getVariable();
1357 auto *DIExpr = DII->getExpression();
1358 assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void>
(0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1358, __PRETTY_FUNCTION__))
;
1359
1360 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1361 return;
1362
1363 if (!valueCoversEntireFragment(APN->getType(), DII)) {
1364 // FIXME: If only referring to a part of the variable described by the
1365 // dbg.declare, then we want to insert a dbg.value for the corresponding
1366 // fragment.
1367 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
1368 << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Failed to convert dbg.declare to dbg.value: "
<< *DII << '\n'; } } while (false)
;
1369 return;
1370 }
1371
1372 BasicBlock *BB = APN->getParent();
1373 auto InsertionPt = BB->getFirstInsertionPt();
1374
1375 // The block may be a catchswitch block, which does not have a valid
1376 // insertion point.
1377 // FIXME: Insert dbg.value markers in the successors when appropriate.
1378 if (InsertionPt != BB->end())
1379 Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, DII->getDebugLoc(),
1380 &*InsertionPt);
1381}
1382
1383/// Determine whether this alloca is either a VLA or an array.
1384static bool isArray(AllocaInst *AI) {
1385 return AI->isArrayAllocation() ||
1386 AI->getType()->getElementType()->isArrayTy();
1387}
1388
1389/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1390/// of llvm.dbg.value intrinsics.
1391bool llvm::LowerDbgDeclare(Function &F) {
1392 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1393 SmallVector<DbgDeclareInst *, 4> Dbgs;
1394 for (auto &FI : F)
1395 for (Instruction &BI : FI)
1396 if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1397 Dbgs.push_back(DDI);
1398
1399 if (Dbgs.empty())
1400 return false;
1401
1402 for (auto &I : Dbgs) {
1403 DbgDeclareInst *DDI = I;
1404 AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1405 // If this is an alloca for a scalar variable, insert a dbg.value
1406 // at each load and store to the alloca and erase the dbg.declare.
1407 // The dbg.values allow tracking a variable even if it is not
1408 // stored on the stack, while the dbg.declare can only describe
1409 // the stack slot (and at a lexical-scope granularity). Later
1410 // passes will attempt to elide the stack slot.
1411 if (!AI || isArray(AI))
1412 continue;
1413
1414 // A volatile load/store means that the alloca can't be elided anyway.
1415 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1416 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1417 return LI->isVolatile();
1418 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1419 return SI->isVolatile();
1420 return false;
1421 }))
1422 continue;
1423
1424 for (auto &AIUse : AI->uses()) {
1425 User *U = AIUse.getUser();
1426 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1427 if (AIUse.getOperandNo() == 1)
1428 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1429 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1430 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1431 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1432 // This is a call by-value or some other instruction that takes a
1433 // pointer to the variable. Insert a *value* intrinsic that describes
1434 // the variable by dereferencing the alloca.
1435 auto *DerefExpr =
1436 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1437 DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1438 DDI->getDebugLoc(), CI);
1439 }
1440 }
1441 DDI->eraseFromParent();
1442 }
1443 return true;
1444}
1445
1446/// Propagate dbg.value intrinsics through the newly inserted PHIs.
1447void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1448 SmallVectorImpl<PHINode *> &InsertedPHIs) {
1449 assert(BB && "No BasicBlock to clone dbg.value(s) from.")((BB && "No BasicBlock to clone dbg.value(s) from.") ?
static_cast<void> (0) : __assert_fail ("BB && \"No BasicBlock to clone dbg.value(s) from.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1449, __PRETTY_FUNCTION__))
;
1450 if (InsertedPHIs.size() == 0)
1451 return;
1452
1453 // Map existing PHI nodes to their dbg.values.
1454 ValueToValueMapTy DbgValueMap;
1455 for (auto &I : *BB) {
1456 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
1457 if (auto *Loc = dyn_cast_or_null<PHINode>(DbgII->getVariableLocation()))
1458 DbgValueMap.insert({Loc, DbgII});
1459 }
1460 }
1461 if (DbgValueMap.size() == 0)
1462 return;
1463
1464 // Then iterate through the new PHIs and look to see if they use one of the
1465 // previously mapped PHIs. If so, insert a new dbg.value intrinsic that will
1466 // propagate the info through the new PHI.
1467 LLVMContext &C = BB->getContext();
1468 for (auto PHI : InsertedPHIs) {
1469 BasicBlock *Parent = PHI->getParent();
1470 // Avoid inserting an intrinsic into an EH block.
1471 if (Parent->getFirstNonPHI()->isEHPad())
1472 continue;
1473 auto PhiMAV = MetadataAsValue::get(C, ValueAsMetadata::get(PHI));
1474 for (auto VI : PHI->operand_values()) {
1475 auto V = DbgValueMap.find(VI);
1476 if (V != DbgValueMap.end()) {
1477 auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
1478 Instruction *NewDbgII = DbgII->clone();
1479 NewDbgII->setOperand(0, PhiMAV);
1480 auto InsertionPt = Parent->getFirstInsertionPt();
1481 assert(InsertionPt != Parent->end() && "Ill-formed basic block")((InsertionPt != Parent->end() && "Ill-formed basic block"
) ? static_cast<void> (0) : __assert_fail ("InsertionPt != Parent->end() && \"Ill-formed basic block\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1481, __PRETTY_FUNCTION__))
;
1482 NewDbgII->insertBefore(&*InsertionPt);
1483 }
1484 }
1485 }
1486}
1487
1488/// Finds all intrinsics declaring local variables as living in the memory that
1489/// 'V' points to. This may include a mix of dbg.declare and
1490/// dbg.addr intrinsics.
1491TinyPtrVector<DbgVariableIntrinsic *> llvm::FindDbgAddrUses(Value *V) {
1492 // This function is hot. Check whether the value has any metadata to avoid a
1493 // DenseMap lookup.
1494 if (!V->isUsedByMetadata())
1495 return {};
1496 auto *L = LocalAsMetadata::getIfExists(V);
1497 if (!L)
1498 return {};
1499 auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L);
1500 if (!MDV)
1501 return {};
1502
1503 TinyPtrVector<DbgVariableIntrinsic *> Declares;
1504 for (User *U : MDV->users()) {
1505 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(U))
1506 if (DII->isAddressOfVariable())
1507 Declares.push_back(DII);
1508 }
1509
1510 return Declares;
1511}
1512
1513void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
1514 // This function is hot. Check whether the value has any metadata to avoid a
1515 // DenseMap lookup.
1516 if (!V->isUsedByMetadata())
1517 return;
1518 if (auto *L = LocalAsMetadata::getIfExists(V))
1519 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1520 for (User *U : MDV->users())
1521 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
1522 DbgValues.push_back(DVI);
1523}
1524
1525void llvm::findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers,
1526 Value *V) {
1527 // This function is hot. Check whether the value has any metadata to avoid a
1528 // DenseMap lookup.
1529 if (!V->isUsedByMetadata())
1530 return;
1531 if (auto *L = LocalAsMetadata::getIfExists(V))
1532 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1533 for (User *U : MDV->users())
1534 if (DbgVariableIntrinsic *DII = dyn_cast<DbgVariableIntrinsic>(U))
1535 DbgUsers.push_back(DII);
1536}
1537
1538bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1539 Instruction *InsertBefore, DIBuilder &Builder,
1540 bool DerefBefore, int Offset, bool DerefAfter) {
1541 auto DbgAddrs = FindDbgAddrUses(Address);
1542 for (DbgVariableIntrinsic *DII : DbgAddrs) {
1543 DebugLoc Loc = DII->getDebugLoc();
1544 auto *DIVar = DII->getVariable();
1545 auto *DIExpr = DII->getExpression();
1546 assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void>
(0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1546, __PRETTY_FUNCTION__))
;
1547 DIExpr = DIExpression::prepend(DIExpr, DerefBefore, Offset, DerefAfter);
1548 // Insert llvm.dbg.declare immediately before InsertBefore, and remove old
1549 // llvm.dbg.declare.
1550 Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
1551 if (DII == InsertBefore)
1552 InsertBefore = InsertBefore->getNextNode();
1553 DII->eraseFromParent();
1554 }
1555 return !DbgAddrs.empty();
1556}
1557
1558bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1559 DIBuilder &Builder, bool DerefBefore,
1560 int Offset, bool DerefAfter) {
1561 return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
1562 DerefBefore, Offset, DerefAfter);
1563}
1564
1565static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1566 DIBuilder &Builder, int Offset) {
1567 DebugLoc Loc = DVI->getDebugLoc();
1568 auto *DIVar = DVI->getVariable();
1569 auto *DIExpr = DVI->getExpression();
1570 assert(DIVar && "Missing variable")((DIVar && "Missing variable") ? static_cast<void>
(0) : __assert_fail ("DIVar && \"Missing variable\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1570, __PRETTY_FUNCTION__))
;
1571
1572 // This is an alloca-based llvm.dbg.value. The first thing it should do with
1573 // the alloca pointer is dereference it. Otherwise we don't know how to handle
1574 // it and give up.
1575 if (!DIExpr || DIExpr->getNumElements() < 1 ||
1576 DIExpr->getElement(0) != dwarf::DW_OP_deref)
1577 return;
1578
1579 // Insert the offset immediately after the first deref.
1580 // We could just change the offset argument of dbg.value, but it's unsigned...
1581 if (Offset) {
1582 SmallVector<uint64_t, 4> Ops;
1583 Ops.push_back(dwarf::DW_OP_deref);
1584 DIExpression::appendOffset(Ops, Offset);
1585 Ops.append(DIExpr->elements_begin() + 1, DIExpr->elements_end());
1586 DIExpr = Builder.createExpression(Ops);
1587 }
1588
1589 Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1590 DVI->eraseFromParent();
1591}
1592
1593void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1594 DIBuilder &Builder, int Offset) {
1595 if (auto *L = LocalAsMetadata::getIfExists(AI))
1596 if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1597 for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
1598 Use &U = *UI++;
1599 if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1600 replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1601 }
1602}
1603
1604/// Wrap \p V in a ValueAsMetadata instance.
1605static MetadataAsValue *wrapValueInMetadata(LLVMContext &C, Value *V) {
1606 return MetadataAsValue::get(C, ValueAsMetadata::get(V));
1607}
1608
1609bool llvm::salvageDebugInfo(Instruction &I) {
1610 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1611 findDbgUsers(DbgUsers, &I);
1612 if (DbgUsers.empty())
1613 return false;
1614
1615 auto &M = *I.getModule();
1616 auto &DL = M.getDataLayout();
1617 auto &Ctx = I.getContext();
1618 auto wrapMD = [&](Value *V) { return wrapValueInMetadata(Ctx, V); };
1619
1620 auto doSalvage = [&](DbgVariableIntrinsic *DII, SmallVectorImpl<uint64_t> &Ops) {
1621 auto *DIExpr = DII->getExpression();
1622 if (!Ops.empty()) {
1623 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1624 // are implicitly pointing out the value as a DWARF memory location
1625 // description.
1626 bool WithStackValue = isa<DbgValueInst>(DII);
1627 DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
1628 }
1629 DII->setOperand(0, wrapMD(I.getOperand(0)));
1630 DII->setOperand(2, MetadataAsValue::get(Ctx, DIExpr));
1631 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "SALVAGE: " << *DII <<
'\n'; } } while (false)
;
1632 };
1633
1634 auto applyOffset = [&](DbgVariableIntrinsic *DII, uint64_t Offset) {
1635 SmallVector<uint64_t, 8> Ops;
1636 DIExpression::appendOffset(Ops, Offset);
1637 doSalvage(DII, Ops);
1638 };
1639
1640 auto applyOps = [&](DbgVariableIntrinsic *DII,
1641 std::initializer_list<uint64_t> Opcodes) {
1642 SmallVector<uint64_t, 8> Ops(Opcodes);
1643 doSalvage(DII, Ops);
1644 };
1645
1646 if (auto *CI = dyn_cast<CastInst>(&I)) {
1647 if (!CI->isNoopCast(DL))
1648 return false;
1649
1650 // No-op casts are irrelevant for debug info.
1651 MetadataAsValue *CastSrc = wrapMD(I.getOperand(0));
1652 for (auto *DII : DbgUsers) {
1653 DII->setOperand(0, CastSrc);
1654 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "SALVAGE: " << *DII <<
'\n'; } } while (false)
;
1655 }
1656 return true;
1657 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1658 unsigned BitWidth =
1659 M.getDataLayout().getIndexSizeInBits(GEP->getPointerAddressSpace());
1660 // Rewrite a constant GEP into a DIExpression. Since we are performing
1661 // arithmetic to compute the variable's *value* in the DIExpression, we
1662 // need to mark the expression with a DW_OP_stack_value.
1663 APInt Offset(BitWidth, 0);
1664 if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset))
1665 for (auto *DII : DbgUsers)
1666 applyOffset(DII, Offset.getSExtValue());
1667 return true;
1668 } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1669 // Rewrite binary operations with constant integer operands.
1670 auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1));
1671 if (!ConstInt || ConstInt->getBitWidth() > 64)
1672 return false;
1673
1674 uint64_t Val = ConstInt->getSExtValue();
1675 for (auto *DII : DbgUsers) {
1676 switch (BI->getOpcode()) {
1677 case Instruction::Add:
1678 applyOffset(DII, Val);
1679 break;
1680 case Instruction::Sub:
1681 applyOffset(DII, -int64_t(Val));
1682 break;
1683 case Instruction::Mul:
1684 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_mul});
1685 break;
1686 case Instruction::SDiv:
1687 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_div});
1688 break;
1689 case Instruction::SRem:
1690 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_mod});
1691 break;
1692 case Instruction::Or:
1693 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_or});
1694 break;
1695 case Instruction::And:
1696 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_and});
1697 break;
1698 case Instruction::Xor:
1699 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_xor});
1700 break;
1701 case Instruction::Shl:
1702 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_shl});
1703 break;
1704 case Instruction::LShr:
1705 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_shr});
1706 break;
1707 case Instruction::AShr:
1708 applyOps(DII, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_shra});
1709 break;
1710 default:
1711 // TODO: Salvage constants from each kind of binop we know about.
1712 return false;
1713 }
1714 }
1715 return true;
1716 } else if (isa<LoadInst>(&I)) {
1717 MetadataAsValue *AddrMD = wrapMD(I.getOperand(0));
1718 for (auto *DII : DbgUsers) {
1719 // Rewrite the load into DW_OP_deref.
1720 auto *DIExpr = DII->getExpression();
1721 DIExpr = DIExpression::prepend(DIExpr, DIExpression::WithDeref);
1722 DII->setOperand(0, AddrMD);
1723 DII->setOperand(2, MetadataAsValue::get(Ctx, DIExpr));
1724 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "SALVAGE: " << *DII <<
'\n'; } } while (false)
;
1725 }
1726 return true;
1727 }
1728 return false;
1729}
1730
1731/// A replacement for a dbg.value expression.
1732using DbgValReplacement = Optional<DIExpression *>;
1733
1734/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
1735/// possibly moving/deleting users to prevent use-before-def. Returns true if
1736/// changes are made.
1737static bool rewriteDebugUsers(
1738 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
1739 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
1740 // Find debug users of From.
1741 SmallVector<DbgVariableIntrinsic *, 1> Users;
1742 findDbgUsers(Users, &From);
1743 if (Users.empty())
1744 return false;
1745
1746 // Prevent use-before-def of To.
1747 bool Changed = false;
1748 SmallPtrSet<DbgVariableIntrinsic *, 1> DeleteOrSalvage;
1749 if (isa<Instruction>(&To)) {
1750 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
1751
1752 for (auto *DII : Users) {
1753 // It's common to see a debug user between From and DomPoint. Move it
1754 // after DomPoint to preserve the variable update without any reordering.
1755 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
1756 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "MOVE: " << *DII <<
'\n'; } } while (false)
;
1757 DII->moveAfter(&DomPoint);
1758 Changed = true;
1759
1760 // Users which otherwise aren't dominated by the replacement value must
1761 // be salvaged or deleted.
1762 } else if (!DT.dominates(&DomPoint, DII)) {
1763 DeleteOrSalvage.insert(DII);
1764 }
1765 }
1766 }
1767
1768 // Update debug users without use-before-def risk.
1769 for (auto *DII : Users) {
1770 if (DeleteOrSalvage.count(DII))
1771 continue;
1772
1773 LLVMContext &Ctx = DII->getContext();
1774 DbgValReplacement DVR = RewriteExpr(*DII);
1775 if (!DVR)
1776 continue;
1777
1778 DII->setOperand(0, wrapValueInMetadata(Ctx, &To));
1779 DII->setOperand(2, MetadataAsValue::get(Ctx, *DVR));
1780 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "REWRITE: " << *DII <<
'\n'; } } while (false)
;
1781 Changed = true;
1782 }
1783
1784 if (!DeleteOrSalvage.empty()) {
1785 // Try to salvage the remaining debug users.
1786 Changed |= salvageDebugInfo(From);
1787
1788 // Delete the debug users which weren't salvaged.
1789 for (auto *DII : DeleteOrSalvage) {
1790 if (DII->getVariableLocation() == &From) {
1791 LLVM_DEBUG(dbgs() << "Erased UseBeforeDef: " << *DII << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("local")) { dbgs() << "Erased UseBeforeDef: " <<
*DII << '\n'; } } while (false)
;
1792 DII->eraseFromParent();
1793 Changed = true;
1794 }
1795 }
1796 }
1797
1798 return Changed;
1799}
1800
1801/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
1802/// losslessly preserve the bits and semantics of the value. This predicate is
1803/// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
1804///
1805/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
1806/// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
1807/// and also does not allow lossless pointer <-> integer conversions.
1808static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
1809 Type *ToTy) {
1810 // Trivially compatible types.
1811 if (FromTy == ToTy)
1812 return true;
1813
1814 // Handle compatible pointer <-> integer conversions.
1815 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
1816 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
1817 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
1818 !DL.isNonIntegralPointerType(ToTy);
1819 return SameSize && LosslessConversion;
1820 }
1821
1822 // TODO: This is not exhaustive.
1823 return false;
1824}
1825
1826bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
1827 Instruction &DomPoint, DominatorTree &DT) {
1828 // Exit early if From has no debug users.
1829 if (!From.isUsedByMetadata())
1830 return false;
1831
1832 assert(&From != &To && "Can't replace something with itself")((&From != &To && "Can't replace something with itself"
) ? static_cast<void> (0) : __assert_fail ("&From != &To && \"Can't replace something with itself\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1832, __PRETTY_FUNCTION__))
;
1833
1834 Type *FromTy = From.getType();
1835 Type *ToTy = To.getType();
1836
1837 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
1838 return DII.getExpression();
1839 };
1840
1841 // Handle no-op conversions.
1842 Module &M = *From.getModule();
1843 const DataLayout &DL = M.getDataLayout();
1844 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
1845 return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1846
1847 // Handle integer-to-integer widening and narrowing.
1848 // FIXME: Use DW_OP_convert when it's available everywhere.
1849 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
1850 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
1851 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
1852 assert(FromBits != ToBits && "Unexpected no-op conversion")((FromBits != ToBits && "Unexpected no-op conversion"
) ? static_cast<void> (0) : __assert_fail ("FromBits != ToBits && \"Unexpected no-op conversion\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 1852, __PRETTY_FUNCTION__))
;
1853
1854 // When the width of the result grows, assume that a debugger will only
1855 // access the low `FromBits` bits when inspecting the source variable.
1856 if (FromBits < ToBits)
1857 return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1858
1859 // The width of the result has shrunk. Use sign/zero extension to describe
1860 // the source variable's high bits.
1861 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
1862 DILocalVariable *Var = DII.getVariable();
1863
1864 // Without knowing signedness, sign/zero extension isn't possible.
1865 auto Signedness = Var->getSignedness();
1866 if (!Signedness)
1867 return None;
1868
1869 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
1870
1871 if (!Signed) {
1872 // In the unsigned case, assume that a debugger will initialize the
1873 // high bits to 0 and do a no-op conversion.
1874 return Identity(DII);
1875 } else {
1876 // In the signed case, the high bits are given by sign extension, i.e:
1877 // (To >> (ToBits - 1)) * ((2 ^ FromBits) - 1)
1878 // Calculate the high bits and OR them together with the low bits.
1879 SmallVector<uint64_t, 8> Ops({dwarf::DW_OP_dup, dwarf::DW_OP_constu,
1880 (ToBits - 1), dwarf::DW_OP_shr,
1881 dwarf::DW_OP_lit0, dwarf::DW_OP_not,
1882 dwarf::DW_OP_mul, dwarf::DW_OP_or});
1883 return DIExpression::appendToStack(DII.getExpression(), Ops);
1884 }
1885 };
1886 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
1887 }
1888
1889 // TODO: Floating-point conversions, vectors.
1890 return false;
1891}
1892
1893unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
1894 unsigned NumDeadInst = 0;
1895 // Delete the instructions backwards, as it has a reduced likelihood of
1896 // having to update as many def-use and use-def chains.
1897 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
1898 while (EndInst != &BB->front()) {
1899 // Delete the next to last instruction.
1900 Instruction *Inst = &*--EndInst->getIterator();
1901 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
1902 Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
1903 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
1904 EndInst = Inst;
1905 continue;
1906 }
1907 if (!isa<DbgInfoIntrinsic>(Inst))
1908 ++NumDeadInst;
1909 Inst->eraseFromParent();
1910 }
1911 return NumDeadInst;
1912}
1913
1914unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
1915 bool PreserveLCSSA, DomTreeUpdater *DTU) {
1916 BasicBlock *BB = I->getParent();
1917 std::vector <DominatorTree::UpdateType> Updates;
1918
1919 // Loop over all of the successors, removing BB's entry from any PHI
1920 // nodes.
1921 if (DTU)
1922 Updates.reserve(BB->getTerminator()->getNumSuccessors());
1923 for (BasicBlock *Successor : successors(BB)) {
1924 Successor->removePredecessor(BB, PreserveLCSSA);
1925 if (DTU)
1926 Updates.push_back({DominatorTree::Delete, BB, Successor});
1927 }
1928 // Insert a call to llvm.trap right before this. This turns the undefined
1929 // behavior into a hard fail instead of falling through into random code.
1930 if (UseLLVMTrap) {
1931 Function *TrapFn =
1932 Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
1933 CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
1934 CallTrap->setDebugLoc(I->getDebugLoc());
1935 }
1936 auto *UI = new UnreachableInst(I->getContext(), I);
1937 UI->setDebugLoc(I->getDebugLoc());
1938
1939 // All instructions after this are dead.
1940 unsigned NumInstrsRemoved = 0;
1941 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
1942 while (BBI != BBE) {
1943 if (!BBI->use_empty())
1944 BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
1945 BB->getInstList().erase(BBI++);
1946 ++NumInstrsRemoved;
1947 }
1948 if (DTU)
1949 DTU->applyUpdates(Updates, /*ForceRemoveDuplicates*/ true);
1950 return NumInstrsRemoved;
1951}
1952
1953/// changeToCall - Convert the specified invoke into a normal call.
1954static void changeToCall(InvokeInst *II, DomTreeUpdater *DTU = nullptr) {
1955 SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
1956 SmallVector<OperandBundleDef, 1> OpBundles;
1957 II->getOperandBundlesAsDefs(OpBundles);
1958 CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
1959 "", II);
1960 NewCall->takeName(II);
1961 NewCall->setCallingConv(II->getCallingConv());
1962 NewCall->setAttributes(II->getAttributes());
1963 NewCall->setDebugLoc(II->getDebugLoc());
1964 II->replaceAllUsesWith(NewCall);
1965
1966 // Follow the call by a branch to the normal destination.
1967 BasicBlock *NormalDestBB = II->getNormalDest();
1968 BranchInst::Create(NormalDestBB, II);
1969
1970 // Update PHI nodes in the unwind destination
1971 BasicBlock *BB = II->getParent();
1972 BasicBlock *UnwindDestBB = II->getUnwindDest();
1973 UnwindDestBB->removePredecessor(BB);
1974 II->eraseFromParent();
1975 if (DTU)
1976 DTU->deleteEdgeRelaxed(BB, UnwindDestBB);
1977}
1978
/// Convert the call \p CI into an invoke that unwinds to \p UnwindEdge.
/// CI's block is split immediately before CI; the invoke's normal destination
/// is the split-off remainder, which is returned to the caller.
BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
                                                   BasicBlock *UnwindEdge) {
  BasicBlock *BB = CI->getParent();

  // Convert this function call into an invoke instruction. First, split the
  // basic block.
  BasicBlock *Split =
      BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

  // Delete the unconditional branch inserted by splitBasicBlock; the invoke
  // created below becomes BB's terminator instead.
  BB->getInstList().pop_back();

  // Create the new invoke instruction.
  SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
  SmallVector<OperandBundleDef, 1> OpBundles;

  CI->getOperandBundlesAsDefs(OpBundles);

  // Note: we're round tripping operand bundles through memory here, and that
  // can potentially be avoided with a cleverer API design that we do not have
  // as of this time.

  InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge,
                                      InvokeArgs, OpBundles, CI->getName(), BB);
  II->setDebugLoc(CI->getDebugLoc());
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());

  // Make sure that anything using the call now uses the invoke! This also
  // updates the CallGraph if present, because it uses a WeakTrackingVH.
  CI->replaceAllUsesWith(II);

  // Delete the original call (it is the first instruction of the split block).
  Split->getInstList().pop_front();
  return Split;
}
2015
/// Walk the CFG from the entry block, inserting every visited block into
/// \p Reachable. While walking, canonicalize obviously-dead code:
///  - llvm.assume(false/undef), calls/stores through null or undef, and code
///    after no-return calls become 'unreachable';
///  - invokes of nounwind callees are simplified into plain calls;
///  - duplicate catchswitch handlers are removed;
///  - terminators are constant-folded.
/// Returns true if any IR was changed. \p DTU, if non-null, is informed of
/// every CFG edge deleted here.
static bool markAliveBlocks(Function &F,
                            SmallPtrSetImpl<BasicBlock *> &Reachable,
                            DomTreeUpdater *DTU = nullptr) {
  SmallVector<BasicBlock*, 128> Worklist;
  BasicBlock *BB = &F.front();
  Worklist.push_back(BB);
  Reachable.insert(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts. The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (Instruction &I : *BB) {
      if (auto *CI = dyn_cast<CallInst>(&I)) {
        Value *Callee = CI->getCalledValue();
        // Handle intrinsic calls.
        if (Function *F = dyn_cast<Function>(Callee)) {
          auto IntrinsicID = F->getIntrinsicID();
          // Assumptions that are known to be false are equivalent to
          // unreachable. Also, if the condition is undefined, then we make the
          // choice most beneficial to the optimizer, and choose that to also be
          // unreachable.
          if (IntrinsicID == Intrinsic::assume) {
            if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
              // Don't insert a call to llvm.trap right before the unreachable.
              changeToUnreachable(CI, false, false, DTU);
              Changed = true;
              break;   // Rest of the block is now dead; stop scanning it.
            }
          } else if (IntrinsicID == Intrinsic::experimental_guard) {
            // A call to the guard intrinsic bails out of the current
            // compilation unit if the predicate passed to it is false. If the
            // predicate is a constant false, then we know the guard will bail
            // out of the current compile unconditionally, so all code following
            // it is dead.
            //
            // Note: unlike in llvm.assume, it is not "obviously profitable" for
            // guards to treat `undef` as `false` since a guard on `undef` can
            // still be useful for widening.
            if (match(CI->getArgOperand(0), m_Zero()))
              if (!isa<UnreachableInst>(CI->getNextNode())) {
                changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false,
                                    false, DTU);
                Changed = true;
                break;
              }
          }
        } else if ((isa<ConstantPointerNull>(Callee) &&
                    !NullPointerIsDefined(CI->getFunction())) ||
                   isa<UndefValue>(Callee)) {
          // Calling through null (in an address space where null is not a
          // valid pointer) or undef is immediate UB.
          changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU);
          Changed = true;
          break;
        }
        if (CI->doesNotReturn()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it. Make sure there isn't *already* one there
          // though.
          if (!isa<UnreachableInst>(CI->getNextNode())) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(CI->getNextNode(), false, false, DTU);
            Changed = true;
          }
          break;
        }
      } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
        // Store to undef and store to null are undefined and used to signal
        // that they should be changed to unreachable by passes that can't
        // modify the CFG.

        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(1);

        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             !NullPointerIsDefined(SI->getFunction(),
                                   SI->getPointerAddressSpace()))) {
          changeToUnreachable(SI, true, false, DTU);
          Changed = true;
          break;
        }
      }
    }

    Instruction *Terminator = BB->getTerminator();
    if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
      // Turn invokes that call 'nounwind' functions into ordinary calls.
      Value *Callee = II->getCalledValue();
      if ((isa<ConstantPointerNull>(Callee) &&
           !NullPointerIsDefined(BB->getParent())) ||
          isa<UndefValue>(Callee)) {
        changeToUnreachable(II, true, false, DTU);
        Changed = true;
      } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
        if (II->use_empty() && II->onlyReadsMemory()) {
          // The invoke is dead apart from control flow: drop it and
          // jump to the normal destination branch.
          BasicBlock *NormalDestBB = II->getNormalDest();
          BasicBlock *UnwindDestBB = II->getUnwindDest();
          BranchInst::Create(NormalDestBB, II);
          UnwindDestBB->removePredecessor(II->getParent());
          II->eraseFromParent();
          if (DTU)
            DTU->deleteEdgeRelaxed(BB, UnwindDestBB);
        } else
          changeToCall(II, DTU);
        Changed = true;
      }
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
      // Remove catchpads which cannot be reached.
      struct CatchPadDenseMapInfo {
        static CatchPadInst *getEmptyKey() {
          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
        }

        static CatchPadInst *getTombstoneKey() {
          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
        }

        static unsigned getHashValue(CatchPadInst *CatchPad) {
          return static_cast<unsigned>(hash_combine_range(
              CatchPad->value_op_begin(), CatchPad->value_op_end()));
        }

        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
              RHS == getEmptyKey() || RHS == getTombstoneKey())
            return LHS == RHS;
          // Structural equality: handlers with identical catchpads are
          // redundant.
          return LHS->isIdenticalTo(RHS);
        }
      };

      // Set of unique CatchPads.
      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
          HandlerSet;
      detail::DenseSetEmpty Empty;
      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
                                             E = CatchSwitch->handler_end();
           I != E; ++I) {
        BasicBlock *HandlerBB = *I;
        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
        if (!HandlerSet.insert({CatchPad, Empty}).second) {
          // Duplicate handler: remove it and step the iterators back to
          // compensate for the erasure.
          CatchSwitch->removeHandler(I);
          --I;
          --E;
          Changed = true;
        }
      }
    }

    Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
    for (BasicBlock *Successor : successors(BB))
      if (Reachable.insert(Successor).second)
        Worklist.push_back(Successor);
  } while (!Worklist.empty());
  return Changed;
}
2177
/// Replace \p BB's unwind-capable terminator (invoke, cleanupret, or
/// catchswitch) with an equivalent terminator that has no unwind successor.
/// BB is removed as a predecessor of the old unwind destination, and \p DTU
/// (if non-null) is told about the deleted edge.
void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
  Instruction *TI = BB->getTerminator();

  // An invoke simply becomes a call to the same callee.
  if (auto *II = dyn_cast<InvokeInst>(TI)) {
    changeToCall(II, DTU);
    return;
  }

  Instruction *NewTI;
  BasicBlock *UnwindDest;

  if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
    // cleanupret to a block becomes cleanupret that unwinds to caller
    // (nullptr destination).
    NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
    UnwindDest = CRI->getUnwindDest();
  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
    // Rebuild the catchswitch without an unwind destination, preserving all
    // of its handlers.
    auto *NewCatchSwitch = CatchSwitchInst::Create(
        CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
        CatchSwitch->getName(), CatchSwitch);
    for (BasicBlock *PadBB : CatchSwitch->handlers())
      NewCatchSwitch->addHandler(PadBB);

    NewTI = NewCatchSwitch;
    UnwindDest = CatchSwitch->getUnwindDest();
  } else {
    llvm_unreachable("Could not find unwind successor");
  }

  NewTI->takeName(TI);
  NewTI->setDebugLoc(TI->getDebugLoc());
  UnwindDest->removePredecessor(BB);
  TI->replaceAllUsesWith(NewTI);
  TI->eraseFromParent();
  if (DTU)
    DTU->deleteEdgeRelaxed(BB, UnwindDest);
}
2213
/// removeUnreachableBlocks - Remove blocks that are not reachable, even
/// if they are in a dead cycle. Return true if a change was made, false
/// otherwise. If `LVI` is passed, this function preserves LazyValueInfo
/// after modifying the CFG.
bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI,
                                   DomTreeUpdater *DTU,
                                   MemorySSAUpdater *MSSAU) {
  SmallPtrSet<BasicBlock*, 16> Reachable;
  // markAliveBlocks also canonicalizes dead code, so the IR may change even
  // when every block turns out to be reachable.
  bool Changed = markAliveBlocks(F, Reachable, DTU);

  // If there are unreachable blocks in the CFG...
  if (Reachable.size() == F.size())
    return Changed;

  assert(Reachable.size() < F.size());
  NumRemoved += F.size()-Reachable.size();

  // Collect the dead blocks. The entry block (F.begin()) is always reachable
  // and is skipped.
  SmallPtrSet<BasicBlock *, 16> DeadBlockSet;
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ++I) {
    auto *BB = &*I;
    if (Reachable.count(BB))
      continue;
    DeadBlockSet.insert(BB);
  }

  if (MSSAU)
    MSSAU->removeBlocks(DeadBlockSet);

  // Loop over all of the basic blocks that are not reachable, dropping all of
  // their internal references. Update DTU and LVI if available.
  std::vector<DominatorTree::UpdateType> Updates;
  for (auto *BB : DeadBlockSet) {
    for (BasicBlock *Successor : successors(BB)) {
      // Only live successors need their PHI nodes adjusted; dead ones are
      // being deleted anyway.
      if (!DeadBlockSet.count(Successor))
        Successor->removePredecessor(BB);
      if (DTU)
        Updates.push_back({DominatorTree::Delete, BB, Successor});
    }
    if (LVI)
      LVI->eraseBlock(BB);
    BB->dropAllReferences();
  }
  for (Function::iterator I = ++F.begin(); I != F.end();) {
    auto *BB = &*I;
    if (Reachable.count(BB)) {
      ++I;
      continue;
    }
    if (DTU) {
      // With a DTU the block itself must outlive the pending dominator-tree
      // updates, so only empty it here; DTU->deleteBB below does the actual
      // (deferred) deletion.
      // Remove the terminator of BB to clear the successor list of BB.
      if (BB->getTerminator())
        BB->getInstList().pop_back();
      new UnreachableInst(BB->getContext(), BB);
      assert(succ_empty(BB) && "The successor list of BB isn't empty before "
             "applying corresponding DTU updates.");
      ++I;
    } else {
      I = F.getBasicBlockList().erase(I);
    }
  }

  if (DTU) {
    DTU->applyUpdates(Updates, /*ForceRemoveDuplicates*/ true);
    bool Deleted = false;
    for (auto *BB : DeadBlockSet) {
      // Blocks whose deletion is still pending were already counted; undo the
      // statistic for them.
      if (DTU->isBBPendingDeletion(BB))
        --NumRemoved;
      else
        Deleted = true;
      DTU->deleteBB(BB);
    }
    if (!Deleted)
      return false;
  }
  return true;
}
2290
/// Merge the metadata of \p J into \p K when the two instructions are being
/// combined into K. Only the kinds listed in \p KnownIDs survive on K; each
/// surviving kind is merged with J's node of the same kind using the most
/// conservative rule for that kind. \p DoesKMove indicates K will be moved
/// (hoisted/sunk), which forces conservative treatment of position-sensitive
/// metadata such as !range and !nonnull.
void llvm::combineMetadata(Instruction *K, const Instruction *J,
                           ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  K->dropUnknownNonDebugMetadata(KnownIDs);
  K->getAllMetadataOtherThanDebugLoc(Metadata);
  for (const auto &MD : Metadata) {
    unsigned Kind = MD.first;
    MDNode *JMD = J->getMetadata(Kind); // null if J lacks this kind
    MDNode *KMD = MD.second;

    switch (Kind) {
    default:
      K->setMetadata(Kind, nullptr); // Remove unknown metadata
      break;
    case LLVMContext::MD_dbg:
      llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
    case LLVMContext::MD_tbaa:
      K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
      break;
    case LLVMContext::MD_alias_scope:
      K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
      break;
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_mem_parallel_loop_access:
      K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
      break;
    case LLVMContext::MD_range:

      // If K does move, use most generic range. Otherwise keep the range of
      // K.
      if (DoesKMove)
        // FIXME: If K does move, we should drop the range info and nonnull.
        // Currently this function is used with DoesKMove in passes
        // doing hoisting/sinking and the current behavior of using the
        // most generic range is correct in those cases.
        K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
      break;
    case LLVMContext::MD_fpmath:
      K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
      break;
    case LLVMContext::MD_invariant_load:
      // Only set the !invariant.load if it is present in both instructions.
      // (JMD is null when J lacks it, which clears the kind on K.)
      K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_nonnull:
      // If K does move, keep nonull if it is present in both instructions.
      if (DoesKMove)
        K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_invariant_group:
      // Preserve !invariant.group in K.
      break;
    case LLVMContext::MD_align:
      K->setMetadata(Kind,
        MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
      break;
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      K->setMetadata(Kind,
        MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
      break;
    }
  }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J - even when they are different.
  // Also make sure that K is load or store - f.e. combining bitcast with load
  // could produce bitcast with invariant.group metadata, which is invalid.
  // FIXME: we should try to preserve both invariant.group md if they are
  // different, but right now instruction can only have one invariant.group.
  if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(K) || isa<StoreInst>(K))
      K->setMetadata(LLVMContext::MD_invariant_group, JMD);
}
2364
2365void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2366 bool KDominatesJ) {
2367 unsigned KnownIDs[] = {
2368 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
2369 LLVMContext::MD_noalias, LLVMContext::MD_range,
2370 LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
2371 LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2372 LLVMContext::MD_dereferenceable,
2373 LLVMContext::MD_dereferenceable_or_null};
2374 combineMetadata(K, J, KnownIDs, KDominatesJ);
2375}
2376
2377void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
2378 auto *ReplInst = dyn_cast<Instruction>(Repl);
2379 if (!ReplInst)
2380 return;
2381
2382 // Patch the replacement so that it is not more restrictive than the value
2383 // being replaced.
2384 // Note that if 'I' is a load being replaced by some operation,
2385 // for example, by an arithmetic operation, then andIRFlags()
2386 // would just erase all math flags from the original arithmetic
2387 // operation, which is clearly not wanted and not needed.
2388 if (!isa<LoadInst>(I))
2389 ReplInst->andIRFlags(I);
2390
2391 // FIXME: If both the original and replacement value are part of the
2392 // same control-flow region (meaning that the execution of one
2393 // guarantees the execution of the other), then we can combine the
2394 // noalias scopes here and do better than the general conservative
2395 // answer used in combineMetadata().
2396
2397 // In general, GVN unifies expressions over different control-flow
2398 // regions, and so we need a conservative combination of the noalias
2399 // scopes.
2400 static const unsigned KnownIDs[] = {
2401 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
2402 LLVMContext::MD_noalias, LLVMContext::MD_range,
2403 LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load,
2404 LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull};
2405 combineMetadata(ReplInst, I, KnownIDs, false);
2406}
2407
/// Replace every use of \p From with \p To for which \p Dominates(Root, Use)
/// holds, and return the number of uses replaced.
template <typename RootType, typename DominatesFn>
static unsigned replaceDominatedUsesWith(Value *From, Value *To,
                                         const RootType &Root,
                                         const DominatesFn &Dominates) {
  assert(From->getType() == To->getType());

  unsigned Count = 0;
  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE;) {
    // Advance before mutating: U.set() unlinks U from From's use list, which
    // would invalidate an iterator still pointing at it.
    Use &U = *UI++;
    if (!Dominates(Root, U))
      continue;
    U.set(To);
    LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
                      << "' as " << *To << " in " << *U << "\n");
    ++Count;
  }
  return Count;
}
2427
2428unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
2429 assert(From->getType() == To->getType())((From->getType() == To->getType()) ? static_cast<void
> (0) : __assert_fail ("From->getType() == To->getType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Transforms/Utils/Local.cpp"
, 2429, __PRETTY_FUNCTION__))
;
2430 auto *BB = From->getParent();
2431 unsigned Count = 0;
2432
2433 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2434 UI != UE;) {
2435 Use &U = *UI++;
2436 auto *I = cast<Instruction>(U.getUser());
2437 if (I->getParent() == BB)
2438 continue;
2439 U.set(To);
2440 ++Count;
2441 }
2442 return Count;
2443}
2444
2445unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2446 DominatorTree &DT,
2447 const BasicBlockEdge &Root) {
2448 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2449 return DT.dominates(Root, U);
2450 };
2451 return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2452}
2453
2454unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2455 DominatorTree &DT,
2456 const BasicBlock *BB) {
2457 auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) {
2458 auto *I = cast<Instruction>(U.getUser())->getParent();
2459 return DT.properlyDominates(BB, I);
2460 };
2461 return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates);
2462}
2463
2464bool llvm::callsGCLeafFunction(ImmutableCallSite CS,
2465 const TargetLibraryInfo &TLI) {
2466 // Check if the function is specifically marked as a gc leaf function.
2467 if (CS.hasFnAttr("gc-leaf-function"))
2468 return true;
2469 if (const Function *F = CS.getCalledFunction()) {
2470 if (F->hasFnAttribute("gc-leaf-function"))
2471 return true;
2472
2473 if (auto IID = F->getIntrinsicID())
2474 // Most LLVM intrinsics do not take safepoints.
2475 return IID != Intrinsic::experimental_gc_statepoint &&
2476 IID != Intrinsic::experimental_deoptimize;
2477 }
2478
2479 // Lib calls can be materialized by some passes, and won't be
2480 // marked as 'gc-leaf-function.' All available Libcalls are
2481 // GC-leaf.
2482 LibFunc LF;
2483 if (TLI.getLibFunc(CS, LF)) {
2484 return TLI.has(LF);
2485 }
2486
2487 return false;
2488}
2489
/// Copy a !nonnull metadata node \p N from load \p OldLI onto the rewritten
/// load \p NewLI, translating it to an equivalent !range when the load has
/// been converted to an integer load.
void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
                               LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();

  // This only directly applies if the new type is also a pointer.
  if (NewTy->isPointerTy()) {
    NewLI.setMetadata(LLVMContext::MD_nonnull, N);
    return;
  }

  // The only other translation we can do is to integral loads with !range
  // metadata.
  if (!NewTy->isIntegerTy())
    return;

  // "Pointer is non-null" becomes the wrapping integer range
  // [null+1, null), i.e. every value except the integer image of null.
  MDBuilder MDB(NewLI.getContext());
  const Value *Ptr = OldLI.getPointerOperand();
  auto *ITy = cast<IntegerType>(NewTy);
  auto *NullInt = ConstantExpr::getPtrToInt(
      ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
  auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
  NewLI.setMetadata(LLVMContext::MD_range,
                    MDB.createRange(NonNullInt, NullInt));
}
2514
/// Translate !range metadata \p N from load \p OldLI onto the rewritten load
/// \p NewLI. Only the integer->pointer direction is handled: if the range
/// provably excludes zero, the new pointer load gets !nonnull.
void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
                             MDNode *N, LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();

  // Give up unless it is converted to a pointer where there is a single very
  // valuable mapping we can do reliably.
  // FIXME: It would be nice to propagate this in more ways, but the type
  // conversions make it hard.
  if (!NewTy->isPointerTy())
    return;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(NewTy);
  if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
    // Zero is outside the old range, so the pointer cannot be null.
    MDNode *NN = MDNode::get(OldLI.getContext(), None);
    NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
  }
}
2532
2533void llvm::dropDebugUsers(Instruction &I) {
2534 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2535 findDbgUsers(DbgUsers, &I);
2536 for (auto *DII : DbgUsers)
2537 DII->eraseFromParent();
2538}
2539
/// Move every instruction of \p BB except its terminator into \p DomBlock,
/// immediately before \p InsertPt, stripping debug intrinsics and rewriting
/// debug locations so the hoisted code does not carry misleading line info.
void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
                                    BasicBlock *BB) {
  // Since we are moving the instructions out of its basic block, we do not
  // retain their original debug locations (DILocations) and debug intrinsic
  // instructions (dbg.values).
  //
  // Doing so would degrade the debugging experience and adversely affect the
  // accuracy of profiling information.
  //
  // Currently, when hoisting the instructions, we take the following actions:
  // - Remove their dbg.values.
  // - Set their debug locations to the values from the insertion point.
  //
  // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
  // need to be deleted, is because there will not be any instructions with a
  // DILocation in either branch left after performing the transformation. We
  // can only insert a dbg.value after the two branches are joined again.
  //
  // See PR38762, PR39243 for more details.
  //
  // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
  // encode predicated DIExpressions that yield different results on different
  // code paths.
  for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
    Instruction *I = &*II;
    I->dropUnknownNonDebugMetadata();
    if (I->isUsedByMetadata())
      dropDebugUsers(*I);
    if (isa<DbgVariableIntrinsic>(I)) {
      // Remove DbgInfo Intrinsics. eraseFromParent returns the iterator to
      // the next instruction, so do not also increment II.
      II = I->eraseFromParent();
      continue;
    }
    I->setDebugLoc(InsertPt->getDebugLoc());
    ++II;
  }
  // Splice everything up to (but excluding) the terminator into DomBlock.
  DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
                                 BB->begin(),
                                 BB->getTerminator()->getIterator());
}
2580
namespace {

/// A potential constituent of a bitreverse or bswap expression. See
/// collectBitParts for a fuller explanation.
struct BitPart {
  BitPart(Value *P, unsigned BW) : Provider(P) {
    Provenance.resize(BW);
  }

  /// The Value that this is a bitreverse/bswap of.
  Value *Provider;

  /// The "provenance" of each bit. Provenance[A] = B means that bit A
  /// in Provider becomes bit B in the result of this expression.
  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.

  /// Sentinel meaning "this result bit is known zero / has no source bit".
  enum { Unset = -1 };
};

} // end anonymous namespace
2601
/// Analyze the specified subexpression and see if it is capable of providing
/// pieces of a bswap or bitreverse. The subexpression provides a potential
/// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
/// the output of the expression came from a corresponding bit in some other
/// value. This function is recursive, and the end result is a mapping of
/// bitnumber to bitnumber. It is the caller's responsibility to validate that
/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
///
/// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
/// that the expression deposits the low byte of %X into the high byte of the
/// result and that all other bits are zero. This expression is accepted and a
/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
/// [0-7].
///
/// To avoid revisiting values, the BitPart results are memoized into the
/// provided map. To avoid unnecessary copying of BitParts, BitParts are
/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
/// store BitParts objects, not pointers. As we need the concept of a nullptr
/// BitParts (Value has been analyzed and the analysis failed), we an Optional
/// type instead to provide the same functionality.
///
/// Because we pass around references into \c BPS, we must use a container that
/// does not invalidate internal references (std::map instead of DenseMap).
static const Optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, Optional<BitPart>> &BPS) {
  // Memoized? Return the cached analysis (possibly None = failure).
  auto I = BPS.find(V);
  if (I != BPS.end())
    return I->second;

  // Insert the (initially failed) entry first so recursive cycles terminate.
  auto &Result = BPS[V] = None;
  auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    if (I->getOpcode() == Instruction::Or) {
      auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
                                MatchBitReversals, BPS);
      auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
                                MatchBitReversals, BPS);
      if (!A || !B)
        return Result;

      // Try and merge the two together.
      if (!A->Provider || A->Provider != B->Provider)
        return Result;

      Result = BitPart(A->Provider, BitWidth);
      for (unsigned i = 0; i < A->Provenance.size(); ++i) {
        // A bit may come from at most one side; conflicting sources fail.
        if (A->Provenance[i] != BitPart::Unset &&
            B->Provenance[i] != BitPart::Unset &&
            A->Provenance[i] != B->Provenance[i])
          return Result = None;

        if (A->Provenance[i] == BitPart::Unset)
          Result->Provenance[i] = B->Provenance[i];
        else
          Result->Provenance[i] = A->Provenance[i];
      }

      return Result;
    }

    // If this is a logical shift by a constant, recurse then shift the result.
    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
      unsigned BitShift =
          cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is defined.
      if (BitShift > BitWidth)
        return Result;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;
      Result = Res;

      // Perform the "shift" on BitProvenance.
      auto &P = Result->Provenance;
      if (I->getOpcode() == Instruction::Shl) {
        // Shift left: drop the top BitShift entries, pad the bottom.
        P.erase(std::prev(P.end(), BitShift), P.end());
        P.insert(P.begin(), BitShift, BitPart::Unset);
      } else {
        // Logical shift right: drop the bottom entries, pad the top.
        P.erase(P.begin(), std::next(P.begin(), BitShift));
        P.insert(P.end(), BitShift, BitPart::Unset);
      }

      return Result;
    }

    // If this is a logical 'and' with a mask that clears bits, recurse then
    // unset the appropriate bits.
    if (I->getOpcode() == Instruction::And &&
        isa<ConstantInt>(I->getOperand(1))) {
      APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();

      // Check that the mask allows a multiple of 8 bits for a bswap, for an
      // early exit.
      unsigned NumMaskedBits = AndMask.countPopulation();
      if (!MatchBitReversals && NumMaskedBits % 8 != 0)
        return Result;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;
      Result = Res;

      for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1)
        // If the AndMask is zero for this bit, clear the bit.
        if ((AndMask & Bit) == 0)
          Result->Provenance[i] = BitPart::Unset;
      return Result;
    }

    // If this is a zext instruction zero extend the result.
    if (I->getOpcode() == Instruction::ZExt) {
      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result;

      Result = BitPart(Res->Provider, BitWidth);
      auto NarrowBitWidth =
          cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth();
      for (unsigned i = 0; i < NarrowBitWidth; ++i)
        Result->Provenance[i] = Res->Provenance[i];
      for (unsigned i = NarrowBitWidth; i < BitWidth; ++i)
        Result->Provenance[i] = BitPart::Unset;
      return Result;
    }
  }

  // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
  // the input value to the bswap/bitreverse.
  Result = BitPart(V, BitWidth);
  for (unsigned i = 0; i < BitWidth; ++i)
    Result->Provenance[i] = i;
  return Result;
}
2743
/// Return true if routing bit \p From to bit \p To is consistent with a byte
/// swap of a \p BitWidth-bit value: the bit keeps its position within its
/// byte while the containing byte is mirrored.
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // The position inside the byte must be preserved.
  if ((From & 7) != (To & 7))
    return false;
  // Compare byte indices: the source byte must be the mirror of the
  // destination byte.
  unsigned FromByte = From / 8;
  unsigned ToByte = To / 8;
  unsigned NumBytes = BitWidth / 8;
  return FromByte == NumBytes - ToByte - 1;
}
2754
/// Return true if routing bit \p From to bit \p To matches a full bit
/// reversal of a \p BitWidth-bit value (mirror-image positions).
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  return From + To + 1 == BitWidth;
}
2759
/// Try to match the or-tree rooted at \p I as a bswap or bitreverse idiom.
/// On success, the replacement instructions (the intrinsic call, possibly
/// wrapped in a trunc of the provider and a zext back to I's type) are
/// inserted before \p I and appended to \p InsertedInsts, and true is
/// returned. \p I itself is left in place.
bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  // The root of both idioms is an 'or' that merges the pieces.
  if (Operator::getOpcode(I) != Instruction::Or)
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  IntegerType *ITy = dyn_cast<IntegerType>(I->getType());
  if (!ITy || ITy->getBitWidth() > 128)
    return false;   // Can't do vectors or integers > 128 bits.
  unsigned BW = ITy->getBitWidth();

  // If the only user is a trunc, we only need to match the narrower width.
  unsigned DemandedBW = BW;
  IntegerType *DemandedTy = ITy;
  if (I->hasOneUse()) {
    if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) {
      DemandedTy = cast<IntegerType>(Trunc->getType());
      DemandedBW = DemandedTy->getBitWidth();
    }
  }

  // Try to find all the pieces corresponding to the bswap.
  std::map<Value *, Optional<BitPart>> BPS;
  auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS);
  if (!Res)
    return false;
  auto &BitProvenance = Res->Provenance;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true;
  for (unsigned i = 0; i < DemandedBW; ++i) {
    OKForBSwap &=
        bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW);
    OKForBitReverse &=
        bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW);
  }

  Intrinsic::ID Intrin;
  if (OKForBSwap && MatchBSwaps)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse && MatchBitReversals)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  if (ITy != DemandedTy) {
    // Perform the intrinsic at the demanded (narrower) width, then widen.
    Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
    Value *Provider = Res->Provider;
    IntegerType *ProviderTy = cast<IntegerType>(Provider->getType());
    // We may need to truncate the provider.
    if (DemandedTy != ProviderTy) {
      auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy,
                                     "trunc", I);
      InsertedInsts.push_back(Trunc);
      Provider = Trunc;
    }
    auto *CI = CallInst::Create(F, Provider, "rev", I);
    InsertedInsts.push_back(CI);
    auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I);
    InsertedInsts.push_back(ExtInst);
    return true;
  }

  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy);
  InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I));
  return true;
}
2828
2829// CodeGen has special handling for some string functions that may replace
2830// them with target-specific intrinsics. Since that'd skip our interceptors
2831// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
2832// we mark affected calls as NoBuiltin, which will disable optimization
2833// in CodeGen.
2834void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
2835 CallInst *CI, const TargetLibraryInfo *TLI) {
2836 Function *F = CI->getCalledFunction();
2837 LibFunc Func;
2838 if (F && !F->hasLocalLinkage() && F->hasName() &&
2839 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
2840 !F->doesNotAccessMemory())
2841 CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
2842}
2843
2844bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
2845 // We can't have a PHI with a metadata type.
2846 if (I->getOperand(OpIdx)->getType()->isMetadataTy())
2847 return false;
2848
2849 // Early exit.
2850 if (!isa<Constant>(I->getOperand(OpIdx)))
2851 return true;
2852
2853 switch (I->getOpcode()) {
2854 default:
2855 return true;
2856 case Instruction::Call:
2857 case Instruction::Invoke:
2858 // Can't handle inline asm. Skip it.
2859 if (isa<InlineAsm>(ImmutableCallSite(I).getCalledValue()))
2860 return false;
2861 // Many arithmetic intrinsics have no issue taking a
2862 // variable, however it's hard to distingish these from
2863 // specials such as @llvm.frameaddress that require a constant.
2864 if (isa<IntrinsicInst>(I))
2865 return false;
2866
2867 // Constant bundle operands may need to retain their constant-ness for
2868 // correctness.
2869 if (ImmutableCallSite(I).isBundleOperand(OpIdx))
2870 return false;
2871 return true;
2872 case Instruction::ShuffleVector:
2873 // Shufflevector masks are constant.
2874 return OpIdx != 2;
2875 case Instruction::Switch:
2876 case Instruction::ExtractValue:
2877 // All operands apart from the first are constant.
2878 return OpIdx == 0;
2879 case Instruction::InsertValue:
2880 // All operands apart from the first and the second are constant.
2881 return OpIdx < 2;
2882 case Instruction::Alloca:
2883 // Static allocas (constant size in the entry block) are handled by
2884 // prologue/epilogue insertion so they're free anyway. We definitely don't
2885 // want to make them non-constant.
2886 return !cast<AllocaInst>(I)->isStaticAlloca();
2887 case Instruction::GetElementPtr:
2888 if (OpIdx == 0)
2889 return true;
2890 gep_type_iterator It = gep_type_begin(I);
2891 for (auto E = std::next(It, OpIdx); It != E; ++It)
2892 if (It.isStruct())
2893 return false;
2894 return true;
2895 }
2896}

/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes the class definitions of all of the subclasses of the
11// Instruction class. This is meant to be an easy way to get access to all
12// instruction subclasses.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_IR_INSTRUCTIONS_H
17#define LLVM_IR_INSTRUCTIONS_H
18
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/OperandTraits.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Use.h"
38#include "llvm/IR/User.h"
39#include "llvm/IR/Value.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/ErrorHandling.h"
43#include <cassert>
44#include <cstddef>
45#include <cstdint>
46#include <iterator>
47
48namespace llvm {
49
50class APInt;
51class ConstantInt;
52class DataLayout;
53class LLVMContext;
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59/// an instruction to allocate memory on the stack
60class AllocaInst : public UnaryInstruction {
61 Type *AllocatedType;
62
63protected:
64 // Note: Instruction needs to be a friend here to call cloneImpl.
65 friend class Instruction;
66
67 AllocaInst *cloneImpl() const;
68
69public:
70 explicit AllocaInst(Type *Ty, unsigned AddrSpace,
71 Value *ArraySize = nullptr,
72 const Twine &Name = "",
73 Instruction *InsertBefore = nullptr);
74 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
75 const Twine &Name, BasicBlock *InsertAtEnd);
76
77 AllocaInst(Type *Ty, unsigned AddrSpace,
78 const Twine &Name, Instruction *InsertBefore = nullptr);
79 AllocaInst(Type *Ty, unsigned AddrSpace,
80 const Twine &Name, BasicBlock *InsertAtEnd);
81
82 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
83 const Twine &Name = "", Instruction *InsertBefore = nullptr);
84 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
85 const Twine &Name, BasicBlock *InsertAtEnd);
86
87 /// Return true if there is an allocation size parameter to the allocation
88 /// instruction that is not 1.
89 bool isArrayAllocation() const;
90
91 /// Get the number of elements allocated. For a simple allocation of a single
92 /// element, this will return a constant 1 value.
93 const Value *getArraySize() const { return getOperand(0); }
94 Value *getArraySize() { return getOperand(0); }
95
96 /// Overload to return most specific pointer type.
97 PointerType *getType() const {
98 return cast<PointerType>(Instruction::getType());
99 }
100
101 /// Get allocation size in bits. Returns None if size can't be determined,
102 /// e.g. in case of a VLA.
103 Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const;
104
105 /// Return the type that is being allocated by the instruction.
106 Type *getAllocatedType() const { return AllocatedType; }
107 /// for use only in special circumstances that need to generically
108 /// transform a whole instruction (eg: IR linking and vectorization).
109 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
110
111 /// Return the alignment of the memory that is being allocated by the
112 /// instruction.
113 unsigned getAlignment() const {
114 return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
115 }
116 void setAlignment(unsigned Align);
117
118 /// Return true if this alloca is in the entry block of the function and is a
119 /// constant size. If so, the code generator will fold it into the
120 /// prolog/epilog code, so it is basically free.
121 bool isStaticAlloca() const;
122
123 /// Return true if this alloca is used as an inalloca argument to a call. Such
124 /// allocas are never considered static even if they are in the entry block.
125 bool isUsedWithInAlloca() const {
126 return getSubclassDataFromInstruction() & 32;
127 }
128
129 /// Specify whether this alloca is used to represent the arguments to a call.
130 void setUsedWithInAlloca(bool V) {
131 setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
132 (V ? 32 : 0));
133 }
134
135 /// Return true if this alloca is used as a swifterror argument to a call.
136 bool isSwiftError() const {
137 return getSubclassDataFromInstruction() & 64;
138 }
139
140 /// Specify whether this alloca is used to represent a swifterror.
141 void setSwiftError(bool V) {
142 setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
143 (V ? 64 : 0));
144 }
145
146 // Methods for support type inquiry through isa, cast, and dyn_cast:
147 static bool classof(const Instruction *I) {
148 return (I->getOpcode() == Instruction::Alloca);
149 }
150 static bool classof(const Value *V) {
151 return isa<Instruction>(V) && classof(cast<Instruction>(V));
152 }
153
154private:
155 // Shadow Instruction::setInstructionSubclassData with a private forwarding
156 // method so that subclasses cannot accidentally use it.
157 void setInstructionSubclassData(unsigned short D) {
158 Instruction::setInstructionSubclassData(D);
159 }
160};
161
162//===----------------------------------------------------------------------===//
163// LoadInst Class
164//===----------------------------------------------------------------------===//
165
166/// An instruction for reading from memory. This uses the SubclassData field in
167/// Value to store whether or not the load is volatile.
168class LoadInst : public UnaryInstruction {
169 void AssertOK();
170
171protected:
172 // Note: Instruction needs to be a friend here to call cloneImpl.
173 friend class Instruction;
174
175 LoadInst *cloneImpl() const;
176
177public:
178 LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
179 LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
180 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
181 Instruction *InsertBefore = nullptr);
182 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
183 Instruction *InsertBefore = nullptr)
184 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
185 NameStr, isVolatile, InsertBefore) {}
186 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
187 BasicBlock *InsertAtEnd);
188 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
189 Instruction *InsertBefore = nullptr)
190 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
191 NameStr, isVolatile, Align, InsertBefore) {}
192 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
193 unsigned Align, Instruction *InsertBefore = nullptr);
194 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
195 unsigned Align, BasicBlock *InsertAtEnd);
196 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
197 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
198 Instruction *InsertBefore = nullptr)
199 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
200 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 unsigned Align, AtomicOrdering Order,
203 SyncScope::ID SSID = SyncScope::System,
204 Instruction *InsertBefore = nullptr);
205 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
206 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
207 BasicBlock *InsertAtEnd);
208 LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
209 LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
210 LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
211 bool isVolatile = false, Instruction *InsertBefore = nullptr);
212 explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
213 bool isVolatile = false,
214 Instruction *InsertBefore = nullptr)
215 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
216 NameStr, isVolatile, InsertBefore) {}
217 LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
218 BasicBlock *InsertAtEnd);
219
220 /// Return true if this is a load from a volatile memory location.
221 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
222
223 /// Specify whether this is a volatile load or not.
224 void setVolatile(bool V) {
225 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
226 (V ? 1 : 0));
227 }
228
229 /// Return the alignment of the access that is being performed.
230 unsigned getAlignment() const {
231 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
232 }
233
234 void setAlignment(unsigned Align);
235
236 /// Returns the ordering constraint of this load instruction.
237 AtomicOrdering getOrdering() const {
238 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
239 }
240
241 /// Sets the ordering constraint of this load instruction. May not be Release
242 /// or AcquireRelease.
243 void setOrdering(AtomicOrdering Ordering) {
244 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
245 ((unsigned)Ordering << 7));
246 }
247
248 /// Returns the synchronization scope ID of this load instruction.
249 SyncScope::ID getSyncScopeID() const {
250 return SSID;
251 }
252
253 /// Sets the synchronization scope ID of this load instruction.
254 void setSyncScopeID(SyncScope::ID SSID) {
255 this->SSID = SSID;
256 }
257
258 /// Sets the ordering constraint and the synchronization scope ID of this load
259 /// instruction.
260 void setAtomic(AtomicOrdering Ordering,
261 SyncScope::ID SSID = SyncScope::System) {
262 setOrdering(Ordering);
263 setSyncScopeID(SSID);
264 }
265
266 bool isSimple() const { return !isAtomic() && !isVolatile(); }
267
268 bool isUnordered() const {
269 return (getOrdering() == AtomicOrdering::NotAtomic ||
270 getOrdering() == AtomicOrdering::Unordered) &&
271 !isVolatile();
272 }
273
274 Value *getPointerOperand() { return getOperand(0); }
275 const Value *getPointerOperand() const { return getOperand(0); }
276 static unsigned getPointerOperandIndex() { return 0U; }
277 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
278
279 /// Returns the address space of the pointer operand.
280 unsigned getPointerAddressSpace() const {
281 return getPointerOperandType()->getPointerAddressSpace();
282 }
283
284 // Methods for support type inquiry through isa, cast, and dyn_cast:
285 static bool classof(const Instruction *I) {
286 return I->getOpcode() == Instruction::Load;
287 }
288 static bool classof(const Value *V) {
289 return isa<Instruction>(V) && classof(cast<Instruction>(V));
290 }
291
292private:
293 // Shadow Instruction::setInstructionSubclassData with a private forwarding
294 // method so that subclasses cannot accidentally use it.
295 void setInstructionSubclassData(unsigned short D) {
296 Instruction::setInstructionSubclassData(D);
297 }
298
299 /// The synchronization scope ID of this load instruction. Not quite enough
300 /// room in SubClassData for everything, so synchronization scope ID gets its
301 /// own field.
302 SyncScope::ID SSID;
303};
304
305//===----------------------------------------------------------------------===//
306// StoreInst Class
307//===----------------------------------------------------------------------===//
308
309/// An instruction for storing to memory.
310class StoreInst : public Instruction {
311 void AssertOK();
312
313protected:
314 // Note: Instruction needs to be a friend here to call cloneImpl.
315 friend class Instruction;
316
317 StoreInst *cloneImpl() const;
318
319public:
320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
323 Instruction *InsertBefore = nullptr);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
326 unsigned Align, Instruction *InsertBefore = nullptr);
327 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
328 unsigned Align, BasicBlock *InsertAtEnd);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
330 unsigned Align, AtomicOrdering Order,
331 SyncScope::ID SSID = SyncScope::System,
332 Instruction *InsertBefore = nullptr);
333 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
334 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
335 BasicBlock *InsertAtEnd);
336
337 // allocate space for exactly two operands
338 void *operator new(size_t s) {
339 return User::operator new(s, 2);
340 }
341
342 /// Return true if this is a store to a volatile memory location.
343 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
344
345 /// Specify whether this is a volatile store or not.
346 void setVolatile(bool V) {
347 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
348 (V ? 1 : 0));
349 }
350
351 /// Transparently provide more efficient getOperand methods.
352 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
353
354 /// Return the alignment of the access that is being performed
355 unsigned getAlignment() const {
356 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
357 }
358
359 void setAlignment(unsigned Align);
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
370 ((unsigned)Ordering << 7));
371 }
372
373 /// Returns the synchronization scope ID of this store instruction.
374 SyncScope::ID getSyncScopeID() const {
375 return SSID;
376 }
377
378 /// Sets the synchronization scope ID of this store instruction.
379 void setSyncScopeID(SyncScope::ID SSID) {
380 this->SSID = SSID;
381 }
382
383 /// Sets the ordering constraint and the synchronization scope ID of this
384 /// store instruction.
385 void setAtomic(AtomicOrdering Ordering,
386 SyncScope::ID SSID = SyncScope::System) {
387 setOrdering(Ordering);
388 setSyncScopeID(SSID);
389 }
390
391 bool isSimple() const { return !isAtomic() && !isVolatile(); }
392
393 bool isUnordered() const {
394 return (getOrdering() == AtomicOrdering::NotAtomic ||
395 getOrdering() == AtomicOrdering::Unordered) &&
396 !isVolatile();
397 }
398
399 Value *getValueOperand() { return getOperand(0); }
400 const Value *getValueOperand() const { return getOperand(0); }
401
402 Value *getPointerOperand() { return getOperand(1); }
403 const Value *getPointerOperand() const { return getOperand(1); }
404 static unsigned getPointerOperandIndex() { return 1U; }
405 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
406
407 /// Returns the address space of the pointer operand.
408 unsigned getPointerAddressSpace() const {
409 return getPointerOperandType()->getPointerAddressSpace();
410 }
411
412 // Methods for support type inquiry through isa, cast, and dyn_cast:
413 static bool classof(const Instruction *I) {
414 return I->getOpcode() == Instruction::Store;
415 }
416 static bool classof(const Value *V) {
417 return isa<Instruction>(V) && classof(cast<Instruction>(V));
418 }
419
420private:
421 // Shadow Instruction::setInstructionSubclassData with a private forwarding
422 // method so that subclasses cannot accidentally use it.
423 void setInstructionSubclassData(unsigned short D) {
424 Instruction::setInstructionSubclassData(D);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { ((i_nocapture < OperandTraits
<StoreInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 437, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<StoreInst>::op_begin(const_cast<StoreInst
*>(this))[i_nocapture].get()); } void StoreInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<StoreInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 437, __PRETTY_FUNCTION__)); OperandTraits<StoreInst>::
op_begin(this)[i_nocapture] = Val_nocapture; } unsigned StoreInst
::getNumOperands() const { return OperandTraits<StoreInst>
::operands(this); } template <int Idx_nocapture> Use &
StoreInst::Op() { return this->OpFrom<Idx_nocapture>
(this); } template <int Idx_nocapture> const Use &StoreInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
446
447protected:
448 // Note: Instruction needs to be a friend here to call cloneImpl.
449 friend class Instruction;
450
451 FenceInst *cloneImpl() const;
452
453public:
454 // Ordering may only be Acquire, Release, AcquireRelease, or
455 // SequentiallyConsistent.
456 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
457 SyncScope::ID SSID = SyncScope::System,
458 Instruction *InsertBefore = nullptr);
459 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
460 BasicBlock *InsertAtEnd);
461
462 // allocate space for exactly zero operands
463 void *operator new(size_t s) {
464 return User::operator new(s, 0);
465 }
466
467 /// Returns the ordering constraint of this fence instruction.
468 AtomicOrdering getOrdering() const {
469 return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
470 }
471
472 /// Sets the ordering constraint of this fence instruction. May only be
473 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
474 void setOrdering(AtomicOrdering Ordering) {
475 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
476 ((unsigned)Ordering << 1));
477 }
478
479 /// Returns the synchronization scope ID of this fence instruction.
480 SyncScope::ID getSyncScopeID() const {
481 return SSID;
482 }
483
484 /// Sets the synchronization scope ID of this fence instruction.
485 void setSyncScopeID(SyncScope::ID SSID) {
486 this->SSID = SSID;
487 }
488
489 // Methods for support type inquiry through isa, cast, and dyn_cast:
490 static bool classof(const Instruction *I) {
491 return I->getOpcode() == Instruction::Fence;
492 }
493 static bool classof(const Value *V) {
494 return isa<Instruction>(V) && classof(cast<Instruction>(V));
495 }
496
497private:
498 // Shadow Instruction::setInstructionSubclassData with a private forwarding
499 // method so that subclasses cannot accidentally use it.
500 void setInstructionSubclassData(unsigned short D) {
501 Instruction::setInstructionSubclassData(D);
502 }
503
504 /// The synchronization scope ID of this fence instruction. Not quite enough
505 /// room in SubClassData for everything, so synchronization scope ID gets its
506 /// own field.
507 SyncScope::ID SSID;
508};
509
510//===----------------------------------------------------------------------===//
511// AtomicCmpXchgInst Class
512//===----------------------------------------------------------------------===//
513
514/// an instruction that atomically checks whether a
515/// specified value is in a memory location, and, if it is, stores a new value
516/// there. Returns the value that was loaded.
517///
518class AtomicCmpXchgInst : public Instruction {
519 void Init(Value *Ptr, Value *Cmp, Value *NewVal,
520 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
521 SyncScope::ID SSID);
522
523protected:
524 // Note: Instruction needs to be a friend here to call cloneImpl.
525 friend class Instruction;
526
527 AtomicCmpXchgInst *cloneImpl() const;
528
529public:
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering,
533 SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
534 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
535 AtomicOrdering SuccessOrdering,
536 AtomicOrdering FailureOrdering,
537 SyncScope::ID SSID, BasicBlock *InsertAtEnd);
538
539 // allocate space for exactly three operands
540 void *operator new(size_t s) {
541 return User::operator new(s, 3);
542 }
543
544 /// Return true if this is a cmpxchg from a volatile memory
545 /// location.
546 ///
547 bool isVolatile() const {
548 return getSubclassDataFromInstruction() & 1;
549 }
550
551 /// Specify whether this is a volatile cmpxchg.
552 ///
553 void setVolatile(bool V) {
554 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
555 (unsigned)V);
556 }
557
558 /// Return true if this cmpxchg may spuriously fail.
559 bool isWeak() const {
560 return getSubclassDataFromInstruction() & 0x100;
561 }
562
563 void setWeak(bool IsWeak) {
564 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
565 (IsWeak << 8));
566 }
567
568 /// Transparently provide more efficient getOperand methods.
569 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
570
571 /// Returns the success ordering constraint of this cmpxchg instruction.
572 AtomicOrdering getSuccessOrdering() const {
573 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
574 }
575
576 /// Sets the success ordering constraint of this cmpxchg instruction.
577 void setSuccessOrdering(AtomicOrdering Ordering) {
578 assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 579, __PRETTY_FUNCTION__))
579 "CmpXchg instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 579, __PRETTY_FUNCTION__))
;
580 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
581 ((unsigned)Ordering << 2));
582 }
583
584 /// Returns the failure ordering constraint of this cmpxchg instruction.
585 AtomicOrdering getFailureOrdering() const {
586 return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
587 }
588
589 /// Sets the failure ordering constraint of this cmpxchg instruction.
590 void setFailureOrdering(AtomicOrdering Ordering) {
591 assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 592, __PRETTY_FUNCTION__))
592 "CmpXchg instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 592, __PRETTY_FUNCTION__))
;
593 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
594 ((unsigned)Ordering << 5));
595 }
596
597 /// Returns the synchronization scope ID of this cmpxchg instruction.
598 SyncScope::ID getSyncScopeID() const {
599 return SSID;
600 }
601
602 /// Sets the synchronization scope ID of this cmpxchg instruction.
603 void setSyncScopeID(SyncScope::ID SSID) {
604 this->SSID = SSID;
605 }
606
607 Value *getPointerOperand() { return getOperand(0); }
608 const Value *getPointerOperand() const { return getOperand(0); }
609 static unsigned getPointerOperandIndex() { return 0U; }
610
611 Value *getCompareOperand() { return getOperand(1); }
612 const Value *getCompareOperand() const { return getOperand(1); }
613
614 Value *getNewValOperand() { return getOperand(2); }
615 const Value *getNewValOperand() const { return getOperand(2); }
616
617 /// Returns the address space of the pointer operand.
618 unsigned getPointerAddressSpace() const {
619 return getPointerOperand()->getType()->getPointerAddressSpace();
620 }
621
622 /// Returns the strongest permitted ordering on failure, given the
623 /// desired ordering on success.
624 ///
625 /// If the comparison in a cmpxchg operation fails, there is no atomic store
626 /// so release semantics cannot be provided. So this function drops explicit
627 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
628 /// operation would remain SequentiallyConsistent.
629 static AtomicOrdering
630 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
631 switch (SuccessOrdering) {
632 default:
633 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 633)
;
634 case AtomicOrdering::Release:
635 case AtomicOrdering::Monotonic:
636 return AtomicOrdering::Monotonic;
637 case AtomicOrdering::AcquireRelease:
638 case AtomicOrdering::Acquire:
639 return AtomicOrdering::Acquire;
640 case AtomicOrdering::SequentiallyConsistent:
641 return AtomicOrdering::SequentiallyConsistent;
642 }
643 }
644
645 // Methods for support type inquiry through isa, cast, and dyn_cast:
646 static bool classof(const Instruction *I) {
647 return I->getOpcode() == Instruction::AtomicCmpXchg;
648 }
649 static bool classof(const Value *V) {
650 return isa<Instruction>(V) && classof(cast<Instruction>(V));
651 }
652
653private:
654 // Shadow Instruction::setInstructionSubclassData with a private forwarding
655 // method so that subclasses cannot accidentally use it.
656 void setInstructionSubclassData(unsigned short D) {
657 Instruction::setInstructionSubclassData(D);
658 }
659
660 /// The synchronization scope ID of this cmpxchg instruction. Not quite
661 /// enough room in SubClassData for everything, so synchronization scope ID
662 /// gets its own field.
663 SyncScope::ID SSID;
664};
665
666template <>
667struct OperandTraits<AtomicCmpXchgInst> :
668 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
669};
670
671DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<AtomicCmpXchgInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 671, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<AtomicCmpXchgInst>::op_begin(const_cast
<AtomicCmpXchgInst*>(this))[i_nocapture].get()); } void
AtomicCmpXchgInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<AtomicCmpXchgInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 671, __PRETTY_FUNCTION__)); OperandTraits<AtomicCmpXchgInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
AtomicCmpXchgInst::getNumOperands() const { return OperandTraits
<AtomicCmpXchgInst>::operands(this); } template <int
Idx_nocapture> Use &AtomicCmpXchgInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &AtomicCmpXchgInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
672
673//===----------------------------------------------------------------------===//
674// AtomicRMWInst Class
675//===----------------------------------------------------------------------===//
676
677/// an instruction that atomically reads a memory location,
678/// combines it with another value, and then stores the result back. Returns
679/// the old value.
680///
681class AtomicRMWInst : public Instruction {
682protected:
683 // Note: Instruction needs to be a friend here to call cloneImpl.
684 friend class Instruction;
685
686 AtomicRMWInst *cloneImpl() const;
687
688public:
689 /// This enumeration lists the possible modifications atomicrmw can make. In
690 /// the descriptions, 'p' is the pointer to the instruction's memory location,
691 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
692 /// instruction. These instructions always return 'old'.
693 enum BinOp {
694 /// *p = v
695 Xchg,
696 /// *p = old + v
697 Add,
698 /// *p = old - v
699 Sub,
700 /// *p = old & v
701 And,
702 /// *p = ~(old & v)
703 Nand,
704 /// *p = old | v
705 Or,
706 /// *p = old ^ v
707 Xor,
708 /// *p = old >signed v ? old : v
709 Max,
710 /// *p = old <signed v ? old : v
711 Min,
712 /// *p = old >unsigned v ? old : v
713 UMax,
714 /// *p = old <unsigned v ? old : v
715 UMin,
716
717 FIRST_BINOP = Xchg,
718 LAST_BINOP = UMin,
719 BAD_BINOP
720 };
721
722 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
723 AtomicOrdering Ordering, SyncScope::ID SSID,
724 Instruction *InsertBefore = nullptr);
725 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
726 AtomicOrdering Ordering, SyncScope::ID SSID,
727 BasicBlock *InsertAtEnd);
728
729 // allocate space for exactly two operands
730 void *operator new(size_t s) {
731 return User::operator new(s, 2);
732 }
733
734 BinOp getOperation() const {
735 return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
736 }
737
738 static StringRef getOperationName(BinOp Op);
739
740 void setOperation(BinOp Operation) {
741 unsigned short SubclassData = getSubclassDataFromInstruction();
742 setInstructionSubclassData((SubclassData & 31) |
743 (Operation << 5));
744 }
745
746 /// Return true if this is a RMW on a volatile memory location.
747 ///
748 bool isVolatile() const {
749 return getSubclassDataFromInstruction() & 1;
750 }
751
752 /// Specify whether this is a volatile RMW or not.
753 ///
754 void setVolatile(bool V) {
755 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
756 (unsigned)V);
757 }
758
759 /// Transparently provide more efficient getOperand methods.
760 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
761
762 /// Returns the ordering constraint of this rmw instruction.
763 AtomicOrdering getOrdering() const {
764 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
765 }
766
767 /// Sets the ordering constraint of this rmw instruction.
768 void setOrdering(AtomicOrdering Ordering) {
769 assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 770, __PRETTY_FUNCTION__))
770 "atomicrmw instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 770, __PRETTY_FUNCTION__))
;
771 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
772 ((unsigned)Ordering << 2));
773 }
774
775 /// Returns the synchronization scope ID of this rmw instruction.
776 SyncScope::ID getSyncScopeID() const {
777 return SSID;
778 }
779
780 /// Sets the synchronization scope ID of this rmw instruction.
781 void setSyncScopeID(SyncScope::ID SSID) {
782 this->SSID = SSID;
783 }
784
785 Value *getPointerOperand() { return getOperand(0); }
786 const Value *getPointerOperand() const { return getOperand(0); }
787 static unsigned getPointerOperandIndex() { return 0U; }
788
789 Value *getValOperand() { return getOperand(1); }
790 const Value *getValOperand() const { return getOperand(1); }
791
792 /// Returns the address space of the pointer operand.
793 unsigned getPointerAddressSpace() const {
794 return getPointerOperand()->getType()->getPointerAddressSpace();
795 }
796
797 // Methods for support type inquiry through isa, cast, and dyn_cast:
798 static bool classof(const Instruction *I) {
799 return I->getOpcode() == Instruction::AtomicRMW;
800 }
801 static bool classof(const Value *V) {
802 return isa<Instruction>(V) && classof(cast<Instruction>(V));
803 }
804
805private:
806 void Init(BinOp Operation, Value *Ptr, Value *Val,
807 AtomicOrdering Ordering, SyncScope::ID SSID);
808
809 // Shadow Instruction::setInstructionSubclassData with a private forwarding
810 // method so that subclasses cannot accidentally use it.
811 void setInstructionSubclassData(unsigned short D) {
812 Instruction::setInstructionSubclassData(D);
813 }
814
815 /// The synchronization scope ID of this rmw instruction. Not quite enough
816 /// room in SubClassData for everything, so synchronization scope ID gets its
817 /// own field.
818 SyncScope::ID SSID;
819};
820
821template <>
822struct OperandTraits<AtomicRMWInst>
823 : public FixedNumOperandTraits<AtomicRMWInst,2> {
824};
825
826DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { ((i_nocapture < OperandTraits
<AtomicRMWInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 826, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<AtomicRMWInst>::op_begin(const_cast<
AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<AtomicRMWInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 826, __PRETTY_FUNCTION__)); OperandTraits<AtomicRMWInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned AtomicRMWInst
::getNumOperands() const { return OperandTraits<AtomicRMWInst
>::operands(this); } template <int Idx_nocapture> Use
&AtomicRMWInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
AtomicRMWInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
827
828//===----------------------------------------------------------------------===//
829// GetElementPtrInst Class
830//===----------------------------------------------------------------------===//
831
832// checkGEPType - Simple wrapper function to give a better assertion failure
833// message on bad indexes for a gep instruction.
834//
835inline Type *checkGEPType(Type *Ty) {
836 assert(Ty && "Invalid GetElementPtrInst indices for type!")((Ty && "Invalid GetElementPtrInst indices for type!"
) ? static_cast<void> (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 836, __PRETTY_FUNCTION__))
;
837 return Ty;
838}
839
840/// an instruction for type-safe pointer arithmetic to
841/// access elements of arrays and structs
842///
843class GetElementPtrInst : public Instruction {
844 Type *SourceElementType;
845 Type *ResultElementType;
846
847 GetElementPtrInst(const GetElementPtrInst &GEPI);
848
849 /// Constructors - Create a getelementptr instruction with a base pointer an
850 /// list of indices. The first ctor can optionally insert before an existing
851 /// instruction, the second appends the new instruction to the specified
852 /// BasicBlock.
853 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
854 ArrayRef<Value *> IdxList, unsigned Values,
855 const Twine &NameStr, Instruction *InsertBefore);
856 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
857 ArrayRef<Value *> IdxList, unsigned Values,
858 const Twine &NameStr, BasicBlock *InsertAtEnd);
859
860 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
861
862protected:
863 // Note: Instruction needs to be a friend here to call cloneImpl.
864 friend class Instruction;
865
866 GetElementPtrInst *cloneImpl() const;
867
868public:
869 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
870 ArrayRef<Value *> IdxList,
871 const Twine &NameStr = "",
872 Instruction *InsertBefore = nullptr) {
873 unsigned Values = 1 + unsigned(IdxList.size());
874 if (!PointeeType)
875 PointeeType =
876 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
877 else
878 assert(((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 880, __PRETTY_FUNCTION__))
879 PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 880, __PRETTY_FUNCTION__))
880 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 880, __PRETTY_FUNCTION__))
;
881 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
882 NameStr, InsertBefore);
883 }
884
885 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
886 ArrayRef<Value *> IdxList,
887 const Twine &NameStr,
888 BasicBlock *InsertAtEnd) {
889 unsigned Values = 1 + unsigned(IdxList.size());
890 if (!PointeeType)
891 PointeeType =
892 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
893 else
894 assert(((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 896, __PRETTY_FUNCTION__))
895 PointeeType ==((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 896, __PRETTY_FUNCTION__))
896 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType())((PointeeType == cast<PointerType>(Ptr->getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("PointeeType == cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 896, __PRETTY_FUNCTION__))
;
897 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
898 NameStr, InsertAtEnd);
899 }
900
901 /// Create an "inbounds" getelementptr. See the documentation for the
902 /// "inbounds" flag in LangRef.html for details.
903 static GetElementPtrInst *CreateInBounds(Value *Ptr,
904 ArrayRef<Value *> IdxList,
905 const Twine &NameStr = "",
906 Instruction *InsertBefore = nullptr){
907 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
908 }
909
910 static GetElementPtrInst *
911 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
912 const Twine &NameStr = "",
913 Instruction *InsertBefore = nullptr) {
914 GetElementPtrInst *GEP =
915 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
916 GEP->setIsInBounds(true);
917 return GEP;
918 }
919
920 static GetElementPtrInst *CreateInBounds(Value *Ptr,
921 ArrayRef<Value *> IdxList,
922 const Twine &NameStr,
923 BasicBlock *InsertAtEnd) {
924 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
925 }
926
927 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
928 ArrayRef<Value *> IdxList,
929 const Twine &NameStr,
930 BasicBlock *InsertAtEnd) {
931 GetElementPtrInst *GEP =
932 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
933 GEP->setIsInBounds(true);
934 return GEP;
935 }
936
937 /// Transparently provide more efficient getOperand methods.
938 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
939
940 Type *getSourceElementType() const { return SourceElementType; }
941
942 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
943 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
944
945 Type *getResultElementType() const {
946 assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 947, __PRETTY_FUNCTION__))
947 cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 947, __PRETTY_FUNCTION__))
;
948 return ResultElementType;
949 }
950
951 /// Returns the address space of this instruction's pointer type.
952 unsigned getAddressSpace() const {
953 // Note that this is always the same as the pointer operand's address space
954 // and that is cheaper to compute, so cheat here.
955 return getPointerAddressSpace();
956 }
957
958 /// Returns the type of the element that would be loaded with
959 /// a load instruction with the specified parameters.
960 ///
961 /// Null is returned if the indices are invalid for the specified
962 /// pointer type.
963 ///
964 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
965 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
966 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
967
968 inline op_iterator idx_begin() { return op_begin()+1; }
969 inline const_op_iterator idx_begin() const { return op_begin()+1; }
970 inline op_iterator idx_end() { return op_end(); }
971 inline const_op_iterator idx_end() const { return op_end(); }
972
973 inline iterator_range<op_iterator> indices() {
974 return make_range(idx_begin(), idx_end());
975 }
976
977 inline iterator_range<const_op_iterator> indices() const {
978 return make_range(idx_begin(), idx_end());
979 }
980
981 Value *getPointerOperand() {
982 return getOperand(0);
983 }
984 const Value *getPointerOperand() const {
985 return getOperand(0);
986 }
987 static unsigned getPointerOperandIndex() {
988 return 0U; // get index for modifying correct operand.
989 }
990
991 /// Method to return the pointer operand as a
992 /// PointerType.
993 Type *getPointerOperandType() const {
994 return getPointerOperand()->getType();
995 }
996
997 /// Returns the address space of the pointer operand.
998 unsigned getPointerAddressSpace() const {
999 return getPointerOperandType()->getPointerAddressSpace();
1000 }
1001
1002 /// Returns the pointer type returned by the GEP
1003 /// instruction, which may be a vector of pointers.
1004 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
1005 return getGEPReturnType(
1006 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
1007 Ptr, IdxList);
1008 }
1009 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1010 ArrayRef<Value *> IdxList) {
1011 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1012 Ptr->getType()->getPointerAddressSpace());
1013 // Vector GEP
1014 if (Ptr->getType()->isVectorTy()) {
1015 unsigned NumElem = Ptr->getType()->getVectorNumElements();
1016 return VectorType::get(PtrTy, NumElem);
1017 }
1018 for (Value *Index : IdxList)
1019 if (Index->getType()->isVectorTy()) {
1020 unsigned NumElem = Index->getType()->getVectorNumElements();
1021 return VectorType::get(PtrTy, NumElem);
1022 }
1023 // Scalar GEP
1024 return PtrTy;
1025 }
1026
1027 unsigned getNumIndices() const { // Note: always non-negative
1028 return getNumOperands() - 1;
1029 }
1030
1031 bool hasIndices() const {
1032 return getNumOperands() > 1;
1033 }
1034
1035 /// Return true if all of the indices of this GEP are
1036 /// zeros. If so, the result pointer and the first operand have the same
1037 /// value, just potentially different types.
1038 bool hasAllZeroIndices() const;
1039
1040 /// Return true if all of the indices of this GEP are
1041 /// constant integers. If so, the result pointer and the first operand have
1042 /// a constant offset between them.
1043 bool hasAllConstantIndices() const;
1044
1045 /// Set or clear the inbounds flag on this GEP instruction.
1046 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1047 void setIsInBounds(bool b = true);
1048
1049 /// Determine whether the GEP has the inbounds flag.
1050 bool isInBounds() const;
1051
1052 /// Accumulate the constant address offset of this GEP if possible.
1053 ///
1054 /// This routine accepts an APInt into which it will accumulate the constant
1055 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1056 /// all-constant, it returns false and the value of the offset APInt is
1057 /// undefined (it is *not* preserved!). The APInt passed into this routine
1058 /// must be at least as wide as the IntPtr type for the address space of
1059 /// the base GEP pointer.
1060 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1061
1062 // Methods for support type inquiry through isa, cast, and dyn_cast:
1063 static bool classof(const Instruction *I) {
1064 return (I->getOpcode() == Instruction::GetElementPtr);
1065 }
1066 static bool classof(const Value *V) {
1067 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1068 }
1069};
1070
1071template <>
1072struct OperandTraits<GetElementPtrInst> :
1073 public VariadicOperandTraits<GetElementPtrInst, 1> {
1074};
1075
1076GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1077 ArrayRef<Value *> IdxList, unsigned Values,
1078 const Twine &NameStr,
1079 Instruction *InsertBefore)
1080 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1081 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1082 Values, InsertBefore),
1083 SourceElementType(PointeeType),
1084 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1085 assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1086, __PRETTY_FUNCTION__))
1086 cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1086, __PRETTY_FUNCTION__))
;
1087 init(Ptr, IdxList, NameStr);
1088}
1089
1090GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1091 ArrayRef<Value *> IdxList, unsigned Values,
1092 const Twine &NameStr,
1093 BasicBlock *InsertAtEnd)
1094 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1095 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1096 Values, InsertAtEnd),
1097 SourceElementType(PointeeType),
1098 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1099 assert(ResultElementType ==((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1100, __PRETTY_FUNCTION__))
1100 cast<PointerType>(getType()->getScalarType())->getElementType())((ResultElementType == cast<PointerType>(getType()->
getScalarType())->getElementType()) ? static_cast<void>
(0) : __assert_fail ("ResultElementType == cast<PointerType>(getType()->getScalarType())->getElementType()"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1100, __PRETTY_FUNCTION__))
;
1101 init(Ptr, IdxList, NameStr);
1102}
1103
1104DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<GetElementPtrInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1104, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<GetElementPtrInst>::op_begin(const_cast
<GetElementPtrInst*>(this))[i_nocapture].get()); } void
GetElementPtrInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<GetElementPtrInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1104, __PRETTY_FUNCTION__)); OperandTraits<GetElementPtrInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
GetElementPtrInst::getNumOperands() const { return OperandTraits
<GetElementPtrInst>::operands(this); } template <int
Idx_nocapture> Use &GetElementPtrInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &GetElementPtrInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
1105
1106//===----------------------------------------------------------------------===//
1107// ICmpInst Class
1108//===----------------------------------------------------------------------===//
1109
1110/// This instruction compares its operands according to the predicate given
1111/// to the constructor. It only operates on integers or pointers. The operands
1112/// must be identical types.
1113/// Represent an integer comparison operator.
1114class ICmpInst: public CmpInst {
1115 void AssertOK() {
1116 assert(isIntPredicate() &&((isIntPredicate() && "Invalid ICmp predicate value")
? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1117, __PRETTY_FUNCTION__))
1117 "Invalid ICmp predicate value")((isIntPredicate() && "Invalid ICmp predicate value")
? static_cast<void> (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1117, __PRETTY_FUNCTION__))
;
1118 assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to ICmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1119, __PRETTY_FUNCTION__))
1119 "Both operands to ICmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to ICmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1119, __PRETTY_FUNCTION__))
;
1120 // Check that the operands are the right type
1121 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand
(0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction"
) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1123, __PRETTY_FUNCTION__))
1122 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand
(0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction"
) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1123, __PRETTY_FUNCTION__))
1123 "Invalid operand types for ICmp instruction")(((getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand
(0)->getType()->isPtrOrPtrVectorTy()) && "Invalid operand types for ICmp instruction"
) ? static_cast<void> (0) : __assert_fail ("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1123, __PRETTY_FUNCTION__))
;
1124 }
1125
1126protected:
1127 // Note: Instruction needs to be a friend here to call cloneImpl.
1128 friend class Instruction;
1129
1130 /// Clone an identical ICmpInst
1131 ICmpInst *cloneImpl() const;
1132
1133public:
1134 /// Constructor with insert-before-instruction semantics.
1135 ICmpInst(
1136 Instruction *InsertBefore, ///< Where to insert
1137 Predicate pred, ///< The predicate to use for the comparison
1138 Value *LHS, ///< The left-hand-side of the expression
1139 Value *RHS, ///< The right-hand-side of the expression
1140 const Twine &NameStr = "" ///< Name of the instruction
1141 ) : CmpInst(makeCmpResultType(LHS->getType()),
1142 Instruction::ICmp, pred, LHS, RHS, NameStr,
1143 InsertBefore) {
1144#ifndef NDEBUG
1145 AssertOK();
1146#endif
1147 }
1148
1149 /// Constructor with insert-at-end semantics.
1150 ICmpInst(
1151 BasicBlock &InsertAtEnd, ///< Block to insert into.
1152 Predicate pred, ///< The predicate to use for the comparison
1153 Value *LHS, ///< The left-hand-side of the expression
1154 Value *RHS, ///< The right-hand-side of the expression
1155 const Twine &NameStr = "" ///< Name of the instruction
1156 ) : CmpInst(makeCmpResultType(LHS->getType()),
1157 Instruction::ICmp, pred, LHS, RHS, NameStr,
1158 &InsertAtEnd) {
1159#ifndef NDEBUG
1160 AssertOK();
1161#endif
1162 }
1163
1164 /// Constructor with no-insertion semantics
1165 ICmpInst(
1166 Predicate pred, ///< The predicate to use for the comparison
1167 Value *LHS, ///< The left-hand-side of the expression
1168 Value *RHS, ///< The right-hand-side of the expression
1169 const Twine &NameStr = "" ///< Name of the instruction
1170 ) : CmpInst(makeCmpResultType(LHS->getType()),
1171 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1172#ifndef NDEBUG
1173 AssertOK();
1174#endif
1175 }
1176
1177 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1178 /// @returns the predicate that would be the result if the operand were
1179 /// regarded as signed.
1180 /// Return the signed version of the predicate
1181 Predicate getSignedPredicate() const {
1182 return getSignedPredicate(getPredicate());
1183 }
1184
1185 /// This is a static version that you can use without an instruction.
1186 /// Return the signed version of the predicate.
1187 static Predicate getSignedPredicate(Predicate pred);
1188
1189 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1190 /// @returns the predicate that would be the result if the operand were
1191 /// regarded as unsigned.
1192 /// Return the unsigned version of the predicate
1193 Predicate getUnsignedPredicate() const {
1194 return getUnsignedPredicate(getPredicate());
1195 }
1196
1197 /// This is a static version that you can use without an instruction.
1198 /// Return the unsigned version of the predicate.
1199 static Predicate getUnsignedPredicate(Predicate pred);
1200
1201 /// Return true if this predicate is either EQ or NE. This also
1202 /// tests for commutativity.
1203 static bool isEquality(Predicate P) {
1204 return P == ICMP_EQ || P == ICMP_NE;
1205 }
1206
1207 /// Return true if this predicate is either EQ or NE. This also
1208 /// tests for commutativity.
1209 bool isEquality() const {
1210 return isEquality(getPredicate());
1211 }
1212
1213 /// @returns true if the predicate of this ICmpInst is commutative
1214 /// Determine if this relation is commutative.
1215 bool isCommutative() const { return isEquality(); }
1216
1217 /// Return true if the predicate is relational (not EQ or NE).
1218 ///
1219 bool isRelational() const {
1220 return !isEquality();
1221 }
1222
1223 /// Return true if the predicate is relational (not EQ or NE).
1224 ///
1225 static bool isRelational(Predicate P) {
1226 return !isEquality(P);
1227 }
1228
1229 /// Exchange the two operands to this instruction in such a way that it does
1230 /// not modify the semantics of the instruction. The predicate value may be
1231 /// changed to retain the same result if the predicate is order dependent
1232 /// (e.g. ult).
1233 /// Swap operands and adjust predicate.
1234 void swapOperands() {
1235 setPredicate(getSwappedPredicate());
1236 Op<0>().swap(Op<1>());
1237 }
1238
1239 // Methods for support type inquiry through isa, cast, and dyn_cast:
1240 static bool classof(const Instruction *I) {
1241 return I->getOpcode() == Instruction::ICmp;
1242 }
1243 static bool classof(const Value *V) {
1244 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1245 }
1246};
1247
1248//===----------------------------------------------------------------------===//
1249// FCmpInst Class
1250//===----------------------------------------------------------------------===//
1251
1252/// This instruction compares its operands according to the predicate given
1253/// to the constructor. It only operates on floating point values or packed
1254/// vectors of floating point values. The operands must be identical types.
1255/// Represents a floating point comparison operator.
1256class FCmpInst: public CmpInst {
1257 void AssertOK() {
1258 assert(isFPPredicate() && "Invalid FCmp predicate value")((isFPPredicate() && "Invalid FCmp predicate value") ?
static_cast<void> (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1258, __PRETTY_FUNCTION__))
;
1259 assert(getOperand(0)->getType() == getOperand(1)->getType() &&((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to FCmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1260, __PRETTY_FUNCTION__))
1260 "Both operands to FCmp instruction are not of the same type!")((getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to FCmp instruction are not of the same type!"
) ? static_cast<void> (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1260, __PRETTY_FUNCTION__))
;
1261 // Check that the operands are the right type
1262 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&((getOperand(0)->getType()->isFPOrFPVectorTy() &&
"Invalid operand types for FCmp instruction") ? static_cast<
void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1263, __PRETTY_FUNCTION__))
1263 "Invalid operand types for FCmp instruction")((getOperand(0)->getType()->isFPOrFPVectorTy() &&
"Invalid operand types for FCmp instruction") ? static_cast<
void> (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1263, __PRETTY_FUNCTION__))
;
1264 }
1265
1266protected:
1267 // Note: Instruction needs to be a friend here to call cloneImpl.
1268 friend class Instruction;
1269
1270 /// Clone an identical FCmpInst
1271 FCmpInst *cloneImpl() const;
1272
1273public:
1274 /// Constructor with insert-before-instruction semantics.
1275 FCmpInst(
1276 Instruction *InsertBefore, ///< Where to insert
1277 Predicate pred, ///< The predicate to use for the comparison
1278 Value *LHS, ///< The left-hand-side of the expression
1279 Value *RHS, ///< The right-hand-side of the expression
1280 const Twine &NameStr = "" ///< Name of the instruction
1281 ) : CmpInst(makeCmpResultType(LHS->getType()),
1282 Instruction::FCmp, pred, LHS, RHS, NameStr,
1283 InsertBefore) {
1284 AssertOK();
1285 }
1286
1287 /// Constructor with insert-at-end semantics.
1288 FCmpInst(
1289 BasicBlock &InsertAtEnd, ///< Block to insert into.
1290 Predicate pred, ///< The predicate to use for the comparison
1291 Value *LHS, ///< The left-hand-side of the expression
1292 Value *RHS, ///< The right-hand-side of the expression
1293 const Twine &NameStr = "" ///< Name of the instruction
1294 ) : CmpInst(makeCmpResultType(LHS->getType()),
1295 Instruction::FCmp, pred, LHS, RHS, NameStr,
1296 &InsertAtEnd) {
1297 AssertOK();
1298 }
1299
1300 /// Constructor with no-insertion semantics
1301 FCmpInst(
1302 Predicate pred, ///< The predicate to use for the comparison
1303 Value *LHS, ///< The left-hand-side of the expression
1304 Value *RHS, ///< The right-hand-side of the expression
1305 const Twine &NameStr = "" ///< Name of the instruction
1306 ) : CmpInst(makeCmpResultType(LHS->getType()),
1307 Instruction::FCmp, pred, LHS, RHS, NameStr) {
1308 AssertOK();
1309 }
1310
1311 /// @returns true if the predicate of this instruction is EQ or NE.
1312 /// Determine if this is an equality predicate.
1313 static bool isEquality(Predicate Pred) {
1314 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1315 Pred == FCMP_UNE;
1316 }
1317
1318 /// @returns true if the predicate of this instruction is EQ or NE.
1319 /// Determine if this is an equality predicate.
1320 bool isEquality() const { return isEquality(getPredicate()); }
1321
1322 /// @returns true if the predicate of this instruction is commutative.
1323 /// Determine if this is a commutative predicate.
1324 bool isCommutative() const {
1325 return isEquality() ||
1326 getPredicate() == FCMP_FALSE ||
1327 getPredicate() == FCMP_TRUE ||
1328 getPredicate() == FCMP_ORD ||
1329 getPredicate() == FCMP_UNO;
1330 }
1331
1332 /// @returns true if the predicate is relational (not EQ or NE).
1333 /// Determine if this a relational predicate.
1334 bool isRelational() const { return !isEquality(); }
1335
1336 /// Exchange the two operands to this instruction in such a way that it does
1337 /// not modify the semantics of the instruction. The predicate value may be
1338 /// changed to retain the same result if the predicate is order dependent
1339 /// (e.g. ult).
1340 /// Swap operands and adjust predicate.
1341 void swapOperands() {
1342 setPredicate(getSwappedPredicate());
1343 Op<0>().swap(Op<1>());
1344 }
1345
1346 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1347 static bool classof(const Instruction *I) {
1348 return I->getOpcode() == Instruction::FCmp;
1349 }
1350 static bool classof(const Value *V) {
1351 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1352 }
1353};
1354
1355class CallInst;
1356class InvokeInst;
1357
1358template <class T> struct CallBaseParent { using type = Instruction; };
1359
1360//===----------------------------------------------------------------------===//
1361/// Base class for all callable instructions (InvokeInst and CallInst)
1362/// Holds everything related to calling a function, abstracting from the base
1363/// type @p BaseInstTy and the concrete instruction @p InstTy
1364///
1365template <class InstTy>
1366class CallBase : public CallBaseParent<InstTy>::type,
1367 public OperandBundleUser<InstTy, User::op_iterator> {
1368protected:
1369 AttributeList Attrs; ///< parameter attributes for callable
1370 FunctionType *FTy;
1371 using BaseInstTy = typename CallBaseParent<InstTy>::type;
1372
1373 template <class... ArgsTy>
1374 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1375 : BaseInstTy(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1376 bool hasDescriptor() const { return Value::HasDescriptor; }
1377
1378 using BaseInstTy::BaseInstTy;
1379
1380 using OperandBundleUser<InstTy,
1381 User::op_iterator>::isFnAttrDisallowedByOpBundle;
1382 using OperandBundleUser<InstTy, User::op_iterator>::getNumTotalBundleOperands;
1383 using OperandBundleUser<InstTy, User::op_iterator>::bundleOperandHasAttr;
1384 using Instruction::getSubclassDataFromInstruction;
1385 using Instruction::setInstructionSubclassData;
1386
1387public:
1388 using Instruction::getContext;
1389 using OperandBundleUser<InstTy, User::op_iterator>::hasOperandBundles;
1390 using OperandBundleUser<InstTy,
1391 User::op_iterator>::getBundleOperandsStartIndex;
1392
1393 static bool classof(const Instruction *I) {
1394 llvm_unreachable(::llvm::llvm_unreachable_internal("CallBase is not meant to be used as part of the classof hierarchy"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1395)
1395 "CallBase is not meant to be used as part of the classof hierarchy")::llvm::llvm_unreachable_internal("CallBase is not meant to be used as part of the classof hierarchy"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1395)
;
1396 }
1397
1398public:
1399 /// Return the parameter attributes for this call.
1400 ///
1401 AttributeList getAttributes() const { return Attrs; }
1402
1403 /// Set the parameter attributes for this call.
1404 ///
1405 void setAttributes(AttributeList A) { Attrs = A; }
1406
1407 FunctionType *getFunctionType() const { return FTy; }
1408
1409 void mutateFunctionType(FunctionType *FTy) {
1410 Value::mutateType(FTy->getReturnType());
1411 this->FTy = FTy;
1412 }
1413
1414 /// Return the number of call arguments.
1415 ///
1416 unsigned getNumArgOperands() const {
1417 return getNumOperands() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1418 }
1419
1420 /// getArgOperand/setArgOperand - Return/set the i-th call argument.
1421 ///
1422 Value *getArgOperand(unsigned i) const {
1423 assert(i < getNumArgOperands() && "Out of bounds!")((i < getNumArgOperands() && "Out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1423, __PRETTY_FUNCTION__))
;
1424 return getOperand(i);
1425 }
1426 void setArgOperand(unsigned i, Value *v) {
1427 assert(i < getNumArgOperands() && "Out of bounds!")((i < getNumArgOperands() && "Out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1427, __PRETTY_FUNCTION__))
;
1428 setOperand(i, v);
1429 }
1430
1431 /// Return the iterator pointing to the beginning of the argument list.
1432 User::op_iterator arg_begin() { return op_begin(); }
1433
1434 /// Return the iterator pointing to the end of the argument list.
1435 User::op_iterator arg_end() {
1436 // [ call args ], [ operand bundles ], callee
1437 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1438 }
1439
1440 /// Iteration adapter for range-for loops.
1441 iterator_range<User::op_iterator> arg_operands() {
1442 return make_range(arg_begin(), arg_end());
1443 }
1444
1445 /// Return the iterator pointing to the beginning of the argument list.
1446 User::const_op_iterator arg_begin() const { return op_begin(); }
1447
1448 /// Return the iterator pointing to the end of the argument list.
1449 User::const_op_iterator arg_end() const {
1450 // [ call args ], [ operand bundles ], callee
1451 return op_end() - getNumTotalBundleOperands() - InstTy::ArgOffset;
1452 }
1453
1454 /// Iteration adapter for range-for loops.
1455 iterator_range<User::const_op_iterator> arg_operands() const {
1456 return make_range(arg_begin(), arg_end());
1457 }
1458
1459 /// Wrappers for getting the \c Use of a call argument.
1460 const Use &getArgOperandUse(unsigned i) const {
1461 assert(i < getNumArgOperands() && "Out of bounds!")((i < getNumArgOperands() && "Out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1461, __PRETTY_FUNCTION__))
;
1462 return User::getOperandUse(i);
1463 }
1464 Use &getArgOperandUse(unsigned i) {
1465 assert(i < getNumArgOperands() && "Out of bounds!")((i < getNumArgOperands() && "Out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1465, __PRETTY_FUNCTION__))
;
1466 return User::getOperandUse(i);
1467 }
1468
1469 /// If one of the arguments has the 'returned' attribute, return its
1470 /// operand value. Otherwise, return nullptr.
1471 Value *getReturnedArgOperand() const {
1472 unsigned Index;
1473
1474 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
1475 return getArgOperand(Index - AttributeList::FirstArgIndex);
1476 if (const Function *F = getCalledFunction())
1477 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
1478 Index)
1479 return getArgOperand(Index - AttributeList::FirstArgIndex);
1480
1481 return nullptr;
1482 }
1483
1484 User::op_iterator op_begin() {
1485 return OperandTraits<CallBase>::op_begin(this);
1486 }
1487
1488 User::const_op_iterator op_begin() const {
1489 return OperandTraits<CallBase>::op_begin(const_cast<CallBase *>(this));
1490 }
1491
1492 User::op_iterator op_end() { return OperandTraits<CallBase>::op_end(this); }
1493
1494 User::const_op_iterator op_end() const {
1495 return OperandTraits<CallBase>::op_end(const_cast<CallBase *>(this));
1496 }
1497
1498 Value *getOperand(unsigned i_nocapture) const {
1499 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&((i_nocapture < OperandTraits<CallBase>::operands(this
) && "getOperand() out of range!") ? static_cast<void
> (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1500, __PRETTY_FUNCTION__))
1500 "getOperand() out of range!")((i_nocapture < OperandTraits<CallBase>::operands(this
) && "getOperand() out of range!") ? static_cast<void
> (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1500, __PRETTY_FUNCTION__))
;
1501 return cast_or_null<Value>(OperandTraits<CallBase>::op_begin(
1502 const_cast<CallBase *>(this))[i_nocapture]
1503 .get());
1504 }
1505
1506 void setOperand(unsigned i_nocapture, Value *Val_nocapture) {
1507 assert(i_nocapture < OperandTraits<CallBase>::operands(this) &&((i_nocapture < OperandTraits<CallBase>::operands(this
) && "setOperand() out of range!") ? static_cast<void
> (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1508, __PRETTY_FUNCTION__))
1508 "setOperand() out of range!")((i_nocapture < OperandTraits<CallBase>::operands(this
) && "setOperand() out of range!") ? static_cast<void
> (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1508, __PRETTY_FUNCTION__))
;
1509 OperandTraits<CallBase>::op_begin(this)[i_nocapture] = Val_nocapture;
1510 }
1511
1512 unsigned getNumOperands() const {
1513 return OperandTraits<CallBase>::operands(this);
1514 }
1515 template <int Idx_nocapture> Use &Op() {
1516 return User::OpFrom<Idx_nocapture>(this);
1517 }
1518 template <int Idx_nocapture> const Use &Op() const {
1519 return User::OpFrom<Idx_nocapture>(this);
1520 }
1521
1522 /// Return the function called, or null if this is an
1523 /// indirect function invocation.
1524 ///
1525 Function *getCalledFunction() const {
1526 return dyn_cast<Function>(Op<-InstTy::ArgOffset>());
1527 }
1528
1529 /// Determine whether this call has the given attribute.
1530 bool hasFnAttr(Attribute::AttrKind Kind) const {
1531 assert(Kind != Attribute::NoBuiltin &&((Kind != Attribute::NoBuiltin && "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? static_cast<void> (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1532, __PRETTY_FUNCTION__))
1532 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin")((Kind != Attribute::NoBuiltin && "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? static_cast<void> (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1532, __PRETTY_FUNCTION__))
;
1533 return hasFnAttrImpl(Kind);
1534 }
1535
1536 /// Determine whether this call has the given attribute.
1537 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1538
1539 /// getCallingConv/setCallingConv - Get or set the calling convention of this
1540 /// function call.
1541 CallingConv::ID getCallingConv() const {
1542 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
1543 }
1544 void setCallingConv(CallingConv::ID CC) {
1545 auto ID = static_cast<unsigned>(CC);
1546 assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention")((!(ID & ~CallingConv::MaxID) && "Unsupported calling convention"
) ? static_cast<void> (0) : __assert_fail ("!(ID & ~CallingConv::MaxID) && \"Unsupported calling convention\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1546, __PRETTY_FUNCTION__))
;
1547 setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
1548 (ID << 2));
1549 }
1550
1551
1552 /// adds the attribute to the list of attributes.
1553 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1554 AttributeList PAL = getAttributes();
1555 PAL = PAL.addAttribute(getContext(), i, Kind);
1556 setAttributes(PAL);
1557 }
1558
1559 /// adds the attribute to the list of attributes.
1560 void addAttribute(unsigned i, Attribute Attr) {
1561 AttributeList PAL = getAttributes();
1562 PAL = PAL.addAttribute(getContext(), i, Attr);
1563 setAttributes(PAL);
1564 }
1565
1566 /// Adds the attribute to the indicated argument
1567 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1568 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1568, __PRETTY_FUNCTION__))
;
1569 AttributeList PAL = getAttributes();
1570 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1571 setAttributes(PAL);
1572 }
1573
1574 /// Adds the attribute to the indicated argument
1575 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1576 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1576, __PRETTY_FUNCTION__))
;
1577 AttributeList PAL = getAttributes();
1578 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1579 setAttributes(PAL);
1580 }
1581
1582 /// removes the attribute from the list of attributes.
1583 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1584 AttributeList PAL = getAttributes();
1585 PAL = PAL.removeAttribute(getContext(), i, Kind);
1586 setAttributes(PAL);
1587 }
1588
1589 /// removes the attribute from the list of attributes.
1590 void removeAttribute(unsigned i, StringRef Kind) {
1591 AttributeList PAL = getAttributes();
1592 PAL = PAL.removeAttribute(getContext(), i, Kind);
1593 setAttributes(PAL);
1594 }
1595
1596 /// Removes the attribute from the given argument
1597 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1598 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1598, __PRETTY_FUNCTION__))
;
1599 AttributeList PAL = getAttributes();
1600 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1601 setAttributes(PAL);
1602 }
1603
1604 /// Removes the attribute from the given argument
1605 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1606 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1606, __PRETTY_FUNCTION__))
;
1607 AttributeList PAL = getAttributes();
1608 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1609 setAttributes(PAL);
1610 }
1611
1612 /// adds the dereferenceable attribute to the list of attributes.
1613 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1614 AttributeList PAL = getAttributes();
1615 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1616 setAttributes(PAL);
1617 }
1618
1619 /// adds the dereferenceable_or_null attribute to the list of
1620 /// attributes.
1621 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1622 AttributeList PAL = getAttributes();
1623 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1624 setAttributes(PAL);
1625 }
1626
1627 /// Determine whether the return value has the given attribute.
1628 bool hasRetAttr(Attribute::AttrKind Kind) const {
1629 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
1630 return true;
1631
1632 // Look at the callee, if available.
1633 if (const Function *F = getCalledFunction())
1634 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
1635 return false;
1636 }
1637
1638 /// Determine whether the argument or parameter has the given attribute.
1639 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1640 assert(ArgNo < getNumArgOperands() && "Param index out of bounds!")((ArgNo < getNumArgOperands() && "Param index out of bounds!"
) ? static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Param index out of bounds!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1640, __PRETTY_FUNCTION__))
;
1641
1642 if (Attrs.hasParamAttribute(ArgNo, Kind))
1643 return true;
1644 if (const Function *F = getCalledFunction())
1645 return F->getAttributes().hasParamAttribute(ArgNo, Kind);
1646 return false;
1647 }
1648
1649 /// Get the attribute of a given kind at a position.
1650 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1651 return getAttributes().getAttribute(i, Kind);
1652 }
1653
1654 /// Get the attribute of a given kind at a position.
1655 Attribute getAttribute(unsigned i, StringRef Kind) const {
1656 return getAttributes().getAttribute(i, Kind);
1657 }
1658
1659 /// Get the attribute of a given kind from a given arg
1660 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1661 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1661, __PRETTY_FUNCTION__))
;
1662 return getAttributes().getParamAttr(ArgNo, Kind);
1663 }
1664
1665 /// Get the attribute of a given kind from a given arg
1666 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1667 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1667, __PRETTY_FUNCTION__))
;
1668 return getAttributes().getParamAttr(ArgNo, Kind);
1669 }
1670 /// Return true if the data operand at index \p i has the attribute \p
1671 /// A.
1672 ///
1673 /// Data operands include call arguments and values used in operand bundles,
1674 /// but does not include the callee operand. This routine dispatches to the
1675 /// underlying AttributeList or the OperandBundleUser as appropriate.
1676 ///
1677 /// The index \p i is interpreted as
1678 ///
1679 /// \p i == Attribute::ReturnIndex -> the return value
1680 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1681 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1682 /// (\p i - 1) in the operand list.
1683 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1684 // There are getNumOperands() - (InstTy::ArgOffset - 1) data operands.
1685 // The last operand is the callee.
1686 assert(i < (getNumOperands() - InstTy::ArgOffset + 1) &&((i < (getNumOperands() - InstTy::ArgOffset + 1) &&
"Data operand index out of bounds!") ? static_cast<void>
(0) : __assert_fail ("i < (getNumOperands() - InstTy::ArgOffset + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1687, __PRETTY_FUNCTION__))
1687 "Data operand index out of bounds!")((i < (getNumOperands() - InstTy::ArgOffset + 1) &&
"Data operand index out of bounds!") ? static_cast<void>
(0) : __assert_fail ("i < (getNumOperands() - InstTy::ArgOffset + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1687, __PRETTY_FUNCTION__))
;
1688
1689 // The attribute A can either be directly specified, if the operand in
1690 // question is a call argument; or be indirectly implied by the kind of its
1691 // containing operand bundle, if the operand is a bundle operand.
1692
1693 if (i == AttributeList::ReturnIndex)
1694 return hasRetAttr(Kind);
1695
1696 // FIXME: Avoid these i - 1 calculations and update the API to use
1697 // zero-based indices.
1698 if (i < (getNumArgOperands() + 1))
1699 return paramHasAttr(i - 1, Kind);
1700
1701 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&((hasOperandBundles() && i >= (getBundleOperandsStartIndex
() + 1) && "Must be either a call argument or an operand bundle!"
) ? static_cast<void> (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1702, __PRETTY_FUNCTION__))
1702 "Must be either a call argument or an operand bundle!")((hasOperandBundles() && i >= (getBundleOperandsStartIndex
() + 1) && "Must be either a call argument or an operand bundle!"
) ? static_cast<void> (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1702, __PRETTY_FUNCTION__))
;
1703 return bundleOperandHasAttr(i - 1, Kind);
1704 }
1705
1706 /// Extract the alignment of the return value.
1707 unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
1708
1709 /// Extract the alignment for a call or parameter (0=unknown).
1710 unsigned getParamAlignment(unsigned ArgNo) const {
1711 return Attrs.getParamAlignment(ArgNo);
1712 }
1713
1714 /// Extract the number of dereferenceable bytes for a call or
1715 /// parameter (0=unknown).
1716 uint64_t getDereferenceableBytes(unsigned i) const {
1717 return Attrs.getDereferenceableBytes(i);
1718 }
1719
1720 /// Extract the number of dereferenceable_or_null bytes for a call or
1721 /// parameter (0=unknown).
1722 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1723 return Attrs.getDereferenceableOrNullBytes(i);
1724 }
1725
1726 /// Determine if the return value is marked with NoAlias attribute.
1727 bool returnDoesNotAlias() const {
1728 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1729 }
1730
1731 /// Return true if the call should not be treated as a call to a
1732 /// builtin.
1733 bool isNoBuiltin() const {
1734 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1735 !hasFnAttrImpl(Attribute::Builtin);
1736 }
1737
1738 /// Determine if the call requires strict floating point semantics.
1739 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1740
1741 /// Return true if the call should not be inlined.
1742 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1743 void setIsNoInline() {
1744 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1745 }
1746 /// Determine if the call does not access memory.
1747 bool doesNotAccessMemory() const {
1748 return hasFnAttr(Attribute::ReadNone);
1749 }
1750 void setDoesNotAccessMemory() {
1751 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1752 }
1753
1754 /// Determine if the call does not access or only reads memory.
1755 bool onlyReadsMemory() const {
1756 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1757 }
1758 void setOnlyReadsMemory() {
1759 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1760 }
1761
1762 /// Determine if the call does not access or only writes memory.
1763 bool doesNotReadMemory() const {
1764 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1765 }
1766 void setDoesNotReadMemory() {
1767 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1768 }
1769
1770 /// Determine if the call can access memmory only using pointers based
1771 /// on its arguments.
1772 bool onlyAccessesArgMemory() const {
1773 return hasFnAttr(Attribute::ArgMemOnly);
1774 }
1775 void setOnlyAccessesArgMemory() {
1776 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1777 }
1778
1779 /// Determine if the function may only access memory that is
1780 /// inaccessible from the IR.
1781 bool onlyAccessesInaccessibleMemory() const {
1782 return hasFnAttr(Attribute::InaccessibleMemOnly);
1783 }
1784 void setOnlyAccessesInaccessibleMemory() {
1785 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1786 }
1787
1788 /// Determine if the function may only access memory that is
1789 /// either inaccessible from the IR or pointed to by its arguments.
1790 bool onlyAccessesInaccessibleMemOrArgMem() const {
1791 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1792 }
1793 void setOnlyAccessesInaccessibleMemOrArgMem() {
1794 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOrArgMemOnly);
1795 }
1796 /// Determine if the call cannot return.
1797 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1798 void setDoesNotReturn() {
1799 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1800 }
1801
1802 /// Determine if the call should not perform indirect branch tracking.
1803 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1804
1805 /// Determine if the call cannot unwind.
1806 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1807 void setDoesNotThrow() {
1808 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1809 }
1810
1811 /// Determine if the invoke cannot be duplicated.
1812 bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
1813 void setCannotDuplicate() {
1814 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1815 }
1816
1817 /// Determine if the invoke is convergent
1818 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1819 void setConvergent() {
1820 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1821 }
1822 void setNotConvergent() {
1823 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1824 }
1825
1826 /// Determine if the call returns a structure through first
1827 /// pointer argument.
1828 bool hasStructRetAttr() const {
1829 if (getNumArgOperands() == 0)
1830 return false;
1831
1832 // Be friendly and also check the callee.
1833 return paramHasAttr(0, Attribute::StructRet);
1834 }
1835
1836 /// Determine if any call argument is an aggregate passed by value.
1837 bool hasByValArgument() const {
1838 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1839 }
1840 /// Get a pointer to the function that is invoked by this
1841 /// instruction.
1842 const Value *getCalledValue() const { return Op<-InstTy::ArgOffset>(); }
1843 Value *getCalledValue() { return Op<-InstTy::ArgOffset>(); }
1844
1845 /// Set the function called.
1846 void setCalledFunction(Value* Fn) {
1847 setCalledFunction(
1848 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
1849 Fn);
1850 }
1851 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1852 this->FTy = FTy;
1853 assert(FTy == cast<FunctionType>(((FTy == cast<FunctionType>( cast<PointerType>(Fn
->getType())->getElementType())) ? static_cast<void>
(0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1854, __PRETTY_FUNCTION__))
1854 cast<PointerType>(Fn->getType())->getElementType()))((FTy == cast<FunctionType>( cast<PointerType>(Fn
->getType())->getElementType())) ? static_cast<void>
(0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 1854, __PRETTY_FUNCTION__))
;
1855 Op<-InstTy::ArgOffset>() = Fn;
1856 }
1857
1858protected:
1859 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
1860 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
1861 return true;
1862
1863 // Operand bundles override attributes on the called function, but don't
1864 // override attributes directly present on the call instruction.
1865 if (isFnAttrDisallowedByOpBundle(Kind))
1866 return false;
1867
1868 if (const Function *F = getCalledFunction())
1869 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
1870 Kind);
1871 return false;
1872 }
1873};
1874
1875//===----------------------------------------------------------------------===//
1876/// This class represents a function call, abstracting a target
1877/// machine's calling convention. This class uses low bit of the SubClassData
1878/// field to indicate whether or not this is a tail call. The rest of the bits
1879/// hold the calling convention of the call.
1880///
1881class CallInst : public CallBase<CallInst> {
1882 friend class OperandBundleUser<CallInst, User::op_iterator>;
1883
1884 CallInst(const CallInst &CI);
1885
1886 /// Construct a CallInst given a range of arguments.
1887 /// Construct a CallInst from a range of arguments
1888 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1889 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1890 Instruction *InsertBefore);
1891
1892 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1893 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1894 Instruction *InsertBefore)
1895 : CallInst(cast<FunctionType>(
1896 cast<PointerType>(Func->getType())->getElementType()),
1897 Func, Args, Bundles, NameStr, InsertBefore) {}
1898
1899 inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
1900 Instruction *InsertBefore)
1901 : CallInst(Func, Args, None, NameStr, InsertBefore) {}
1902
1903 /// Construct a CallInst given a range of arguments.
1904 /// Construct a CallInst from a range of arguments
1905 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1906 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1907 BasicBlock *InsertAtEnd);
1908
1909 explicit CallInst(Value *F, const Twine &NameStr, Instruction *InsertBefore);
1910
1911 CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
1912
1913 void init(Value *Func, ArrayRef<Value *> Args,
1914 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
1915 init(cast<FunctionType>(
1916 cast<PointerType>(Func->getType())->getElementType()),
1917 Func, Args, Bundles, NameStr);
1918 }
1919 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1920 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1921 void init(Value *Func, const Twine &NameStr);
1922
1923protected:
1924 // Note: Instruction needs to be a friend here to call cloneImpl.
1925 friend class Instruction;
1926
1927 CallInst *cloneImpl() const;
1928
1929public:
1930 static constexpr int ArgOffset = 1;
1931
1932 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1933 ArrayRef<OperandBundleDef> Bundles = None,
1934 const Twine &NameStr = "",
1935 Instruction *InsertBefore = nullptr) {
1936 return Create(cast<FunctionType>(
1937 cast<PointerType>(Func->getType())->getElementType()),
1938 Func, Args, Bundles, NameStr, InsertBefore);
1939 }
1940
1941 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1942 const Twine &NameStr,
1943 Instruction *InsertBefore = nullptr) {
1944 return Create(cast<FunctionType>(
1945 cast<PointerType>(Func->getType())->getElementType()),
1946 Func, Args, None, NameStr, InsertBefore);
1947 }
1948
1949 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1950 const Twine &NameStr,
1951 Instruction *InsertBefore = nullptr) {
1952 return new (unsigned(Args.size() + 1))
1953 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1954 }
1955
1956 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1957 ArrayRef<OperandBundleDef> Bundles = None,
1958 const Twine &NameStr = "",
1959 Instruction *InsertBefore = nullptr) {
1960 const unsigned TotalOps =
1961 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1962 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1963
1964 return new (TotalOps, DescriptorBytes)
1965 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1966 }
1967
1968 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1969 ArrayRef<OperandBundleDef> Bundles,
1970 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1971 const unsigned TotalOps =
1972 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1973 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1974
1975 return new (TotalOps, DescriptorBytes)
1976 CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
1977 }
1978
1979 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1980 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1981 return new (unsigned(Args.size() + 1))
1982 CallInst(Func, Args, None, NameStr, InsertAtEnd);
1983 }
1984
1985 static CallInst *Create(Value *F, const Twine &NameStr = "",
1986 Instruction *InsertBefore = nullptr) {
1987 return new (1) CallInst(F, NameStr, InsertBefore);
1988 }
1989
1990 static CallInst *Create(Value *F, const Twine &NameStr,
1991 BasicBlock *InsertAtEnd) {
1992 return new (1) CallInst(F, NameStr, InsertAtEnd);
1993 }
1994
1995 /// Create a clone of \p CI with a different set of operand bundles and
1996 /// insert it before \p InsertPt.
1997 ///
1998 /// The returned call instruction is identical \p CI in every way except that
1999 /// the operand bundles for the new instruction are set to the operand bundles
2000 /// in \p Bundles.
2001 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
2002 Instruction *InsertPt = nullptr);
2003
2004 /// Generate the IR for a call to malloc:
2005 /// 1. Compute the malloc call's argument as the specified type's size,
2006 /// possibly multiplied by the array size if the array size is not
2007 /// constant 1.
2008 /// 2. Call malloc with that argument.
2009 /// 3. Bitcast the result of the malloc call to the specified type.
2010 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2011 Type *AllocTy, Value *AllocSize,
2012 Value *ArraySize = nullptr,
2013 Function *MallocF = nullptr,
2014 const Twine &Name = "");
2015 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2016 Type *AllocTy, Value *AllocSize,
2017 Value *ArraySize = nullptr,
2018 Function *MallocF = nullptr,
2019 const Twine &Name = "");
2020 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
2021 Type *AllocTy, Value *AllocSize,
2022 Value *ArraySize = nullptr,
2023 ArrayRef<OperandBundleDef> Bundles = None,
2024 Function *MallocF = nullptr,
2025 const Twine &Name = "");
2026 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
2027 Type *AllocTy, Value *AllocSize,
2028 Value *ArraySize = nullptr,
2029 ArrayRef<OperandBundleDef> Bundles = None,
2030 Function *MallocF = nullptr,
2031 const Twine &Name = "");
2032 /// Generate the IR for a call to the builtin free function.
2033 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
2034 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
2035 static Instruction *CreateFree(Value *Source,
2036 ArrayRef<OperandBundleDef> Bundles,
2037 Instruction *InsertBefore);
2038 static Instruction *CreateFree(Value *Source,
2039 ArrayRef<OperandBundleDef> Bundles,
2040 BasicBlock *InsertAtEnd);
2041
2042 // Note that 'musttail' implies 'tail'.
2043 enum TailCallKind {
2044 TCK_None = 0,
2045 TCK_Tail = 1,
2046 TCK_MustTail = 2,
2047 TCK_NoTail = 3
2048 };
2049 TailCallKind getTailCallKind() const {
2050 return TailCallKind(getSubclassDataFromInstruction() & 3);
2051 }
2052
2053 bool isTailCall() const {
2054 unsigned Kind = getSubclassDataFromInstruction() & 3;
2055 return Kind == TCK_Tail || Kind == TCK_MustTail;
2056 }
2057
2058 bool isMustTailCall() const {
2059 return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
2060 }
2061
2062 bool isNoTailCall() const {
2063 return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
2064 }
2065
2066 void setTailCall(bool isTC = true) {
2067 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2068 unsigned(isTC ? TCK_Tail : TCK_None));
2069 }
2070
2071 void setTailCallKind(TailCallKind TCK) {
2072 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
2073 unsigned(TCK));
2074 }
2075
2076 /// Return true if the call can return twice
2077 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
2078 void setCanReturnTwice() {
2079 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
2080 }
2081
2082 /// Check if this call is an inline asm statement.
2083 bool isInlineAsm() const { return isa<InlineAsm>(Op<-1>()); }
2084
2085 // Methods for support type inquiry through isa, cast, and dyn_cast:
2086 static bool classof(const Instruction *I) {
2087 return I->getOpcode() == Instruction::Call;
2088 }
2089 static bool classof(const Value *V) {
2090 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2091 }
2092
2093private:
2094 // Shadow Instruction::setInstructionSubclassData with a private forwarding
2095 // method so that subclasses cannot accidentally use it.
2096 void setInstructionSubclassData(unsigned short D) {
2097 Instruction::setInstructionSubclassData(D);
2098 }
2099};
2100
2101template <>
2102struct OperandTraits<CallBase<CallInst>>
2103 : public VariadicOperandTraits<CallBase<CallInst>, 1> {};
2104
2105CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
2106 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2107 BasicBlock *InsertAtEnd)
2108 : CallBase<CallInst>(
2109 cast<FunctionType>(
2110 cast<PointerType>(Func->getType())->getElementType())
2111 ->getReturnType(),
2112 Instruction::Call,
2113 OperandTraits<CallBase<CallInst>>::op_end(this) -
2114 (Args.size() + CountBundleInputs(Bundles) + 1),
2115 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
2116 init(Func, Args, Bundles, NameStr);
2117}
2118
2119CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
2120 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
2121 Instruction *InsertBefore)
2122 : CallBase<CallInst>(Ty->getReturnType(), Instruction::Call,
2123 OperandTraits<CallBase<CallInst>>::op_end(this) -
2124 (Args.size() + CountBundleInputs(Bundles) + 1),
2125 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
2126 InsertBefore) {
2127 init(Ty, Func, Args, Bundles, NameStr);
2128}
2129
2130//===----------------------------------------------------------------------===//
2131// SelectInst Class
2132//===----------------------------------------------------------------------===//
2133
2134/// This class represents the LLVM 'select' instruction.
2135///
2136class SelectInst : public Instruction {
2137 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2138 Instruction *InsertBefore)
2139 : Instruction(S1->getType(), Instruction::Select,
2140 &Op<0>(), 3, InsertBefore) {
2141 init(C, S1, S2);
2142 setName(NameStr);
2143 }
2144
2145 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
2146 BasicBlock *InsertAtEnd)
2147 : Instruction(S1->getType(), Instruction::Select,
2148 &Op<0>(), 3, InsertAtEnd) {
2149 init(C, S1, S2);
2150 setName(NameStr);
2151 }
2152
2153 void init(Value *C, Value *S1, Value *S2) {
2154 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")((!areInvalidOperands(C, S1, S2) && "Invalid operands for select"
) ? static_cast<void> (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2154, __PRETTY_FUNCTION__))
;
2155 Op<0>() = C;
2156 Op<1>() = S1;
2157 Op<2>() = S2;
2158 }
2159
2160protected:
2161 // Note: Instruction needs to be a friend here to call cloneImpl.
2162 friend class Instruction;
2163
2164 SelectInst *cloneImpl() const;
2165
2166public:
2167 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2168 const Twine &NameStr = "",
2169 Instruction *InsertBefore = nullptr,
2170 Instruction *MDFrom = nullptr) {
2171 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
2172 if (MDFrom)
2173 Sel->copyMetadata(*MDFrom);
2174 return Sel;
2175 }
2176
2177 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2178 const Twine &NameStr,
2179 BasicBlock *InsertAtEnd) {
2180 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
2181 }
2182
2183 const Value *getCondition() const { return Op<0>(); }
2184 const Value *getTrueValue() const { return Op<1>(); }
2185 const Value *getFalseValue() const { return Op<2>(); }
2186 Value *getCondition() { return Op<0>(); }
2187 Value *getTrueValue() { return Op<1>(); }
2188 Value *getFalseValue() { return Op<2>(); }
2189
2190 void setCondition(Value *V) { Op<0>() = V; }
2191 void setTrueValue(Value *V) { Op<1>() = V; }
2192 void setFalseValue(Value *V) { Op<2>() = V; }
2193
2194 /// Return a string if the specified operands are invalid
2195 /// for a select operation, otherwise return null.
2196 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
2197
2198 /// Transparently provide more efficient getOperand methods.
2199 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2200
2201 OtherOps getOpcode() const {
2202 return static_cast<OtherOps>(Instruction::getOpcode());
2203 }
2204
2205 // Methods for support type inquiry through isa, cast, and dyn_cast:
2206 static bool classof(const Instruction *I) {
2207 return I->getOpcode() == Instruction::Select;
2208 }
2209 static bool classof(const Value *V) {
2210 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2211 }
2212};
2213
2214template <>
2215struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
2216};
2217
2218DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<SelectInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2218, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<SelectInst>::op_begin(const_cast<SelectInst
*>(this))[i_nocapture].get()); } void SelectInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<SelectInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2218, __PRETTY_FUNCTION__)); OperandTraits<SelectInst>
::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned SelectInst
::getNumOperands() const { return OperandTraits<SelectInst
>::operands(this); } template <int Idx_nocapture> Use
&SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2219
2220//===----------------------------------------------------------------------===//
2221// VAArgInst Class
2222//===----------------------------------------------------------------------===//
2223
2224/// This class represents the va_arg llvm instruction, which returns
2225/// an argument of the specified type given a va_list and increments that list
2226///
2227class VAArgInst : public UnaryInstruction {
2228protected:
2229 // Note: Instruction needs to be a friend here to call cloneImpl.
2230 friend class Instruction;
2231
2232 VAArgInst *cloneImpl() const;
2233
2234public:
2235 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
2236 Instruction *InsertBefore = nullptr)
2237 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
2238 setName(NameStr);
2239 }
2240
2241 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
2242 BasicBlock *InsertAtEnd)
2243 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
2244 setName(NameStr);
2245 }
2246
2247 Value *getPointerOperand() { return getOperand(0); }
2248 const Value *getPointerOperand() const { return getOperand(0); }
2249 static unsigned getPointerOperandIndex() { return 0U; }
2250
2251 // Methods for support type inquiry through isa, cast, and dyn_cast:
2252 static bool classof(const Instruction *I) {
2253 return I->getOpcode() == VAArg;
2254 }
2255 static bool classof(const Value *V) {
2256 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2257 }
2258};
2259
2260//===----------------------------------------------------------------------===//
2261// ExtractElementInst Class
2262//===----------------------------------------------------------------------===//
2263
2264/// This instruction extracts a single (scalar)
2265/// element from a VectorType value
2266///
2267class ExtractElementInst : public Instruction {
2268 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
2269 Instruction *InsertBefore = nullptr);
2270 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
2271 BasicBlock *InsertAtEnd);
2272
2273protected:
2274 // Note: Instruction needs to be a friend here to call cloneImpl.
2275 friend class Instruction;
2276
2277 ExtractElementInst *cloneImpl() const;
2278
2279public:
2280 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2281 const Twine &NameStr = "",
2282 Instruction *InsertBefore = nullptr) {
2283 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
2284 }
2285
2286 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2287 const Twine &NameStr,
2288 BasicBlock *InsertAtEnd) {
2289 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
2290 }
2291
2292 /// Return true if an extractelement instruction can be
2293 /// formed with the specified operands.
2294 static bool isValidOperands(const Value *Vec, const Value *Idx);
2295
2296 Value *getVectorOperand() { return Op<0>(); }
2297 Value *getIndexOperand() { return Op<1>(); }
2298 const Value *getVectorOperand() const { return Op<0>(); }
2299 const Value *getIndexOperand() const { return Op<1>(); }
2300
2301 VectorType *getVectorOperandType() const {
2302 return cast<VectorType>(getVectorOperand()->getType());
2303 }
2304
2305 /// Transparently provide more efficient getOperand methods.
2306 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2307
2308 // Methods for support type inquiry through isa, cast, and dyn_cast:
2309 static bool classof(const Instruction *I) {
2310 return I->getOpcode() == Instruction::ExtractElement;
2311 }
2312 static bool classof(const Value *V) {
2313 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2314 }
2315};
2316
2317template <>
2318struct OperandTraits<ExtractElementInst> :
2319 public FixedNumOperandTraits<ExtractElementInst, 2> {
2320};
2321
2322DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
((i_nocapture < OperandTraits<ExtractElementInst>::
operands(this) && "getOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2322, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<ExtractElementInst>::op_begin(const_cast
<ExtractElementInst*>(this))[i_nocapture].get()); } void
ExtractElementInst::setOperand(unsigned i_nocapture, Value *
Val_nocapture) { ((i_nocapture < OperandTraits<ExtractElementInst
>::operands(this) && "setOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2322, __PRETTY_FUNCTION__)); OperandTraits<ExtractElementInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
ExtractElementInst::getNumOperands() const { return OperandTraits
<ExtractElementInst>::operands(this); } template <int
Idx_nocapture> Use &ExtractElementInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &ExtractElementInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
2323
2324//===----------------------------------------------------------------------===//
2325// InsertElementInst Class
2326//===----------------------------------------------------------------------===//
2327
2328/// This instruction inserts a single (scalar)
2329/// element into a VectorType value
2330///
2331class InsertElementInst : public Instruction {
2332 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
2333 const Twine &NameStr = "",
2334 Instruction *InsertBefore = nullptr);
2335 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
2336 BasicBlock *InsertAtEnd);
2337
2338protected:
2339 // Note: Instruction needs to be a friend here to call cloneImpl.
2340 friend class Instruction;
2341
2342 InsertElementInst *cloneImpl() const;
2343
2344public:
2345 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2346 const Twine &NameStr = "",
2347 Instruction *InsertBefore = nullptr) {
2348 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
2349 }
2350
2351 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2352 const Twine &NameStr,
2353 BasicBlock *InsertAtEnd) {
2354 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
2355 }
2356
2357 /// Return true if an insertelement instruction can be
2358 /// formed with the specified operands.
2359 static bool isValidOperands(const Value *Vec, const Value *NewElt,
2360 const Value *Idx);
2361
2362 /// Overload to return most specific vector type.
2363 ///
2364 VectorType *getType() const {
2365 return cast<VectorType>(Instruction::getType());
2366 }
2367
2368 /// Transparently provide more efficient getOperand methods.
2369 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2370
2371 // Methods for support type inquiry through isa, cast, and dyn_cast:
2372 static bool classof(const Instruction *I) {
2373 return I->getOpcode() == Instruction::InsertElement;
2374 }
2375 static bool classof(const Value *V) {
2376 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2377 }
2378};
2379
2380template <>
2381struct OperandTraits<InsertElementInst> :
2382 public FixedNumOperandTraits<InsertElementInst, 3> {
2383};
2384
2385DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<InsertElementInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2385, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<InsertElementInst>::op_begin(const_cast
<InsertElementInst*>(this))[i_nocapture].get()); } void
InsertElementInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<InsertElementInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2385, __PRETTY_FUNCTION__)); OperandTraits<InsertElementInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
InsertElementInst::getNumOperands() const { return OperandTraits
<InsertElementInst>::operands(this); } template <int
Idx_nocapture> Use &InsertElementInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &InsertElementInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
2386
2387//===----------------------------------------------------------------------===//
2388// ShuffleVectorInst Class
2389//===----------------------------------------------------------------------===//
2390
2391/// This instruction constructs a fixed permutation of two
2392/// input vectors.
2393///
2394class ShuffleVectorInst : public Instruction {
2395protected:
2396 // Note: Instruction needs to be a friend here to call cloneImpl.
2397 friend class Instruction;
2398
2399 ShuffleVectorInst *cloneImpl() const;
2400
2401public:
2402 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2403 const Twine &NameStr = "",
2404 Instruction *InsertBefor = nullptr);
2405 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2406 const Twine &NameStr, BasicBlock *InsertAtEnd);
2407
2408 // allocate space for exactly three operands
2409 void *operator new(size_t s) {
2410 return User::operator new(s, 3);
2411 }
2412
2413 /// Return true if a shufflevector instruction can be
2414 /// formed with the specified operands.
2415 static bool isValidOperands(const Value *V1, const Value *V2,
2416 const Value *Mask);
2417
2418 /// Overload to return most specific vector type.
2419 ///
2420 VectorType *getType() const {
2421 return cast<VectorType>(Instruction::getType());
2422 }
2423
2424 /// Transparently provide more efficient getOperand methods.
2425 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2426
2427 Constant *getMask() const {
2428 return cast<Constant>(getOperand(2));
2429 }
2430
2431 /// Return the shuffle mask value for the specified element of the mask.
2432 /// Return -1 if the element is undef.
2433 static int getMaskValue(const Constant *Mask, unsigned Elt);
2434
2435 /// Return the shuffle mask value of this instruction for the given element
2436 /// index. Return -1 if the element is undef.
2437 int getMaskValue(unsigned Elt) const {
2438 return getMaskValue(getMask(), Elt);
2439 }
2440
2441 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2442 /// elements of the mask are returned as -1.
2443 static void getShuffleMask(const Constant *Mask,
2444 SmallVectorImpl<int> &Result);
2445
2446 /// Return the mask for this instruction as a vector of integers. Undefined
2447 /// elements of the mask are returned as -1.
2448 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2449 return getShuffleMask(getMask(), Result);
2450 }
2451
2452 SmallVector<int, 16> getShuffleMask() const {
2453 SmallVector<int, 16> Mask;
2454 getShuffleMask(Mask);
2455 return Mask;
2456 }
2457
2458 /// Return true if this shuffle returns a vector with a different number of
2459 /// elements than its source vectors.
2460 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2461 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2462 bool changesLength() const {
2463 unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
2464 unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
2465 return NumSourceElts != NumMaskElts;
2466 }
2467
2468 /// Return true if this shuffle returns a vector with a greater number of
2469 /// elements than its source vectors.
2470 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2471 bool increasesLength() const {
2472 unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
2473 unsigned NumMaskElts = getMask()->getType()->getVectorNumElements();
2474 return NumSourceElts < NumMaskElts;
2475 }
2476
2477 /// Return true if this shuffle mask chooses elements from exactly one source
2478 /// vector.
2479 /// Example: <7,5,undef,7>
2480 /// This assumes that vector operands are the same length as the mask.
2481 static bool isSingleSourceMask(ArrayRef<int> Mask);
2482 static bool isSingleSourceMask(const Constant *Mask) {
2483 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2483, __PRETTY_FUNCTION__))
;
2484 SmallVector<int, 16> MaskAsInts;
2485 getShuffleMask(Mask, MaskAsInts);
2486 return isSingleSourceMask(MaskAsInts);
2487 }
2488
2489 /// Return true if this shuffle chooses elements from exactly one source
2490 /// vector without changing the length of that vector.
2491 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2492 /// TODO: Optionally allow length-changing shuffles.
2493 bool isSingleSource() const {
2494 return !changesLength() && isSingleSourceMask(getMask());
2495 }
2496
2497 /// Return true if this shuffle mask chooses elements from exactly one source
2498 /// vector without lane crossings. A shuffle using this mask is not
2499 /// necessarily a no-op because it may change the number of elements from its
2500 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2501 /// Example: <undef,undef,2,3>
2502 static bool isIdentityMask(ArrayRef<int> Mask);
2503 static bool isIdentityMask(const Constant *Mask) {
2504 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2504, __PRETTY_FUNCTION__))
;
2505 SmallVector<int, 16> MaskAsInts;
2506 getShuffleMask(Mask, MaskAsInts);
2507 return isIdentityMask(MaskAsInts);
2508 }
2509
2510 /// Return true if this shuffle chooses elements from exactly one source
2511 /// vector without lane crossings and does not change the number of elements
2512 /// from its input vectors.
2513 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2514 bool isIdentity() const {
2515 return !changesLength() && isIdentityMask(getShuffleMask());
2516 }
2517
2518 /// Return true if this shuffle lengthens exactly one source vector with
2519 /// undefs in the high elements.
2520 bool isIdentityWithPadding() const;
2521
2522 /// Return true if this shuffle extracts the first N elements of exactly one
2523 /// source vector.
2524 bool isIdentityWithExtract() const;
2525
2526 /// Return true if this shuffle concatenates its 2 source vectors. This
2527 /// returns false if either input is undefined. In that case, the shuffle is
2528 /// is better classified as an identity with padding operation.
2529 bool isConcat() const;
2530
2531 /// Return true if this shuffle mask chooses elements from its source vectors
2532 /// without lane crossings. A shuffle using this mask would be
2533 /// equivalent to a vector select with a constant condition operand.
2534 /// Example: <4,1,6,undef>
2535 /// This returns false if the mask does not choose from both input vectors.
2536 /// In that case, the shuffle is better classified as an identity shuffle.
2537 /// This assumes that vector operands are the same length as the mask
2538 /// (a length-changing shuffle can never be equivalent to a vector select).
2539 static bool isSelectMask(ArrayRef<int> Mask);
2540 static bool isSelectMask(const Constant *Mask) {
2541 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2541, __PRETTY_FUNCTION__))
;
2542 SmallVector<int, 16> MaskAsInts;
2543 getShuffleMask(Mask, MaskAsInts);
2544 return isSelectMask(MaskAsInts);
2545 }
2546
2547 /// Return true if this shuffle chooses elements from its source vectors
2548 /// without lane crossings and all operands have the same number of elements.
2549 /// In other words, this shuffle is equivalent to a vector select with a
2550 /// constant condition operand.
2551 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2552 /// This returns false if the mask does not choose from both input vectors.
2553 /// In that case, the shuffle is better classified as an identity shuffle.
2554 /// TODO: Optionally allow length-changing shuffles.
2555 bool isSelect() const {
2556 return !changesLength() && isSelectMask(getMask());
2557 }
2558
2559 /// Return true if this shuffle mask swaps the order of elements from exactly
2560 /// one source vector.
2561 /// Example: <7,6,undef,4>
2562 /// This assumes that vector operands are the same length as the mask.
2563 static bool isReverseMask(ArrayRef<int> Mask);
2564 static bool isReverseMask(const Constant *Mask) {
2565 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2565, __PRETTY_FUNCTION__))
;
2566 SmallVector<int, 16> MaskAsInts;
2567 getShuffleMask(Mask, MaskAsInts);
2568 return isReverseMask(MaskAsInts);
2569 }
2570
2571 /// Return true if this shuffle swaps the order of elements from exactly
2572 /// one source vector.
2573 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2574 /// TODO: Optionally allow length-changing shuffles.
2575 bool isReverse() const {
2576 return !changesLength() && isReverseMask(getMask());
2577 }
2578
2579 /// Return true if this shuffle mask chooses all elements with the same value
2580 /// as the first element of exactly one source vector.
2581 /// Example: <4,undef,undef,4>
2582 /// This assumes that vector operands are the same length as the mask.
2583 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2584 static bool isZeroEltSplatMask(const Constant *Mask) {
2585 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2585, __PRETTY_FUNCTION__))
;
2586 SmallVector<int, 16> MaskAsInts;
2587 getShuffleMask(Mask, MaskAsInts);
2588 return isZeroEltSplatMask(MaskAsInts);
2589 }
2590
2591 /// Return true if all elements of this shuffle are the same value as the
2592 /// first element of exactly one source vector without changing the length
2593 /// of that vector.
2594 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2595 /// TODO: Optionally allow length-changing shuffles.
2596 /// TODO: Optionally allow splats from other elements.
2597 bool isZeroEltSplat() const {
2598 return !changesLength() && isZeroEltSplatMask(getMask());
2599 }
2600
2601 /// Return true if this shuffle mask is a transpose mask.
2602 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2603 /// even- or odd-numbered vector elements from two n-dimensional source
2604 /// vectors and write each result into consecutive elements of an
2605 /// n-dimensional destination vector. Two shuffles are necessary to complete
2606 /// the transpose, one for the even elements and another for the odd elements.
2607 /// This description closely follows how the TRN1 and TRN2 AArch64
2608 /// instructions operate.
2609 ///
2610 /// For example, a simple 2x2 matrix can be transposed with:
2611 ///
2612 /// ; Original matrix
2613 /// m0 = < a, b >
2614 /// m1 = < c, d >
2615 ///
2616 /// ; Transposed matrix
2617 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2618 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2619 ///
2620 /// For matrices having greater than n columns, the resulting nx2 transposed
2621 /// matrix is stored in two result vectors such that one vector contains
2622 /// interleaved elements from all the even-numbered rows and the other vector
2623 /// contains interleaved elements from all the odd-numbered rows. For example,
2624 /// a 2x4 matrix can be transposed with:
2625 ///
2626 /// ; Original matrix
2627 /// m0 = < a, b, c, d >
2628 /// m1 = < e, f, g, h >
2629 ///
2630 /// ; Transposed matrix
2631 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2632 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2633 static bool isTransposeMask(ArrayRef<int> Mask);
2634 static bool isTransposeMask(const Constant *Mask) {
2635 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")((Mask->getType()->isVectorTy() && "Shuffle needs vector constant."
) ? static_cast<void> (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2635, __PRETTY_FUNCTION__))
;
2636 SmallVector<int, 16> MaskAsInts;
2637 getShuffleMask(Mask, MaskAsInts);
2638 return isTransposeMask(MaskAsInts);
2639 }
2640
2641 /// Return true if this shuffle transposes the elements of its inputs without
2642 /// changing the length of the vectors. This operation may also be known as a
2643 /// merge or interleave. See the description for isTransposeMask() for the
2644 /// exact specification.
2645 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2646 bool isTranspose() const {
2647 return !changesLength() && isTransposeMask(getMask());
2648 }
2649
2650 /// Change values in a shuffle permute mask assuming the two vector operands
2651 /// of length InVecNumElts have swapped position.
2652 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2653 unsigned InVecNumElts) {
2654 for (int &Idx : Mask) {
2655 if (Idx == -1)
2656 continue;
2657 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2658 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&((Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
"shufflevector mask index out of range") ? static_cast<void
> (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2659, __PRETTY_FUNCTION__))
2659 "shufflevector mask index out of range")((Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
"shufflevector mask index out of range") ? static_cast<void
> (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/IR/Instructions.h"
, 2659, __PRETTY_FUNCTION__))
;
2660 }
2661 }
2662
2663 // Methods for support type inquiry through isa, cast, and dyn_cast:
2664 static bool classof(const Instruction *I) {
2665 return I->getOpcode() == Instruction::ShuffleVector;
2666 }
2667 static bool classof(const Value *V) {
2668 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2669 }
2670};
2671
// ShuffleVectorInst always has exactly three operands (V1, V2, Mask),
// matching the `operator new(s, 3)` allocation in the class.
template <>
struct OperandTraits<ShuffleVectorInst> :
  public FixedNumOperandTraits<ShuffleVectorInst, 3> {
};
2676
// Out-of-line operand accessors for ShuffleVectorInst, i.e. the expansion of
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value): iterator
// access plus range-checked getOperand/setOperand over the three operands.
ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
  return OperandTraits<ShuffleVectorInst>::op_begin(this);
}
ShuffleVectorInst::const_op_iterator ShuffleVectorInst::op_begin() const {
  return OperandTraits<ShuffleVectorInst>::op_begin(
      const_cast<ShuffleVectorInst *>(this));
}
ShuffleVectorInst::op_iterator ShuffleVectorInst::op_end() {
  return OperandTraits<ShuffleVectorInst>::op_end(this);
}
ShuffleVectorInst::const_op_iterator ShuffleVectorInst::op_end() const {
  return OperandTraits<ShuffleVectorInst>::op_end(
      const_cast<ShuffleVectorInst *>(this));
}
Value *ShuffleVectorInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<ShuffleVectorInst>::op_begin(
          const_cast<ShuffleVectorInst *>(this))[i_nocapture].get());
}
void ShuffleVectorInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<ShuffleVectorInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned ShuffleVectorInst::getNumOperands() const {
  return OperandTraits<ShuffleVectorInst>::operands(this);
}
template <int Idx_nocapture> Use &ShuffleVectorInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &ShuffleVectorInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
2678
2679//===----------------------------------------------------------------------===//
2680// ExtractValueInst Class
2681//===----------------------------------------------------------------------===//
2682
/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  // Constant index path into the aggregate; stored inline (not as operands).
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);

  /// Constructors - Create an extractvalue instruction with a base aggregate
  /// value and a list of indices. The first ctor can optionally insert before
  /// an existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr,
                          Instruction *InsertBefore);
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Shared constructor tail: records Idxs and sets the instruction name.
  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractValueInst *cloneImpl() const;

public:
  // Factory: create and optionally insert before an existing instruction.
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  Instruction *InsertBefore = nullptr) {
    return new
      ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  // Factory: create and append to the end of a basic block.
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock *InsertAtEnd) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
  }

  /// Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  using idx_iterator = const unsigned*;

  // Iteration over the constant index list.
  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  // The aggregate being indexed is the sole operand (operand 0).
  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // Always true: an extractvalue instruction carries at least one index.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2771