Bug Summary

File: llvm/lib/Transforms/Scalar/MergeICmps.cpp
Warning: line 377, column 10
Called C++ object pointer is null
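
The flagged code is in visitCmpBlock() (source lines 375-377 below): dyn_cast<ConstantInt>(Val) returns nullptr when the incoming Phi value is not a ConstantInt, and the result is then dereferenced without a null check. The relevant lines, annotated for quick reference:

  const auto *const Const = dyn_cast<ConstantInt>(Val);  // line 375: may yield nullptr
  LLVM_DEBUG(dbgs() << "const\n");                       // line 376
  if (!Const->isZero()) return {};                       // line 377: null dereference on this path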

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MergeICmps.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-28-092409-31635-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Transforms/Scalar/MergeICmps.cpp

/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Transforms/Scalar/MergeICmps.cpp

1//===- MergeICmps.cpp - Optimize chains of integer comparisons ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass turns chains of integer comparisons into memcmp (the memcmp is
10// later typically inlined as a chain of efficient hardware comparisons). This
11// typically benefits c++ member or nonmember operator==().
12//
13// The basic idea is to replace a longer chain of integer comparisons loaded
14// from contiguous memory locations into a shorter chain of larger integer
15// comparisons. Benefits are double:
16// - There are less jumps, and therefore less opportunities for mispredictions
17// and I-cache misses.
18// - Code size is smaller, both because jumps are removed and because the
19// encoding of a 2*n byte compare is smaller than that of two n-byte
20// compares.
21//
22// Example:
23//
24// struct S {
25// int a;
26// char b;
27// char c;
28// uint16_t d;
29// bool operator==(const S& o) const {
30// return a == o.a && b == o.b && c == o.c && d == o.d;
31// }
32// };
33//
34// Is optimized as :
35//
36// bool S::operator==(const S& o) const {
37// return memcmp(this, &o, 8) == 0;
38// }
39//
40// Which will later be expanded (ExpandMemCmp) as a single 8-bytes icmp.
41//
42//===----------------------------------------------------------------------===//
43
44#include "llvm/Transforms/Scalar/MergeICmps.h"
45#include "llvm/Analysis/DomTreeUpdater.h"
46#include "llvm/Analysis/GlobalsModRef.h"
47#include "llvm/Analysis/Loads.h"
48#include "llvm/Analysis/TargetLibraryInfo.h"
49#include "llvm/Analysis/TargetTransformInfo.h"
50#include "llvm/IR/Dominators.h"
51#include "llvm/IR/Function.h"
52#include "llvm/IR/IRBuilder.h"
53#include "llvm/InitializePasses.h"
54#include "llvm/Pass.h"
55#include "llvm/Transforms/Scalar.h"
56#include "llvm/Transforms/Utils/BasicBlockUtils.h"
57#include "llvm/Transforms/Utils/BuildLibCalls.h"
58#include <algorithm>
59#include <numeric>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
67#define DEBUG_TYPE "mergeicmps"
68
69// Returns true if the instruction is a simple load or a simple store
70static bool isSimpleLoadOrStore(const Instruction *I) {
71 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
72 return LI->isSimple();
73 if (const StoreInst *SI = dyn_cast<StoreInst>(I))
74 return SI->isSimple();
75 return false;
76}
77
78// A BCE atom "Binary Compare Expression Atom" represents an integer load
79// that is a constant offset from a base value, e.g. `a` or `o.c` in the example
80// at the top.
81struct BCEAtom {
82 BCEAtom() = default;
83 BCEAtom(GetElementPtrInst *GEP, LoadInst *LoadI, int BaseId, APInt Offset)
84 : GEP(GEP), LoadI(LoadI), BaseId(BaseId), Offset(Offset) {}
85
86 BCEAtom(const BCEAtom &) = delete;
87 BCEAtom &operator=(const BCEAtom &) = delete;
88
89 BCEAtom(BCEAtom &&that) = default;
90 BCEAtom &operator=(BCEAtom &&that) {
91 if (this == &that)
92 return *this;
93 GEP = that.GEP;
94 LoadI = that.LoadI;
95 BaseId = that.BaseId;
96 Offset = std::move(that.Offset);
97 return *this;
98 }
99
100 // We want to order BCEAtoms by (Base, Offset). However we cannot use
101 // the pointer values for Base because these are non-deterministic.
102 // To make sure that the sort order is stable, we first assign to each atom
103 // base value an index based on its order of appearance in the chain of
104 // comparisons. We call this index `BaseOrdering`. For example, for:
105 // b[3] == c[2] && a[1] == d[1] && b[4] == c[3]
106 // | block 1 | | block 2 | | block 3 |
107 // b gets assigned index 0 and a index 1, because b appears as LHS in block 1,
108 // which is before block 2.
109 // We then sort by (BaseOrdering[LHS.Base()], LHS.Offset), which is stable.
110 bool operator<(const BCEAtom &O) const {
111 return BaseId != O.BaseId ? BaseId < O.BaseId : Offset.slt(O.Offset);
112 }
113
114 GetElementPtrInst *GEP = nullptr;
115 LoadInst *LoadI = nullptr;
116 unsigned BaseId = 0;
117 APInt Offset;
118};
119
120// A class that assigns increasing ids to values in the order in which they are
121 // seen. See comment in `BCEAtom::operator<()`.
122class BaseIdentifier {
123public:
124 // Returns the id for value `Base`, after assigning one if `Base` has not been
125 // seen before.
126 int getBaseId(const Value *Base) {
127 assert(Base && "invalid base");
128 const auto Insertion = BaseToIndex.try_emplace(Base, Order);
129 if (Insertion.second)
130 ++Order;
131 return Insertion.first->second;
132 }
133
134private:
135 unsigned Order = 1;
136 DenseMap<const Value*, int> BaseToIndex;
137};
138
139// If this value is a load from a constant offset w.r.t. a base address, and
140// there are no other users of the load or address, returns the base address and
141// the offset.
142BCEAtom visitICmpLoadOperand(Value *const Val, BaseIdentifier &BaseId) {
143 auto *const LoadI = dyn_cast<LoadInst>(Val);
144 if (!LoadI)
145 return {};
146 LLVM_DEBUG(dbgs() << "load\n");
147 if (LoadI->isUsedOutsideOfBlock(LoadI->getParent())) {
148 LLVM_DEBUG(dbgs() << "used outside of block\n");
149 return {};
150 }
151 // Do not optimize atomic loads to non-atomic memcmp
152 if (!LoadI->isSimple()) {
153 LLVM_DEBUG(dbgs() << "volatile or atomic\n");
154 return {};
155 }
156 Value *const Addr = LoadI->getOperand(0);
157 auto *const GEP = dyn_cast<GetElementPtrInst>(Addr);
158 if (!GEP)
159 return {};
160 LLVM_DEBUG(dbgs() << "GEP\n");
161 if (GEP->isUsedOutsideOfBlock(LoadI->getParent())) {
162 LLVM_DEBUG(dbgs() << "used outside of block\n");
163 return {};
164 }
165 const auto &DL = GEP->getModule()->getDataLayout();
166 if (!isDereferenceablePointer(GEP, LoadI->getType(), DL)) {
167 LLVM_DEBUG(dbgs() << "not dereferenceable\n");
168 // We need to make sure that we can do comparison in any order, so we
169 // require memory to be unconditionally dereferenceable.
170 return {};
171 }
172 APInt Offset = APInt(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
173 if (!GEP->accumulateConstantOffset(DL, Offset))
174 return {};
175 return BCEAtom(GEP, LoadI, BaseId.getBaseId(GEP->getPointerOperand()),
176 Offset);
177}
178
179// A basic block with a comparison between two BCE atoms, e.g. `a == o.a` in the
180// example at the top.
181// The block might do extra work besides the atom comparison, in which case
182// doesOtherWork() returns true. Under some conditions, the block can be
183// split into the atom comparison part and the "other work" part
184// (see canSplit()).
185// Note: the terminology is misleading: the comparison is symmetric, so there
186// is no real {l/r}hs. What we want though is to have the same base on the
187// left (resp. right), so that we can detect consecutive loads. To ensure this
188// we put the smallest atom on the left.
189class BCECmpBlock {
190 public:
191 BCECmpBlock() {}
192
193 BCECmpBlock(BCEAtom L, BCEAtom R, int SizeBits)
194 : Lhs_(std::move(L)), Rhs_(std::move(R)), SizeBits_(SizeBits) {
195 if (Rhs_ < Lhs_) std::swap(Rhs_, Lhs_);
196 }
197
198 bool IsValid() const { return Lhs_.BaseId != 0 && Rhs_.BaseId != 0; }
199
200 // Assert the block is consistent: If valid, it should also have
201 // non-null members besides Lhs_ and Rhs_.
202 void AssertConsistent() const {
203 if (IsValid()) {
204 assert(BB);
205 assert(CmpI);
206 assert(BranchI);
207 }
208 }
209
210 const BCEAtom &Lhs() const { return Lhs_; }
211 const BCEAtom &Rhs() const { return Rhs_; }
212 int SizeBits() const { return SizeBits_; }
213
214 // Returns true if the block does other work besides the comparison.
215 bool doesOtherWork() const;
216
217 // Returns true if the non-BCE-cmp instructions can be separated from BCE-cmp
218 // instructions in the block.
219 bool canSplit(AliasAnalysis &AA) const;
220
221 // Return true if all the relevant instructions in the BCE-cmp-block can
222 // be sunk below this instruction. By doing this, we know we can separate the
223 // BCE-cmp-block instructions from the non-BCE-cmp-block instructions in the
224 // block.
225 bool canSinkBCECmpInst(const Instruction *, DenseSet<Instruction *> &,
226 AliasAnalysis &AA) const;
227
228 // We can separate the BCE-cmp-block instructions and the non-BCE-cmp-block
229 // instructions. Split the old block and move all non-BCE-cmp-insts into the
230 // new parent block.
231 void split(BasicBlock *NewParent, AliasAnalysis &AA) const;
232
233 // The basic block where this comparison happens.
234 BasicBlock *BB = nullptr;
235 // The ICMP for this comparison.
236 ICmpInst *CmpI = nullptr;
237 // The terminating branch.
238 BranchInst *BranchI = nullptr;
239 // The block requires splitting.
240 bool RequireSplit = false;
241
242private:
243 BCEAtom Lhs_;
244 BCEAtom Rhs_;
245 int SizeBits_ = 0;
246};
247
248bool BCECmpBlock::canSinkBCECmpInst(const Instruction *Inst,
249 DenseSet<Instruction *> &BlockInsts,
250 AliasAnalysis &AA) const {
251 // If this instruction has side effects and it's in the middle of the BCE cmp block
252 // instructions, then bail for now.
253 if (Inst->mayHaveSideEffects()) {
254 // Bail if this is not a simple load or store
255 if (!isSimpleLoadOrStore(Inst))
256 return false;
257 // Disallow stores that might alias the BCE operands
258 MemoryLocation LLoc = MemoryLocation::get(Lhs_.LoadI);
259 MemoryLocation RLoc = MemoryLocation::get(Rhs_.LoadI);
260 if (isModSet(AA.getModRefInfo(Inst, LLoc)) ||
261 isModSet(AA.getModRefInfo(Inst, RLoc)))
262 return false;
263 }
264 // Make sure this instruction does not use any of the BCE cmp block
265 // instructions as operand.
266 for (auto BI : BlockInsts) {
267 if (is_contained(Inst->operands(), BI))
268 return false;
269 }
270 return true;
271}
272
273void BCECmpBlock::split(BasicBlock *NewParent, AliasAnalysis &AA) const {
274 DenseSet<Instruction *> BlockInsts(
275 {Lhs_.GEP, Rhs_.GEP, Lhs_.LoadI, Rhs_.LoadI, CmpI, BranchI});
276 llvm::SmallVector<Instruction *, 4> OtherInsts;
277 for (Instruction &Inst : *BB) {
278 if (BlockInsts.count(&Inst))
279 continue;
280 assert(canSinkBCECmpInst(&Inst, BlockInsts, AA) &&
281 "Split unsplittable block");
282 // This is a non-BCE-cmp-block instruction. And it can be separated
283 // from the BCE-cmp-block instruction.
284 OtherInsts.push_back(&Inst);
285 }
286
287 // Do the actual splitting.
288 for (Instruction *Inst : reverse(OtherInsts)) {
289 Inst->moveBefore(&*NewParent->begin());
290 }
291}
292
293bool BCECmpBlock::canSplit(AliasAnalysis &AA) const {
294 DenseSet<Instruction *> BlockInsts(
295 {Lhs_.GEP, Rhs_.GEP, Lhs_.LoadI, Rhs_.LoadI, CmpI, BranchI});
296 for (Instruction &Inst : *BB) {
297 if (!BlockInsts.count(&Inst)) {
298 if (!canSinkBCECmpInst(&Inst, BlockInsts, AA))
299 return false;
300 }
301 }
302 return true;
303}
304
305bool BCECmpBlock::doesOtherWork() const {
306 AssertConsistent();
307 // All the instructions we care about in the BCE cmp block.
308 DenseSet<Instruction *> BlockInsts(
309 {Lhs_.GEP, Rhs_.GEP, Lhs_.LoadI, Rhs_.LoadI, CmpI, BranchI});
310 // TODO(courbet): Can we allow some other things? This is very conservative.
311 // We might be able to get away with anything that does not have any side
312 // effects outside of the basic block.
313 // Note: The GEPs and/or loads are not necessarily in the same block.
314 for (const Instruction &Inst : *BB) {
315 if (!BlockInsts.count(&Inst))
316 return true;
317 }
318 return false;
319}
320
321// Visit the given comparison. If this is a comparison between two valid
322// BCE atoms, returns the comparison.
323BCECmpBlock visitICmp(const ICmpInst *const CmpI,
324 const ICmpInst::Predicate ExpectedPredicate,
325 BaseIdentifier &BaseId) {
326 // The comparison can only be used once:
327 // - For intermediate blocks, as a branch condition.
328 // - For the final block, as an incoming value for the Phi.
329 // If there are any other uses of the comparison, we cannot merge it with
330 // other comparisons as we would create an orphan use of the value.
331 if (!CmpI->hasOneUse()) {
332 LLVM_DEBUG(dbgs() << "cmp has several uses\n");
333 return {};
334 }
335 if (CmpI->getPredicate() != ExpectedPredicate)
336 return {};
337 LLVM_DEBUG(dbgs() << "cmp "
338 << (ExpectedPredicate == ICmpInst::ICMP_EQ ? "eq" : "ne")
339 << "\n");
340 auto Lhs = visitICmpLoadOperand(CmpI->getOperand(0), BaseId);
341 if (!Lhs.BaseId)
342 return {};
343 auto Rhs = visitICmpLoadOperand(CmpI->getOperand(1), BaseId);
344 if (!Rhs.BaseId)
345 return {};
346 const auto &DL = CmpI->getModule()->getDataLayout();
347 return BCECmpBlock(std::move(Lhs), std::move(Rhs),
348 DL.getTypeSizeInBits(CmpI->getOperand(0)->getType()));
349}
350
351// Visit the given comparison block. If this is a comparison between two valid
352// BCE atoms, returns the comparison.
353BCECmpBlock visitCmpBlock(Value *const Val, BasicBlock *const Block,
354 const BasicBlock *const PhiBlock,
355 BaseIdentifier &BaseId) {
356 if (Block->empty()) return {};
[41] Assuming the condition is false
[42] Taking false branch
357 auto *const BranchI = dyn_cast<BranchInst>(Block->getTerminator());
[43] Assuming the object is a 'BranchInst'
358 if (!BranchI) return {};
[43.1] 'BranchI' is non-null
[44] Taking false branch
359 LLVM_DEBUG(dbgs() << "branch\n");
[45] Assuming 'DebugFlag' is false
[46] Loop condition is false. Exiting loop
360 if (BranchI->isUnconditional()) {
[47] Calling 'BranchInst::isUnconditional'
[50] Returning from 'BranchInst::isUnconditional'
[51] Taking false branch
361 // In this case, we expect an incoming value which is the result of the
362 // comparison. This is the last link in the chain of comparisons (note
363 // that this does not mean that this is the last incoming value, blocks
364 // can be reordered).
365 auto *const CmpI = dyn_cast<ICmpInst>(Val);
366 if (!CmpI) return {};
367 LLVM_DEBUG(dbgs() << "icmp\n");
368 auto Result = visitICmp(CmpI, ICmpInst::ICMP_EQ, BaseId);
369 Result.CmpI = CmpI;
370 Result.BranchI = BranchI;
371 return Result;
372 } else {
373 // In this case, we expect a constant incoming value (the comparison is
374 // chained).
375 const auto *const Const = dyn_cast<ConstantInt>(Val);
[52] Assuming 'Val' is not a 'ConstantInt'
[53] 'Const' initialized to a null pointer value
376 LLVM_DEBUG(dbgs() << "const\n");
[54] Loop condition is false. Exiting loop
377 if (!Const->isZero()) return {};
[55] Called C++ object pointer is null
378 LLVM_DEBUG(dbgs() << "false\n");
379 auto *const CmpI = dyn_cast<ICmpInst>(BranchI->getCondition());
380 if (!CmpI) return {};
381 LLVM_DEBUG(dbgs() << "icmp\n");
382 assert(BranchI->getNumSuccessors() == 2 && "expecting a cond branch");
383 BasicBlock *const FalseBlock = BranchI->getSuccessor(1);
384 auto Result = visitICmp(
385 CmpI, FalseBlock == PhiBlock ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
386 BaseId);
387 Result.CmpI = CmpI;
388 Result.BranchI = BranchI;
389 return Result;
390 }
391 return {};
392}
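
One way the null path above could be guarded, sketched here as an editorial suggestion rather than the upstream patch: treat a non-constant incoming value as "not part of a mergeable chain" and bail out before calling isZero().

  // Editorial sketch (not the upstream fix): check the dyn_cast result before
  // dereferencing it, so a non-ConstantInt incoming value aborts the chain
  // instead of triggering the null dereference reported at line 377.
  const auto *const Const = dyn_cast<ConstantInt>(Val);
  if (!Const || !Const->isZero())
    return {};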
393
394static inline void enqueueBlock(std::vector<BCECmpBlock> &Comparisons,
395 BCECmpBlock &&Comparison) {
396 LLVM_DEBUG(dbgs() << "Block '" << Comparison.BB->getName()
397 << "': Found cmp of " << Comparison.SizeBits()
398 << " bits between " << Comparison.Lhs().BaseId << " + "
399 << Comparison.Lhs().Offset << " and "
400 << Comparison.Rhs().BaseId << " + "
401 << Comparison.Rhs().Offset << "\n");
402 LLVM_DEBUG(dbgs() << "\n");
403 Comparisons.push_back(std::move(Comparison));
404}
405
406// A chain of comparisons.
407class BCECmpChain {
408 public:
409 BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
410 AliasAnalysis &AA);
411
412 int size() const { return Comparisons_.size(); }
413
414#ifdef MERGEICMPS_DOT_ON
415 void dump() const;
416#endif // MERGEICMPS_DOT_ON
417
418 bool simplify(const TargetLibraryInfo &TLI, AliasAnalysis &AA,
419 DomTreeUpdater &DTU);
420
421private:
422 static bool IsContiguous(const BCECmpBlock &First,
423 const BCECmpBlock &Second) {
424 return First.Lhs().BaseId == Second.Lhs().BaseId &&
425 First.Rhs().BaseId == Second.Rhs().BaseId &&
426 First.Lhs().Offset + First.SizeBits() / 8 == Second.Lhs().Offset &&
427 First.Rhs().Offset + First.SizeBits() / 8 == Second.Rhs().Offset;
428 }
429
430 PHINode &Phi_;
431 std::vector<BCECmpBlock> Comparisons_;
432 // The original entry block (before sorting);
433 BasicBlock *EntryBlock_;
434};
435
436BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
437 AliasAnalysis &AA)
438 : Phi_(Phi) {
439 assert(!Blocks.empty() && "a chain should have at least one block");
[34] Assuming the condition is true
[35] '?' condition is true
440 // Now look inside blocks to check for BCE comparisons.
441 std::vector<BCECmpBlock> Comparisons;
442 BaseIdentifier BaseId;
443 for (size_t BlockIdx = 0; BlockIdx < Blocks.size(); ++BlockIdx) {
[36] Assuming the condition is true
[37] Loop condition is true. Entering loop body
444 BasicBlock *const Block = Blocks[BlockIdx];
445 assert(Block && "invalid block");
[38] Assuming 'Block' is non-null
[39] '?' condition is true
446 BCECmpBlock Comparison = visitCmpBlock(Phi.getIncomingValueForBlock(Block),
[40] Calling 'visitCmpBlock'
447 Block, Phi.getParent(), BaseId);
448 Comparison.BB = Block;
449 if (!Comparison.IsValid()) {
450 LLVM_DEBUG(dbgs() << "chain with invalid BCECmpBlock, no merge.\n");
451 return;
452 }
453 if (Comparison.doesOtherWork()) {
454 LLVM_DEBUG(dbgs() << "block '" << Comparison.BB->getName()
455 << "' does extra work besides compare\n");
456 if (Comparisons.empty()) {
457 // This is the initial block in the chain, in case this block does other
458 // work, we can try to split the block and move the irrelevant
459 // instructions to the predecessor.
460 //
461 // If this is not the initial block in the chain, splitting it won't
462 // work.
463 //
464 // As once split, there will still be instructions before the BCE cmp
465 // instructions that do other work in program order, i.e. within the
466 // chain before sorting. Unless we can abort the chain at this point
467 // and start anew.
468 //
469 // NOTE: we only handle blocks with a single predecessor for now.
470 if (Comparison.canSplit(AA)) {
471 LLVM_DEBUG(dbgs()
472 << "Split initial block '" << Comparison.BB->getName()
473 << "' that does extra work besides compare\n");
474 Comparison.RequireSplit = true;
475 enqueueBlock(Comparisons, std::move(Comparison));
476 } else {
477 LLVM_DEBUG(dbgs()
478 << "ignoring initial block '" << Comparison.BB->getName()
479 << "' that does extra work besides compare\n");
480 }
481 continue;
482 }
483 // TODO(courbet): Right now we abort the whole chain. We could be
484 // merging only the blocks that don't do other work and resume the
485 // chain from there. For example:
486 // if (a[0] == b[0]) { // bb1
487 // if (a[1] == b[1]) { // bb2
488 // some_value = 3; //bb3
489 // if (a[2] == b[2]) { //bb3
490 // do a ton of stuff //bb4
491 // }
492 // }
493 // }
494 //
495 // This is:
496 //
497 // bb1 --eq--> bb2 --eq--> bb3* -eq--> bb4 --+
498 //  \           \           \               \
499 //   ne          ne          ne              \
500 //    \           \           \               v
501 //     +-----------+-----------+-------------> bb_phi
502 //
503 // We can only merge the first two comparisons, because bb3* does
504 // "other work" (setting some_value to 3).
505 // We could still merge bb1 and bb2 though.
506 return;
507 }
508 enqueueBlock(Comparisons, std::move(Comparison));
509 }
510
511 // It is possible we have no suitable comparison to merge.
512 if (Comparisons.empty()) {
513 LLVM_DEBUG(dbgs() << "chain with no BCE basic blocks, no merge\n");
514 return;
515 }
516 EntryBlock_ = Comparisons[0].BB;
517 Comparisons_ = std::move(Comparisons);
518#ifdef MERGEICMPS_DOT_ON
519 errs() << "BEFORE REORDERING:\n\n";
520 dump();
521#endif // MERGEICMPS_DOT_ON
522 // Reorder blocks by LHS. We can do that without changing the
523 // semantics because we are only accessing dereferenceable memory.
524 llvm::sort(Comparisons_,
525 [](const BCECmpBlock &LhsBlock, const BCECmpBlock &RhsBlock) {
526 return std::tie(LhsBlock.Lhs(), LhsBlock.Rhs()) <
527 std::tie(RhsBlock.Lhs(), RhsBlock.Rhs());
528 });
529#ifdef MERGEICMPS_DOT_ON
530 errs() << "AFTER REORDERING:\n\n";
531 dump();
532#endif // MERGEICMPS_DOT_ON
533}
534
535#ifdef MERGEICMPS_DOT_ON
536void BCECmpChain::dump() const {
537 errs() << "digraph dag {\n";
538 errs() << " graph [bgcolor=transparent];\n";
539 errs() << " node [color=black,style=filled,fillcolor=lightyellow];\n";
540 errs() << " edge [color=black];\n";
541 for (size_t I = 0; I < Comparisons_.size(); ++I) {
542 const auto &Comparison = Comparisons_[I];
543 errs() << " \"" << I << "\" [label=\"%"
544 << Comparison.Lhs().Base()->getName() << " + "
545 << Comparison.Lhs().Offset << " == %"
546 << Comparison.Rhs().Base()->getName() << " + "
547 << Comparison.Rhs().Offset << " (" << (Comparison.SizeBits() / 8)
548 << " bytes)\"];\n";
549 const Value *const Val = Phi_.getIncomingValueForBlock(Comparison.BB);
550 if (I > 0) errs() << " \"" << (I - 1) << "\" -> \"" << I << "\";\n";
551 errs() << " \"" << I << "\" -> \"Phi\" [label=\"" << *Val << "\"];\n";
552 }
553 errs() << " \"Phi\" [label=\"Phi\"];\n";
554 errs() << "}\n\n";
555}
556#endif // MERGEICMPS_DOT_ON
557
558namespace {
559
560// A class to compute the name of a set of merged basic blocks.
561// This is optimized for the common case of no block names.
562class MergedBlockName {
563 // Storage for the uncommon case of several named blocks.
564 SmallString<16> Scratch;
565
566public:
567 explicit MergedBlockName(ArrayRef<BCECmpBlock> Comparisons)
568 : Name(makeName(Comparisons)) {}
569 const StringRef Name;
570
571private:
572 StringRef makeName(ArrayRef<BCECmpBlock> Comparisons) {
573 assert(!Comparisons.empty() && "no basic block");
574 // Fast path: only one block, or no names at all.
575 if (Comparisons.size() == 1)
576 return Comparisons[0].BB->getName();
577 const int size = std::accumulate(Comparisons.begin(), Comparisons.end(), 0,
578 [](int i, const BCECmpBlock &Cmp) {
579 return i + Cmp.BB->getName().size();
580 });
581 if (size == 0)
582 return StringRef("", 0);
583
584 // Slow path: at least two blocks, at least one block with a name.
585 Scratch.clear();
586 // We'll have `size` bytes for name and `Comparisons.size() - 1` bytes for
587 // separators.
588 Scratch.reserve(size + Comparisons.size() - 1);
589 const auto append = [this](StringRef str) {
590 Scratch.append(str.begin(), str.end());
591 };
592 append(Comparisons[0].BB->getName());
593 for (int I = 1, E = Comparisons.size(); I < E; ++I) {
594 const BasicBlock *const BB = Comparisons[I].BB;
595 if (!BB->getName().empty()) {
596 append("+");
597 append(BB->getName());
598 }
599 }
600 return StringRef(Scratch);
601 }
602};
603} // namespace
604
605// Merges the given contiguous comparison blocks into one memcmp block.
606static BasicBlock *mergeComparisons(ArrayRef<BCECmpBlock> Comparisons,
607 BasicBlock *const InsertBefore,
608 BasicBlock *const NextCmpBlock,
609 PHINode &Phi, const TargetLibraryInfo &TLI,
610 AliasAnalysis &AA, DomTreeUpdater &DTU) {
611 assert(!Comparisons.empty() && "merging zero comparisons");
612 LLVMContext &Context = NextCmpBlock->getContext();
613 const BCECmpBlock &FirstCmp = Comparisons[0];
614
615 // Create a new cmp block before next cmp block.
616 BasicBlock *const BB =
617 BasicBlock::Create(Context, MergedBlockName(Comparisons).Name,
618 NextCmpBlock->getParent(), InsertBefore);
619 IRBuilder<> Builder(BB);
620 // Add the GEPs from the first BCECmpBlock.
621 Value *const Lhs = Builder.Insert(FirstCmp.Lhs().GEP->clone());
622 Value *const Rhs = Builder.Insert(FirstCmp.Rhs().GEP->clone());
623
624 Value *IsEqual = nullptr;
625 LLVM_DEBUG(dbgs() << "Merging " << Comparisons.size() << " comparisons -> "
626 << BB->getName() << "\n");
627 if (Comparisons.size() == 1) {
628 LLVM_DEBUG(dbgs() << "Only one comparison, updating branches\n");
629 Value *const LhsLoad =
630 Builder.CreateLoad(FirstCmp.Lhs().LoadI->getType(), Lhs);
631 Value *const RhsLoad =
632 Builder.CreateLoad(FirstCmp.Rhs().LoadI->getType(), Rhs);
633 // There are no blocks to merge, just do the comparison.
634 IsEqual = Builder.CreateICmpEQ(LhsLoad, RhsLoad);
635 } else {
636 // If there is one block that requires splitting, we do it now, i.e.
637 // just before we know we will collapse the chain. The instructions
638 // can be executed before any of the instructions in the chain.
639 const auto ToSplit =
640 std::find_if(Comparisons.begin(), Comparisons.end(),
641 [](const BCECmpBlock &B) { return B.RequireSplit; });
642 if (ToSplit != Comparisons.end()) {
643 LLVM_DEBUG(dbgs() << "Splitting non_BCE work to header\n");
644 ToSplit->split(BB, AA);
645 }
646
647 const unsigned TotalSizeBits = std::accumulate(
648 Comparisons.begin(), Comparisons.end(), 0u,
649 [](int Size, const BCECmpBlock &C) { return Size + C.SizeBits(); });
650
651 // Create memcmp() == 0.
652 const auto &DL = Phi.getModule()->getDataLayout();
653 Value *const MemCmpCall = emitMemCmp(
654 Lhs, Rhs,
655 ConstantInt::get(DL.getIntPtrType(Context), TotalSizeBits / 8), Builder,
656 DL, &TLI);
657 IsEqual = Builder.CreateICmpEQ(
658 MemCmpCall, ConstantInt::get(Type::getInt32Ty(Context), 0));
659 }
660
661 BasicBlock *const PhiBB = Phi.getParent();
662 // Add a branch to the next basic block in the chain.
663 if (NextCmpBlock == PhiBB) {
664 // Continue to phi, passing it the comparison result.
665 Builder.CreateBr(PhiBB);
666 Phi.addIncoming(IsEqual, BB);
667 DTU.applyUpdates({{DominatorTree::Insert, BB, PhiBB}});
668 } else {
669 // Continue to next block if equal, exit to phi else.
670 Builder.CreateCondBr(IsEqual, NextCmpBlock, PhiBB);
671 Phi.addIncoming(ConstantInt::getFalse(Context), BB);
672 DTU.applyUpdates({{DominatorTree::Insert, BB, NextCmpBlock},
673 {DominatorTree::Insert, BB, PhiBB}});
674 }
675 return BB;
676}
677
678bool BCECmpChain::simplify(const TargetLibraryInfo &TLI, AliasAnalysis &AA,
679 DomTreeUpdater &DTU) {
680 assert(Comparisons_.size() >= 2 && "simplifying trivial BCECmpChain");
681 // First pass to check if there is at least one merge. If not, we don't do
682 // anything and we keep analysis passes intact.
683 const auto AtLeastOneMerged = [this]() {
684 for (size_t I = 1; I < Comparisons_.size(); ++I) {
685 if (IsContiguous(Comparisons_[I - 1], Comparisons_[I]))
686 return true;
687 }
688 return false;
689 };
690 if (!AtLeastOneMerged())
691 return false;
692
693 LLVM_DEBUG(dbgs() << "Simplifying comparison chain starting at block "
694 << EntryBlock_->getName() << "\n");
695
696 // Effectively merge blocks. We go in the reverse direction from the phi block
697 // so that the next block is always available to branch to.
698 const auto mergeRange = [this, &TLI, &AA, &DTU](int I, int Num,
699 BasicBlock *InsertBefore,
700 BasicBlock *Next) {
701 return mergeComparisons(makeArrayRef(Comparisons_).slice(I, Num),
702 InsertBefore, Next, Phi_, TLI, AA, DTU);
703 };
704 int NumMerged = 1;
705 BasicBlock *NextCmpBlock = Phi_.getParent();
706 for (int I = static_cast<int>(Comparisons_.size()) - 2; I >= 0; --I) {
707 if (IsContiguous(Comparisons_[I], Comparisons_[I + 1])) {
708 LLVM_DEBUG(dbgs() << "Merging block " << Comparisons_[I].BB->getName()
709 << " into " << Comparisons_[I + 1].BB->getName()
710 << "\n");
711 ++NumMerged;
712 } else {
713 NextCmpBlock = mergeRange(I + 1, NumMerged, NextCmpBlock, NextCmpBlock);
714 NumMerged = 1;
715 }
716 }
717 // Insert the entry block for the new chain before the old entry block.
718 // If the old entry block was the function entry, this ensures that the new
719 // entry can become the function entry.
720 NextCmpBlock = mergeRange(0, NumMerged, EntryBlock_, NextCmpBlock);
721
722 // Replace the original cmp chain with the new cmp chain by pointing all
723 // predecessors of EntryBlock_ to NextCmpBlock instead. This makes all cmp
724 // blocks in the old chain unreachable.
725 while (!pred_empty(EntryBlock_)) {
726 BasicBlock* const Pred = *pred_begin(EntryBlock_);
727 LLVM_DEBUG(dbgs() << "Updating jump into old chain from " << Pred->getName()
728 << "\n");
729 Pred->getTerminator()->replaceUsesOfWith(EntryBlock_, NextCmpBlock);
730 DTU.applyUpdates({{DominatorTree::Delete, Pred, EntryBlock_},
731 {DominatorTree::Insert, Pred, NextCmpBlock}});
732 }
733
734 // If the old cmp chain was the function entry, we need to update the function
735 // entry.
736 const bool ChainEntryIsFnEntry =
737 (EntryBlock_ == &EntryBlock_->getParent()->getEntryBlock());
738 if (ChainEntryIsFnEntry && DTU.hasDomTree()) {
739 LLVM_DEBUG(dbgs() << "Changing function entry from "
740 << EntryBlock_->getName() << " to "
741 << NextCmpBlock->getName() << "\n");
742 DTU.getDomTree().setNewRoot(NextCmpBlock);
743 DTU.applyUpdates({{DominatorTree::Delete, NextCmpBlock, EntryBlock_}});
744 }
745 EntryBlock_ = nullptr;
746
747 // Delete merged blocks. This also removes incoming values in phi.
748 SmallVector<BasicBlock *, 16> DeadBlocks;
749 for (auto &Cmp : Comparisons_) {
750 LLVM_DEBUG(dbgs() << "Deleting merged block " << Cmp.BB->getName() << "\n");
751 DeadBlocks.push_back(Cmp.BB);
752 }
753 DeleteDeadBlocks(DeadBlocks, &DTU);
754
755 Comparisons_.clear();
756 return true;
757}
758
759std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi,
760 BasicBlock *const LastBlock,
761 int NumBlocks) {
762 // Walk up from the last block to find other blocks.
763 std::vector<BasicBlock *> Blocks(NumBlocks);
764 assert(LastBlock && "invalid last block");
765 BasicBlock *CurBlock = LastBlock;
766 for (int BlockIndex = NumBlocks - 1; BlockIndex > 0; --BlockIndex) {
767 if (CurBlock->hasAddressTaken()) {
768 // Somebody is jumping to the block through an address, all bets are
769 // off.
770 LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
771 << " has its address taken\n");
772 return {};
773 }
774 Blocks[BlockIndex] = CurBlock;
775 auto *SinglePredecessor = CurBlock->getSinglePredecessor();
776 if (!SinglePredecessor) {
777 // The block has two or more predecessors.
778 LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
779 << " has two or more predecessors\n");
780 return {};
781 }
782 if (Phi.getBasicBlockIndex(SinglePredecessor) < 0) {
783 // The block does not link back to the phi.
784 LLVM_DEBUG(dbgs() << "skip: block " << BlockIndex
785 << " does not link back to the phi\n");
786 return {};
787 }
788 CurBlock = SinglePredecessor;
789 }
790 Blocks[0] = CurBlock;
791 return Blocks;
792}
793
794bool processPhi(PHINode &Phi, const TargetLibraryInfo &TLI, AliasAnalysis &AA,
795 DomTreeUpdater &DTU) {
796 LLVM_DEBUG(dbgs() << "processPhi()\n");
[10] Assuming 'DebugFlag' is false
[11] Loop condition is false. Exiting loop
797 if (Phi.getNumIncomingValues() <= 1) {
[12] Assuming the condition is false
[13] Taking false branch
798 LLVM_DEBUG(dbgs() << "skip: only one incoming value in phi\n");
799 return false;
800 }
801 // We are looking for something that has the following structure:
802 // bb1 --eq--> bb2 --eq--> bb3 --eq--> bb4 --+
803 //  \           \           \               \
804 //   ne          ne          ne              \
805 //    \           \           \               v
806 //     +-----------+-----------+-------------> bb_phi
807 //
808 // - The last basic block (bb4 here) must branch unconditionally to bb_phi.
809 // It's the only block that contributes a non-constant value to the Phi.
810 // - All other blocks (b1, b2, b3) must have exactly two successors, one of
811 // them being the phi block.
812 // - All intermediate blocks (bb2, bb3) must have only one predecessor.
813 // - Blocks cannot do other work besides the comparison, see doesOtherWork()
814
815 // The blocks are not necessarily ordered in the phi, so we start from the
816 // last block and reconstruct the order.
817 BasicBlock *LastBlock = nullptr;
818 for (unsigned I = 0; I < Phi.getNumIncomingValues(); ++I) {
[14] Loop condition is true. Entering loop body
[21] Loop condition is true. Entering loop body
[25] Assuming the condition is false
[26] Loop condition is false. Execution continues on line 840
819 if (isa<ConstantInt>(Phi.getIncomingValue(I))) continue;
[15] Assuming the object is not a 'ConstantInt'
[16] Taking false branch
[22] Assuming the object is a 'ConstantInt'
[23] Taking true branch
[24] Execution continues on line 818
820 if (LastBlock) {
[16.1] 'LastBlock' is null
[17] Taking false branch
821 // There are several non-constant values.
822 LLVM_DEBUG(dbgs() << "skip: several non-constant values\n");
823 return false;
824 }
825 if (!isa<ICmpInst>(Phi.getIncomingValue(I)) ||
[18] Assuming the object is a 'ICmpInst'
[19] Assuming the condition is false
[20] Taking false branch
826 cast<ICmpInst>(Phi.getIncomingValue(I))->getParent() !=
827 Phi.getIncomingBlock(I)) {
828 // Non-constant incoming value is not from a cmp instruction or not
829 // produced by the last block. We could end up processing the value
830 // producing block more than once.
831 //
832 // This is an uncommon case, so we bail.
833 LLVM_DEBUG(
834 dbgs()
835 << "skip: non-constant value not from cmp or not from last block.\n");
836 return false;
837 }
838 LastBlock = Phi.getIncomingBlock(I);
839 }
840 if (!LastBlock) {
[27] Assuming 'LastBlock' is non-null
[28] Taking false branch
841 // There is no non-constant block.
842 LLVM_DEBUG(dbgs() << "skip: no non-constant block\n");
843 return false;
844 }
845 if (LastBlock->getSingleSuccessor() != Phi.getParent()) {
[29] Assuming the condition is false
[30] Taking false branch
846 LLVM_DEBUG(dbgs() << "skip: last block non-phi successor\n");
847 return false;
848 }
849
850 const auto Blocks =
851 getOrderedBlocks(Phi, LastBlock, Phi.getNumIncomingValues());
852 if (Blocks.empty()) return false;
[31] Assuming the condition is false
[32] Taking false branch
853 BCECmpChain CmpChain(Blocks, Phi, AA);
[33] Calling constructor for 'BCECmpChain'
854
855 if (CmpChain.size() < 2) {
856 LLVM_DEBUG(dbgs() << "skip: only one compare block\n");
857 return false;
858 }
859
860 return CmpChain.simplify(TLI, AA, DTU);
861}
862
863static bool runImpl(Function &F, const TargetLibraryInfo &TLI,
864 const TargetTransformInfo &TTI, AliasAnalysis &AA,
865 DominatorTree *DT) {
866 LLVM_DEBUG(dbgs() << "MergeICmpsLegacyPass: " << F.getName() << "\n");
[2] Assuming 'DebugFlag' is false
[3] Loop condition is false. Exiting loop
867
868 // We only try merging comparisons if the target wants to expand memcmp later.
869 // The rationale is to avoid turning small chains into memcmp calls.
870 if (!TTI.enableMemCmpExpansion(F.hasOptSize(), true))
[4] Taking false branch
871 return false;
872
873 // If we don't have memcmp available we can't emit calls to it.
874 if (!TLI.has(LibFunc_memcmp))
[5] Taking false branch
875 return false;
876
877 DomTreeUpdater DTU(DT, /*PostDominatorTree*/ nullptr,
878 DomTreeUpdater::UpdateStrategy::Eager);
879
880 bool MadeChange = false;
881
882 for (auto BBIt = ++F.begin(); BBIt != F.end(); ++BBIt) {
[6] Loop condition is true. Entering loop body
883 // A Phi operation is always first in a basic block.
884 if (auto *const Phi = dyn_cast<PHINode>(&*BBIt->begin()))
[7] Assuming the object is a 'PHINode'
[7.1] 'Phi' is non-null
[8] Taking true branch
885 MadeChange |= processPhi(*Phi, TLI, AA, DTU);
[9] Calling 'processPhi'
886 }
887
888 return MadeChange;
889}
890
891class MergeICmpsLegacyPass : public FunctionPass {
892public:
893 static char ID;
894
895 MergeICmpsLegacyPass() : FunctionPass(ID) {
896 initializeMergeICmpsLegacyPassPass(*PassRegistry::getPassRegistry());
897 }
898
899 bool runOnFunction(Function &F) override {
900 if (skipFunction(F)) return false;
901 const auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
902 const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
903 // MergeICmps does not need the DominatorTree, but we update it if it's
904 // already available.
905 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
906 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
907 return runImpl(F, TLI, TTI, AA, DTWP ? &DTWP->getDomTree() : nullptr);
908 }
909
910 private:
911 void getAnalysisUsage(AnalysisUsage &AU) const override {
912 AU.addRequired<TargetLibraryInfoWrapperPass>();
913 AU.addRequired<TargetTransformInfoWrapperPass>();
914 AU.addRequired<AAResultsWrapperPass>();
915 AU.addPreserved<GlobalsAAWrapperPass>();
916 AU.addPreserved<DominatorTreeWrapperPass>();
917 }
918};
919
920} // namespace
921
922char MergeICmpsLegacyPass::ID = 0;
923INITIALIZE_PASS_BEGIN(MergeICmpsLegacyPass, "mergeicmps",
924 "Merge contiguous icmps into a memcmp", false, false)
925INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
926INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
927INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
928INITIALIZE_PASS_END(MergeICmpsLegacyPass, "mergeicmps",
929 "Merge contiguous icmps into a memcmp", false, false)
930
931Pass *llvm::createMergeICmpsLegacyPass() { return new MergeICmpsLegacyPass(); }
932
933PreservedAnalyses MergeICmpsPass::run(Function &F,
934 FunctionAnalysisManager &AM) {
935 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
936 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
937 auto &AA = AM.getResult<AAManager>(F);
938 auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
939 const bool MadeChanges = runImpl(F, TLI, TTI, AA, DT);
[1] Calling 'runImpl'
940 if (!MadeChanges)
941 return PreservedAnalyses::all();
942 PreservedAnalyses PA;
943 PA.preserve<GlobalsAA>();
944 PA.preserve<DominatorTreeAnalysis>();
945 return PA;
946}
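
For context, a minimal sketch of driving this pass from C++ with the legacy pass manager, assuming the factory is visible through the llvm/Transforms/Scalar.h include used above; this is an editorial illustration, not part of the report.

  // Editorial sketch: run MergeICmps over a module with the legacy PassManager.
  #include "llvm/IR/LegacyPassManager.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Transforms/Scalar.h"

  static void runMergeICmpsOn(llvm::Module &M) {
    llvm::legacy::PassManager PM;
    PM.add(llvm::createMergeICmpsLegacyPass()); // factory defined at line 931 above
    PM.run(M); // the legacy PM instantiates the required TLI/TTI/AA wrapper passes itself
  }

The pass is registered under the name "mergeicmps" (see INITIALIZE_PASS_BEGIN above), so it can also be selected by that name when running opt.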

/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/DerivedTypes.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/InstrTypes.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/OperandTraits.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Use.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <iterator>
48
49namespace llvm {
50
51class APInt;
52class ConstantInt;
53class DataLayout;
54class LLVMContext;
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60/// an instruction to allocate memory on the stack
61class AllocaInst : public UnaryInstruction {
62 Type *AllocatedType;
63
64 using AlignmentField = AlignmentBitfieldElementT<0>;
65 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
66 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
67 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
68 SwiftErrorField>(),
69 "Bitfields must be contiguous");
70
71protected:
72 // Note: Instruction needs to be a friend here to call cloneImpl.
73 friend class Instruction;
74
75 AllocaInst *cloneImpl() const;
76
77public:
78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, Instruction *InsertBefore);
80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, BasicBlock *InsertAtEnd);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 Instruction *InsertBefore);
85 AllocaInst(Type *Ty, unsigned AddrSpace,
86 const Twine &Name, BasicBlock *InsertAtEnd);
87
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name = "", Instruction *InsertBefore = nullptr);
90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
91 const Twine &Name, BasicBlock *InsertAtEnd);
92
93 /// Return true if there is an allocation size parameter to the allocation
94 /// instruction that is not 1.
95 bool isArrayAllocation() const;
96
97 /// Get the number of elements allocated. For a simple allocation of a single
98 /// element, this will return a constant 1 value.
99 const Value *getArraySize() const { return getOperand(0); }
100 Value *getArraySize() { return getOperand(0); }
101
102 /// Overload to return most specific pointer type.
103 PointerType *getType() const {
104 return cast<PointerType>(Instruction::getType());
105 }
106
107 /// Get allocation size in bits. Returns None if size can't be determined,
108 /// e.g. in case of a VLA.
109 Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const;
110
111 /// Return the type that is being allocated by the instruction.
112 Type *getAllocatedType() const { return AllocatedType; }
113 /// for use only in special circumstances that need to generically
114 /// transform a whole instruction (eg: IR linking and vectorization).
115 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
116
117 /// Return the alignment of the memory that is being allocated by the
118 /// instruction.
119 Align getAlign() const {
120 return Align(1ULL << getSubclassData<AlignmentField>());
121 }
122
123 void setAlignment(Align Align) {
124 setSubclassData<AlignmentField>(Log2(Align));
125 }
126
127 // FIXME: Remove this once the transition to Align is over.
128 unsigned getAlignment() const { return getAlign().value(); }
129
130 /// Return true if this alloca is in the entry block of the function and is a
131 /// constant size. If so, the code generator will fold it into the
132 /// prolog/epilog code, so it is basically free.
133 bool isStaticAlloca() const;
134
135 /// Return true if this alloca is used as an inalloca argument to a call. Such
136 /// allocas are never considered static even if they are in the entry block.
137 bool isUsedWithInAlloca() const {
138 return getSubclassData<UsedWithInAllocaField>();
139 }
140
141 /// Specify whether this alloca is used to represent the arguments to a call.
142 void setUsedWithInAlloca(bool V) {
143 setSubclassData<UsedWithInAllocaField>(V);
144 }
145
146 /// Return true if this alloca is used as a swifterror argument to a call.
147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148 /// Specify whether this alloca is used to represent a swifterror.
149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 template <typename Bitfield>
163 void setSubclassData(typename Bitfield::Type Value) {
164 Instruction::setSubclassData<Bitfield>(Value);
165 }
166};
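To illustrate the accessors above, a hedged sketch that builds a static alloca through IRBuilder; the helper name and the assumption of an existing Function F are ours:

#include "llvm/IR/IRBuilder.h"
#include <cassert>

// Hypothetical helper: the alloca is placed at the top of F's entry block
// so that isStaticAlloca() holds.
static llvm::AllocaInst *demoAlloca(llvm::Function &F) {
  llvm::IRBuilder<> B(&F.getEntryBlock(), F.getEntryBlock().begin());
  llvm::AllocaInst *A =
      B.CreateAlloca(B.getInt64Ty(), /*ArraySize=*/nullptr, "tmp");
  A->setAlignment(llvm::Align(8));
  assert(A->getAlign() == llvm::Align(8) && !A->isArrayAllocation());
  assert(A->isStaticAlloca()); // constant size, entry block, not inalloca
  return A;
}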
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
172/// An instruction for reading from memory. This uses the SubclassData field in
173/// Value to store whether or not the load is volatile.
174class LoadInst : public UnaryInstruction {
175 using VolatileField = BoolBitfieldElementT<0>;
176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
178 static_assert(
179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
180 "Bitfields must be contiguous");
181
182 void AssertOK();
183
184protected:
185 // Note: Instruction needs to be a friend here to call cloneImpl.
186 friend class Instruction;
187
188 LoadInst *cloneImpl() const;
189
190public:
191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
192 Instruction *InsertBefore);
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 BasicBlock *InsertAtEnd);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 Align Align, Instruction *InsertBefore = nullptr);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, BasicBlock *InsertAtEnd);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, AtomicOrdering Order,
204 SyncScope::ID SSID = SyncScope::System,
205 Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
208 BasicBlock *InsertAtEnd);
209
210 /// Return true if this is a load from a volatile memory location.
211 bool isVolatile() const { return getSubclassData<VolatileField>(); }
212
213 /// Specify whether this is a volatile load or not.
214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
215
216 /// Return the alignment of the access that is being performed.
217 /// FIXME: Remove this function once transition to Align is over.
218 /// Use getAlign() instead.
219 unsigned getAlignment() const { return getAlign().value(); }
220
221 /// Return the alignment of the access that is being performed.
222 Align getAlign() const {
223 return Align(1ULL << (getSubclassData<AlignmentField>()));
224 }
225
226 void setAlignment(Align Align) {
227 setSubclassData<AlignmentField>(Log2(Align));
228 }
229
230 /// Returns the ordering constraint of this load instruction.
231 AtomicOrdering getOrdering() const {
232 return getSubclassData<OrderingField>();
233 }
234 /// Sets the ordering constraint of this load instruction. May not be Release
235 /// or AcquireRelease.
236 void setOrdering(AtomicOrdering Ordering) {
237 setSubclassData<OrderingField>(Ordering);
238 }
239
240 /// Returns the synchronization scope ID of this load instruction.
241 SyncScope::ID getSyncScopeID() const {
242 return SSID;
243 }
244
245 /// Sets the synchronization scope ID of this load instruction.
246 void setSyncScopeID(SyncScope::ID SSID) {
247 this->SSID = SSID;
248 }
249
250 /// Sets the ordering constraint and the synchronization scope ID of this load
251 /// instruction.
252 void setAtomic(AtomicOrdering Ordering,
253 SyncScope::ID SSID = SyncScope::System) {
254 setOrdering(Ordering);
255 setSyncScopeID(SSID);
256 }
257
258 bool isSimple() const { return !isAtomic() && !isVolatile(); }
259
260 bool isUnordered() const {
261 return (getOrdering() == AtomicOrdering::NotAtomic ||
262 getOrdering() == AtomicOrdering::Unordered) &&
263 !isVolatile();
264 }
265
266 Value *getPointerOperand() { return getOperand(0); }
267 const Value *getPointerOperand() const { return getOperand(0); }
268 static unsigned getPointerOperandIndex() { return 0U; }
269 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
270
271 /// Returns the address space of the pointer operand.
272 unsigned getPointerAddressSpace() const {
273 return getPointerOperandType()->getPointerAddressSpace();
274 }
275
276 // Methods for support type inquiry through isa, cast, and dyn_cast:
277 static bool classof(const Instruction *I) {
278 return I->getOpcode() == Instruction::Load;
279 }
280 static bool classof(const Value *V) {
281 return isa<Instruction>(V) && classof(cast<Instruction>(V));
282 }
283
284private:
285 // Shadow Instruction::setInstructionSubclassData with a private forwarding
286 // method so that subclasses cannot accidentally use it.
287 template <typename Bitfield>
288 void setSubclassData(typename Bitfield::Type Value) {
289 Instruction::setSubclassData<Bitfield>(Value);
290 }
291
292 /// The synchronization scope ID of this load instruction. Not quite enough
293 /// room in SubClassData for everything, so synchronization scope ID gets its
294 /// own field.
295 SyncScope::ID SSID;
296};
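A short sketch of the LoadInst setters above; the helper name, the builder B, and the i32* value Ptr are assumed:

#include "llvm/IR/IRBuilder.h"
#include <cassert>

// Hypothetical helper: Ptr is assumed to point at an i32 in memory.
static llvm::Value *demoLoad(llvm::IRBuilder<> &B, llvm::Value *Ptr) {
  llvm::LoadInst *L = B.CreateLoad(B.getInt32Ty(), Ptr, "val");
  L->setAlignment(llvm::Align(4));
  L->setAtomic(llvm::AtomicOrdering::Acquire); // SSID defaults to SyncScope::System
  assert(!L->isSimple() && L->getOrdering() == llvm::AtomicOrdering::Acquire);
  return L;
}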
297
298//===----------------------------------------------------------------------===//
299// StoreInst Class
300//===----------------------------------------------------------------------===//
301
302/// An instruction for storing to memory.
303class StoreInst : public Instruction {
304 using VolatileField = BoolBitfieldElementT<0>;
305 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
306 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
307 static_assert(
308 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
309 "Bitfields must be contiguous");
310
311 void AssertOK();
312
313protected:
314 // Note: Instruction needs to be a friend here to call cloneImpl.
315 friend class Instruction;
316
317 StoreInst *cloneImpl() const;
318
319public:
320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
325 Instruction *InsertBefore = nullptr);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 BasicBlock *InsertAtEnd);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
329 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
330 Instruction *InsertBefore = nullptr);
331 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
332 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
333
334 // allocate space for exactly two operands
335 void *operator new(size_t s) {
336 return User::operator new(s, 2);
337 }
338
339 /// Return true if this is a store to a volatile memory location.
340 bool isVolatile() const { return getSubclassData<VolatileField>(); }
341
342 /// Specify whether this is a volatile store or not.
343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344
345 /// Transparently provide more efficient getOperand methods.
346 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
347
348 /// Return the alignment of the access that is being performed
349 /// FIXME: Remove this function once transition to Align is over.
350 /// Use getAlign() instead.
351 unsigned getAlignment() const { return getAlign().value(); }
352
353 Align getAlign() const {
354 return Align(1ULL << (getSubclassData<AlignmentField>()));
355 }
356
357 void setAlignment(Align Align) {
358 setSubclassData<AlignmentField>(Log2(Align));
359 }
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return getSubclassData<OrderingField>();
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setSubclassData<OrderingField>(Ordering);
370 }
371
372 /// Returns the synchronization scope ID of this store instruction.
373 SyncScope::ID getSyncScopeID() const {
374 return SSID;
375 }
376
377 /// Sets the synchronization scope ID of this store instruction.
378 void setSyncScopeID(SyncScope::ID SSID) {
379 this->SSID = SSID;
380 }
381
382 /// Sets the ordering constraint and the synchronization scope ID of this
383 /// store instruction.
384 void setAtomic(AtomicOrdering Ordering,
385 SyncScope::ID SSID = SyncScope::System) {
386 setOrdering(Ordering);
387 setSyncScopeID(SSID);
388 }
389
390 bool isSimple() const { return !isAtomic() && !isVolatile(); }
391
392 bool isUnordered() const {
393 return (getOrdering() == AtomicOrdering::NotAtomic ||
394 getOrdering() == AtomicOrdering::Unordered) &&
395 !isVolatile();
396 }
397
398 Value *getValueOperand() { return getOperand(0); }
399 const Value *getValueOperand() const { return getOperand(0); }
400
401 Value *getPointerOperand() { return getOperand(1); }
402 const Value *getPointerOperand() const { return getOperand(1); }
403 static unsigned getPointerOperandIndex() { return 1U; }
404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405
406 /// Returns the address space of the pointer operand.
407 unsigned getPointerAddressSpace() const {
408 return getPointerOperandType()->getPointerAddressSpace();
409 }
410
411 // Methods for support type inquiry through isa, cast, and dyn_cast:
412 static bool classof(const Instruction *I) {
413 return I->getOpcode() == Instruction::Store;
414 }
415 static bool classof(const Value *V) {
416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
417 }
418
419private:
420 // Shadow Instruction::setInstructionSubclassData with a private forwarding
421 // method so that subclasses cannot accidentally use it.
422 template <typename Bitfield>
423 void setSubclassData(typename Bitfield::Type Value) {
424 Instruction::setSubclassData<Bitfield>(Value);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438
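A matching sketch for StoreInst, again with the helper name, builder, and operands assumed:

#include "llvm/IR/IRBuilder.h"
#include <cassert>

// Hypothetical helper: Val is an i32 value and Ptr an i32* location.
static void demoStore(llvm::IRBuilder<> &B, llvm::Value *Val, llvm::Value *Ptr) {
  llvm::StoreInst *S = B.CreateStore(Val, Ptr, /*isVolatile=*/false);
  S->setAlignment(llvm::Align(4));
  S->setAtomic(llvm::AtomicOrdering::Unordered);
  assert(S->isUnordered() && !S->isVolatile());
}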
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t s) {
466 return User::operator new(s, 0);
467 }
468
469 /// Returns the ordering constraint of this fence instruction.
470 AtomicOrdering getOrdering() const {
471 return getSubclassData<OrderingField>();
472 }
473
474 /// Sets the ordering constraint of this fence instruction. May only be
475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476 void setOrdering(AtomicOrdering Ordering) {
477 setSubclassData<OrderingField>(Ordering);
478 }
479
480 /// Returns the synchronization scope ID of this fence instruction.
481 SyncScope::ID getSyncScopeID() const {
482 return SSID;
483 }
484
485 /// Sets the synchronization scope ID of this fence instruction.
486 void setSyncScopeID(SyncScope::ID SSID) {
487 this->SSID = SSID;
488 }
489
490 // Methods for support type inquiry through isa, cast, and dyn_cast:
491 static bool classof(const Instruction *I) {
492 return I->getOpcode() == Instruction::Fence;
493 }
494 static bool classof(const Value *V) {
495 return isa<Instruction>(V) && classof(cast<Instruction>(V));
496 }
497
498private:
499 // Shadow Instruction::setInstructionSubclassData with a private forwarding
500 // method so that subclasses cannot accidentally use it.
501 template <typename Bitfield>
502 void setSubclassData(typename Bitfield::Type Value) {
503 Instruction::setSubclassData<Bitfield>(Value);
504 }
505
506 /// The synchronization scope ID of this fence instruction. Not quite enough
507 /// room in SubClassData for everything, so synchronization scope ID gets its
508 /// own field.
509 SyncScope::ID SSID;
510};
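A one-line sketch of emitting a fence in the default system scope; the builder and helper name are assumed:

#include "llvm/IR/IRBuilder.h"

// Hypothetical helper: emit an acquire-release fence at B's insertion point.
static llvm::FenceInst *demoFence(llvm::IRBuilder<> &B) {
  return B.CreateFence(llvm::AtomicOrdering::AcquireRelease);
}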
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
516/// An instruction that atomically checks whether a
517/// specified value is in a memory location, and, if it is, stores a new value
518/// there. The value returned by this instruction is a pair containing the
519/// original value as first element, and an i1 indicating success (true) or
520/// failure (false) as second element.
521///
522class AtomicCmpXchgInst : public Instruction {
523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525 SyncScope::ID SSID);
526
527 template <unsigned Offset>
528 using AtomicOrderingBitfieldElement =
529 typename Bitfield::Element<AtomicOrdering, Offset, 3,
530 AtomicOrdering::LAST>;
531
532protected:
533 // Note: Instruction needs to be a friend here to call cloneImpl.
534 friend class Instruction;
535
536 AtomicCmpXchgInst *cloneImpl() const;
537
538public:
539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540 AtomicOrdering SuccessOrdering,
541 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542 Instruction *InsertBefore = nullptr);
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 BasicBlock *InsertAtEnd);
547
548 // allocate space for exactly three operands
549 void *operator new(size_t s) {
550 return User::operator new(s, 3);
551 }
552
553 using VolatileField = BoolBitfieldElementT<0>;
554 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
555 using SuccessOrderingField =
556 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
557 using FailureOrderingField =
558 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
559 using AlignmentField =
560 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
561 static_assert(
562 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
563 FailureOrderingField, AlignmentField>(),
564 "Bitfields must be contiguous");
565
566 /// Return the alignment of the memory that is being allocated by the
567 /// instruction.
568 Align getAlign() const {
569 return Align(1ULL << getSubclassData<AlignmentField>());
570 }
571
572 void setAlignment(Align Align) {
573 setSubclassData<AlignmentField>(Log2(Align));
574 }
575
576 /// Return true if this is a cmpxchg from a volatile memory
577 /// location.
578 ///
579 bool isVolatile() const { return getSubclassData<VolatileField>(); }
580
581 /// Specify whether this is a volatile cmpxchg.
582 ///
583 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
584
585 /// Return true if this cmpxchg may spuriously fail.
586 bool isWeak() const { return getSubclassData<WeakField>(); }
587
588 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
589
590 /// Transparently provide more efficient getOperand methods.
591 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
592
593 /// Returns the success ordering constraint of this cmpxchg instruction.
594 AtomicOrdering getSuccessOrdering() const {
595 return getSubclassData<SuccessOrderingField>();
596 }
597
598 /// Sets the success ordering constraint of this cmpxchg instruction.
599 void setSuccessOrdering(AtomicOrdering Ordering) {
600 assert(Ordering != AtomicOrdering::NotAtomic &&
601 "CmpXchg instructions can only be atomic.");
602 setSubclassData<SuccessOrderingField>(Ordering);
603 }
604
605 /// Returns the failure ordering constraint of this cmpxchg instruction.
606 AtomicOrdering getFailureOrdering() const {
607 return getSubclassData<FailureOrderingField>();
608 }
609
610 /// Sets the failure ordering constraint of this cmpxchg instruction.
611 void setFailureOrdering(AtomicOrdering Ordering) {
612 assert(Ordering != AtomicOrdering::NotAtomic &&
613 "CmpXchg instructions can only be atomic.");
614 setSubclassData<FailureOrderingField>(Ordering);
615 }
616
617 /// Returns the synchronization scope ID of this cmpxchg instruction.
618 SyncScope::ID getSyncScopeID() const {
619 return SSID;
620 }
621
622 /// Sets the synchronization scope ID of this cmpxchg instruction.
623 void setSyncScopeID(SyncScope::ID SSID) {
624 this->SSID = SSID;
625 }
626
627 Value *getPointerOperand() { return getOperand(0); }
628 const Value *getPointerOperand() const { return getOperand(0); }
629 static unsigned getPointerOperandIndex() { return 0U; }
630
631 Value *getCompareOperand() { return getOperand(1); }
632 const Value *getCompareOperand() const { return getOperand(1); }
633
634 Value *getNewValOperand() { return getOperand(2); }
635 const Value *getNewValOperand() const { return getOperand(2); }
636
637 /// Returns the address space of the pointer operand.
638 unsigned getPointerAddressSpace() const {
639 return getPointerOperand()->getType()->getPointerAddressSpace();
640 }
641
642 /// Returns the strongest permitted ordering on failure, given the
643 /// desired ordering on success.
644 ///
645 /// If the comparison in a cmpxchg operation fails, there is no atomic store
646 /// so release semantics cannot be provided. So this function drops explicit
647 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
648 /// operation would remain SequentiallyConsistent.
649 static AtomicOrdering
650 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
651 switch (SuccessOrdering) {
652 default:
653 llvm_unreachable("invalid cmpxchg success ordering");
654 case AtomicOrdering::Release:
655 case AtomicOrdering::Monotonic:
656 return AtomicOrdering::Monotonic;
657 case AtomicOrdering::AcquireRelease:
658 case AtomicOrdering::Acquire:
659 return AtomicOrdering::Acquire;
660 case AtomicOrdering::SequentiallyConsistent:
661 return AtomicOrdering::SequentiallyConsistent;
662 }
663 }
664
665 // Methods for support type inquiry through isa, cast, and dyn_cast:
666 static bool classof(const Instruction *I) {
667 return I->getOpcode() == Instruction::AtomicCmpXchg;
668 }
669 static bool classof(const Value *V) {
670 return isa<Instruction>(V) && classof(cast<Instruction>(V));
671 }
672
673private:
674 // Shadow Instruction::setInstructionSubclassData with a private forwarding
675 // method so that subclasses cannot accidentally use it.
676 template <typename Bitfield>
677 void setSubclassData(typename Bitfield::Type Value) {
678 Instruction::setSubclassData<Bitfield>(Value);
679 }
680
681 /// The synchronization scope ID of this cmpxchg instruction. Not quite
682 /// enough room in SubClassData for everything, so synchronization scope ID
683 /// gets its own field.
684 SyncScope::ID SSID;
685};
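A sketch that ties the constructor and getStrongestFailureOrdering together; the helper name, operand values, insertion point, and alignment choice are assumptions:

#include "llvm/IR/Instructions.h"

// Hypothetical helper: Ptr points at an i32, Expected/Desired are i32 values,
// and InsertPt is an existing instruction to insert before.
static llvm::AtomicCmpXchgInst *
demoCmpXchg(llvm::Value *Ptr, llvm::Value *Expected, llvm::Value *Desired,
            llvm::Instruction *InsertPt) {
  auto Success = llvm::AtomicOrdering::SequentiallyConsistent;
  // Release is dropped on failure; seq_cst stays seq_cst.
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
  // The result is a {old value, i1 success} pair; ExtractValueInst splits it.
  return new llvm::AtomicCmpXchgInst(Ptr, Expected, Desired, llvm::Align(4),
                                     Success, Failure, llvm::SyncScope::System,
                                     InsertPt);
}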
686
687template <>
688struct OperandTraits<AtomicCmpXchgInst> :
689 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
690};
691
692DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
693
694//===----------------------------------------------------------------------===//
695// AtomicRMWInst Class
696//===----------------------------------------------------------------------===//
697
698/// an instruction that atomically reads a memory location,
699/// combines it with another value, and then stores the result back. Returns
700/// the old value.
701///
702class AtomicRMWInst : public Instruction {
703protected:
704 // Note: Instruction needs to be a friend here to call cloneImpl.
705 friend class Instruction;
706
707 AtomicRMWInst *cloneImpl() const;
708
709public:
710 /// This enumeration lists the possible modifications atomicrmw can make. In
711 /// the descriptions, 'p' is the pointer to the instruction's memory location,
712 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
713 /// instruction. These instructions always return 'old'.
714 enum BinOp : unsigned {
715 /// *p = v
716 Xchg,
717 /// *p = old + v
718 Add,
719 /// *p = old - v
720 Sub,
721 /// *p = old & v
722 And,
723 /// *p = ~(old & v)
724 Nand,
725 /// *p = old | v
726 Or,
727 /// *p = old ^ v
728 Xor,
729 /// *p = old >signed v ? old : v
730 Max,
731 /// *p = old <signed v ? old : v
732 Min,
733 /// *p = old >unsigned v ? old : v
734 UMax,
735 /// *p = old <unsigned v ? old : v
736 UMin,
737
738 /// *p = old + v
739 FAdd,
740
741 /// *p = old - v
742 FSub,
743
744 FIRST_BINOP = Xchg,
745 LAST_BINOP = FSub,
746 BAD_BINOP
747 };
748
749private:
750 template <unsigned Offset>
751 using AtomicOrderingBitfieldElement =
752 typename Bitfield::Element<AtomicOrdering, Offset, 3,
753 AtomicOrdering::LAST>;
754
755 template <unsigned Offset>
756 using BinOpBitfieldElement =
757 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
758
759public:
760 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
761 AtomicOrdering Ordering, SyncScope::ID SSID,
762 Instruction *InsertBefore = nullptr);
763 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
764 AtomicOrdering Ordering, SyncScope::ID SSID,
765 BasicBlock *InsertAtEnd);
766
767 // allocate space for exactly two operands
768 void *operator new(size_t s) {
769 return User::operator new(s, 2);
770 }
771
772 using VolatileField = BoolBitfieldElementT<0>;
773 using AtomicOrderingField =
774 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
775 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
776 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
777 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
778 OperationField, AlignmentField>(),
779 "Bitfields must be contiguous");
780
781 BinOp getOperation() const { return getSubclassData<OperationField>(); }
782
783 static StringRef getOperationName(BinOp Op);
784
785 static bool isFPOperation(BinOp Op) {
786 switch (Op) {
787 case AtomicRMWInst::FAdd:
788 case AtomicRMWInst::FSub:
789 return true;
790 default:
791 return false;
792 }
793 }
794
795 void setOperation(BinOp Operation) {
796 setSubclassData<OperationField>(Operation);
797 }
798
799 /// Return the alignment of the memory that is being allocated by the
800 /// instruction.
801 Align getAlign() const {
802 return Align(1ULL << getSubclassData<AlignmentField>());
803 }
804
805 void setAlignment(Align Align) {
806 setSubclassData<AlignmentField>(Log2(Align));
807 }
808
809 /// Return true if this is a RMW on a volatile memory location.
810 ///
811 bool isVolatile() const { return getSubclassData<VolatileField>(); }
812
813 /// Specify whether this is a volatile RMW or not.
814 ///
815 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
816
817 /// Transparently provide more efficient getOperand methods.
818 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
819
820 /// Returns the ordering constraint of this rmw instruction.
821 AtomicOrdering getOrdering() const {
822 return getSubclassData<AtomicOrderingField>();
823 }
824
825 /// Sets the ordering constraint of this rmw instruction.
826 void setOrdering(AtomicOrdering Ordering) {
827 assert(Ordering != AtomicOrdering::NotAtomic &&
828 "atomicrmw instructions can only be atomic.");
829 setSubclassData<AtomicOrderingField>(Ordering);
830 }
831
832 /// Returns the synchronization scope ID of this rmw instruction.
833 SyncScope::ID getSyncScopeID() const {
834 return SSID;
835 }
836
837 /// Sets the synchronization scope ID of this rmw instruction.
838 void setSyncScopeID(SyncScope::ID SSID) {
839 this->SSID = SSID;
840 }
841
842 Value *getPointerOperand() { return getOperand(0); }
843 const Value *getPointerOperand() const { return getOperand(0); }
844 static unsigned getPointerOperandIndex() { return 0U; }
845
846 Value *getValOperand() { return getOperand(1); }
847 const Value *getValOperand() const { return getOperand(1); }
848
849 /// Returns the address space of the pointer operand.
850 unsigned getPointerAddressSpace() const {
851 return getPointerOperand()->getType()->getPointerAddressSpace();
852 }
853
854 bool isFloatingPointOperation() const {
855 return isFPOperation(getOperation());
856 }
857
858 // Methods for support type inquiry through isa, cast, and dyn_cast:
859 static bool classof(const Instruction *I) {
860 return I->getOpcode() == Instruction::AtomicRMW;
861 }
862 static bool classof(const Value *V) {
863 return isa<Instruction>(V) && classof(cast<Instruction>(V));
864 }
865
866private:
867 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
868 AtomicOrdering Ordering, SyncScope::ID SSID);
869
870 // Shadow Instruction::setInstructionSubclassData with a private forwarding
871 // method so that subclasses cannot accidentally use it.
872 template <typename Bitfield>
873 void setSubclassData(typename Bitfield::Type Value) {
874 Instruction::setSubclassData<Bitfield>(Value);
875 }
876
877 /// The synchronization scope ID of this rmw instruction. Not quite enough
878 /// room in SubClassData for everything, so synchronization scope ID gets its
879 /// own field.
880 SyncScope::ID SSID;
881};
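A sketch of building an atomic fetch-and-add with the constructor declared above; the helper name, operands, alignment, and ordering are illustrative assumptions:

#include "llvm/IR/Instructions.h"

// Hypothetical helper: an atomic fetch-and-add of Val into *Ptr; the
// instruction's result is the value *Ptr held before the update.
static llvm::AtomicRMWInst *demoFetchAdd(llvm::Value *Ptr, llvm::Value *Val,
                                         llvm::Instruction *InsertPt) {
  return new llvm::AtomicRMWInst(llvm::AtomicRMWInst::Add, Ptr, Val,
                                 llvm::Align(4),
                                 llvm::AtomicOrdering::Monotonic,
                                 llvm::SyncScope::System, InsertPt);
}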
882
883template <>
884struct OperandTraits<AtomicRMWInst>
885 : public FixedNumOperandTraits<AtomicRMWInst,2> {
886};
887
888DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
889
890//===----------------------------------------------------------------------===//
891// GetElementPtrInst Class
892//===----------------------------------------------------------------------===//
893
894// checkGEPType - Simple wrapper function to give a better assertion failure
895// message on bad indexes for a gep instruction.
896//
897inline Type *checkGEPType(Type *Ty) {
898 assert(Ty && "Invalid GetElementPtrInst indices for type!");
899 return Ty;
900}
901
902/// an instruction for type-safe pointer arithmetic to
903/// access elements of arrays and structs
904///
905class GetElementPtrInst : public Instruction {
906 Type *SourceElementType;
907 Type *ResultElementType;
908
909 GetElementPtrInst(const GetElementPtrInst &GEPI);
910
911 /// Constructors - Create a getelementptr instruction with a base pointer and a
912 /// list of indices. The first ctor can optionally insert before an existing
913 /// instruction, the second appends the new instruction to the specified
914 /// BasicBlock.
915 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
916 ArrayRef<Value *> IdxList, unsigned Values,
917 const Twine &NameStr, Instruction *InsertBefore);
918 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
919 ArrayRef<Value *> IdxList, unsigned Values,
920 const Twine &NameStr, BasicBlock *InsertAtEnd);
921
922 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
923
924protected:
925 // Note: Instruction needs to be a friend here to call cloneImpl.
926 friend class Instruction;
927
928 GetElementPtrInst *cloneImpl() const;
929
930public:
931 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
932 ArrayRef<Value *> IdxList,
933 const Twine &NameStr = "",
934 Instruction *InsertBefore = nullptr) {
935 unsigned Values = 1 + unsigned(IdxList.size());
936 if (!PointeeType)
937 PointeeType =
938 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
939 else
940 assert(
941 PointeeType ==
942 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
943 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
944 NameStr, InsertBefore);
945 }
946
947 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
948 ArrayRef<Value *> IdxList,
949 const Twine &NameStr,
950 BasicBlock *InsertAtEnd) {
951 unsigned Values = 1 + unsigned(IdxList.size());
952 if (!PointeeType)
953 PointeeType =
954 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
955 else
956 assert(
957 PointeeType ==
958 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
959 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
960 NameStr, InsertAtEnd);
961 }
962
963 /// Create an "inbounds" getelementptr. See the documentation for the
964 /// "inbounds" flag in LangRef.html for details.
965 static GetElementPtrInst *CreateInBounds(Value *Ptr,
966 ArrayRef<Value *> IdxList,
967 const Twine &NameStr = "",
968 Instruction *InsertBefore = nullptr){
969 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
970 }
971
972 static GetElementPtrInst *
973 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
974 const Twine &NameStr = "",
975 Instruction *InsertBefore = nullptr) {
976 GetElementPtrInst *GEP =
977 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
978 GEP->setIsInBounds(true);
979 return GEP;
980 }
981
982 static GetElementPtrInst *CreateInBounds(Value *Ptr,
983 ArrayRef<Value *> IdxList,
984 const Twine &NameStr,
985 BasicBlock *InsertAtEnd) {
986 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
987 }
988
989 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
990 ArrayRef<Value *> IdxList,
991 const Twine &NameStr,
992 BasicBlock *InsertAtEnd) {
993 GetElementPtrInst *GEP =
994 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
995 GEP->setIsInBounds(true);
996 return GEP;
997 }
998
999 /// Transparently provide more efficient getOperand methods.
1000 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1001
1002 Type *getSourceElementType() const { return SourceElementType; }
1003
1004 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1005 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1006
1007 Type *getResultElementType() const {
1008 assert(ResultElementType ==
1009 cast<PointerType>(getType()->getScalarType())->getElementType());
1010 return ResultElementType;
1011 }
1012
1013 /// Returns the address space of this instruction's pointer type.
1014 unsigned getAddressSpace() const {
1015 // Note that this is always the same as the pointer operand's address space
1016 // and that is cheaper to compute, so cheat here.
1017 return getPointerAddressSpace();
1018 }
1019
1020 /// Returns the result type of a getelementptr with the given source
1021 /// element type and indexes.
1022 ///
1023 /// Null is returned if the indices are invalid for the specified
1024 /// source element type.
1025 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1026 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1027 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1028
1029 /// Return the type of the element at the given index of an indexable
1030 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1031 ///
1032 /// Returns null if the type can't be indexed, or the given index is not
1033 /// legal for the given type.
1034 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1035 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1036
1037 inline op_iterator idx_begin() { return op_begin()+1; }
1038 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1039 inline op_iterator idx_end() { return op_end(); }
1040 inline const_op_iterator idx_end() const { return op_end(); }
1041
1042 inline iterator_range<op_iterator> indices() {
1043 return make_range(idx_begin(), idx_end());
1044 }
1045
1046 inline iterator_range<const_op_iterator> indices() const {
1047 return make_range(idx_begin(), idx_end());
1048 }
1049
1050 Value *getPointerOperand() {
1051 return getOperand(0);
1052 }
1053 const Value *getPointerOperand() const {
1054 return getOperand(0);
1055 }
1056 static unsigned getPointerOperandIndex() {
1057 return 0U; // get index for modifying correct operand.
1058 }
1059
1060 /// Method to return the pointer operand as a
1061 /// PointerType.
1062 Type *getPointerOperandType() const {
1063 return getPointerOperand()->getType();
1064 }
1065
1066 /// Returns the address space of the pointer operand.
1067 unsigned getPointerAddressSpace() const {
1068 return getPointerOperandType()->getPointerAddressSpace();
1069 }
1070
1071 /// Returns the pointer type returned by the GEP
1072 /// instruction, which may be a vector of pointers.
1073 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1074 ArrayRef<Value *> IdxList) {
1075 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1076 Ptr->getType()->getPointerAddressSpace());
1077 // Vector GEP
1078 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1079 ElementCount EltCount = PtrVTy->getElementCount();
1080 return VectorType::get(PtrTy, EltCount);
1081 }
1082 for (Value *Index : IdxList)
1083 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1084 ElementCount EltCount = IndexVTy->getElementCount();
1085 return VectorType::get(PtrTy, EltCount);
1086 }
1087 // Scalar GEP
1088 return PtrTy;
1089 }
1090
1091 unsigned getNumIndices() const { // Note: always non-negative
1092 return getNumOperands() - 1;
1093 }
1094
1095 bool hasIndices() const {
1096 return getNumOperands() > 1;
1097 }
1098
1099 /// Return true if all of the indices of this GEP are
1100 /// zeros. If so, the result pointer and the first operand have the same
1101 /// value, just potentially different types.
1102 bool hasAllZeroIndices() const;
1103
1104 /// Return true if all of the indices of this GEP are
1105 /// constant integers. If so, the result pointer and the first operand have
1106 /// a constant offset between them.
1107 bool hasAllConstantIndices() const;
1108
1109 /// Set or clear the inbounds flag on this GEP instruction.
1110 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1111 void setIsInBounds(bool b = true);
1112
1113 /// Determine whether the GEP has the inbounds flag.
1114 bool isInBounds() const;
1115
1116 /// Accumulate the constant address offset of this GEP if possible.
1117 ///
1118 /// This routine accepts an APInt into which it will accumulate the constant
1119 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1120 /// all-constant, it returns false and the value of the offset APInt is
1121 /// undefined (it is *not* preserved!). The APInt passed into this routine
1122 /// must be at least as wide as the IntPtr type for the address space of
1123 /// the base GEP pointer.
1124 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1125
1126 // Methods for support type inquiry through isa, cast, and dyn_cast:
1127 static bool classof(const Instruction *I) {
1128 return (I->getOpcode() == Instruction::GetElementPtr);
1129 }
1130 static bool classof(const Value *V) {
1131 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1132 }
1133};
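A sketch combining CreateInBounds with accumulateConstantOffset as documented above; the base pointer type (i32*), the index value, and the helper name are assumptions:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

// Hypothetical helper: BasePtr has type i32* and InsertPt is an existing
// instruction; returns the constant byte offset of an inbounds GEP.
static llvm::APInt demoGEPOffset(llvm::Value *BasePtr,
                                 llvm::Instruction *InsertPt,
                                 const llvm::DataLayout &DL) {
  llvm::LLVMContext &Ctx = BasePtr->getContext();
  llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 3);
  auto *GEP = llvm::GetElementPtrInst::CreateInBounds(
      llvm::Type::getInt32Ty(Ctx), BasePtr, {Idx}, "elt", InsertPt);
  llvm::APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
  bool IsConstant = GEP->accumulateConstantOffset(DL, Offset); // 3 * 4 = 12 bytes
  (void)IsConstant;
  return Offset;
}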
1134
1135template <>
1136struct OperandTraits<GetElementPtrInst> :
1137 public VariadicOperandTraits<GetElementPtrInst, 1> {
1138};
1139
1140GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1141 ArrayRef<Value *> IdxList, unsigned Values,
1142 const Twine &NameStr,
1143 Instruction *InsertBefore)
1144 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1145 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1146 Values, InsertBefore),
1147 SourceElementType(PointeeType),
1148 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1149 assert(ResultElementType ==
1150 cast<PointerType>(getType()->getScalarType())->getElementType());
1151 init(Ptr, IdxList, NameStr);
1152}
1153
1154GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1155 ArrayRef<Value *> IdxList, unsigned Values,
1156 const Twine &NameStr,
1157 BasicBlock *InsertAtEnd)
1158 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1159 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1160 Values, InsertAtEnd),
1161 SourceElementType(PointeeType),
1162 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1163 assert(ResultElementType ==
1164 cast<PointerType>(getType()->getScalarType())->getElementType());
1165 init(Ptr, IdxList, NameStr);
1166}
1167
1168DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1169
1170//===----------------------------------------------------------------------===//
1171// ICmpInst Class
1172//===----------------------------------------------------------------------===//
1173
1174/// This instruction compares its operands according to the predicate given
1175/// to the constructor. It only operates on integers or pointers. The operands
1176/// must be identical types.
1177/// Represents an integer comparison operator.
1178class ICmpInst: public CmpInst {
1179 void AssertOK() {
1180 assert(isIntPredicate() &&
1181 "Invalid ICmp predicate value");
1182 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1183 "Both operands to ICmp instruction are not of the same type!");
1184 // Check that the operands are the right type
1185 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1186 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1187 "Invalid operand types for ICmp instruction");
1188 }
1189
1190protected:
1191 // Note: Instruction needs to be a friend here to call cloneImpl.
1192 friend class Instruction;
1193
1194 /// Clone an identical ICmpInst
1195 ICmpInst *cloneImpl() const;
1196
1197public:
1198 /// Constructor with insert-before-instruction semantics.
1199 ICmpInst(
1200 Instruction *InsertBefore, ///< Where to insert
1201 Predicate pred, ///< The predicate to use for the comparison
1202 Value *LHS, ///< The left-hand-side of the expression
1203 Value *RHS, ///< The right-hand-side of the expression
1204 const Twine &NameStr = "" ///< Name of the instruction
1205 ) : CmpInst(makeCmpResultType(LHS->getType()),
1206 Instruction::ICmp, pred, LHS, RHS, NameStr,
1207 InsertBefore) {
1208#ifndef NDEBUG
1209 AssertOK();
1210#endif
1211 }
1212
1213 /// Constructor with insert-at-end semantics.
1214 ICmpInst(
1215 BasicBlock &InsertAtEnd, ///< Block to insert into.
1216 Predicate pred, ///< The predicate to use for the comparison
1217 Value *LHS, ///< The left-hand-side of the expression
1218 Value *RHS, ///< The right-hand-side of the expression
1219 const Twine &NameStr = "" ///< Name of the instruction
1220 ) : CmpInst(makeCmpResultType(LHS->getType()),
1221 Instruction::ICmp, pred, LHS, RHS, NameStr,
1222 &InsertAtEnd) {
1223#ifndef NDEBUG
1224 AssertOK();
1225#endif
1226 }
1227
1228 /// Constructor with no-insertion semantics
1229 ICmpInst(
1230 Predicate pred, ///< The predicate to use for the comparison
1231 Value *LHS, ///< The left-hand-side of the expression
1232 Value *RHS, ///< The right-hand-side of the expression
1233 const Twine &NameStr = "" ///< Name of the instruction
1234 ) : CmpInst(makeCmpResultType(LHS->getType()),
1235 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1236#ifndef NDEBUG
1237 AssertOK();
1238#endif
1239 }
1240
1241 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1242 /// @returns the predicate that would be the result if the operand were
1243 /// regarded as signed.
1244 /// Return the signed version of the predicate
1245 Predicate getSignedPredicate() const {
1246 return getSignedPredicate(getPredicate());
1247 }
1248
1249 /// This is a static version that you can use without an instruction.
1250 /// Return the signed version of the predicate.
1251 static Predicate getSignedPredicate(Predicate pred);
1252
1253 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1254 /// @returns the predicate that would be the result if the operand were
1255 /// regarded as unsigned.
1256 /// Return the unsigned version of the predicate
1257 Predicate getUnsignedPredicate() const {
1258 return getUnsignedPredicate(getPredicate());
1259 }
1260
1261 /// This is a static version that you can use without an instruction.
1262 /// Return the unsigned version of the predicate.
1263 static Predicate getUnsignedPredicate(Predicate pred);
1264
1265 /// Return true if this predicate is either EQ or NE. This also
1266 /// tests for commutativity.
1267 static bool isEquality(Predicate P) {
1268 return P == ICMP_EQ || P == ICMP_NE;
1269 }
1270
1271 /// Return true if this predicate is either EQ or NE. This also
1272 /// tests for commutativity.
1273 bool isEquality() const {
1274 return isEquality(getPredicate());
1275 }
1276
1277 /// @returns true if the predicate of this ICmpInst is commutative
1278 /// Determine if this relation is commutative.
1279 bool isCommutative() const { return isEquality(); }
1280
1281 /// Return true if the predicate is relational (not EQ or NE).
1282 ///
1283 bool isRelational() const {
1284 return !isEquality();
1285 }
1286
1287 /// Return true if the predicate is relational (not EQ or NE).
1288 ///
1289 static bool isRelational(Predicate P) {
1290 return !isEquality(P);
1291 }
1292
1293 /// Exchange the two operands to this instruction in such a way that it does
1294 /// not modify the semantics of the instruction. The predicate value may be
1295 /// changed to retain the same result if the predicate is order dependent
1296 /// (e.g. ult).
1297 /// Swap operands and adjust predicate.
1298 void swapOperands() {
1299 setPredicate(getSwappedPredicate());
1300 Op<0>().swap(Op<1>());
1301 }
1302
1303 // Methods for support type inquiry through isa, cast, and dyn_cast:
1304 static bool classof(const Instruction *I) {
1305 return I->getOpcode() == Instruction::ICmp;
1306 }
1307 static bool classof(const Value *V) {
1308 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1309 }
1310};
1311
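// --- Illustrative sketch (not part of the annotated header) -----------------
// Building an integer comparison with IRBuilder and querying the predicate
// helpers declared above; the function name is an assumption for this sketch.
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *emitIsNonNegative(IRBuilder<> &B, Value *X) {
  // %nonneg = icmp sge %X, 0
  Value *Cmp = B.CreateICmpSGE(X, ConstantInt::get(X->getType(), 0), "nonneg");
  if (auto *IC = dyn_cast<ICmpInst>(Cmp)) {
    // ICMP_SGE is relational (not EQ/NE), hence not commutative; its unsigned
    // counterpart, as returned by getUnsignedPredicate(), is ICMP_UGE.
    (void)IC->isRelational();
    (void)ICmpInst::getUnsignedPredicate(IC->getPredicate());
  }
  return Cmp; // May be a folded constant if X is itself a constant.
}
// -----------------------------------------------------------------------------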
1312//===----------------------------------------------------------------------===//
1313// FCmpInst Class
1314//===----------------------------------------------------------------------===//
1315
1316/// This instruction compares its operands according to the predicate given
1317/// to the constructor. It only operates on floating point values or packed
1318/// vectors of floating point values. The operands must be identical types.
1319/// Represents a floating point comparison operator.
1320class FCmpInst: public CmpInst {
1321 void AssertOK() {
1322 assert(isFPPredicate() && "Invalid FCmp predicate value");
1323 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1324 "Both operands to FCmp instruction are not of the same type!");
1325 // Check that the operands are the right type
1326 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1327 "Invalid operand types for FCmp instruction");
1328 }
1329
1330protected:
1331 // Note: Instruction needs to be a friend here to call cloneImpl.
1332 friend class Instruction;
1333
1334 /// Clone an identical FCmpInst
1335 FCmpInst *cloneImpl() const;
1336
1337public:
1338 /// Constructor with insert-before-instruction semantics.
1339 FCmpInst(
1340 Instruction *InsertBefore, ///< Where to insert
1341 Predicate pred, ///< The predicate to use for the comparison
1342 Value *LHS, ///< The left-hand-side of the expression
1343 Value *RHS, ///< The right-hand-side of the expression
1344 const Twine &NameStr = "" ///< Name of the instruction
1345 ) : CmpInst(makeCmpResultType(LHS->getType()),
1346 Instruction::FCmp, pred, LHS, RHS, NameStr,
1347 InsertBefore) {
1348 AssertOK();
1349 }
1350
1351 /// Constructor with insert-at-end semantics.
1352 FCmpInst(
1353 BasicBlock &InsertAtEnd, ///< Block to insert into.
1354 Predicate pred, ///< The predicate to use for the comparison
1355 Value *LHS, ///< The left-hand-side of the expression
1356 Value *RHS, ///< The right-hand-side of the expression
1357 const Twine &NameStr = "" ///< Name of the instruction
1358 ) : CmpInst(makeCmpResultType(LHS->getType()),
1359 Instruction::FCmp, pred, LHS, RHS, NameStr,
1360 &InsertAtEnd) {
1361 AssertOK();
1362 }
1363
1364 /// Constructor with no-insertion semantics
1365 FCmpInst(
1366 Predicate Pred, ///< The predicate to use for the comparison
1367 Value *LHS, ///< The left-hand-side of the expression
1368 Value *RHS, ///< The right-hand-side of the expression
1369 const Twine &NameStr = "", ///< Name of the instruction
1370 Instruction *FlagsSource = nullptr
1371 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1372 RHS, NameStr, nullptr, FlagsSource) {
1373 AssertOK();
1374 }
1375
1376 /// @returns true if the predicate of this instruction is EQ or NE.
1377 /// Determine if this is an equality predicate.
1378 static bool isEquality(Predicate Pred) {
1379 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1380 Pred == FCMP_UNE;
1381 }
1382
1383 /// @returns true if the predicate of this instruction is EQ or NE.
1384 /// Determine if this is an equality predicate.
1385 bool isEquality() const { return isEquality(getPredicate()); }
1386
1387 /// @returns true if the predicate of this instruction is commutative.
1388 /// Determine if this is a commutative predicate.
1389 bool isCommutative() const {
1390 return isEquality() ||
1391 getPredicate() == FCMP_FALSE ||
1392 getPredicate() == FCMP_TRUE ||
1393 getPredicate() == FCMP_ORD ||
1394 getPredicate() == FCMP_UNO;
1395 }
1396
1397 /// @returns true if the predicate is relational (not EQ or NE).
1398 /// Determine if this is a relational predicate.
1399 bool isRelational() const { return !isEquality(); }
1400
1401 /// Exchange the two operands to this instruction in such a way that it does
1402 /// not modify the semantics of the instruction. The predicate value may be
1403 /// changed to retain the same result if the predicate is order dependent
1404 /// (e.g. ult).
1405 /// Swap operands and adjust predicate.
1406 void swapOperands() {
1407 setPredicate(getSwappedPredicate());
1408 Op<0>().swap(Op<1>());
1409 }
1410
1411 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1412 static bool classof(const Instruction *I) {
1413 return I->getOpcode() == Instruction::FCmp;
1414 }
1415 static bool classof(const Value *V) {
1416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1417 }
1418};
1419
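// --- Illustrative sketch (not part of the annotated header) -----------------
// swapOperands() swaps the operands and the predicate together, so the
// comparison keeps its value: "fcmp olt C, %x" becomes "fcmp ogt %x, C".
// The canonicalization helper below is an assumption for this sketch.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static void canonicalizeConstantToRHS(FCmpInst &FC) {
  if (isa<Constant>(FC.getOperand(0)) && !isa<Constant>(FC.getOperand(1)))
    FC.swapOperands(); // Result of the compare is unchanged.
}
// -----------------------------------------------------------------------------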
1420//===----------------------------------------------------------------------===//
1421/// This class represents a function call, abstracting a target
1422/// machine's calling convention. This class uses the low bit of the SubClassData
1423/// field to indicate whether or not this is a tail call. The rest of the bits
1424/// hold the calling convention of the call.
1425///
1426class CallInst : public CallBase {
1427 CallInst(const CallInst &CI);
1428
1429 /// Construct a CallInst given a range of arguments.
1430 /// Construct a CallInst from a range of arguments
1431 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1432 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1433 Instruction *InsertBefore);
1434
1435 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1436 const Twine &NameStr, Instruction *InsertBefore)
1437 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1438
1439 /// Construct a CallInst given a range of arguments.
1440 /// Construct a CallInst from a range of arguments
1441 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1442 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1443 BasicBlock *InsertAtEnd);
1444
1445 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1446 Instruction *InsertBefore);
1447
1448 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1449 BasicBlock *InsertAtEnd);
1450
1451 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1452 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1453 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1454
1455 /// Compute the number of operands to allocate.
1456 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1457 // We need one operand for the called function, plus the input operand
1458 // counts provided.
1459 return 1 + NumArgs + NumBundleInputs;
1460 }
1461
1462protected:
1463 // Note: Instruction needs to be a friend here to call cloneImpl.
1464 friend class Instruction;
1465
1466 CallInst *cloneImpl() const;
1467
1468public:
1469 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1470 Instruction *InsertBefore = nullptr) {
1471 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1472 }
1473
1474 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1475 const Twine &NameStr,
1476 Instruction *InsertBefore = nullptr) {
1477 return new (ComputeNumOperands(Args.size()))
1478 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1479 }
1480
1481 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1482 ArrayRef<OperandBundleDef> Bundles = None,
1483 const Twine &NameStr = "",
1484 Instruction *InsertBefore = nullptr) {
1485 const int NumOperands =
1486 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1487 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1488
1489 return new (NumOperands, DescriptorBytes)
1490 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1491 }
1492
1493 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1494 BasicBlock *InsertAtEnd) {
1495 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1496 }
1497
1498 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1499 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1500 return new (ComputeNumOperands(Args.size()))
1501 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1502 }
1503
1504 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1505 ArrayRef<OperandBundleDef> Bundles,
1506 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1507 const int NumOperands =
1508 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1509 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1510
1511 return new (NumOperands, DescriptorBytes)
1512 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1513 }
1514
1515 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1516 Instruction *InsertBefore = nullptr) {
1517 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1518 InsertBefore);
1519 }
1520
1521 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1522 ArrayRef<OperandBundleDef> Bundles = None,
1523 const Twine &NameStr = "",
1524 Instruction *InsertBefore = nullptr) {
1525 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1526 NameStr, InsertBefore);
1527 }
1528
1529 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1530 const Twine &NameStr,
1531 Instruction *InsertBefore = nullptr) {
1532 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1533 InsertBefore);
1534 }
1535
1536 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1537 BasicBlock *InsertAtEnd) {
1538 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1539 InsertAtEnd);
1540 }
1541
1542 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1543 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1544 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1545 InsertAtEnd);
1546 }
1547
1548 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1549 ArrayRef<OperandBundleDef> Bundles,
1550 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1551 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1552 NameStr, InsertAtEnd);
1553 }
1554
1555 /// Create a clone of \p CI with a different set of operand bundles and
1556 /// insert it before \p InsertPt.
1557 ///
1558 /// The returned call instruction is identical to \p CI in every way except that
1559 /// the operand bundles for the new instruction are set to the operand bundles
1560 /// in \p Bundles.
1561 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1562 Instruction *InsertPt = nullptr);
1563
1564 /// Create a clone of \p CI with a different set of operand bundles and
1565 /// insert it before \p InsertPt.
1566 ///
1567 /// The returned call instruction is identical to \p CI in every way except that
1568 /// the operand bundle for the new instruction is set to the operand bundle
1569 /// in \p Bundle.
1570 static CallInst *CreateWithReplacedBundle(CallInst *CI,
1571 OperandBundleDef Bundle,
1572 Instruction *InsertPt = nullptr);
1573
1574 /// Generate the IR for a call to malloc:
1575 /// 1. Compute the malloc call's argument as the specified type's size,
1576 /// possibly multiplied by the array size if the array size is not
1577 /// constant 1.
1578 /// 2. Call malloc with that argument.
1579 /// 3. Bitcast the result of the malloc call to the specified type.
1580 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1581 Type *AllocTy, Value *AllocSize,
1582 Value *ArraySize = nullptr,
1583 Function *MallocF = nullptr,
1584 const Twine &Name = "");
1585 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1586 Type *AllocTy, Value *AllocSize,
1587 Value *ArraySize = nullptr,
1588 Function *MallocF = nullptr,
1589 const Twine &Name = "");
1590 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1591 Type *AllocTy, Value *AllocSize,
1592 Value *ArraySize = nullptr,
1593 ArrayRef<OperandBundleDef> Bundles = None,
1594 Function *MallocF = nullptr,
1595 const Twine &Name = "");
1596 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1597 Type *AllocTy, Value *AllocSize,
1598 Value *ArraySize = nullptr,
1599 ArrayRef<OperandBundleDef> Bundles = None,
1600 Function *MallocF = nullptr,
1601 const Twine &Name = "");
1602 /// Generate the IR for a call to the builtin free function.
1603 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1604 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1605 static Instruction *CreateFree(Value *Source,
1606 ArrayRef<OperandBundleDef> Bundles,
1607 Instruction *InsertBefore);
1608 static Instruction *CreateFree(Value *Source,
1609 ArrayRef<OperandBundleDef> Bundles,
1610 BasicBlock *InsertAtEnd);
1611
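// --- Illustrative sketch (not part of the annotated header) -----------------
// Emitting a heap allocation and the matching free with the helpers declared
// above. The function name, insertion points and struct type are assumptions
// made for this sketch.
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static void emitHeapObject(Instruction *AllocPoint, Instruction *FreePoint,
                           const DataLayout &DL, StructType *ObjTy) {
  Type *IntPtrTy = DL.getIntPtrType(ObjTy->getContext());
  // Step 1 of the doc comment: the malloc argument is sizeof(ObjTy) as IntPtrTy.
  Value *AllocSize = ConstantExpr::getTruncOrBitCast(
      ConstantExpr::getSizeOf(ObjTy), IntPtrTy);
  // Steps 2 and 3: call malloc and bitcast the result to ObjTy*.
  Instruction *Obj =
      CallInst::CreateMalloc(AllocPoint, IntPtrTy, ObjTy, AllocSize,
                             /*ArraySize=*/nullptr, /*MallocF=*/nullptr, "obj");
  CallInst::CreateFree(Obj, FreePoint);
}
// -----------------------------------------------------------------------------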
1612 // Note that 'musttail' implies 'tail'.
1613 enum TailCallKind : unsigned {
1614 TCK_None = 0,
1615 TCK_Tail = 1,
1616 TCK_MustTail = 2,
1617 TCK_NoTail = 3,
1618 TCK_LAST = TCK_NoTail
1619 };
1620
1621 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1622 static_assert(
1623 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1624 "Bitfields must be contiguous");
1625
1626 TailCallKind getTailCallKind() const {
1627 return getSubclassData<TailCallKindField>();
1628 }
1629
1630 bool isTailCall() const {
1631 TailCallKind Kind = getTailCallKind();
1632 return Kind == TCK_Tail || Kind == TCK_MustTail;
1633 }
1634
1635 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1636
1637 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1638
1639 void setTailCallKind(TailCallKind TCK) {
1640 setSubclassData<TailCallKindField>(TCK);
1641 }
1642
1643 void setTailCall(bool IsTc = true) {
1644 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1645 }
1646
1647 /// Return true if the call can return twice
1648 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1649 void setCanReturnTwice() {
1650 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1651 }
1652
1653 // Methods for support type inquiry through isa, cast, and dyn_cast:
1654 static bool classof(const Instruction *I) {
1655 return I->getOpcode() == Instruction::Call;
1656 }
1657 static bool classof(const Value *V) {
1658 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1659 }
1660
1661 /// Updates profile metadata by scaling it by \p S / \p T.
1662 void updateProfWeight(uint64_t S, uint64_t T);
1663
1664private:
1665 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1666 // method so that subclasses cannot accidentally use it.
1667 template <typename Bitfield>
1668 void setSubclassData(typename Bitfield::Type Value) {
1669 Instruction::setSubclassData<Bitfield>(Value);
1670 }
1671};
1672
1673CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1674 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1675 BasicBlock *InsertAtEnd)
1676 : CallBase(Ty->getReturnType(), Instruction::Call,
1677 OperandTraits<CallBase>::op_end(this) -
1678 (Args.size() + CountBundleInputs(Bundles) + 1),
1679 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1680 InsertAtEnd) {
1681 init(Ty, Func, Args, Bundles, NameStr);
1682}
1683
1684CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1685 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1686 Instruction *InsertBefore)
1687 : CallBase(Ty->getReturnType(), Instruction::Call,
1688 OperandTraits<CallBase>::op_end(this) -
1689 (Args.size() + CountBundleInputs(Bundles) + 1),
1690 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1691 InsertBefore) {
1692 init(Ty, Func, Args, Bundles, NameStr);
1693}
1694
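// --- Illustrative sketch (not part of the annotated header) -----------------
// Creating a call through a FunctionCallee and marking it as a tail call.
// "my_callee" and the helper name are made up for this sketch.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
using namespace llvm;

static CallInst *emitTailCall(Module &M, IRBuilder<> &B) {
  FunctionCallee Callee = M.getOrInsertFunction(
      "my_callee", FunctionType::get(B.getInt32Ty(), /*isVarArg=*/false));
  CallInst *CI = B.CreateCall(Callee);
  CI->setTailCallKind(CallInst::TCK_Tail); // 'musttail' would be TCK_MustTail.
  return CI;
}
// -----------------------------------------------------------------------------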
1695//===----------------------------------------------------------------------===//
1696// SelectInst Class
1697//===----------------------------------------------------------------------===//
1698
1699/// This class represents the LLVM 'select' instruction.
1700///
1701class SelectInst : public Instruction {
1702 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1703 Instruction *InsertBefore)
1704 : Instruction(S1->getType(), Instruction::Select,
1705 &Op<0>(), 3, InsertBefore) {
1706 init(C, S1, S2);
1707 setName(NameStr);
1708 }
1709
1710 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1711 BasicBlock *InsertAtEnd)
1712 : Instruction(S1->getType(), Instruction::Select,
1713 &Op<0>(), 3, InsertAtEnd) {
1714 init(C, S1, S2);
1715 setName(NameStr);
1716 }
1717
1718 void init(Value *C, Value *S1, Value *S2) {
1719 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1720 Op<0>() = C;
1721 Op<1>() = S1;
1722 Op<2>() = S2;
1723 }
1724
1725protected:
1726 // Note: Instruction needs to be a friend here to call cloneImpl.
1727 friend class Instruction;
1728
1729 SelectInst *cloneImpl() const;
1730
1731public:
1732 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1733 const Twine &NameStr = "",
1734 Instruction *InsertBefore = nullptr,
1735 Instruction *MDFrom = nullptr) {
1736 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1737 if (MDFrom)
1738 Sel->copyMetadata(*MDFrom);
1739 return Sel;
1740 }
1741
1742 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1743 const Twine &NameStr,
1744 BasicBlock *InsertAtEnd) {
1745 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1746 }
1747
1748 const Value *getCondition() const { return Op<0>(); }
1749 const Value *getTrueValue() const { return Op<1>(); }
1750 const Value *getFalseValue() const { return Op<2>(); }
1751 Value *getCondition() { return Op<0>(); }
1752 Value *getTrueValue() { return Op<1>(); }
1753 Value *getFalseValue() { return Op<2>(); }
1754
1755 void setCondition(Value *V) { Op<0>() = V; }
1756 void setTrueValue(Value *V) { Op<1>() = V; }
1757 void setFalseValue(Value *V) { Op<2>() = V; }
1758
1759 /// Swap the true and false values of the select instruction.
1760 /// This doesn't swap prof metadata.
1761 void swapValues() { Op<1>().swap(Op<2>()); }
1762
1763 /// Return a string if the specified operands are invalid
1764 /// for a select operation, otherwise return null.
1765 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1766
1767 /// Transparently provide more efficient getOperand methods.
1768 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1769
1770 OtherOps getOpcode() const {
1771 return static_cast<OtherOps>(Instruction::getOpcode());
1772 }
1773
1774 // Methods for support type inquiry through isa, cast, and dyn_cast:
1775 static bool classof(const Instruction *I) {
1776 return I->getOpcode() == Instruction::Select;
1777 }
1778 static bool classof(const Value *V) {
1779 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1780 }
1781};
1782
1783template <>
1784struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1785};
1786
1787DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1788
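// --- Illustrative sketch (not part of the annotated header) -----------------
// Building "max(X, 0)" from an ICmpInst and a SelectInst via the Create()
// factory declared above; the helper name is an assumption for this sketch.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static SelectInst *emitClampToZero(Value *X, Instruction *InsertBefore) {
  // %isneg = icmp slt %X, 0 ; %clamped = select %isneg, 0, %X
  Value *Zero = ConstantInt::get(X->getType(), 0);
  Value *IsNeg =
      new ICmpInst(InsertBefore, ICmpInst::ICMP_SLT, X, Zero, "isneg");
  return SelectInst::Create(IsNeg, Zero, X, "clamped", InsertBefore);
}
// -----------------------------------------------------------------------------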
1789//===----------------------------------------------------------------------===//
1790// VAArgInst Class
1791//===----------------------------------------------------------------------===//
1792
1793/// This class represents the va_arg llvm instruction, which returns
1794/// an argument of the specified type given a va_list and increments that list
1795///
1796class VAArgInst : public UnaryInstruction {
1797protected:
1798 // Note: Instruction needs to be a friend here to call cloneImpl.
1799 friend class Instruction;
1800
1801 VAArgInst *cloneImpl() const;
1802
1803public:
1804 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1805 Instruction *InsertBefore = nullptr)
1806 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1807 setName(NameStr);
1808 }
1809
1810 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1811 BasicBlock *InsertAtEnd)
1812 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1813 setName(NameStr);
1814 }
1815
1816 Value *getPointerOperand() { return getOperand(0); }
1817 const Value *getPointerOperand() const { return getOperand(0); }
1818 static unsigned getPointerOperandIndex() { return 0U; }
1819
1820 // Methods for support type inquiry through isa, cast, and dyn_cast:
1821 static bool classof(const Instruction *I) {
1822 return I->getOpcode() == VAArg;
1823 }
1824 static bool classof(const Value *V) {
1825 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1826 }
1827};
1828
1829//===----------------------------------------------------------------------===//
1830// ExtractElementInst Class
1831//===----------------------------------------------------------------------===//
1832
1833/// This instruction extracts a single (scalar)
1834/// element from a VectorType value
1835///
1836class ExtractElementInst : public Instruction {
1837 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1838 Instruction *InsertBefore = nullptr);
1839 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1840 BasicBlock *InsertAtEnd);
1841
1842protected:
1843 // Note: Instruction needs to be a friend here to call cloneImpl.
1844 friend class Instruction;
1845
1846 ExtractElementInst *cloneImpl() const;
1847
1848public:
1849 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1850 const Twine &NameStr = "",
1851 Instruction *InsertBefore = nullptr) {
1852 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1853 }
1854
1855 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1856 const Twine &NameStr,
1857 BasicBlock *InsertAtEnd) {
1858 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1859 }
1860
1861 /// Return true if an extractelement instruction can be
1862 /// formed with the specified operands.
1863 static bool isValidOperands(const Value *Vec, const Value *Idx);
1864
1865 Value *getVectorOperand() { return Op<0>(); }
1866 Value *getIndexOperand() { return Op<1>(); }
1867 const Value *getVectorOperand() const { return Op<0>(); }
1868 const Value *getIndexOperand() const { return Op<1>(); }
1869
1870 VectorType *getVectorOperandType() const {
1871 return cast<VectorType>(getVectorOperand()->getType());
1872 }
1873
1874 /// Transparently provide more efficient getOperand methods.
1875 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1876
1877 // Methods for support type inquiry through isa, cast, and dyn_cast:
1878 static bool classof(const Instruction *I) {
1879 return I->getOpcode() == Instruction::ExtractElement;
1880 }
1881 static bool classof(const Value *V) {
1882 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1883 }
1884};
1885
1886template <>
1887struct OperandTraits<ExtractElementInst> :
1888 public FixedNumOperandTraits<ExtractElementInst, 2> {
1889};
1890
1891DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1892
1893//===----------------------------------------------------------------------===//
1894// InsertElementInst Class
1895//===----------------------------------------------------------------------===//
1896
1897/// This instruction inserts a single (scalar)
1898/// element into a VectorType value
1899///
1900class InsertElementInst : public Instruction {
1901 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1902 const Twine &NameStr = "",
1903 Instruction *InsertBefore = nullptr);
1904 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1905 BasicBlock *InsertAtEnd);
1906
1907protected:
1908 // Note: Instruction needs to be a friend here to call cloneImpl.
1909 friend class Instruction;
1910
1911 InsertElementInst *cloneImpl() const;
1912
1913public:
1914 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1915 const Twine &NameStr = "",
1916 Instruction *InsertBefore = nullptr) {
1917 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1918 }
1919
1920 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1921 const Twine &NameStr,
1922 BasicBlock *InsertAtEnd) {
1923 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1924 }
1925
1926 /// Return true if an insertelement instruction can be
1927 /// formed with the specified operands.
1928 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1929 const Value *Idx);
1930
1931 /// Overload to return most specific vector type.
1932 ///
1933 VectorType *getType() const {
1934 return cast<VectorType>(Instruction::getType());
1935 }
1936
1937 /// Transparently provide more efficient getOperand methods.
1938 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1939
1940 // Methods for support type inquiry through isa, cast, and dyn_cast:
1941 static bool classof(const Instruction *I) {
1942 return I->getOpcode() == Instruction::InsertElement;
1943 }
1944 static bool classof(const Value *V) {
1945 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1946 }
1947};
1948
1949template <>
1950struct OperandTraits<InsertElementInst> :
1951 public FixedNumOperandTraits<InsertElementInst, 3> {
1952};
1953
1954DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1955
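// --- Illustrative sketch (not part of the annotated header) -----------------
// Copying lane 0 of a vector into lane 1 with the extractelement and
// insertelement factories declared above; the helper name is made up.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static Value *copyLaneZeroToLaneOne(Value *Vec, Instruction *InsertBefore) {
  Type *I32 = Type::getInt32Ty(Vec->getContext());
  Value *Lane0 = ExtractElementInst::Create(Vec, ConstantInt::get(I32, 0),
                                            "lane0", InsertBefore);
  return InsertElementInst::Create(Vec, Lane0, ConstantInt::get(I32, 1),
                                   "vec.upd", InsertBefore);
}
// -----------------------------------------------------------------------------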
1956//===----------------------------------------------------------------------===//
1957// ShuffleVectorInst Class
1958//===----------------------------------------------------------------------===//
1959
1960constexpr int UndefMaskElem = -1;
1961
1962/// This instruction constructs a fixed permutation of two
1963/// input vectors.
1964///
1965/// For each element of the result vector, the shuffle mask selects an element
1966/// from one of the input vectors to copy to the result. Non-negative elements
1967/// in the mask represent an index into the concatenated pair of input vectors.
1968/// UndefMaskElem (-1) specifies that the result element is undefined.
1969///
1970/// For scalable vectors, all the elements of the mask must be 0 or -1. This
1971/// requirement may be relaxed in the future.
1972class ShuffleVectorInst : public Instruction {
1973 SmallVector<int, 4> ShuffleMask;
1974 Constant *ShuffleMaskForBitcode;
1975
1976protected:
1977 // Note: Instruction needs to be a friend here to call cloneImpl.
1978 friend class Instruction;
1979
1980 ShuffleVectorInst *cloneImpl() const;
1981
1982public:
1983 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1984 const Twine &NameStr = "",
1985 Instruction *InsertBefor = nullptr);
1986 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1987 const Twine &NameStr, BasicBlock *InsertAtEnd);
1988 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1989 const Twine &NameStr = "",
1990 Instruction *InsertBefor = nullptr);
1991 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1992 const Twine &NameStr, BasicBlock *InsertAtEnd);
1993
1994 void *operator new(size_t s) { return User::operator new(s, 2); }
1995
1996 /// Swap the operands and adjust the mask to preserve the semantics
1997 /// of the instruction.
1998 void commute();
1999
2000 /// Return true if a shufflevector instruction can be
2001 /// formed with the specified operands.
2002 static bool isValidOperands(const Value *V1, const Value *V2,
2003 const Value *Mask);
2004 static bool isValidOperands(const Value *V1, const Value *V2,
2005 ArrayRef<int> Mask);
2006
2007 /// Overload to return most specific vector type.
2008 ///
2009 VectorType *getType() const {
2010 return cast<VectorType>(Instruction::getType());
2011 }
2012
2013 /// Transparently provide more efficient getOperand methods.
2014 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2015
2016 /// Return the shuffle mask value of this instruction for the given element
2017 /// index. Return UndefMaskElem if the element is undef.
2018 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2019
2020 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2021 /// elements of the mask are returned as UndefMaskElem.
2022 static void getShuffleMask(const Constant *Mask,
2023 SmallVectorImpl<int> &Result);
2024
2025 /// Return the mask for this instruction as a vector of integers. Undefined
2026 /// elements of the mask are returned as UndefMaskElem.
2027 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2028 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2029 }
2030
2031 /// Return the mask for this instruction, for use in bitcode.
2032 ///
2033 /// TODO: This is temporary until we decide a new bitcode encoding for
2034 /// shufflevector.
2035 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2036
2037 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2038 Type *ResultTy);
2039
2040 void setShuffleMask(ArrayRef<int> Mask);
2041
2042 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2043
2044 /// Return true if this shuffle returns a vector with a different number of
2045 /// elements than its source vectors.
2046 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2047 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2048 bool changesLength() const {
2049 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2050 ->getElementCount()
2051 .getKnownMinValue();
2052 unsigned NumMaskElts = ShuffleMask.size();
2053 return NumSourceElts != NumMaskElts;
2054 }
2055
2056 /// Return true if this shuffle returns a vector with a greater number of
2057 /// elements than its source vectors.
2058 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2059 bool increasesLength() const {
2060 unsigned NumSourceElts =
2061 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2062 unsigned NumMaskElts = ShuffleMask.size();
2063 return NumSourceElts < NumMaskElts;
2064 }
2065
2066 /// Return true if this shuffle mask chooses elements from exactly one source
2067 /// vector.
2068 /// Example: <7,5,undef,7>
2069 /// This assumes that vector operands are the same length as the mask.
2070 static bool isSingleSourceMask(ArrayRef<int> Mask);
2071 static bool isSingleSourceMask(const Constant *Mask) {
2072 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2073 SmallVector<int, 16> MaskAsInts;
2074 getShuffleMask(Mask, MaskAsInts);
2075 return isSingleSourceMask(MaskAsInts);
2076 }
2077
2078 /// Return true if this shuffle chooses elements from exactly one source
2079 /// vector without changing the length of that vector.
2080 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2081 /// TODO: Optionally allow length-changing shuffles.
2082 bool isSingleSource() const {
2083 return !changesLength() && isSingleSourceMask(ShuffleMask);
2084 }
2085
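// --- Illustrative sketch (not part of the annotated header) -----------------
// Classifying a shuffle mask with the static helper above; the mask values
// are made up for this example.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool exampleSingleSourceMask() {
  // <7,5,undef,7> reads only lanes 4..7, i.e. only the second of two
  // 4-element sources, so it is a single-source mask.
  const int Mask[] = {7, 5, UndefMaskElem, 7};
  return ShuffleVectorInst::isSingleSourceMask(Mask); // true
}
// -----------------------------------------------------------------------------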
2086 /// Return true if this shuffle mask chooses elements from exactly one source
2087 /// vector without lane crossings. A shuffle using this mask is not
2088 /// necessarily a no-op because it may change the number of elements from its
2089 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2090 /// Example: <undef,undef,2,3>
2091 static bool isIdentityMask(ArrayRef<int> Mask);
2092 static bool isIdentityMask(const Constant *Mask) {
2093 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2094 SmallVector<int, 16> MaskAsInts;
2095 getShuffleMask(Mask, MaskAsInts);
2096 return isIdentityMask(MaskAsInts);
2097 }
2098
2099 /// Return true if this shuffle chooses elements from exactly one source
2100 /// vector without lane crossings and does not change the number of elements
2101 /// from its input vectors.
2102 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2103 bool isIdentity() const {
2104 return !changesLength() && isIdentityMask(ShuffleMask);
2105 }
2106
2107 /// Return true if this shuffle lengthens exactly one source vector with
2108 /// undefs in the high elements.
2109 bool isIdentityWithPadding() const;
2110
2111 /// Return true if this shuffle extracts the first N elements of exactly one
2112 /// source vector.
2113 bool isIdentityWithExtract() const;
2114
2115 /// Return true if this shuffle concatenates its 2 source vectors. This
2116 /// returns false if either input is undefined. In that case, the shuffle is
2117 /// better classified as an identity with padding operation.
2118 bool isConcat() const;
2119
2120 /// Return true if this shuffle mask chooses elements from its source vectors
2121 /// without lane crossings. A shuffle using this mask would be
2122 /// equivalent to a vector select with a constant condition operand.
2123 /// Example: <4,1,6,undef>
2124 /// This returns false if the mask does not choose from both input vectors.
2125 /// In that case, the shuffle is better classified as an identity shuffle.
2126 /// This assumes that vector operands are the same length as the mask
2127 /// (a length-changing shuffle can never be equivalent to a vector select).
2128 static bool isSelectMask(ArrayRef<int> Mask);
2129 static bool isSelectMask(const Constant *Mask) {
2130 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2131 SmallVector<int, 16> MaskAsInts;
2132 getShuffleMask(Mask, MaskAsInts);
2133 return isSelectMask(MaskAsInts);
2134 }
2135
2136 /// Return true if this shuffle chooses elements from its source vectors
2137 /// without lane crossings and all operands have the same number of elements.
2138 /// In other words, this shuffle is equivalent to a vector select with a
2139 /// constant condition operand.
2140 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2141 /// This returns false if the mask does not choose from both input vectors.
2142 /// In that case, the shuffle is better classified as an identity shuffle.
2143 /// TODO: Optionally allow length-changing shuffles.
2144 bool isSelect() const {
2145 return !changesLength() && isSelectMask(ShuffleMask);
2146 }
2147
2148 /// Return true if this shuffle mask swaps the order of elements from exactly
2149 /// one source vector.
2150 /// Example: <7,6,undef,4>
2151 /// This assumes that vector operands are the same length as the mask.
2152 static bool isReverseMask(ArrayRef<int> Mask);
2153 static bool isReverseMask(const Constant *Mask) {
2154 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2155 SmallVector<int, 16> MaskAsInts;
2156 getShuffleMask(Mask, MaskAsInts);
2157 return isReverseMask(MaskAsInts);
2158 }
2159
2160 /// Return true if this shuffle swaps the order of elements from exactly
2161 /// one source vector.
2162 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2163 /// TODO: Optionally allow length-changing shuffles.
2164 bool isReverse() const {
2165 return !changesLength() && isReverseMask(ShuffleMask);
2166 }
2167
2168 /// Return true if this shuffle mask chooses all elements with the same value
2169 /// as the first element of exactly one source vector.
2170 /// Example: <4,undef,undef,4>
2171 /// This assumes that vector operands are the same length as the mask.
2172 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2173 static bool isZeroEltSplatMask(const Constant *Mask) {
2174 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2175 SmallVector<int, 16> MaskAsInts;
2176 getShuffleMask(Mask, MaskAsInts);
2177 return isZeroEltSplatMask(MaskAsInts);
2178 }
2179
2180 /// Return true if all elements of this shuffle are the same value as the
2181 /// first element of exactly one source vector without changing the length
2182 /// of that vector.
2183 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2184 /// TODO: Optionally allow length-changing shuffles.
2185 /// TODO: Optionally allow splats from other elements.
2186 bool isZeroEltSplat() const {
2187 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2188 }
2189
2190 /// Return true if this shuffle mask is a transpose mask.
2191 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2192 /// even- or odd-numbered vector elements from two n-dimensional source
2193 /// vectors and write each result into consecutive elements of an
2194 /// n-dimensional destination vector. Two shuffles are necessary to complete
2195 /// the transpose, one for the even elements and another for the odd elements.
2196 /// This description closely follows how the TRN1 and TRN2 AArch64
2197 /// instructions operate.
2198 ///
2199 /// For example, a simple 2x2 matrix can be transposed with:
2200 ///
2201 /// ; Original matrix
2202 /// m0 = < a, b >
2203 /// m1 = < c, d >
2204 ///
2205 /// ; Transposed matrix
2206 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2207 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2208 ///
2209 /// For matrices having greater than n columns, the resulting nx2 transposed
2210 /// matrix is stored in two result vectors such that one vector contains
2211 /// interleaved elements from all the even-numbered rows and the other vector
2212 /// contains interleaved elements from all the odd-numbered rows. For example,
2213 /// a 2x4 matrix can be transposed with:
2214 ///
2215 /// ; Original matrix
2216 /// m0 = < a, b, c, d >
2217 /// m1 = < e, f, g, h >
2218 ///
2219 /// ; Transposed matrix
2220 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2221 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2222 static bool isTransposeMask(ArrayRef<int> Mask);
2223 static bool isTransposeMask(const Constant *Mask) {
2224 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2225 SmallVector<int, 16> MaskAsInts;
2226 getShuffleMask(Mask, MaskAsInts);
2227 return isTransposeMask(MaskAsInts);
2228 }
2229
2230 /// Return true if this shuffle transposes the elements of its inputs without
2231 /// changing the length of the vectors. This operation may also be known as a
2232 /// merge or interleave. See the description for isTransposeMask() for the
2233 /// exact specification.
2234 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2235 bool isTranspose() const {
2236 return !changesLength() && isTransposeMask(ShuffleMask);
2237 }
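As a minimal sketch (the function name and mask values are illustrative, not taken from this header), the even/odd TRN-style masks described above can be checked directly with the static helper:

#include "llvm/IR/Instructions.h"
#include <cassert>

// Illustrative only: verify the 2x4 transpose masks from the comment above.
void checkTransposeMasks() {
  using llvm::ShuffleVectorInst;
  int Even[] = {0, 4, 2, 6}; // t0 = <a, e, c, g>
  int Odd[] = {1, 5, 3, 7};  // t1 = <b, f, d, h>
  assert(ShuffleVectorInst::isTransposeMask(Even));
  assert(ShuffleVectorInst::isTransposeMask(Odd));
  // A plain reversal is not a transpose mask.
  int Rev[] = {3, 2, 1, 0};
  assert(!ShuffleVectorInst::isTransposeMask(Rev));
}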
2238
2239 /// Return true if this shuffle mask is an extract subvector mask.
2240 /// A valid extract subvector mask returns a smaller vector from a single
2241 /// source operand. The base extraction index is returned as well.
2242 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2243 int &Index);
2244 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2245 int &Index) {
2246 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2247 SmallVector<int, 16> MaskAsInts;
2248 getShuffleMask(Mask, MaskAsInts);
2249 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2250 }
2251
2252 /// Return true if this shuffle mask is an extract subvector mask.
2253 bool isExtractSubvectorMask(int &Index) const {
2254 int NumSrcElts =
2255 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2256 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2257 }
2258
2259 /// Change values in a shuffle permute mask assuming the two vector operands
2260 /// of length InVecNumElts have swapped position.
2261 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2262 unsigned InVecNumElts) {
2263 for (int &Idx : Mask) {
2264 if (Idx == -1)
2265 continue;
2266 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2267 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2268        "shufflevector mask index out of range");
2269 }
2270 }
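A small sketch of the remapping above (mask values are illustrative): with two 4-element operands swapped, indices below InVecNumElts move to the second vector and vice versa.

#include "llvm/IR/Instructions.h"
#include <cassert>

// Illustrative only: commute a mask that picks <A0, B1, A2, B3>.
void commuteExample() {
  int Mask[] = {0, 5, 2, 7};
  llvm::ShuffleVectorInst::commuteShuffleMask(Mask, /*InVecNumElts=*/4);
  int Expected[] = {4, 1, 6, 3}; // now picks <B0, A1, B2, A3>
  for (int i = 0; i != 4; ++i)
    assert(Mask[i] == Expected[i]);
}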
2271
2272 // Methods for support type inquiry through isa, cast, and dyn_cast:
2273 static bool classof(const Instruction *I) {
2274 return I->getOpcode() == Instruction::ShuffleVector;
2275 }
2276 static bool classof(const Value *V) {
2277 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2278 }
2279};
2280
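The static mask classifiers above can also be exercised on plain integer masks, with -1 standing for undef; a brief sketch (the mask values are illustrative):

#include "llvm/IR/Instructions.h"
#include <cassert>

// Illustrative only: classify a few 4-element masks.
void classifyMasks() {
  using llvm::ShuffleVectorInst;
  int Select[] = {0, 5, 6, 3};  // lane i taken from lane i of either source
  int Reverse[] = {3, 2, 1, 0}; // reverses one source vector
  int Splat[] = {0, -1, 0, 0};  // broadcasts element 0 of one source
  assert(ShuffleVectorInst::isSelectMask(Select));
  assert(ShuffleVectorInst::isReverseMask(Reverse));
  assert(ShuffleVectorInst::isZeroEltSplatMask(Splat));

  // Extracting <2, 3> from a 4-element source starts at index 2.
  int Sub[] = {2, 3};
  int Index = 0;
  assert(ShuffleVectorInst::isExtractSubvectorMask(Sub, /*NumSrcElts=*/4,
                                                   Index) &&
         Index == 2);
}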
2281template <>
2282struct OperandTraits<ShuffleVectorInst>
2283 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2284
2285DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2286
2287//===----------------------------------------------------------------------===//
2288// ExtractValueInst Class
2289//===----------------------------------------------------------------------===//
2290
2291/// This instruction extracts a struct member or array
2292/// element value from an aggregate value.
2293///
2294class ExtractValueInst : public UnaryInstruction {
2295 SmallVector<unsigned, 4> Indices;
2296
2297 ExtractValueInst(const ExtractValueInst &EVI);
2298
2299 /// Constructors - Create an extractvalue instruction with a base aggregate
2300 /// value and a list of indices. The first ctor can optionally insert before
2301 /// an existing instruction, the second appends the new instruction to the
2302 /// specified BasicBlock.
2303 inline ExtractValueInst(Value *Agg,
2304 ArrayRef<unsigned> Idxs,
2305 const Twine &NameStr,
2306 Instruction *InsertBefore);
2307 inline ExtractValueInst(Value *Agg,
2308 ArrayRef<unsigned> Idxs,
2309 const Twine &NameStr, BasicBlock *InsertAtEnd);
2310
2311 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2312
2313protected:
2314 // Note: Instruction needs to be a friend here to call cloneImpl.
2315 friend class Instruction;
2316
2317 ExtractValueInst *cloneImpl() const;
2318
2319public:
2320 static ExtractValueInst *Create(Value *Agg,
2321 ArrayRef<unsigned> Idxs,
2322 const Twine &NameStr = "",
2323 Instruction *InsertBefore = nullptr) {
2324 return new
2325 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2326 }
2327
2328 static ExtractValueInst *Create(Value *Agg,
2329 ArrayRef<unsigned> Idxs,
2330 const Twine &NameStr,
2331 BasicBlock *InsertAtEnd) {
2332 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2333 }
2334
2335 /// Returns the type of the element that would be extracted
2336 /// with an extractvalue instruction with the specified parameters.
2337 ///
2338 /// Null is returned if the indices are invalid for the specified type.
2339 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2340
2341 using idx_iterator = const unsigned*;
2342
2343 inline idx_iterator idx_begin() const { return Indices.begin(); }
2344 inline idx_iterator idx_end() const { return Indices.end(); }
2345 inline iterator_range<idx_iterator> indices() const {
2346 return make_range(idx_begin(), idx_end());
2347 }
2348
2349 Value *getAggregateOperand() {
2350 return getOperand(0);
2351 }
2352 const Value *getAggregateOperand() const {
2353 return getOperand(0);
2354 }
2355 static unsigned getAggregateOperandIndex() {
2356 return 0U; // get index for modifying correct operand
2357 }
2358
2359 ArrayRef<unsigned> getIndices() const {
2360 return Indices;
2361 }
2362
2363 unsigned getNumIndices() const {
2364 return (unsigned)Indices.size();
2365 }
2366
2367 bool hasIndices() const {
2368 return true;
2369 }
2370
2371 // Methods for support type inquiry through isa, cast, and dyn_cast:
2372 static bool classof(const Instruction *I) {
2373 return I->getOpcode() == Instruction::ExtractValue;
2374 }
2375 static bool classof(const Value *V) {
2376 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2377 }
2378};
2379
2380ExtractValueInst::ExtractValueInst(Value *Agg,
2381 ArrayRef<unsigned> Idxs,
2382 const Twine &NameStr,
2383 Instruction *InsertBefore)
2384 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2385 ExtractValue, Agg, InsertBefore) {
2386 init(Idxs, NameStr);
2387}
2388
2389ExtractValueInst::ExtractValueInst(Value *Agg,
2390 ArrayRef<unsigned> Idxs,
2391 const Twine &NameStr,
2392 BasicBlock *InsertAtEnd)
2393 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2394 ExtractValue, Agg, InsertAtEnd) {
2395 init(Idxs, NameStr);
2396}
2397
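A short usage sketch (the types, block, and names are illustrative): getIndexedType() can validate the index list before Create() builds the instruction.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include <cassert>

// Illustrative only: extract element {0, 1} of {{i32, i32}, i32}.
void extractValueExample(llvm::BasicBlock *BB) {
  llvm::LLVMContext &Ctx = BB->getContext();
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  llvm::StructType *Pair = llvm::StructType::get(Ctx, {I32, I32});
  llvm::StructType *Outer = llvm::StructType::get(Ctx, {Pair, I32});

  // Null is returned for invalid indices, as documented above.
  assert(llvm::ExtractValueInst::getIndexedType(Outer, {0, 1}) == I32);
  assert(llvm::ExtractValueInst::getIndexedType(Outer, {2}) == nullptr);

  llvm::Value *Agg = llvm::UndefValue::get(Outer);
  llvm::ExtractValueInst *EV =
      llvm::ExtractValueInst::Create(Agg, {0, 1}, "f", BB); // appended to BB
  assert(EV->getNumIndices() == 2 && EV->getType() == I32);
}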
2398//===----------------------------------------------------------------------===//
2399// InsertValueInst Class
2400//===----------------------------------------------------------------------===//
2401
2402/// This instruction inserts a struct field or array element
2403/// value into an aggregate value.
2404///
2405class InsertValueInst : public Instruction {
2406 SmallVector<unsigned, 4> Indices;
2407
2408 InsertValueInst(const InsertValueInst &IVI);
2409
2410 /// Constructors - Create an insertvalue instruction with a base aggregate
2411 /// value, a value to insert, and a list of indices. The first ctor can
2412 /// optionally insert before an existing instruction, the second appends
2413 /// the new instruction to the specified BasicBlock.
2414 inline InsertValueInst(Value *Agg, Value *Val,
2415 ArrayRef<unsigned> Idxs,
2416 const Twine &NameStr,
2417 Instruction *InsertBefore);
2418 inline InsertValueInst(Value *Agg, Value *Val,
2419 ArrayRef<unsigned> Idxs,
2420 const Twine &NameStr, BasicBlock *InsertAtEnd);
2421
2422 /// Constructors - These two constructors are convenience methods because one
2423 /// and two index insertvalue instructions are so common.
2424 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2425 const Twine &NameStr = "",
2426 Instruction *InsertBefore = nullptr);
2427 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2428 BasicBlock *InsertAtEnd);
2429
2430 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2431 const Twine &NameStr);
2432
2433protected:
2434 // Note: Instruction needs to be a friend here to call cloneImpl.
2435 friend class Instruction;
2436
2437 InsertValueInst *cloneImpl() const;
2438
2439public:
2440 // allocate space for exactly two operands
2441 void *operator new(size_t s) {
2442 return User::operator new(s, 2);
2443 }
2444
2445 static InsertValueInst *Create(Value *Agg, Value *Val,
2446 ArrayRef<unsigned> Idxs,
2447 const Twine &NameStr = "",
2448 Instruction *InsertBefore = nullptr) {
2449 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2450 }
2451
2452 static InsertValueInst *Create(Value *Agg, Value *Val,
2453 ArrayRef<unsigned> Idxs,
2454 const Twine &NameStr,
2455 BasicBlock *InsertAtEnd) {
2456 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2457 }
2458
2459 /// Transparently provide more efficient getOperand methods.
2460 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2461
2462 using idx_iterator = const unsigned*;
2463
2464 inline idx_iterator idx_begin() const { return Indices.begin(); }
2465 inline idx_iterator idx_end() const { return Indices.end(); }
2466 inline iterator_range<idx_iterator> indices() const {
2467 return make_range(idx_begin(), idx_end());
2468 }
2469
2470 Value *getAggregateOperand() {
2471 return getOperand(0);
2472 }
2473 const Value *getAggregateOperand() const {
2474 return getOperand(0);
2475 }
2476 static unsigned getAggregateOperandIndex() {
2477 return 0U; // get index for modifying correct operand
2478 }
2479
2480 Value *getInsertedValueOperand() {
2481 return getOperand(1);
2482 }
2483 const Value *getInsertedValueOperand() const {
2484 return getOperand(1);
2485 }
2486 static unsigned getInsertedValueOperandIndex() {
2487 return 1U; // get index for modifying correct operand
2488 }
2489
2490 ArrayRef<unsigned> getIndices() const {
2491 return Indices;
2492 }
2493
2494 unsigned getNumIndices() const {
2495 return (unsigned)Indices.size();
2496 }
2497
2498 bool hasIndices() const {
2499 return true;
2500 }
2501
2502 // Methods for support type inquiry through isa, cast, and dyn_cast:
2503 static bool classof(const Instruction *I) {
2504 return I->getOpcode() == Instruction::InsertValue;
2505 }
2506 static bool classof(const Value *V) {
2507 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2508 }
2509};
2510
2511template <>
2512struct OperandTraits<InsertValueInst> :
2513 public FixedNumOperandTraits<InsertValueInst, 2> {
2514};
2515
2516InsertValueInst::InsertValueInst(Value *Agg,
2517 Value *Val,
2518 ArrayRef<unsigned> Idxs,
2519 const Twine &NameStr,
2520 Instruction *InsertBefore)
2521 : Instruction(Agg->getType(), InsertValue,
2522 OperandTraits<InsertValueInst>::op_begin(this),
2523 2, InsertBefore) {
2524 init(Agg, Val, Idxs, NameStr);
2525}
2526
2527InsertValueInst::InsertValueInst(Value *Agg,
2528 Value *Val,
2529 ArrayRef<unsigned> Idxs,
2530 const Twine &NameStr,
2531 BasicBlock *InsertAtEnd)
2532 : Instruction(Agg->getType(), InsertValue,
2533 OperandTraits<InsertValueInst>::op_begin(this),
2534 2, InsertAtEnd) {
2535 init(Agg, Val, Idxs, NameStr);
2536}
2537
2538DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2539
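A matching sketch for insertvalue (names are illustrative): the new instruction has the aggregate's type, and the original aggregate operand is not modified.

#include "llvm/IR/Instructions.h"
#include <cassert>

// Illustrative only: write Val into index {1} of Agg, inserting before Pos.
llvm::Value *setSecondField(llvm::Value *Agg, llvm::Value *Val,
                            llvm::Instruction *Pos) {
  llvm::InsertValueInst *IV =
      llvm::InsertValueInst::Create(Agg, Val, {1}, "agg.upd", Pos);
  assert(IV->getAggregateOperand() == Agg);
  assert(IV->getInsertedValueOperand() == Val);
  assert(IV->getType() == Agg->getType());
  return IV;
}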
2540//===----------------------------------------------------------------------===//
2541// PHINode Class
2542//===----------------------------------------------------------------------===//
2543
2544// PHINode - The PHINode class is used to represent the magical mystical PHI
2545// node, that can not exist in nature, but can be synthesized in a computer
2546// scientist's overactive imagination.
2547//
2548class PHINode : public Instruction {
2549 /// The number of operands actually allocated. NumOperands is
2550 /// the number actually in use.
2551 unsigned ReservedSpace;
2552
2553 PHINode(const PHINode &PN);
2554
2555 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2556 const Twine &NameStr = "",
2557 Instruction *InsertBefore = nullptr)
2558 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2559 ReservedSpace(NumReservedValues) {
2560 setName(NameStr);
2561 allocHungoffUses(ReservedSpace);
2562 }
2563
2564 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2565 BasicBlock *InsertAtEnd)
2566 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2567 ReservedSpace(NumReservedValues) {
2568 setName(NameStr);
2569 allocHungoffUses(ReservedSpace);
2570 }
2571
2572protected:
2573 // Note: Instruction needs to be a friend here to call cloneImpl.
2574 friend class Instruction;
2575
2576 PHINode *cloneImpl() const;
2577
2578 // allocHungoffUses - this is more complicated than the generic
2579 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2580 // values and pointers to the incoming blocks, all in one allocation.
2581 void allocHungoffUses(unsigned N) {
2582 User::allocHungoffUses(N, /* IsPhi */ true);
2583 }
2584
2585public:
2586 /// Constructors - NumReservedValues is a hint for the number of incoming
2587 /// edges that this phi node will have (use 0 if you really have no idea).
2588 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2589 const Twine &NameStr = "",
2590 Instruction *InsertBefore = nullptr) {
2591 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2592 }
2593
2594 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2595 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2596 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2597 }
2598
2599 /// Provide fast operand accessors
2600 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2601
2602 // Block iterator interface. This provides access to the list of incoming
2603 // basic blocks, which parallels the list of incoming values.
2604
2605 using block_iterator = BasicBlock **;
2606 using const_block_iterator = BasicBlock * const *;
2607
2608 block_iterator block_begin() {
2609 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2610 }
2611
2612 const_block_iterator block_begin() const {
2613 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2614 }
2615
2616 block_iterator block_end() {
2617 return block_begin() + getNumOperands();
2618 }
2619
2620 const_block_iterator block_end() const {
2621 return block_begin() + getNumOperands();
2622 }
2623
2624 iterator_range<block_iterator> blocks() {
2625 return make_range(block_begin(), block_end());
2626 }
2627
2628 iterator_range<const_block_iterator> blocks() const {
2629 return make_range(block_begin(), block_end());
2630 }
2631
2632 op_range incoming_values() { return operands(); }
2633
2634 const_op_range incoming_values() const { return operands(); }
2635
2636 /// Return the number of incoming edges
2637 ///
2638 unsigned getNumIncomingValues() const { return getNumOperands(); }
2639
2640 /// Return incoming value number x
2641 ///
2642 Value *getIncomingValue(unsigned i) const {
2643 return getOperand(i);
2644 }
2645 void setIncomingValue(unsigned i, Value *V) {
2646 assert(V && "PHI node got a null value!");
2647 assert(getType() == V->getType() &&
2648        "All operands to PHI node must be the same type as the PHI node!");
2649 setOperand(i, V);
2650 }
2651
2652 static unsigned getOperandNumForIncomingValue(unsigned i) {
2653 return i;
2654 }
2655
2656 static unsigned getIncomingValueNumForOperand(unsigned i) {
2657 return i;
2658 }
2659
2660 /// Return incoming basic block number @p i.
2661 ///
2662 BasicBlock *getIncomingBlock(unsigned i) const {
2663 return block_begin()[i];
2664 }
2665
2666 /// Return incoming basic block corresponding
2667 /// to an operand of the PHI.
2668 ///
2669 BasicBlock *getIncomingBlock(const Use &U) const {
2670 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2671 return getIncomingBlock(unsigned(&U - op_begin()));
2672 }
2673
2674 /// Return incoming basic block corresponding
2675 /// to value use iterator.
2676 ///
2677 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2678 return getIncomingBlock(I.getUse());
2679 }
2680
2681 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2682 assert(BB && "PHI node got a null basic block!");
2683 block_begin()[i] = BB;
2684 }
2685
2686 /// Replace every incoming basic block \p Old to basic block \p New.
2687 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2688 assert(New && Old && "PHI node got a null basic block!");
2689 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2690 if (getIncomingBlock(Op) == Old)
2691 setIncomingBlock(Op, New);
2692 }
2693
2694 /// Add an incoming value to the end of the PHI list
2695 ///
2696 void addIncoming(Value *V, BasicBlock *BB) {
2697 if (getNumOperands() == ReservedSpace)
2698 growOperands(); // Get more space!
2699 // Initialize some new operands.
2700 setNumHungOffUseOperands(getNumOperands() + 1);
2701 setIncomingValue(getNumOperands() - 1, V);
2702 setIncomingBlock(getNumOperands() - 1, BB);
2703 }
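A brief sketch of the typical pattern (block and value names are illustrative; MergeBB is assumed to already contain its terminator): reserve the expected number of edges, then add one (value, block) pair per predecessor.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include <cassert>

// Illustrative only: merge a value coming from two predecessors.
llvm::PHINode *mergeValues(llvm::BasicBlock *MergeBB, llvm::Value *FromThen,
                           llvm::BasicBlock *ThenBB, llvm::Value *FromElse,
                           llvm::BasicBlock *ElseBB) {
  llvm::PHINode *PN = llvm::PHINode::Create(FromThen->getType(),
                                            /*NumReservedValues=*/2, "merge",
                                            &MergeBB->front());
  PN->addIncoming(FromThen, ThenBB);
  PN->addIncoming(FromElse, ElseBB);
  assert(PN->getNumIncomingValues() == 2);
  assert(PN->getIncomingValueForBlock(ThenBB) == FromThen);
  return PN;
}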
2704
2705 /// Remove an incoming value. This is useful if a
2706 /// predecessor basic block is deleted. The value removed is returned.
2707 ///
2708 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2709 /// is true), the PHI node is destroyed and any uses of it are replaced with
2710 /// dummy values. The only time there should be zero incoming values to a PHI
2711 /// node is when the block is dead, so this strategy is sound.
2712 ///
2713 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2714
2715 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2716 int Idx = getBasicBlockIndex(BB);
2717 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2718 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2719 }
2720
2721 /// Return the first index of the specified basic
2722 /// block in the value list for this PHI. Returns -1 if no instance.
2723 ///
2724 int getBasicBlockIndex(const BasicBlock *BB) const {
2725 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2726 if (block_begin()[i] == BB)
2727 return i;
2728 return -1;
2729 }
2730
2731 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2732 int Idx = getBasicBlockIndex(BB);
2733 assert(Idx >= 0 && "Invalid basic block argument!");
2734 return getIncomingValue(Idx);
2735 }
2736
2737 /// Set every incoming value for block \p BB to \p V.
2738 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2739 assert(BB && "PHI node got a null basic block!");
2740 bool Found = false;
2741 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2742 if (getIncomingBlock(Op) == BB) {
2743 Found = true;
2744 setIncomingValue(Op, V);
2745 }
2746 (void)Found;
2747 assert(Found && "Invalid basic block argument to set!");
2748 }
2749
2750 /// If the specified PHI node always merges together the
2751 /// same value, return the value, otherwise return null.
2752 Value *hasConstantValue() const;
2753
2754 /// Whether the specified PHI node always merges
2755 /// together the same value, assuming undefs are equal to a unique
2756 /// non-undef value.
2757 bool hasConstantOrUndefValue() const;
2758
2759 /// Return true if this PHI node is complete, i.e. all of its parent's
2760 /// predecessors have an incoming value in this PHI; otherwise return false.
2761 bool isComplete() const {
2762 return llvm::all_of(predecessors(getParent()),
2763 [this](const BasicBlock *Pred) {
2764 return getBasicBlockIndex(Pred) >= 0;
2765 });
2766 }
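Because getIncomingValueForBlock() asserts that the block is actually listed, callers that are unsure should consult getBasicBlockIndex() first; a small sketch (the helper name is illustrative):

#include "llvm/IR/Instructions.h"

// Illustrative only: return the incoming value for BB, or null if BB is not
// a predecessor recorded in this PHI.
llvm::Value *incomingOrNull(const llvm::PHINode &PN,
                            const llvm::BasicBlock *BB) {
  if (PN.getBasicBlockIndex(BB) < 0)
    return nullptr;
  return PN.getIncomingValueForBlock(BB); // index is known to exist here
}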
2767
2768 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2769 static bool classof(const Instruction *I) {
2770 return I->getOpcode() == Instruction::PHI;
2771 }
2772 static bool classof(const Value *V) {
2773 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2774 }
2775
2776private:
2777 void growOperands();
2778};
2779
2780template <>
2781struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2782};
2783
2784DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
2785
2786//===----------------------------------------------------------------------===//
2787// LandingPadInst Class
2788//===----------------------------------------------------------------------===//
2789
2790//===---------------------------------------------------------------------------
2791/// The landingpad instruction holds all of the information
2792/// necessary to generate correct exception handling. The landingpad instruction
2793/// cannot be moved from the top of a landing pad block, which itself is
2794/// accessible only from the 'unwind' edge of an invoke. This uses the
2795/// SubclassData field in Value to store whether or not the landingpad is a
2796/// cleanup.
2797///
2798class LandingPadInst : public Instruction {
2799 using CleanupField = BoolBitfieldElementT<0>;
2800
2801 /// The number of operands actually allocated. NumOperands is
2802 /// the number actually in use.
2803 unsigned ReservedSpace;
2804
2805 LandingPadInst(const LandingPadInst &LP);
2806
2807public:
2808 enum ClauseType { Catch, Filter };
2809
2810private:
2811 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2812 const Twine &NameStr, Instruction *InsertBefore);
2813 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2814 const Twine &NameStr, BasicBlock *InsertAtEnd);
2815
2816 // Allocate space for exactly zero operands.
2817 void *operator new(size_t s) {
2818 return User::operator new(s);
2819 }
2820
2821 void growOperands(unsigned Size);
2822 void init(unsigned NumReservedValues, const Twine &NameStr);
2823
2824protected:
2825 // Note: Instruction needs to be a friend here to call cloneImpl.
2826 friend class Instruction;
2827
2828 LandingPadInst *cloneImpl() const;
2829
2830public:
2831 /// Constructors - NumReservedClauses is a hint for the number of incoming
2832 /// clauses that this landingpad will have (use 0 if you really have no idea).
2833 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2834 const Twine &NameStr = "",
2835 Instruction *InsertBefore = nullptr);
2836 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2837 const Twine &NameStr, BasicBlock *InsertAtEnd);
2838
2839 /// Provide fast operand accessors
2840 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2841
2842 /// Return 'true' if this landingpad instruction is a
2843 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2844 /// doesn't catch the exception.
2845 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2846
2847 /// Indicate that this landingpad instruction is a cleanup.
2848 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2849
2850 /// Add a catch or filter clause to the landing pad.
2851 void addClause(Constant *ClauseVal);
2852
2853 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2854 /// determine what type of clause this is.
2855 Constant *getClause(unsigned Idx) const {
2856 return cast<Constant>(getOperandList()[Idx]);
2857 }
2858
2859 /// Return 'true' if the clause at index Idx is a catch clause.
2860 bool isCatch(unsigned Idx) const {
2861 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2862 }
2863
2864 /// Return 'true' if the clause at index Idx is a filter clause.
2865 bool isFilter(unsigned Idx) const {
2866 return isa<ArrayType>(getOperandList()[Idx]->getType());
2867 }
2868
2869 /// Get the number of clauses for this landing pad.
2870 unsigned getNumClauses() const { return getNumOperands(); }
2871
2872 /// Grow the size of the operand list to accommodate the new
2873 /// number of clauses.
2874 void reserveClauses(unsigned Size) { growOperands(Size); }
2875
2876 // Methods for support type inquiry through isa, cast, and dyn_cast:
2877 static bool classof(const Instruction *I) {
2878 return I->getOpcode() == Instruction::LandingPad;
2879 }
2880 static bool classof(const Value *V) {
2881 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2882 }
2883};
2884
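A rough sketch of building a landing pad (the block, type-info global, and names are illustrative, and the enclosing function is assumed to carry a personality function):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include <cassert>

// Illustrative only: a cleanup landingpad with one catch clause.
llvm::LandingPadInst *makePad(llvm::BasicBlock *PadBB,
                              llvm::GlobalVariable *TypeInfo) {
  llvm::LLVMContext &Ctx = PadBB->getContext();
  // { i8*, i32 } is the customary personality result type.
  llvm::StructType *PadTy = llvm::StructType::get(
      llvm::Type::getInt8PtrTy(Ctx), llvm::Type::getInt32Ty(Ctx));
  llvm::LandingPadInst *LP = llvm::LandingPadInst::Create(
      PadTy, /*NumReservedClauses=*/1, "lpad", PadBB);
  LP->setCleanup(true);    // also run this pad when merely unwinding
  LP->addClause(TypeInfo); // non-array clause type, so this is a catch
  assert(LP->isCleanup() && LP->getNumClauses() == 1 && LP->isCatch(0));
  return LP;
}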
2885template <>
2886struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2887};
2888
2889DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
2890
2891//===----------------------------------------------------------------------===//
2892// ReturnInst Class
2893//===----------------------------------------------------------------------===//
2894
2895//===---------------------------------------------------------------------------
2896/// Return a value (possibly void), from a function. Execution
2897/// does not continue in this function any longer.
2898///
2899class ReturnInst : public Instruction {
2900 ReturnInst(const ReturnInst &RI);
2901
2902private:
2903 // ReturnInst constructors:
2904 // ReturnInst() - 'ret void' instruction
2905 // ReturnInst( null) - 'ret void' instruction
2906 // ReturnInst(Value* X) - 'ret X' instruction
2907 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2908 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2909 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2910 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2911 //
2912 // NOTE: If the Value* passed is of type void then the constructor behaves as
2913 // if it was passed NULL.
2914 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2915 Instruction *InsertBefore = nullptr);
2916 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2917 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2918
2919protected:
2920 // Note: Instruction needs to be a friend here to call cloneImpl.
2921 friend class Instruction;
2922
2923 ReturnInst *cloneImpl() const;
2924
2925public:
2926 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2927 Instruction *InsertBefore = nullptr) {
2928 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2929 }
2930
2931 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2932 BasicBlock *InsertAtEnd) {
2933 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2934 }
2935
2936 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2937 return new(0) ReturnInst(C, InsertAtEnd);
2938 }
2939
2940 /// Provide fast operand accessors
2941 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2942
2943 /// Convenience accessor. Returns null if there is no return value.
2944 Value *getReturnValue() const {
2945 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2946 }
2947
2948 unsigned getNumSuccessors() const { return 0; }
2949
2950 // Methods for support type inquiry through isa, cast, and dyn_cast:
2951 static bool classof(const Instruction *I) {
2952 return (I->getOpcode() == Instruction::Ret);
2953 }
2954 static bool classof(const Value *V) {
2955 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2956 }
2957
2958private:
2959 BasicBlock *getSuccessor(unsigned idx) const {
2960 llvm_unreachable("ReturnInst has no successors!");
2961 }
2962
2963 void setSuccessor(unsigned idx, BasicBlock *B) {
2964 llvm_unreachable("ReturnInst has no successors!");
2965 }
2966};
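A minimal sketch (names are illustrative) of the two Create() forms above: passing a null value produces 'ret void', and getReturnValue() mirrors that with a null result.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include <cassert>

// Illustrative only: terminate BB with 'ret <RetVal>' or 'ret void'.
void addReturn(llvm::BasicBlock *BB, llvm::Value *RetVal /* may be null */) {
  llvm::LLVMContext &Ctx = BB->getContext();
  llvm::ReturnInst *RI = RetVal ? llvm::ReturnInst::Create(Ctx, RetVal, BB)
                                : llvm::ReturnInst::Create(Ctx, BB);
  assert(RI->getReturnValue() == RetVal); // null for 'ret void'
  assert(RI->getNumSuccessors() == 0);
}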
2967
2968template <>
2969struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
2970};
2971
2972DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
2973
2974//===----------------------------------------------------------------------===//
2975// BranchInst Class
2976//===----------------------------------------------------------------------===//
2977
2978//===---------------------------------------------------------------------------
2979/// Conditional or Unconditional Branch instruction.
2980///
2981class BranchInst : public Instruction {
2982 /// Ops list - Branches are strange. The operands are ordered:
2983 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
2984 /// they don't have to check for cond/uncond branchness. These are mostly
2985 /// accessed relative from op_end().
2986 BranchInst(const BranchInst &BI);
2987 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
2988 // BranchInst(BB *B) - 'br B'
2989 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
2990 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
2991 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
2992 // BranchInst(BB* B, BB *I) - 'br B' insert at end
2993 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
2994 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
2995 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
2996 Instruction *InsertBefore = nullptr);
2997 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
2998 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
2999 BasicBlock *InsertAtEnd);
3000
3001 void AssertOK();
3002
3003protected:
3004 // Note: Instruction needs to be a friend here to call cloneImpl.
3005 friend class Instruction;
3006
3007 BranchInst *cloneImpl() const;
3008
3009public:
3010 /// Iterator type that casts an operand to a basic block.
3011 ///
3012 /// This only makes sense because the successors are stored as adjacent
3013 /// operands for branch instructions.
3014 struct succ_op_iterator
3015 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3016 std::random_access_iterator_tag, BasicBlock *,
3017 ptrdiff_t, BasicBlock *, BasicBlock *> {
3018 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3019
3020 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3021 BasicBlock *operator->() const { return operator*(); }
3022 };
3023
3024 /// The const version of `succ_op_iterator`.
3025 struct const_succ_op_iterator
3026 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3027 std::random_access_iterator_tag,
3028 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3029 const BasicBlock *> {
3030 explicit const_succ_op_iterator(const_value_op_iterator I)
3031 : iterator_adaptor_base(I) {}
3032
3033 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3034 const BasicBlock *operator->() const { return operator*(); }
3035 };
3036
3037 static BranchInst *Create(BasicBlock *IfTrue,
3038 Instruction *InsertBefore = nullptr) {
3039 return new(1) BranchInst(IfTrue, InsertBefore);
3040 }
3041
3042 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3043 Value *Cond, Instruction *InsertBefore = nullptr) {
3044 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3045 }
3046
3047 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3048 return new(1) BranchInst(IfTrue, InsertAtEnd);
3049 }
3050
3051 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3052 Value *Cond, BasicBlock *InsertAtEnd) {
3053 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3054 }
3055
3056 /// Transparently provide more efficient getOperand methods.
3057 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3058
3059 bool isUnconditional() const { return getNumOperands() == 1; }
48
Assuming the condition is false
49
Returning zero, which participates in a condition later
3060 bool isConditional() const { return getNumOperands() == 3; }
3061
3062 Value *getCondition() const {
3063 assert(isConditional() && "Cannot get condition of an uncond branch!");
3064 return Op<-3>();
3065 }
3066
3067 void setCondition(Value *V) {
3068 assert(isConditional() && "Cannot set condition of unconditional branch!");
3069 Op<-3>() = V;
3070 }
3071
3072 unsigned getNumSuccessors() const { return 1+isConditional(); }
3073
3074 BasicBlock *getSuccessor(unsigned i) const {
3075 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3076 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3077 }
3078
3079 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3080 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3081 *(&Op<-1>() - idx) = NewSucc;
3082 }
3083
3084 /// Swap the successors of this branch instruction.
3085 ///
3086 /// Swaps the successors of the branch instruction. This also swaps any
3087 /// branch weight metadata associated with the instruction so that it
3088 /// continues to map correctly to each operand.
3089 void swapSuccessors();
3090
3091 iterator_range<succ_op_iterator> successors() {
3092 return make_range(
3093 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3094 succ_op_iterator(value_op_end()));
3095 }
3096
3097 iterator_range<const_succ_op_iterator> successors() const {
3098 return make_range(const_succ_op_iterator(
3099 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3100 const_succ_op_iterator(value_op_end()));
3101 }
3102
3103 // Methods for support type inquiry through isa, cast, and dyn_cast:
3104 static bool classof(const Instruction *I) {
3105 return (I->getOpcode() == Instruction::Br);
3106 }
3107 static bool classof(const Value *V) {
3108 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3109 }
3110};
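Since getCondition() asserts isConditional(), callers must check the branch form first; a small sketch of the guarded pattern (the helper name is illustrative), which is the same kind of check the analyzer steps above (48/49) pass through:

#include "llvm/IR/Instructions.h"

// Illustrative only: return the condition of a conditional branch, or null
// for an unconditional one, instead of tripping the assertion above.
llvm::Value *branchConditionOrNull(const llvm::BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return nullptr;
  return BI->getCondition(); // exactly three operands here
}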
3111
3112template <>
3113struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3114};
3115
3116DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3117
3118//===----------------------------------------------------------------------===//
3119// SwitchInst Class
3120//===----------------------------------------------------------------------===//
3121
3122//===---------------------------------------------------------------------------
3123/// Multiway switch
3124///
3125class SwitchInst : public Instruction {
3126 unsigned ReservedSpace;
3127
3128 // Operand[0] = Value to switch on
3129 // Operand[1] = Default basic block destination
3130 // Operand[2n ] = Value to match
3131 // Operand[2n+1] = BasicBlock to go to on match
3132 SwitchInst(const SwitchInst &SI);
3133
3134 /// Create a new switch instruction, specifying a value to switch on and a
3135 /// default destination. The number of additional cases can be specified here
3136 /// to make memory allocation more efficient. This constructor can also
3137 /// auto-insert before another instruction.
3138 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3139 Instruction *InsertBefore);
3140
3141 /// Create a new switch instruction, specifying a value to switch on and a
3142 /// default destination. The number of additional cases can be specified here
3143 /// to make memory allocation more efficient. This constructor also
3144 /// auto-inserts at the end of the specified BasicBlock.
3145 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3146 BasicBlock *InsertAtEnd);
3147
3148 // allocate space for exactly zero operands
3149 void *operator new(size_t s) {
3150 return User::operator new(s);
3151 }
3152
3153 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3154 void growOperands();
3155
3156protected:
3157 // Note: Instruction needs to be a friend here to call cloneImpl.
3158 friend class Instruction;
3159
3160 SwitchInst *cloneImpl() const;
3161
3162public:
3163 // -2
3164 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3165
3166 template <typename CaseHandleT> class CaseIteratorImpl;
3167
3168 /// A handle to a particular switch case. It exposes a convenient interface
3169 /// to both the case value and the successor block.
3170 ///
3171 /// We define this as a template and instantiate it to form both a const and
3172 /// non-const handle.
3173 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3174 class CaseHandleImpl {
3175 // Directly befriend both const and non-const iterators.
3176 friend class SwitchInst::CaseIteratorImpl<
3177 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3178
3179 protected:
3180 // Expose the switch type we're parameterized with to the iterator.
3181 using SwitchInstType = SwitchInstT;
3182
3183 SwitchInstT *SI;
3184 ptrdiff_t Index;
3185
3186 CaseHandleImpl() = default;
3187 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3188
3189 public:
3190 /// Resolves case value for current case.
3191 ConstantIntT *getCaseValue() const {
3192      assert((unsigned)Index < SI->getNumCases() &&
3193             "Index out the number of cases.");
3194 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3195 }
3196
3197 /// Resolves successor for current case.
3198 BasicBlockT *getCaseSuccessor() const {
3199      assert(((unsigned)Index < SI->getNumCases() ||
3200              (unsigned)Index == DefaultPseudoIndex) &&
3201             "Index out the number of cases.");
3202 return SI->getSuccessor(getSuccessorIndex());
3203 }
3204
3205 /// Returns number of current case.
3206 unsigned getCaseIndex() const { return Index; }
3207
3208 /// Returns successor index for current case successor.
3209 unsigned getSuccessorIndex() const {
3210      assert(((unsigned)Index == DefaultPseudoIndex ||
3211              (unsigned)Index < SI->getNumCases()) &&
3212             "Index out the number of cases.");
3213 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3214 }
3215
3216 bool operator==(const CaseHandleImpl &RHS) const {
3217      assert(SI == RHS.SI && "Incompatible operators.");
3218 return Index == RHS.Index;
3219 }
3220 };
3221
3222 using ConstCaseHandle =
3223 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3224
3225 class CaseHandle
3226 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3227 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3228
3229 public:
3230 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3231
3232 /// Sets the new value for current case.
3233 void setValue(ConstantInt *V) {
3234      assert((unsigned)Index < SI->getNumCases() &&
3235             "Index out the number of cases.");
3236 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3237 }
3238
3239 /// Sets the new successor for current case.
3240 void setSuccessor(BasicBlock *S) {
3241 SI->setSuccessor(getSuccessorIndex(), S);
3242 }
3243 };
3244
3245 template <typename CaseHandleT>
3246 class CaseIteratorImpl
3247 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3248 std::random_access_iterator_tag,
3249 CaseHandleT> {
3250 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3251
3252 CaseHandleT Case;
3253
3254 public:
3255 /// Default constructed iterator is in an invalid state until assigned to
3256 /// a case for a particular switch.
3257 CaseIteratorImpl() = default;
3258
3259 /// Initializes case iterator for given SwitchInst and for given
3260 /// case number.
3261 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3262
3263 /// Initializes case iterator for given SwitchInst and for given
3264 /// successor index.
3265 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3266 unsigned SuccessorIndex) {
3267      assert(SuccessorIndex < SI->getNumSuccessors() &&
3268             "Successor index # out of range!");
3269 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3270 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3271 }
3272
3273 /// Support converting to the const variant. This will be a no-op for const
3274 /// variant.
3275 operator CaseIteratorImpl<ConstCaseHandle>() const {
3276 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3277 }
3278
3279 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3280 // Check index correctness after addition.
3281 // Note: Index == getNumCases() means end().
3282      assert(Case.Index + N >= 0 &&
3283             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3284             "Case.Index out the number of cases.");
3285 Case.Index += N;
3286 return *this;
3287 }
3288 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3289 // Check index correctness after subtraction.
3290 // Note: Case.Index == getNumCases() means end().
3291      assert(Case.Index - N >= 0 &&
3292             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3293             "Case.Index out the number of cases.");
3294 Case.Index -= N;
3295 return *this;
3296 }
3297 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3298      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3299 return Case.Index - RHS.Case.Index;
3300 }
3301 bool operator==(const CaseIteratorImpl &RHS) const {
3302 return Case == RHS.Case;
3303 }
3304 bool operator<(const CaseIteratorImpl &RHS) const {
3305      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3306 return Case.Index < RHS.Case.Index;
3307 }
3308 CaseHandleT &operator*() { return Case; }
3309 const CaseHandleT &operator*() const { return Case; }
3310 };
3311
3312 using CaseIt = CaseIteratorImpl<CaseHandle>;
3313 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3314
3315 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3316 unsigned NumCases,
3317 Instruction *InsertBefore = nullptr) {
3318 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3319 }
3320
3321 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3322 unsigned NumCases, BasicBlock *InsertAtEnd) {
3323 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3324 }
3325
3326 /// Provide fast operand accessors
3327  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3328
3329 // Accessor Methods for Switch stmt
3330 Value *getCondition() const { return getOperand(0); }
3331 void setCondition(Value *V) { setOperand(0, V); }
3332
3333 BasicBlock *getDefaultDest() const {
3334 return cast<BasicBlock>(getOperand(1));
3335 }
3336
3337 void setDefaultDest(BasicBlock *DefaultCase) {
3338 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3339 }
3340
3341 /// Return the number of 'cases' in this switch instruction, excluding the
3342 /// default case.
3343 unsigned getNumCases() const {
3344 return getNumOperands()/2 - 1;
3345 }
3346
3347 /// Returns a read/write iterator that points to the first case in the
3348 /// SwitchInst.
3349 CaseIt case_begin() {
3350 return CaseIt(this, 0);
3351 }
3352
3353 /// Returns a read-only iterator that points to the first case in the
3354 /// SwitchInst.
3355 ConstCaseIt case_begin() const {
3356 return ConstCaseIt(this, 0);
3357 }
3358
3359 /// Returns a read/write iterator that points one past the last in the
3360 /// SwitchInst.
3361 CaseIt case_end() {
3362 return CaseIt(this, getNumCases());
3363 }
3364
3365 /// Returns a read-only iterator that points one past the last in the
3366 /// SwitchInst.
3367 ConstCaseIt case_end() const {
3368 return ConstCaseIt(this, getNumCases());
3369 }
3370
3371 /// Iteration adapter for range-for loops.
3372 iterator_range<CaseIt> cases() {
3373 return make_range(case_begin(), case_end());
3374 }
3375
3376 /// Constant iteration adapter for range-for loops.
3377 iterator_range<ConstCaseIt> cases() const {
3378 return make_range(case_begin(), case_end());
3379 }
3380
3381 /// Returns an iterator that points to the default case.
3382 /// Note: this iterator allows to resolve successor only. Attempt
3383 /// to resolve case value causes an assertion.
3384 /// Also note, that increment and decrement also causes an assertion and
3385 /// makes iterator invalid.
3386 CaseIt case_default() {
3387 return CaseIt(this, DefaultPseudoIndex);
3388 }
3389 ConstCaseIt case_default() const {
3390 return ConstCaseIt(this, DefaultPseudoIndex);
3391 }
3392
3393 /// Search all of the case values for the specified constant. If it is
3394 /// explicitly handled, return the case iterator of it, otherwise return
3395 /// default case iterator to indicate that it is handled by the default
3396 /// handler.
3397 CaseIt findCaseValue(const ConstantInt *C) {
3398 CaseIt I = llvm::find_if(
3399 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3400 if (I != case_end())
3401 return I;
3402
3403 return case_default();
3404 }
3405 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3406 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3407 return Case.getCaseValue() == C;
3408 });
3409 if (I != case_end())
3410 return I;
3411
3412 return case_default();
3413 }
3414
3415 /// Finds the unique case value for a given successor. Returns null if the
3416 /// successor is not found, not unique, or is the default case.
3417 ConstantInt *findCaseDest(BasicBlock *BB) {
3418 if (BB == getDefaultDest())
3419 return nullptr;
3420
3421 ConstantInt *CI = nullptr;
3422 for (auto Case : cases()) {
3423 if (Case.getCaseSuccessor() != BB)
3424 continue;
3425
3426 if (CI)
3427 return nullptr; // Multiple cases lead to BB.
3428
3429 CI = Case.getCaseValue();
3430 }
3431
3432 return CI;
3433 }
3434
3435 /// Add an entry to the switch instruction.
3436 /// Note:
3437 /// This action invalidates case_end(). Old case_end() iterator will
3438 /// point to the added case.
3439 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3440
3441 /// This method removes the specified case and its successor from the switch
3442 /// instruction. Note that this operation may reorder the remaining cases at
3443 /// index idx and above.
3444 /// Note:
3445 /// This action invalidates iterators for all cases following the one removed,
3446 /// including the case_end() iterator. It returns an iterator for the next
3447 /// case.
3448 CaseIt removeCase(CaseIt I);
3449
3450 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3451 BasicBlock *getSuccessor(unsigned idx) const {
3452    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3453 return cast<BasicBlock>(getOperand(idx*2+1));
3454 }
3455 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3456    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3457 setOperand(idx * 2 + 1, NewSucc);
3458 }
3459
3460 // Methods for support type inquiry through isa, cast, and dyn_cast:
3461 static bool classof(const Instruction *I) {
3462 return I->getOpcode() == Instruction::Switch;
3463 }
3464 static bool classof(const Value *V) {
3465 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3466 }
3467};
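A minimal usage sketch of the SwitchInst API declared above (not part of the analyzed file; `Cond`, `DefaultBB`, `CaseBB`, `Ctx`, and `InsertBefore` are placeholder names assumed to exist in the surrounding code, with the usual llvm/IR headers available):

  // Build a switch with room for one case, add it, then walk the cases.
  SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, InsertBefore);
  SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 42), CaseBB);
  for (auto Case : SI->cases()) {
    ConstantInt *Val = Case.getCaseValue();      // value being matched
    BasicBlock *Succ = Case.getCaseSuccessor();  // destination taken on match
    (void)Val; (void)Succ;
  }
  // findCaseValue() falls back to case_default() when the value has no
  // explicit handler.
  SwitchInst::CaseIt It = SI->findCaseValue(ConstantInt::get(Type::getInt32Ty(Ctx), 42));
  (void)It;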
3468
3469/// A wrapper class to simplify modification of SwitchInst cases along with
3470/// their prof branch_weights metadata.
3471class SwitchInstProfUpdateWrapper {
3472 SwitchInst &SI;
3473 Optional<SmallVector<uint32_t, 8> > Weights = None;
3474 bool Changed = false;
3475
3476protected:
3477 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3478
3479 MDNode *buildProfBranchWeightsMD();
3480
3481 void init();
3482
3483public:
3484 using CaseWeightOpt = Optional<uint32_t>;
3485 SwitchInst *operator->() { return &SI; }
3486 SwitchInst &operator*() { return SI; }
3487 operator SwitchInst *() { return &SI; }
3488
3489 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3490
3491 ~SwitchInstProfUpdateWrapper() {
3492 if (Changed)
3493 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3494 }
3495
3496 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3497 /// correspondent branch weight.
3498 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3499
3500 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3501 /// specified branch weight for the added case.
3502 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3503
3504 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3505 /// this object to not touch the underlying SwitchInst in destructor.
3506 SymbolTableList<Instruction>::iterator eraseFromParent();
3507
3508 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3509 CaseWeightOpt getSuccessorWeight(unsigned idx);
3510
3511 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3512};
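A hedged sketch of how the wrapper above is typically used so that prof branch_weights metadata stays in sync while cases are removed; `SI` and `BBToDrop` are assumed from the surrounding code:

  SwitchInstProfUpdateWrapper SIW(*SI);
  for (auto CI = SIW->case_begin(); CI != SIW->case_end();) {
    if ((*CI).getCaseSuccessor() == BBToDrop)
      CI = SIW.removeCase(CI);  // also drops the matching branch weight
    else
      ++CI;
  }
  // The wrapper's destructor rewrites the prof metadata if anything changed.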
3513
3514template <>
3515struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3516};
3517
3518DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3519
3520//===----------------------------------------------------------------------===//
3521// IndirectBrInst Class
3522//===----------------------------------------------------------------------===//
3523
3524//===---------------------------------------------------------------------------
3525/// Indirect Branch Instruction.
3526///
3527class IndirectBrInst : public Instruction {
3528 unsigned ReservedSpace;
3529
3530 // Operand[0] = Address to jump to
3531 // Operand[n+1] = n-th destination
3532 IndirectBrInst(const IndirectBrInst &IBI);
3533
3534 /// Create a new indirectbr instruction, specifying an
3535 /// Address to jump to. The number of expected destinations can be specified
3536 /// here to make memory allocation more efficient. This constructor can also
3537 /// autoinsert before another instruction.
3538 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3539
3540 /// Create a new indirectbr instruction, specifying an
3541 /// Address to jump to. The number of expected destinations can be specified
3542 /// here to make memory allocation more efficient. This constructor also
3543 /// autoinserts at the end of the specified BasicBlock.
3544 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3545
3546 // allocate space for exactly zero operands
3547 void *operator new(size_t s) {
3548 return User::operator new(s);
3549 }
3550
3551 void init(Value *Address, unsigned NumDests);
3552 void growOperands();
3553
3554protected:
3555 // Note: Instruction needs to be a friend here to call cloneImpl.
3556 friend class Instruction;
3557
3558 IndirectBrInst *cloneImpl() const;
3559
3560public:
3561 /// Iterator type that casts an operand to a basic block.
3562 ///
3563 /// This only makes sense because the successors are stored as adjacent
3564 /// operands for indirectbr instructions.
3565 struct succ_op_iterator
3566 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3567 std::random_access_iterator_tag, BasicBlock *,
3568 ptrdiff_t, BasicBlock *, BasicBlock *> {
3569 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3570
3571 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3572 BasicBlock *operator->() const { return operator*(); }
3573 };
3574
3575 /// The const version of `succ_op_iterator`.
3576 struct const_succ_op_iterator
3577 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3578 std::random_access_iterator_tag,
3579 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3580 const BasicBlock *> {
3581 explicit const_succ_op_iterator(const_value_op_iterator I)
3582 : iterator_adaptor_base(I) {}
3583
3584 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3585 const BasicBlock *operator->() const { return operator*(); }
3586 };
3587
3588 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3589 Instruction *InsertBefore = nullptr) {
3590 return new IndirectBrInst(Address, NumDests, InsertBefore);
3591 }
3592
3593 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3594 BasicBlock *InsertAtEnd) {
3595 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3596 }
3597
3598 /// Provide fast operand accessors.
3599  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3600
3601 // Accessor Methods for IndirectBrInst instruction.
3602 Value *getAddress() { return getOperand(0); }
3603 const Value *getAddress() const { return getOperand(0); }
3604 void setAddress(Value *V) { setOperand(0, V); }
3605
3606 /// return the number of possible destinations in this
3607 /// indirectbr instruction.
3608 unsigned getNumDestinations() const { return getNumOperands()-1; }
3609
3610 /// Return the specified destination.
3611 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3612 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3613
3614 /// Add a destination.
3615 ///
3616 void addDestination(BasicBlock *Dest);
3617
3618 /// This method removes the specified successor from the
3619 /// indirectbr instruction.
3620 void removeDestination(unsigned i);
3621
3622 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3623 BasicBlock *getSuccessor(unsigned i) const {
3624 return cast<BasicBlock>(getOperand(i+1));
3625 }
3626 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3627 setOperand(i + 1, NewSucc);
3628 }
3629
3630 iterator_range<succ_op_iterator> successors() {
3631 return make_range(succ_op_iterator(std::next(value_op_begin())),
3632 succ_op_iterator(value_op_end()));
3633 }
3634
3635 iterator_range<const_succ_op_iterator> successors() const {
3636 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3637 const_succ_op_iterator(value_op_end()));
3638 }
3639
3640 // Methods for support type inquiry through isa, cast, and dyn_cast:
3641 static bool classof(const Instruction *I) {
3642 return I->getOpcode() == Instruction::IndirectBr;
3643 }
3644 static bool classof(const Value *V) {
3645 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3646 }
3647};
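A minimal sketch of the indirectbr API above (not from the analyzed file; `TargetBB` and the insertion block `BB` are assumed placeholders):

  // Jump through the address of TargetBB; one destination is expected.
  Value *Addr = BlockAddress::get(TargetBB);
  IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/1, BB);
  IBI->addDestination(TargetBB);
  for (BasicBlock *Succ : IBI->successors())
    (void)Succ;  // every listed destination is also a successor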
3648
3649template <>
3650struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3651};
3652
3653DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3654
3655//===----------------------------------------------------------------------===//
3656// InvokeInst Class
3657//===----------------------------------------------------------------------===//
3658
3659/// Invoke instruction. The SubclassData field is used to hold the
3660/// calling convention of the call.
3661///
3662class InvokeInst : public CallBase {
3663 /// The number of operands for this call beyond the called function,
3664 /// arguments, and operand bundles.
3665 static constexpr int NumExtraOperands = 2;
3666
3667 /// The index from the end of the operand array to the normal destination.
3668 static constexpr int NormalDestOpEndIdx = -3;
3669
3670 /// The index from the end of the operand array to the unwind destination.
3671 static constexpr int UnwindDestOpEndIdx = -2;
3672
3673 InvokeInst(const InvokeInst &BI);
3674
3675 /// Construct an InvokeInst given a range of arguments.
3676 ///
3677 /// Construct an InvokeInst from a range of arguments
3678 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3679 BasicBlock *IfException, ArrayRef<Value *> Args,
3680 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3681 const Twine &NameStr, Instruction *InsertBefore);
3682
3683 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3684 BasicBlock *IfException, ArrayRef<Value *> Args,
3685 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3686 const Twine &NameStr, BasicBlock *InsertAtEnd);
3687
3688 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3689 BasicBlock *IfException, ArrayRef<Value *> Args,
3690 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3691
3692 /// Compute the number of operands to allocate.
3693 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3694 // We need one operand for the called function, plus our extra operands and
3695 // the input operand counts provided.
3696 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3697 }
3698
3699protected:
3700 // Note: Instruction needs to be a friend here to call cloneImpl.
3701 friend class Instruction;
3702
3703 InvokeInst *cloneImpl() const;
3704
3705public:
3706 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3707 BasicBlock *IfException, ArrayRef<Value *> Args,
3708 const Twine &NameStr,
3709 Instruction *InsertBefore = nullptr) {
3710 int NumOperands = ComputeNumOperands(Args.size());
3711 return new (NumOperands)
3712 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3713 NameStr, InsertBefore);
3714 }
3715
3716 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3717 BasicBlock *IfException, ArrayRef<Value *> Args,
3718 ArrayRef<OperandBundleDef> Bundles = None,
3719 const Twine &NameStr = "",
3720 Instruction *InsertBefore = nullptr) {
3721 int NumOperands =
3722 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3723 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3724
3725 return new (NumOperands, DescriptorBytes)
3726 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3727 NameStr, InsertBefore);
3728 }
3729
3730 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3731 BasicBlock *IfException, ArrayRef<Value *> Args,
3732 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3733 int NumOperands = ComputeNumOperands(Args.size());
3734 return new (NumOperands)
3735 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3736 NameStr, InsertAtEnd);
3737 }
3738
3739 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3740 BasicBlock *IfException, ArrayRef<Value *> Args,
3741 ArrayRef<OperandBundleDef> Bundles,
3742 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3743 int NumOperands =
3744 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3745 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3746
3747 return new (NumOperands, DescriptorBytes)
3748 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3749 NameStr, InsertAtEnd);
3750 }
3751
3752 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3753 BasicBlock *IfException, ArrayRef<Value *> Args,
3754 const Twine &NameStr,
3755 Instruction *InsertBefore = nullptr) {
3756 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3757 IfException, Args, None, NameStr, InsertBefore);
3758 }
3759
3760 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3761 BasicBlock *IfException, ArrayRef<Value *> Args,
3762 ArrayRef<OperandBundleDef> Bundles = None,
3763 const Twine &NameStr = "",
3764 Instruction *InsertBefore = nullptr) {
3765 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3766 IfException, Args, Bundles, NameStr, InsertBefore);
3767 }
3768
3769 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3770 BasicBlock *IfException, ArrayRef<Value *> Args,
3771 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3772 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3773 IfException, Args, NameStr, InsertAtEnd);
3774 }
3775
3776 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3777 BasicBlock *IfException, ArrayRef<Value *> Args,
3778 ArrayRef<OperandBundleDef> Bundles,
3779 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3780 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3781 IfException, Args, Bundles, NameStr, InsertAtEnd);
3782 }
3783
3784 /// Create a clone of \p II with a different set of operand bundles and
3785 /// insert it before \p InsertPt.
3786 ///
3787 /// The returned invoke instruction is identical to \p II in every way except
3788 /// that the operand bundles for the new instruction are set to the operand
3789 /// bundles in \p Bundles.
3790 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3791 Instruction *InsertPt = nullptr);
3792
3793 /// Create a clone of \p II with a different set of operand bundles and
3794 /// insert it before \p InsertPt.
3795 ///
3796 /// The returned invoke instruction is identical to \p II in every way except
3797 /// that the operand bundle for the new instruction is set to the operand
3798 /// bundle in \p Bundle.
3799 static InvokeInst *CreateWithReplacedBundle(InvokeInst *II,
3800 OperandBundleDef Bundles,
3801 Instruction *InsertPt = nullptr);
3802
3803 // get*Dest - Return the destination basic blocks...
3804 BasicBlock *getNormalDest() const {
3805 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3806 }
3807 BasicBlock *getUnwindDest() const {
3808 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3809 }
3810 void setNormalDest(BasicBlock *B) {
3811 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3812 }
3813 void setUnwindDest(BasicBlock *B) {
3814 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3815 }
3816
3817 /// Get the landingpad instruction from the landing pad
3818 /// block (the unwind destination).
3819 LandingPadInst *getLandingPadInst() const;
3820
3821 BasicBlock *getSuccessor(unsigned i) const {
3822    assert(i < 2 && "Successor # out of range for invoke!");
3823 return i == 0 ? getNormalDest() : getUnwindDest();
3824 }
3825
3826 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3827    assert(i < 2 && "Successor # out of range for invoke!");
3828 if (i == 0)
3829 setNormalDest(NewSucc);
3830 else
3831 setUnwindDest(NewSucc);
3832 }
3833
3834 unsigned getNumSuccessors() const { return 2; }
3835
3836 // Methods for support type inquiry through isa, cast, and dyn_cast:
3837 static bool classof(const Instruction *I) {
3838 return (I->getOpcode() == Instruction::Invoke);
3839 }
3840 static bool classof(const Value *V) {
3841 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3842 }
3843
3844private:
3845 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3846 // method so that subclasses cannot accidentally use it.
3847 template <typename Bitfield>
3848 void setSubclassData(typename Bitfield::Type Value) {
3849 Instruction::setSubclassData<Bitfield>(Value);
3850 }
3851};
3852
3853InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3854 BasicBlock *IfException, ArrayRef<Value *> Args,
3855 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3856 const Twine &NameStr, Instruction *InsertBefore)
3857 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3858 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3859 InsertBefore) {
3860 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3861}
3862
3863InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3864 BasicBlock *IfException, ArrayRef<Value *> Args,
3865 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3866 const Twine &NameStr, BasicBlock *InsertAtEnd)
3867 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3868 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3869 InsertAtEnd) {
3870 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3871}
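A hedged example of creating an invoke with the factory functions above; `Callee` (a FunctionCallee), `Args` (e.g. a SmallVector<Value *, 4>), `NormalBB`, `UnwindBB`, and `InsertBefore` are assumed names, not part of the analyzed source:

  InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB, Args,
                                      "call.result", InsertBefore);
  // Both destinations are ordinary successors: 0 = normal, 1 = unwind.
  assert(II->getSuccessor(0) == II->getNormalDest());
  assert(II->getSuccessor(1) == II->getUnwindDest());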
3872
3873//===----------------------------------------------------------------------===//
3874// CallBrInst Class
3875//===----------------------------------------------------------------------===//
3876
3877/// CallBr instruction, tracking function calls that may not return control but
3878/// instead transfer it to a third location. The SubclassData field is used to
3879/// hold the calling convention of the call.
3880///
3881class CallBrInst : public CallBase {
3882
3883 unsigned NumIndirectDests;
3884
3885 CallBrInst(const CallBrInst &BI);
3886
3887 /// Construct a CallBrInst given a range of arguments.
3888 ///
3889 /// Construct a CallBrInst from a range of arguments
3890 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3891 ArrayRef<BasicBlock *> IndirectDests,
3892 ArrayRef<Value *> Args,
3893 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3894 const Twine &NameStr, Instruction *InsertBefore);
3895
3896 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3897 ArrayRef<BasicBlock *> IndirectDests,
3898 ArrayRef<Value *> Args,
3899 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3900 const Twine &NameStr, BasicBlock *InsertAtEnd);
3901
3902 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3903 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3904 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3905
3906 /// Should the Indirect Destinations change, scan + update the Arg list.
3907 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3908
3909 /// Compute the number of operands to allocate.
3910 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3911 int NumBundleInputs = 0) {
3912 // We need one operand for the called function, plus our extra operands and
3913 // the input operand counts provided.
3914 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3915 }
3916
3917protected:
3918 // Note: Instruction needs to be a friend here to call cloneImpl.
3919 friend class Instruction;
3920
3921 CallBrInst *cloneImpl() const;
3922
3923public:
3924 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3925 BasicBlock *DefaultDest,
3926 ArrayRef<BasicBlock *> IndirectDests,
3927 ArrayRef<Value *> Args, const Twine &NameStr,
3928 Instruction *InsertBefore = nullptr) {
3929 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3930 return new (NumOperands)
3931 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3932 NumOperands, NameStr, InsertBefore);
3933 }
3934
3935 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3936 BasicBlock *DefaultDest,
3937 ArrayRef<BasicBlock *> IndirectDests,
3938 ArrayRef<Value *> Args,
3939 ArrayRef<OperandBundleDef> Bundles = None,
3940 const Twine &NameStr = "",
3941 Instruction *InsertBefore = nullptr) {
3942 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3943 CountBundleInputs(Bundles));
3944 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3945
3946 return new (NumOperands, DescriptorBytes)
3947 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3948 NumOperands, NameStr, InsertBefore);
3949 }
3950
3951 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3952 BasicBlock *DefaultDest,
3953 ArrayRef<BasicBlock *> IndirectDests,
3954 ArrayRef<Value *> Args, const Twine &NameStr,
3955 BasicBlock *InsertAtEnd) {
3956 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3957 return new (NumOperands)
3958 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3959 NumOperands, NameStr, InsertAtEnd);
3960 }
3961
3962 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3963 BasicBlock *DefaultDest,
3964 ArrayRef<BasicBlock *> IndirectDests,
3965 ArrayRef<Value *> Args,
3966 ArrayRef<OperandBundleDef> Bundles,
3967 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3968 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3969 CountBundleInputs(Bundles));
3970 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3971
3972 return new (NumOperands, DescriptorBytes)
3973 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3974 NumOperands, NameStr, InsertAtEnd);
3975 }
3976
3977 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3978 ArrayRef<BasicBlock *> IndirectDests,
3979 ArrayRef<Value *> Args, const Twine &NameStr,
3980 Instruction *InsertBefore = nullptr) {
3981 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3982 IndirectDests, Args, NameStr, InsertBefore);
3983 }
3984
3985 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3986 ArrayRef<BasicBlock *> IndirectDests,
3987 ArrayRef<Value *> Args,
3988 ArrayRef<OperandBundleDef> Bundles = None,
3989 const Twine &NameStr = "",
3990 Instruction *InsertBefore = nullptr) {
3991 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3992 IndirectDests, Args, Bundles, NameStr, InsertBefore);
3993 }
3994
3995 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3996 ArrayRef<BasicBlock *> IndirectDests,
3997 ArrayRef<Value *> Args, const Twine &NameStr,
3998 BasicBlock *InsertAtEnd) {
3999 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4000 IndirectDests, Args, NameStr, InsertAtEnd);
4001 }
4002
4003 static CallBrInst *Create(FunctionCallee Func,
4004 BasicBlock *DefaultDest,
4005 ArrayRef<BasicBlock *> IndirectDests,
4006 ArrayRef<Value *> Args,
4007 ArrayRef<OperandBundleDef> Bundles,
4008 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4009 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4010 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4011 }
4012
4013 /// Create a clone of \p CBI with a different set of operand bundles and
4014 /// insert it before \p InsertPt.
4015 ///
4016 /// The returned callbr instruction is identical to \p CBI in every way
4017 /// except that the operand bundles for the new instruction are set to the
4018 /// operand bundles in \p Bundles.
4019 static CallBrInst *Create(CallBrInst *CBI,
4020 ArrayRef<OperandBundleDef> Bundles,
4021 Instruction *InsertPt = nullptr);
4022
4023 /// Return the number of callbr indirect dest labels.
4024 ///
4025 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4026
4027 /// getIndirectDestLabel - Return the i-th indirect dest label.
4028 ///
4029 Value *getIndirectDestLabel(unsigned i) const {
4030    assert(i < getNumIndirectDests() && "Out of bounds!");
4031 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() +
4032 1);
4033 }
4034
4035 Value *getIndirectDestLabelUse(unsigned i) const {
4036    assert(i < getNumIndirectDests() && "Out of bounds!");
4037 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() +
4038 1);
4039 }
4040
4041 // Return the destination basic blocks...
4042 BasicBlock *getDefaultDest() const {
4043 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4044 }
4045 BasicBlock *getIndirectDest(unsigned i) const {
4046 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4047 }
4048 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4049 SmallVector<BasicBlock *, 16> IndirectDests;
4050 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4051 IndirectDests.push_back(getIndirectDest(i));
4052 return IndirectDests;
4053 }
4054 void setDefaultDest(BasicBlock *B) {
4055 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4056 }
4057 void setIndirectDest(unsigned i, BasicBlock *B) {
4058 updateArgBlockAddresses(i, B);
4059 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4060 }
4061
4062 BasicBlock *getSuccessor(unsigned i) const {
4063    assert(i < getNumSuccessors() + 1 &&
4064           "Successor # out of range for callbr!");
4065 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4066 }
4067
4068 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4069    assert(i < getNumIndirectDests() + 1 &&
4070           "Successor # out of range for callbr!");
4071 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4072 }
4073
4074 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4075
4076 // Methods for support type inquiry through isa, cast, and dyn_cast:
4077 static bool classof(const Instruction *I) {
4078 return (I->getOpcode() == Instruction::CallBr);
4079 }
4080 static bool classof(const Value *V) {
4081 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4082 }
4083
4084private:
4085 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4086 // method so that subclasses cannot accidentally use it.
4087 template <typename Bitfield>
4088 void setSubclassData(typename Bitfield::Type Value) {
4089 Instruction::setSubclassData<Bitfield>(Value);
4090 }
4091};
4092
4093CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4094 ArrayRef<BasicBlock *> IndirectDests,
4095 ArrayRef<Value *> Args,
4096 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4097 const Twine &NameStr, Instruction *InsertBefore)
4098 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4099 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4100 InsertBefore) {
4101 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4102}
4103
4104CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4105 ArrayRef<BasicBlock *> IndirectDests,
4106 ArrayRef<Value *> Args,
4107 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4108 const Twine &NameStr, BasicBlock *InsertAtEnd)
4109 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4110 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4111 InsertAtEnd) {
4112 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4113}
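A sketch of how callbr successors map onto the accessors above; `Ty`, `AsmCallee`, `Fallthrough`, `IndirectBBs`, `Args`, and `InsertBefore` are placeholder names assumed from the surrounding code:

  CallBrInst *CBI = CallBrInst::Create(Ty, AsmCallee, Fallthrough, IndirectBBs,
                                       Args, "", InsertBefore);
  // Successor 0 is the default (fallthrough) destination; the remaining
  // successors are the indirect destinations, in order.
  BasicBlock *Default = CBI->getSuccessor(0);
  (void)Default;
  for (unsigned i = 1, e = CBI->getNumSuccessors(); i != e; ++i)
    assert(CBI->getSuccessor(i) == CBI->getIndirectDest(i - 1));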
4114
4115//===----------------------------------------------------------------------===//
4116// ResumeInst Class
4117//===----------------------------------------------------------------------===//
4118
4119//===---------------------------------------------------------------------------
4120/// Resume the propagation of an exception.
4121///
4122class ResumeInst : public Instruction {
4123 ResumeInst(const ResumeInst &RI);
4124
4125 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4126 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4127
4128protected:
4129 // Note: Instruction needs to be a friend here to call cloneImpl.
4130 friend class Instruction;
4131
4132 ResumeInst *cloneImpl() const;
4133
4134public:
4135 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4136 return new(1) ResumeInst(Exn, InsertBefore);
4137 }
4138
4139 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4140 return new(1) ResumeInst(Exn, InsertAtEnd);
4141 }
4142
4143 /// Provide fast operand accessors
4144  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4145
4146 /// Convenience accessor.
4147 Value *getValue() const { return Op<0>(); }
4148
4149 unsigned getNumSuccessors() const { return 0; }
4150
4151 // Methods for support type inquiry through isa, cast, and dyn_cast:
4152 static bool classof(const Instruction *I) {
4153 return I->getOpcode() == Instruction::Resume;
4154 }
4155 static bool classof(const Value *V) {
4156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4157 }
4158
4159private:
4160 BasicBlock *getSuccessor(unsigned idx) const {
4161    llvm_unreachable("ResumeInst has no successors!");
4162 }
4163
4164 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4165    llvm_unreachable("ResumeInst has no successors!");
4166 }
4167};
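For completeness, a small hedged example of the resume terminator above; `Exn` (the aggregate produced by a landingpad) and the block `CleanupBB` are assumed placeholders:

  ResumeInst *RI = ResumeInst::Create(Exn, /*InsertAtEnd=*/CleanupBB);
  assert(RI->getNumSuccessors() == 0 && RI->getValue() == Exn);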
4168
4169template <>
4170struct OperandTraits<ResumeInst> :
4171 public FixedNumOperandTraits<ResumeInst, 1> {
4172};
4173
4174DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4175
4176//===----------------------------------------------------------------------===//
4177// CatchSwitchInst Class
4178//===----------------------------------------------------------------------===//
4179class CatchSwitchInst : public Instruction {
4180 using UnwindDestField = BoolBitfieldElementT<0>;
4181
4182 /// The number of operands actually allocated. NumOperands is
4183 /// the number actually in use.
4184 unsigned ReservedSpace;
4185
4186 // Operand[0] = Outer scope
4187 // Operand[1] = Unwind block destination
4188 // Operand[n] = BasicBlock to go to on match
4189 CatchSwitchInst(const CatchSwitchInst &CSI);
4190
4191 /// Create a new switch instruction, specifying a
4192 /// default destination. The number of additional handlers can be specified
4193 /// here to make memory allocation more efficient.
4194 /// This constructor can also autoinsert before another instruction.
4195 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4196 unsigned NumHandlers, const Twine &NameStr,
4197 Instruction *InsertBefore);
4198
4199 /// Create a new catchswitch instruction, specifying an optional
4200 /// unwind destination. The number of additional handlers can be specified
4201 /// here to make memory allocation more efficient.
4202 /// This constructor also autoinserts at the end of the specified BasicBlock.
4203 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4204 unsigned NumHandlers, const Twine &NameStr,
4205 BasicBlock *InsertAtEnd);
4206
4207 // allocate space for exactly zero operands
4208 void *operator new(size_t s) { return User::operator new(s); }
4209
4210 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4211 void growOperands(unsigned Size);
4212
4213protected:
4214 // Note: Instruction needs to be a friend here to call cloneImpl.
4215 friend class Instruction;
4216
4217 CatchSwitchInst *cloneImpl() const;
4218
4219public:
4220 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4221 unsigned NumHandlers,
4222 const Twine &NameStr = "",
4223 Instruction *InsertBefore = nullptr) {
4224 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4225 InsertBefore);
4226 }
4227
4228 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4229 unsigned NumHandlers, const Twine &NameStr,
4230 BasicBlock *InsertAtEnd) {
4231 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4232 InsertAtEnd);
4233 }
4234
4235 /// Provide fast operand accessors
4236 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4237
4238 // Accessor Methods for CatchSwitch stmt
4239 Value *getParentPad() const { return getOperand(0); }
4240 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4241
4242 // Accessor Methods for CatchSwitch stmt
4243 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4244 bool unwindsToCaller() const { return !hasUnwindDest(); }
4245 BasicBlock *getUnwindDest() const {
4246 if (hasUnwindDest())
4247 return cast<BasicBlock>(getOperand(1));
4248 return nullptr;
4249 }
4250 void setUnwindDest(BasicBlock *UnwindDest) {
4251 assert(UnwindDest);
4252 assert(hasUnwindDest());
4253 setOperand(1, UnwindDest);
4254 }
4255
4256 /// Return the number of 'handlers' in this catchswitch
4257 /// instruction, not counting the parent pad or the optional unwind destination.
4258 unsigned getNumHandlers() const {
4259 if (hasUnwindDest())
4260 return getNumOperands() - 2;
4261 return getNumOperands() - 1;
4262 }
4263
4264private:
4265 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4266 static const BasicBlock *handler_helper(const Value *V) {
4267 return cast<BasicBlock>(V);
4268 }
4269
4270public:
4271 using DerefFnTy = BasicBlock *(*)(Value *);
4272 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4273 using handler_range = iterator_range<handler_iterator>;
4274 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4275 using const_handler_iterator =
4276 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4277 using const_handler_range = iterator_range<const_handler_iterator>;
4278
4279 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4280 handler_iterator handler_begin() {
4281 op_iterator It = op_begin() + 1;
4282 if (hasUnwindDest())
4283 ++It;
4284 return handler_iterator(It, DerefFnTy(handler_helper));
4285 }
4286
4287 /// Returns an iterator that points to the first handler in the
4288 /// CatchSwitchInst.
4289 const_handler_iterator handler_begin() const {
4290 const_op_iterator It = op_begin() + 1;
4291 if (hasUnwindDest())
4292 ++It;
4293 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4294 }
4295
4296 /// Returns a read-only iterator that points one past the last
4297 /// handler in the CatchSwitchInst.
4298 handler_iterator handler_end() {
4299 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4300 }
4301
4302 /// Returns an iterator that points one past the last handler in the
4303 /// CatchSwitchInst.
4304 const_handler_iterator handler_end() const {
4305 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4306 }
4307
4308 /// iteration adapter for range-for loops.
4309 handler_range handlers() {
4310 return make_range(handler_begin(), handler_end());
4311 }
4312
4313 /// iteration adapter for range-for loops.
4314 const_handler_range handlers() const {
4315 return make_range(handler_begin(), handler_end());
4316 }
4317
4318 /// Add a handler entry to this catchswitch instruction.
4319 /// Note:
4320 /// This action invalidates handler_end(). Old handler_end() iterator will
4321 /// point to the added handler.
4322 void addHandler(BasicBlock *Dest);
4323
4324 void removeHandler(handler_iterator HI);
4325
4326 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4327 BasicBlock *getSuccessor(unsigned Idx) const {
4328 assert(Idx < getNumSuccessors() &&
4329 "Successor # out of range for catchswitch!");
4330 return cast<BasicBlock>(getOperand(Idx + 1));
4331 }
4332 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4333 assert(Idx < getNumSuccessors() &&
4334 "Successor # out of range for catchswitch!");
4335 setOperand(Idx + 1, NewSucc);
4336 }
4337
4338 // Methods for support type inquiry through isa, cast, and dyn_cast:
4339 static bool classof(const Instruction *I) {
4340 return I->getOpcode() == Instruction::CatchSwitch;
4341 }
4342 static bool classof(const Value *V) {
4343 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4344 }
4345};
4346
4347template <>
4348struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4349
4350DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4351
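As a quick illustration of the CatchSwitchInst API above (this sketch is not part of the analyzed header, and the helper name inspectCatchSwitch is invented), the following walks a catchswitch's parent pad, optional unwind destination, and handler blocks using only the public accessors declared in the class:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: visit the pieces of a catchswitch via its accessors.
static void inspectCatchSwitch(const CatchSwitchInst &CSI) {
  // Operand 0 is always the outer scope (parent pad).
  const Value *ParentPad = CSI.getParentPad();
  (void)ParentPad;

  // The unwind destination is optional; getUnwindDest() returns nullptr
  // when the catchswitch unwinds to the caller.
  if (const BasicBlock *UnwindDest = CSI.getUnwindDest())
    (void)UnwindDest;

  // handlers() skips the parent pad and the optional unwind destination,
  // yielding only the handler BasicBlocks.
  for (const BasicBlock *Handler : CSI.handlers())
    (void)Handler;
}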
4352//===----------------------------------------------------------------------===//
4353// CleanupPadInst Class
4354//===----------------------------------------------------------------------===//
4355class CleanupPadInst : public FuncletPadInst {
4356private:
4357 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4358 unsigned Values, const Twine &NameStr,
4359 Instruction *InsertBefore)
4360 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4361 NameStr, InsertBefore) {}
4362 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4363 unsigned Values, const Twine &NameStr,
4364 BasicBlock *InsertAtEnd)
4365 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4366 NameStr, InsertAtEnd) {}
4367
4368public:
4369 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4370 const Twine &NameStr = "",
4371 Instruction *InsertBefore = nullptr) {
4372 unsigned Values = 1 + Args.size();
4373 return new (Values)
4374 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4375 }
4376
4377 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4378 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4379 unsigned Values = 1 + Args.size();
4380 return new (Values)
4381 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4382 }
4383
4384 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4385 static bool classof(const Instruction *I) {
4386 return I->getOpcode() == Instruction::CleanupPad;
4387 }
4388 static bool classof(const Value *V) {
4389 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4390 }
4391};
4392
4393//===----------------------------------------------------------------------===//
4394// CatchPadInst Class
4395//===----------------------------------------------------------------------===//
4396class CatchPadInst : public FuncletPadInst {
4397private:
4398 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4399 unsigned Values, const Twine &NameStr,
4400 Instruction *InsertBefore)
4401 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4402 NameStr, InsertBefore) {}
4403 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4404 unsigned Values, const Twine &NameStr,
4405 BasicBlock *InsertAtEnd)
4406 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4407 NameStr, InsertAtEnd) {}
4408
4409public:
4410 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4411 const Twine &NameStr = "",
4412 Instruction *InsertBefore = nullptr) {
4413 unsigned Values = 1 + Args.size();
4414 return new (Values)
4415 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4416 }
4417
4418 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4419 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4420 unsigned Values = 1 + Args.size();
4421 return new (Values)
4422 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4423 }
4424
4425 /// Convenience accessors
4426 CatchSwitchInst *getCatchSwitch() const {
4427 return cast<CatchSwitchInst>(Op<-1>());
4428 }
4429 void setCatchSwitch(Value *CatchSwitch) {
4430 assert(CatchSwitch);
4431 Op<-1>() = CatchSwitch;
4432 }
4433
4434 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4435 static bool classof(const Instruction *I) {
4436 return I->getOpcode() == Instruction::CatchPad;
4437 }
4438 static bool classof(const Value *V) {
4439 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4440 }
4441};
4442
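To show how CatchPadInst relates to CatchSwitchInst, here is a minimal construction sketch. It assumes pre-existing blocks BB, HandlerBB, and UnwindBB in a function with a Windows-style EH personality; buildCatchPad is an invented name, not LLVM API, and the result is not claimed to be verifier-complete IR on its own.

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical sketch: a catchswitch in BB with one handler; the handler
// block ends up holding a catchpad that names its owning catchswitch.
static void buildCatchPad(BasicBlock *BB, BasicBlock *HandlerBB,
                          BasicBlock *UnwindBB) {
  LLVMContext &Ctx = BB->getContext();
  // The outermost funclet has no parent pad, encoded as 'token none'.
  Value *ParentPad = ConstantTokenNone::get(Ctx);

  CatchSwitchInst *CSI =
      CatchSwitchInst::Create(ParentPad, UnwindBB, /*NumHandlers=*/1,
                              "cs", BB);
  CSI->addHandler(HandlerBB);

  // The catchpad is inserted at the end of the handler block and refers
  // back to the catchswitch via its last operand (see getCatchSwitch()).
  CatchPadInst::Create(CSI, /*Args=*/None, "catch", HandlerBB);
}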
4443//===----------------------------------------------------------------------===//
4444// CatchReturnInst Class
4445//===----------------------------------------------------------------------===//
4446
4447class CatchReturnInst : public Instruction {
4448 CatchReturnInst(const CatchReturnInst &RI);
4449 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4450 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4451
4452 void init(Value *CatchPad, BasicBlock *BB);
4453
4454protected:
4455 // Note: Instruction needs to be a friend here to call cloneImpl.
4456 friend class Instruction;
4457
4458 CatchReturnInst *cloneImpl() const;
4459
4460public:
4461 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4462 Instruction *InsertBefore = nullptr) {
4463 assert(CatchPad);
4464 assert(BB);
4465 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4466 }
4467
4468 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4469 BasicBlock *InsertAtEnd) {
4470 assert(CatchPad);
4471 assert(BB);
4472 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4473 }
4474
4475 /// Provide fast operand accessors
4476 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4477
4478 /// Convenience accessors.
4479 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4480 void setCatchPad(CatchPadInst *CatchPad) {
4481 assert(CatchPad);
4482 Op<0>() = CatchPad;
4483 }
4484
4485 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4486 void setSuccessor(BasicBlock *NewSucc) {
4487 assert(NewSucc);
4488 Op<1>() = NewSucc;
4489 }
4490 unsigned getNumSuccessors() const { return 1; }
4491
4492 /// Get the parentPad of this catchret's catchpad's catchswitch.
4493 /// The successor block is implicitly a member of this funclet.
4494 Value *getCatchSwitchParentPad() const {
4495 return getCatchPad()->getCatchSwitch()->getParentPad();
4496 }
4497
4498 // Methods for support type inquiry through isa, cast, and dyn_cast:
4499 static bool classof(const Instruction *I) {
4500 return (I->getOpcode() == Instruction::CatchRet);
4501 }
4502 static bool classof(const Value *V) {
4503 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4504 }
4505
4506private:
4507 BasicBlock *getSuccessor(unsigned Idx) const {
4508 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4509 return getSuccessor();
4510 }
4511
4512 void setSuccessor(unsigned Idx, BasicBlock *B) {
4513 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4514 setSuccessor(B);
4515 }
4516};
4517
4518template <>
4519struct OperandTraits<CatchReturnInst>
4520 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4521
4522DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
4523
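A short sketch of how the CatchReturnInst accessors compose, assuming an existing catchpad CPI and a normal continuation block ContBB; finishHandler is an invented helper name:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical sketch: close a handler with 'catchret' and look through it
// to the enclosing scope of the owning catchswitch.
static Value *finishHandler(CatchPadInst *CPI, BasicBlock *ContBB) {
  BasicBlock *HandlerBB = CPI->getParent();
  CatchReturnInst *CRI = CatchReturnInst::Create(CPI, ContBB, HandlerBB);
  // Op<0> is the catchpad, Op<1> the single successor; the parent pad of
  // the catchpad's catchswitch identifies the scope being returned into.
  return CRI->getCatchSwitchParentPad();
}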
4524//===----------------------------------------------------------------------===//
4525// CleanupReturnInst Class
4526//===----------------------------------------------------------------------===//
4527
4528class CleanupReturnInst : public Instruction {
4529 using UnwindDestField = BoolBitfieldElementT<0>;
4530
4531private:
4532 CleanupReturnInst(const CleanupReturnInst &RI);
4533 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4534 Instruction *InsertBefore = nullptr);
4535 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4536 BasicBlock *InsertAtEnd);
4537
4538 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4539
4540protected:
4541 // Note: Instruction needs to be a friend here to call cloneImpl.
4542 friend class Instruction;
4543
4544 CleanupReturnInst *cloneImpl() const;
4545
4546public:
4547 static CleanupReturnInst *Create(Value *CleanupPad,
4548 BasicBlock *UnwindBB = nullptr,
4549 Instruction *InsertBefore = nullptr) {
4550 assert(CleanupPad);
4551 unsigned Values = 1;
4552 if (UnwindBB)
4553 ++Values;
4554 return new (Values)
4555 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4556 }
4557
4558 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4559 BasicBlock *InsertAtEnd) {
4560 assert(CleanupPad);
4561 unsigned Values = 1;
4562 if (UnwindBB)
4563 ++Values;
4564 return new (Values)
4565 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4566 }
4567
4568 /// Provide fast operand accessors
4569 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4570
4571 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4572 bool unwindsToCaller() const { return !hasUnwindDest(); }
4573
4574 /// Convenience accessor.
4575 CleanupPadInst *getCleanupPad() const {
4576 return cast<CleanupPadInst>(Op<0>());
4577 }
4578 void setCleanupPad(CleanupPadInst *CleanupPad) {
4579 assert(CleanupPad);
4580 Op<0>() = CleanupPad;
4581 }
4582
4583 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4584
4585 BasicBlock *getUnwindDest() const {
4586 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4587 }
4588 void setUnwindDest(BasicBlock *NewDest) {
4589 assert(NewDest);
4590 assert(hasUnwindDest());
4591 Op<1>() = NewDest;
4592 }
4593
4594 // Methods for support type inquiry through isa, cast, and dyn_cast:
4595 static bool classof(const Instruction *I) {
4596 return (I->getOpcode() == Instruction::CleanupRet);
4597 }
4598 static bool classof(const Value *V) {
4599 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4600 }
4601
4602private:
4603 BasicBlock *getSuccessor(unsigned Idx) const {
4604 assert(Idx == 0);
4605 return getUnwindDest();
4606 }
4607
4608 void setSuccessor(unsigned Idx, BasicBlock *B) {
4609 assert(Idx == 0);
4610 setUnwindDest(B);
4611 }
4612
4613 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4614 // method so that subclasses cannot accidentally use it.
4615 template <typename Bitfield>
4616 void setSubclassData(typename Bitfield::Type Value) {
4617 Instruction::setSubclassData<Bitfield>(Value);
4618 }
4619};
4620
4621template <>
4622struct OperandTraits<CleanupReturnInst>
4623 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4624
4625DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
4626
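A matching sketch for CleanupReturnInst, assuming an existing cleanuppad CPad, a block ExitBB to hold the cleanupret, and an optional UnwindBB; finishCleanup is an invented name:

#include <cassert>

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical sketch: a cleanupret with or without an unwind destination.
static void finishCleanup(CleanupPadInst *CPad, BasicBlock *ExitBB,
                          BasicBlock *UnwindBB /* may be nullptr */) {
  CleanupReturnInst *CRI = CleanupReturnInst::Create(CPad, UnwindBB, ExitBB);
  if (UnwindBB) {
    // One successor: the unwind destination stored in Op<1>.
    assert(CRI->hasUnwindDest() && CRI->getUnwindDest() == UnwindBB);
  } else {
    // Zero successors: the cleanup unwinds to the caller.
    assert(CRI->unwindsToCaller() && CRI->getNumSuccessors() == 0);
  }
}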
4627//===----------------------------------------------------------------------===//
4628// UnreachableInst Class
4629//===----------------------------------------------------------------------===//
4630
4631//===---------------------------------------------------------------------------
4632/// This instruction has undefined behavior. In particular, its
4633/// presence indicates some higher-level knowledge that the end of the
4634/// block cannot be reached.
4635///
4636class UnreachableInst : public Instruction {
4637protected:
4638 // Note: Instruction needs to be a friend here to call cloneImpl.
4639 friend class Instruction;
4640
4641 UnreachableInst *cloneImpl() const;
4642
4643public:
4644 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4645 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4646
4647 // allocate space for exactly zero operands
4648 void *operator new(size_t s) {
4649 return User::operator new(s, 0);
4650 }
4651
4652 unsigned getNumSuccessors() const { return 0; }
4653
4654 // Methods for support type inquiry through isa, cast, and dyn_cast:
4655 static bool classof(const Instruction *I) {
4656 return I->getOpcode() == Instruction::Unreachable;
4657 }
4658 static bool classof(const Value *V) {
4659 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4660 }
4661
4662private:
4663 BasicBlock *getSuccessor(unsigned idx) const {
4664 llvm_unreachable("UnreachableInst has no successors!");
4665 }
4666
4667 void setSuccessor(unsigned idx, BasicBlock *B) {
4668 llvm_unreachable("UnreachableInst has no successors!");
4669 }
4670};
4671
4672//===----------------------------------------------------------------------===//
4673// TruncInst Class
4674//===----------------------------------------------------------------------===//
4675
4676/// This class represents a truncation of integer types.
4677class TruncInst : public CastInst {
4678protected:
4679 // Note: Instruction needs to be a friend here to call cloneImpl.
4680 friend class Instruction;
4681
4682 /// Clone an identical TruncInst
4683 TruncInst *cloneImpl() const;
4684
4685public:
4686 /// Constructor with insert-before-instruction semantics
4687 TruncInst(
4688 Value *S, ///< The value to be truncated
4689 Type *Ty, ///< The (smaller) type to truncate to
4690 const Twine &NameStr = "", ///< A name for the new instruction
4691 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4692 );
4693
4694 /// Constructor with insert-at-end-of-block semantics
4695 TruncInst(
4696 Value *S, ///< The value to be truncated
4697 Type *Ty, ///< The (smaller) type to truncate to
4698 const Twine &NameStr, ///< A name for the new instruction
4699 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4700 );
4701
4702 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4703 static bool classof(const Instruction *I) {
4704 return I->getOpcode() == Trunc;
4705 }
4706 static bool classof(const Value *V) {
4707 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4708 }
4709};
4710
4711//===----------------------------------------------------------------------===//
4712// ZExtInst Class
4713//===----------------------------------------------------------------------===//
4714
4715/// This class represents zero extension of integer types.
4716class ZExtInst : public CastInst {
4717protected:
4718 // Note: Instruction needs to be a friend here to call cloneImpl.
4719 friend class Instruction;
4720
4721 /// Clone an identical ZExtInst
4722 ZExtInst *cloneImpl() const;
4723
4724public:
4725 /// Constructor with insert-before-instruction semantics
4726 ZExtInst(
4727 Value *S, ///< The value to be zero extended
4728 Type *Ty, ///< The type to zero extend to
4729 const Twine &NameStr = "", ///< A name for the new instruction
4730 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4731 );
4732
4733 /// Constructor with insert-at-end semantics.
4734 ZExtInst(
4735 Value *S, ///< The value to be zero extended
4736 Type *Ty, ///< The type to zero extend to
4737 const Twine &NameStr, ///< A name for the new instruction
4738 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4739 );
4740
4741 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4742 static bool classof(const Instruction *I) {
4743 return I->getOpcode() == ZExt;
4744 }
4745 static bool classof(const Value *V) {
4746 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4747 }
4748};
4749
4750//===----------------------------------------------------------------------===//
4751// SExtInst Class
4752//===----------------------------------------------------------------------===//
4753
4754/// This class represents a sign extension of integer types.
4755class SExtInst : public CastInst {
4756protected:
4757 // Note: Instruction needs to be a friend here to call cloneImpl.
4758 friend class Instruction;
4759
4760 /// Clone an identical SExtInst
4761 SExtInst *cloneImpl() const;
4762
4763public:
4764 /// Constructor with insert-before-instruction semantics
4765 SExtInst(
4766 Value *S, ///< The value to be sign extended
4767 Type *Ty, ///< The type to sign extend to
4768 const Twine &NameStr = "", ///< A name for the new instruction
4769 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4770 );
4771
4772 /// Constructor with insert-at-end-of-block semantics
4773 SExtInst(
4774 Value *S, ///< The value to be sign extended
4775 Type *Ty, ///< The type to sign extend to
4776 const Twine &NameStr, ///< A name for the new instruction
4777 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4778 );
4779
4780 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4781 static bool classof(const Instruction *I) {
4782 return I->getOpcode() == SExt;
4783 }
4784 static bool classof(const Value *V) {
4785 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4786 }
4787};
4788
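The classof() hooks repeated in each cast class above are what let isa<>, cast<>, and dyn_cast<> identify these instructions by opcode. A small usage sketch, with invented helper names:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical sketch: opcode-based dispatch through the classof() hooks.
static bool isIntResizeCast(const Value *V) {
  // Each isa<> call ends up in the corresponding classof() shown above.
  return isa<TruncInst>(V) || isa<ZExtInst>(V) || isa<SExtInst>(V);
}

static unsigned destBitsOrZero(const Value *V) {
  // dyn_cast<> returns nullptr when classof() rejects the value.
  if (const auto *Cast = dyn_cast<CastInst>(V))
    return Cast->getDestTy()->getScalarSizeInBits();
  return 0;
}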
4789//===----------------------------------------------------------------------===//
4790// FPTruncInst Class
4791//===----------------------------------------------------------------------===//
4792
4793/// This class represents a truncation of floating point types.
4794class FPTruncInst : public CastInst {
4795protected:
4796 // Note: Instruction needs to be a friend here to call cloneImpl.
4797 friend class Instruction;
4798
4799 /// Clone an identical FPTruncInst
4800 FPTruncInst *cloneImpl() const;
4801
4802public:
4803 /// Constructor with insert-before-instruction semantics
4804 FPTruncInst(
4805 Value *S, ///< The value to be truncated
4806 Type *Ty, ///< The type to truncate to
4807 const Twine &NameStr = "", ///< A name for the new instruction
4808 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4809 );
4810
4811 /// Constructor with insert-at-end-of-block semantics
4812 FPTruncInst(
4813 Value *S, ///< The value to be truncated
4814 Type *Ty, ///< The type to truncate to
4815 const Twine &NameStr, ///< A name for the new instruction
4816 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4817 );
4818
4819 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4820 static bool classof(const Instruction *I) {
4821 return I->getOpcode() == FPTrunc;
4822 }
4823 static bool classof(const Value *V) {
4824 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4825 }
4826};
4827
4828//===----------------------------------------------------------------------===//
4829// FPExtInst Class
4830//===----------------------------------------------------------------------===//
4831
4832/// This class represents an extension of floating point types.
4833class FPExtInst : public CastInst {
4834protected:
4835 // Note: Instruction needs to be a friend here to call cloneImpl.
4836 friend class Instruction;
4837
4838 /// Clone an identical FPExtInst
4839 FPExtInst *cloneImpl() const;
4840
4841public:
4842 /// Constructor with insert-before-instruction semantics
4843 FPExtInst(
4844 Value *S, ///< The value to be extended
4845 Type *Ty, ///< The type to extend to
4846 const Twine &NameStr = "", ///< A name for the new instruction
4847 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4848 );
4849
4850 /// Constructor with insert-at-end-of-block semantics
4851 FPExtInst(
4852 Value *S, ///< The value to be extended
4853 Type *Ty, ///< The type to extend to
4854 const Twine &NameStr, ///< A name for the new instruction
4855 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4856 );
4857
4858 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4859 static bool classof(const Instruction *I) {
4860 return I->getOpcode() == FPExt;
4861 }
4862 static bool classof(const Value *V) {
4863 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4864 }
4865};
4866
4867//===----------------------------------------------------------------------===//
4868// UIToFPInst Class
4869//===----------------------------------------------------------------------===//
4870
4871/// This class represents a cast from unsigned integer to floating point.
4872class UIToFPInst : public CastInst {
4873protected:
4874 // Note: Instruction needs to be a friend here to call cloneImpl.
4875 friend class Instruction;
4876
4877 /// Clone an identical UIToFPInst
4878 UIToFPInst *cloneImpl() const;
4879
4880public:
4881 /// Constructor with insert-before-instruction semantics
4882 UIToFPInst(
4883 Value *S, ///< The value to be converted
4884 Type *Ty, ///< The type to convert to
4885 const Twine &NameStr = "", ///< A name for the new instruction
4886 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4887 );
4888
4889 /// Constructor with insert-at-end-of-block semantics
4890 UIToFPInst(
4891 Value *S, ///< The value to be converted
4892 Type *Ty, ///< The type to convert to
4893 const Twine &NameStr, ///< A name for the new instruction
4894 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4895 );
4896
4897 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4898 static bool classof(const Instruction *I) {
4899 return I->getOpcode() == UIToFP;
4900 }
4901 static bool classof(const Value *V) {
4902 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4903 }
4904};
4905
4906//===----------------------------------------------------------------------===//
4907// SIToFPInst Class
4908//===----------------------------------------------------------------------===//
4909
4910/// This class represents a cast from signed integer to floating point.
4911class SIToFPInst : public CastInst {
4912protected:
4913 // Note: Instruction needs to be a friend here to call cloneImpl.
4914 friend class Instruction;
4915
4916 /// Clone an identical SIToFPInst
4917 SIToFPInst *cloneImpl() const;
4918
4919public:
4920 /// Constructor with insert-before-instruction semantics
4921 SIToFPInst(
4922 Value *S, ///< The value to be converted
4923 Type *Ty, ///< The type to convert to
4924 const Twine &NameStr = "", ///< A name for the new instruction
4925 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4926 );
4927
4928 /// Constructor with insert-at-end-of-block semantics
4929 SIToFPInst(
4930 Value *S, ///< The value to be converted
4931 Type *Ty, ///< The type to convert to
4932 const Twine &NameStr, ///< A name for the new instruction
4933 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4934 );
4935
4936 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4937 static bool classof(const Instruction *I) {
4938 return I->getOpcode() == SIToFP;
4939 }
4940 static bool classof(const Value *V) {
4941 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4942 }
4943};
4944
4945//===----------------------------------------------------------------------===//
4946// FPToUIInst Class
4947//===----------------------------------------------------------------------===//
4948
4949/// This class represents a cast from floating point to unsigned integer.
4950class FPToUIInst : public CastInst {
4951protected:
4952 // Note: Instruction needs to be a friend here to call cloneImpl.
4953 friend class Instruction;
4954
4955 /// Clone an identical FPToUIInst
4956 FPToUIInst *cloneImpl() const;
4957
4958public:
4959 /// Constructor with insert-before-instruction semantics
4960 FPToUIInst(
4961 Value *S, ///< The value to be converted
4962 Type *Ty, ///< The type to convert to
4963 const Twine &NameStr = "", ///< A name for the new instruction
4964 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4965 );
4966
4967 /// Constructor with insert-at-end-of-block semantics
4968 FPToUIInst(
4969 Value *S, ///< The value to be converted
4970 Type *Ty, ///< The type to convert to
4971 const Twine &NameStr, ///< A name for the new instruction
4972 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
4973 );
4974
4975 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4976 static bool classof(const Instruction *I) {
4977 return I->getOpcode() == FPToUI;
4978 }
4979 static bool classof(const Value *V) {
4980 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4981 }
4982};
4983
4984//===----------------------------------------------------------------------===//
4985// FPToSIInst Class
4986//===----------------------------------------------------------------------===//
4987
4988/// This class represents a cast from floating point to signed integer.
4989class FPToSIInst : public CastInst {
4990protected:
4991 // Note: Instruction needs to be a friend here to call cloneImpl.
4992 friend class Instruction;
4993
4994 /// Clone an identical FPToSIInst
4995 FPToSIInst *cloneImpl() const;
4996
4997public:
4998 /// Constructor with insert-before-instruction semantics
4999 FPToSIInst(
5000 Value *S, ///< The value to be converted
5001 Type *Ty, ///< The type to convert to
5002 const Twine &NameStr = "", ///< A name for the new instruction
5003 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5004 );
5005
5006 /// Constructor with insert-at-end-of-block semantics
5007 FPToSIInst(
5008 Value *S, ///< The value to be converted
5009 Type *Ty, ///< The type to convert to
5010 const Twine &NameStr, ///< A name for the new instruction
5011 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5012 );
5013
5014 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5015 static bool classof(const Instruction *I) {
5016 return I->getOpcode() == FPToSI;
5017 }
5018 static bool classof(const Value *V) {
5019 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5020 }
5021};
5022
5023//===----------------------------------------------------------------------===//
5024// IntToPtrInst Class
5025//===----------------------------------------------------------------------===//
5026
5027/// This class represents a cast from an integer to a pointer.
5028class IntToPtrInst : public CastInst {
5029public:
5030 // Note: Instruction needs to be a friend here to call cloneImpl.
5031 friend class Instruction;
5032
5033 /// Constructor with insert-before-instruction semantics
5034 IntToPtrInst(
5035 Value *S, ///< The value to be converted
5036 Type *Ty, ///< The type to convert to
5037 const Twine &NameStr = "", ///< A name for the new instruction
5038 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5039 );
5040
5041 /// Constructor with insert-at-end-of-block semantics
5042 IntToPtrInst(
5043 Value *S, ///< The value to be converted
5044 Type *Ty, ///< The type to convert to
5045 const Twine &NameStr, ///< A name for the new instruction
5046 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5047 );
5048
5049 /// Clone an identical IntToPtrInst.
5050 IntToPtrInst *cloneImpl() const;
5051
5052 /// Returns the address space of this instruction's pointer type.
5053 unsigned getAddressSpace() const {
5054 return getType()->getPointerAddressSpace();
5055 }
5056
5057 // Methods for support type inquiry through isa, cast, and dyn_cast:
5058 static bool classof(const Instruction *I) {
5059 return I->getOpcode() == IntToPtr;
5060 }
5061 static bool classof(const Value *V) {
5062 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5063 }
5064};
5065
5066//===----------------------------------------------------------------------===//
5067// PtrToIntInst Class
5068//===----------------------------------------------------------------------===//
5069
5070/// This class represents a cast from a pointer to an integer.
5071class PtrToIntInst : public CastInst {
5072protected:
5073 // Note: Instruction needs to be a friend here to call cloneImpl.
5074 friend class Instruction;
5075
5076 /// Clone an identical PtrToIntInst.
5077 PtrToIntInst *cloneImpl() const;
5078
5079public:
5080 /// Constructor with insert-before-instruction semantics
5081 PtrToIntInst(
5082 Value *S, ///< The value to be converted
5083 Type *Ty, ///< The type to convert to
5084 const Twine &NameStr = "", ///< A name for the new instruction
5085 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5086 );
5087
5088 /// Constructor with insert-at-end-of-block semantics
5089 PtrToIntInst(
5090 Value *S, ///< The value to be converted
5091 Type *Ty, ///< The type to convert to
5092 const Twine &NameStr, ///< A name for the new instruction
5093 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5094 );
5095
5096 /// Gets the pointer operand.
5097 Value *getPointerOperand() { return getOperand(0); }
5098 /// Gets the pointer operand.
5099 const Value *getPointerOperand() const { return getOperand(0); }
5100 /// Gets the operand index of the pointer operand.
5101 static unsigned getPointerOperandIndex() { return 0U; }
5102
5103 /// Returns the address space of the pointer operand.
5104 unsigned getPointerAddressSpace() const {
5105 return getPointerOperand()->getType()->getPointerAddressSpace();
5106 }
5107
5108 // Methods for support type inquiry through isa, cast, and dyn_cast:
5109 static bool classof(const Instruction *I) {
5110 return I->getOpcode() == PtrToInt;
5111 }
5112 static bool classof(const Value *V) {
5113 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5114 }
5115};
5116
5117//===----------------------------------------------------------------------===//
5118// BitCastInst Class
5119//===----------------------------------------------------------------------===//
5120
5121/// This class represents a no-op cast from one type to another.
5122class BitCastInst : public CastInst {
5123protected:
5124 // Note: Instruction needs to be a friend here to call cloneImpl.
5125 friend class Instruction;
5126
5127 /// Clone an identical BitCastInst.
5128 BitCastInst *cloneImpl() const;
5129
5130public:
5131 /// Constructor with insert-before-instruction semantics
5132 BitCastInst(
5133 Value *S, ///< The value to be casted
5134 Type *Ty, ///< The type to cast to
5135 const Twine &NameStr = "", ///< A name for the new instruction
5136 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5137 );
5138
5139 /// Constructor with insert-at-end-of-block semantics
5140 BitCastInst(
5141 Value *S, ///< The value to be casted
5142 Type *Ty, ///< The type to cast to
5143 const Twine &NameStr, ///< A name for the new instruction
5144 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5145 );
5146
5147 // Methods for support type inquiry through isa, cast, and dyn_cast:
5148 static bool classof(const Instruction *I) {
5149 return I->getOpcode() == BitCast;
5150 }
5151 static bool classof(const Value *V) {
5152 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5153 }
5154};
5155
5156//===----------------------------------------------------------------------===//
5157// AddrSpaceCastInst Class
5158//===----------------------------------------------------------------------===//
5159
5160/// This class represents a conversion between pointers from one address space
5161/// to another.
5162class AddrSpaceCastInst : public CastInst {
5163protected:
5164 // Note: Instruction needs to be a friend here to call cloneImpl.
5165 friend class Instruction;
5166
5167 /// Clone an identical AddrSpaceCastInst.
5168 AddrSpaceCastInst *cloneImpl() const;
5169
5170public:
5171 /// Constructor with insert-before-instruction semantics
5172 AddrSpaceCastInst(
5173 Value *S, ///< The value to be casted
5174 Type *Ty, ///< The type to cast to
5175 const Twine &NameStr = "", ///< A name for the new instruction
5176 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5177 );
5178
5179 /// Constructor with insert-at-end-of-block semantics
5180 AddrSpaceCastInst(
5181 Value *S, ///< The value to be casted
5182 Type *Ty, ///< The type to cast to
5183 const Twine &NameStr, ///< A name for the new instruction
5184 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5185 );
5186
5187 // Methods for support type inquiry through isa, cast, and dyn_cast:
5188 static bool classof(const Instruction *I) {
5189 return I->getOpcode() == AddrSpaceCast;
5190 }
5191 static bool classof(const Value *V) {
5192 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5193 }
5194
5195 /// Gets the pointer operand.
5196 Value *getPointerOperand() {
5197 return getOperand(0);
5198 }
5199
5200 /// Gets the pointer operand.
5201 const Value *getPointerOperand() const {
5202 return getOperand(0);
5203 }
5204
5205 /// Gets the operand index of the pointer operand.
5206 static unsigned getPointerOperandIndex() {
5207 return 0U;
5208 }
5209
5210 /// Returns the address space of the pointer operand.
5211 unsigned getSrcAddressSpace() const {
5212 return getPointerOperand()->getType()->getPointerAddressSpace();
5213 }
5214
5215 /// Returns the address space of the result.
5216 unsigned getDestAddressSpace() const {
5217 return getType()->getPointerAddressSpace();
5218 }
5219};
5220
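A tiny sketch using the two address-space accessors above (the predicate name is invented):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical sketch: compare the source and destination address spaces of
// an addrspacecast via the accessors declared above.
static bool crossesAddressSpaces(const AddrSpaceCastInst &ASC) {
  return ASC.getSrcAddressSpace() != ASC.getDestAddressSpace();
}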
5221/// A helper function that returns the pointer operand of a load or store
5222/// instruction. Returns nullptr if not load or store.
5223inline const Value *getLoadStorePointerOperand(const Value *V) {
5224 if (auto *Load = dyn_cast<LoadInst>(V))
5225 return Load->getPointerOperand();
5226 if (auto *Store = dyn_cast<StoreInst>(V))
5227 return Store->getPointerOperand();
5228 return nullptr;
5229}
5230inline Value *getLoadStorePointerOperand(Value *V) {
5231 return const_cast<Value *>(
5232 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5233}
5234
5235/// A helper function that returns the pointer operand of a load, store
5236/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5237inline const Value *getPointerOperand(const Value *V) {
5238 if (auto *Ptr = getLoadStorePointerOperand(V))
5239 return Ptr;
5240 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5241 return Gep->getPointerOperand();
5242 return nullptr;
5243}
5244inline Value *getPointerOperand(Value *V) {
5245 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5246}
5247
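To show how the two helpers above compose, here is a hedged sketch (baseAddressOfAccess is an invented name) that takes a load or store back to the base pointer of a one-level GEP address:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical sketch: from a load/store, peel one GEP layer off the address.
static const Value *baseAddressOfAccess(const Value *V) {
  const Value *Ptr = getLoadStorePointerOperand(V);
  if (!Ptr)
    return nullptr; // V is neither a load nor a store
  // getPointerOperand() also understands GEPs, so it peels one level here.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return getPointerOperand(GEP);
  return Ptr;
}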
5248/// A helper function that returns the alignment of a load or store instruction.
5249inline Align getLoadStoreAlignment(Value *I) {
5250 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5251 "Expected Load or Store instruction");
5252 if (auto *LI = dyn_cast<LoadInst>(I))
5253 return LI->getAlign();
5254 return cast<StoreInst>(I)->getAlign();
5255}
5256
5257/// A helper function that returns the address space of the pointer operand of
5258/// a load or store instruction.
5259inline unsigned getLoadStoreAddressSpace(Value *I) {
5260 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5261 "Expected Load or Store instruction");
5262 if (auto *LI = dyn_cast<LoadInst>(I))
5263 return LI->getPointerAddressSpace();
5264 return cast<StoreInst>(I)->getPointerAddressSpace();
5265}
5266
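And a matching sketch for the alignment and address-space helpers, which assert unless given a load or store (describeAccess is an invented name):

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Hypothetical sketch: query alignment and address space of a memory access.
static void describeAccess(Instruction *I) {
  if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
    return; // the two helpers below assert on anything else
  Align A = getLoadStoreAlignment(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  (void)A;
  (void)AS;
}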
5267//===----------------------------------------------------------------------===//
5268// FreezeInst Class
5269//===----------------------------------------------------------------------===//
5270
5271/// This class represents a freeze instruction, which returns an arbitrary but
5272/// fixed concrete value if its operand is either a poison value or an undef value.
5273class FreezeInst : public UnaryInstruction {
5274protected:
5275 // Note: Instruction needs to be a friend here to call cloneImpl.
5276 friend class Instruction;
5277
5278 /// Clone an identical FreezeInst
5279 FreezeInst *cloneImpl() const;
5280
5281public:
5282 explicit FreezeInst(Value *S,
5283 const Twine &NameStr = "",
5284 Instruction *InsertBefore = nullptr);
5285 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5286
5287 // Methods for support type inquiry through isa, cast, and dyn_cast:
5288 static inline bool classof(const Instruction *I) {
5289 return I->getOpcode() == Freeze;
5290 }
5291 static inline bool classof(const Value *V) {
5292 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5293 }
5294};
5295
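Finally, a one-line sketch of FreezeInst in use (freezeOperand is an invented helper):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical sketch: pin down an operand that may be poison/undef so all
// later uses observe one consistent value.
static Value *freezeOperand(Value *Op, Instruction *InsertBefore) {
  return new FreezeInst(Op, Op->getName() + ".fr", InsertBefore);
}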
5296} // end namespace llvm
5297
5298#endif // LLVM_IR_INSTRUCTIONS_H