Bug Summary

File: llvm/lib/Transforms/Scalar/LoopFlatten.cpp
Warning: line 189, column 5
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LoopFlatten.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/Transforms/Scalar -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include 
-internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-21-164211-33944-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/LoopFlatten.cpp

/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/LoopFlatten.cpp

1//===- LoopFlatten.cpp - Loop flattening pass------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass flattens pairs of nested loops into a single loop.
10//
11// The intention is to optimise loop nests like this, which together access an
12// array linearly:
13// for (int i = 0; i < N; ++i)
14// for (int j = 0; j < M; ++j)
15// f(A[i*M+j]);
16// into one loop:
17// for (int i = 0; i < (N*M); ++i)
18// f(A[i]);
19//
20// It can also flatten loops where the induction variables are not used in the
21// loop. This is only worth doing if the induction variables are only used in an
22// expression like i*M+j. If they had any other uses, we would have to insert a
23// div/mod to reconstruct the original values, so this wouldn't be profitable.
24//
25// We also need to prove that N*M will not overflow.
26//
27//===----------------------------------------------------------------------===//
28
29#include "llvm/Transforms/Scalar/LoopFlatten.h"
30#include "llvm/Analysis/AssumptionCache.h"
31#include "llvm/Analysis/LoopInfo.h"
32#include "llvm/Analysis/OptimizationRemarkEmitter.h"
33#include "llvm/Analysis/ScalarEvolution.h"
34#include "llvm/Analysis/TargetTransformInfo.h"
35#include "llvm/Analysis/ValueTracking.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/IRBuilder.h"
39#include "llvm/IR/Module.h"
40#include "llvm/IR/PatternMatch.h"
41#include "llvm/IR/Verifier.h"
42#include "llvm/InitializePasses.h"
43#include "llvm/Pass.h"
44#include "llvm/Support/Debug.h"
45#include "llvm/Support/raw_ostream.h"
46#include "llvm/Transforms/Scalar.h"
47#include "llvm/Transforms/Utils/Local.h"
48#include "llvm/Transforms/Utils/LoopUtils.h"
49#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
50#include "llvm/Transforms/Utils/SimplifyIndVar.h"
51
52#define DEBUG_TYPE"loop-flatten" "loop-flatten"
53
54using namespace llvm;
55using namespace llvm::PatternMatch;
56
57static cl::opt<unsigned> RepeatedInstructionThreshold(
58 "loop-flatten-cost-threshold", cl::Hidden, cl::init(2),
59 cl::desc("Limit on the cost of instructions that can be repeated due to "
60 "loop flattening"));
61
62static cl::opt<bool>
63 AssumeNoOverflow("loop-flatten-assume-no-overflow", cl::Hidden,
64 cl::init(false),
65 cl::desc("Assume that the product of the two iteration "
66 "limits will never overflow"));
67
68static cl::opt<bool>
69 WidenIV("loop-flatten-widen-iv", cl::Hidden,
70 cl::init(true),
71 cl::desc("Widen the loop induction variables, if possible, so "
72 "overflow checks won't reject flattening"));
73
74struct FlattenInfo {
75 Loop *OuterLoop = nullptr;
76 Loop *InnerLoop = nullptr;
77 PHINode *InnerInductionPHI = nullptr;
78 PHINode *OuterInductionPHI = nullptr;
79 Value *InnerLimit = nullptr;
80 Value *OuterLimit = nullptr;
81 BinaryOperator *InnerIncrement = nullptr;
82 BinaryOperator *OuterIncrement = nullptr;
83 BranchInst *InnerBranch = nullptr;
84 BranchInst *OuterBranch = nullptr;
85 SmallPtrSet<Value *, 4> LinearIVUses;
86 SmallPtrSet<PHINode *, 4> InnerPHIsToTransform;
87
88 // Whether this holds the flatten info before or after widening.
89 bool Widened = false;
90
91 FlattenInfo(Loop *OL, Loop *IL) : OuterLoop(OL), InnerLoop(IL) {};
92};
93
94// Finds the induction variable, increment and limit for a simple loop that we
95// can flatten.
96static bool findLoopComponents(
97 Loop *L, SmallPtrSetImpl<Instruction *> &IterationInstructions,
98 PHINode *&InductionPHI, Value *&Limit, BinaryOperator *&Increment,
99 BranchInst *&BackBranch, ScalarEvolution *SE) {
100 LLVM_DEBUG(dbgs() << "Finding components of loop: " << L->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Finding components of loop: "
<< L->getName() << "\n"; } } while (false)
;
1
Assuming 'DebugFlag' is false
2
Loop condition is false. Exiting loop
101
102 if (!L->isLoopSimplifyForm()) {
3
Assuming the condition is false
4
Taking false branch
103 LLVM_DEBUG(dbgs() << "Loop is not in normal form\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Loop is not in normal form\n"
; } } while (false)
;
104 return false;
105 }
106
107 // There must be exactly one exiting block, and it must be the same at the
108 // latch.
109 BasicBlock *Latch = L->getLoopLatch();
110 if (L->getExitingBlock() != Latch) {
5
Assuming the condition is false
6
Taking false branch
111 LLVM_DEBUG(dbgs() << "Exiting and latch block are different\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Exiting and latch block are different\n"
; } } while (false)
;
112 return false;
113 }
114 // Latch block must end in a conditional branch.
115 BackBranch = dyn_cast<BranchInst>(Latch->getTerminator());
7
Assuming the object is a 'BranchInst'
116 if (!BackBranch
7.1
'BackBranch' is non-null
7.1
'BackBranch' is non-null
7.1
'BackBranch' is non-null
|| !BackBranch->isConditional()) {
8
Calling 'BranchInst::isConditional'
11
Returning from 'BranchInst::isConditional'
12
Taking false branch
117 LLVM_DEBUG(dbgs() << "Could not find back-branch\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Could not find back-branch\n"
; } } while (false)
;
118 return false;
119 }
120 IterationInstructions.insert(BackBranch);
121 LLVM_DEBUG(dbgs() << "Found back branch: "; BackBranch->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Found back branch: "; BackBranch
->dump(); } } while (false)
;
13
Assuming 'DebugFlag' is false
14
Loop condition is false. Exiting loop
122 bool ContinueOnTrue = L->contains(BackBranch->getSuccessor(0));
123
124 // Find the induction PHI. If there is no induction PHI, we can't do the
125 // transformation. TODO: could other variables trigger this? Do we have to
126 // search for the best one?
127 InductionPHI = nullptr;
128 for (PHINode &PHI : L->getHeader()->phis()) {
129 InductionDescriptor ID;
130 if (InductionDescriptor::isInductionPHI(&PHI, L, SE, ID)) {
15
Assuming the condition is true
16
Taking true branch
131 InductionPHI = &PHI;
132 LLVM_DEBUG(dbgs() << "Found induction PHI: "; InductionPHI->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Found induction PHI: "; InductionPHI
->dump(); } } while (false)
;
17
Assuming 'DebugFlag' is false
18
Loop condition is false. Exiting loop
133 break;
19
Execution continues on line 136
134 }
135 }
136 if (!InductionPHI
19.1
'InductionPHI' is non-null
19.1
'InductionPHI' is non-null
19.1
'InductionPHI' is non-null
) {
20
Taking false branch
137 LLVM_DEBUG(dbgs() << "Could not find induction PHI\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Could not find induction PHI\n"
; } } while (false)
;
138 return false;
139 }
140
141 auto IsValidPredicate = [&](ICmpInst::Predicate Pred) {
142 if (ContinueOnTrue)
23
Assuming 'ContinueOnTrue' is false
24
Taking false branch
143 return Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT;
144 else
145 return Pred == CmpInst::ICMP_EQ;
25
Assuming 'Pred' is equal to ICMP_EQ
26
Returning the value 1, which participates in a condition later
146 };
147
148 // Find Compare and make sure it is valid
149 ICmpInst *Compare = dyn_cast<ICmpInst>(BackBranch->getCondition());
21
Assuming the object is a 'ICmpInst'
150 if (!Compare
21.1
'Compare' is non-null
21.1
'Compare' is non-null
21.1
'Compare' is non-null
|| !IsValidPredicate(Compare->getUnsignedPredicate()) ||
22
Calling 'operator()'
27
Returning from 'operator()'
29
Taking false branch
151 Compare->hasNUsesOrMore(2)) {
28
Assuming the condition is false
152 LLVM_DEBUG(dbgs() << "Could not find valid comparison\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Could not find valid comparison\n"
; } } while (false)
;
153 return false;
154 }
155 IterationInstructions.insert(Compare);
156 LLVM_DEBUG(dbgs() << "Found comparison: "; Compare->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Found comparison: "; Compare
->dump(); } } while (false)
;
30
Assuming 'DebugFlag' is false
31
Loop condition is false. Exiting loop
157
158 // Find increment and limit from the compare
159 Increment = nullptr;
160 if (match(Compare->getOperand(0),
32
Calling 'match<llvm::Value, llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::specificval_ty, llvm::PatternMatch::constantint_match<1>, 13, true>>'
40
Returning from 'match<llvm::Value, llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::specificval_ty, llvm::PatternMatch::constantint_match<1>, 13, true>>'
41
Taking true branch
161 m_c_Add(m_Specific(InductionPHI), m_ConstantInt<1>()))) {
162 Increment = dyn_cast<BinaryOperator>(Compare->getOperand(0));
42
The object is a 'BinaryOperator'
163 Limit = Compare->getOperand(1);
164 } else if (Compare->getUnsignedPredicate() == CmpInst::ICMP_NE &&
165 match(Compare->getOperand(1),
166 m_c_Add(m_Specific(InductionPHI), m_ConstantInt<1>()))) {
167 Increment = dyn_cast<BinaryOperator>(Compare->getOperand(1));
168 Limit = Compare->getOperand(0);
169 }
170 if (!Increment
42.1
'Increment' is non-null
42.1
'Increment' is non-null
42.1
'Increment' is non-null
|| Increment->hasNUsesOrMore(3)) {
43
Assuming the condition is false
44
Taking false branch
171 LLVM_DEBUG(dbgs() << "Cound not find valid increment\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Cound not find valid increment\n"
; } } while (false)
;
172 return false;
173 }
174 IterationInstructions.insert(Increment);
175 LLVM_DEBUG(dbgs() << "Found increment: "; Increment->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Found increment: "; Increment
->dump(); } } while (false)
;
45
Assuming 'DebugFlag' is false
46
Loop condition is false. Exiting loop
176 LLVM_DEBUG(dbgs() << "Found limit: "; Limit->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Found limit: "; Limit->
dump(); } } while (false)
;
47
Loop condition is false. Exiting loop
177
178 assert(InductionPHI->getNumIncomingValues() == 2)(static_cast <bool> (InductionPHI->getNumIncomingValues
() == 2) ? void (0) : __assert_fail ("InductionPHI->getNumIncomingValues() == 2"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/LoopFlatten.cpp"
, 178, __extension__ __PRETTY_FUNCTION__))
;
48
Assuming the condition is true
49
'?' condition is true
179
180 if (InductionPHI->getIncomingValueForBlock(Latch) != Increment) {
50
Assuming the condition is false
51
Taking false branch
181 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Incoming value from latch is not the increment inst\n"
; } } while (false)
182 dbgs() << "Incoming value from latch is not the increment inst\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Incoming value from latch is not the increment inst\n"
; } } while (false)
;
183 return false;
184 }
185
186 auto *CI = dyn_cast<ConstantInt>(
52
Assuming the object is not a 'ConstantInt'
53
'CI' initialized to a null pointer value
187 InductionPHI->getIncomingValueForBlock(L->getLoopPreheader()));
188 if (!CI
53.1
'CI' is null
53.1
'CI' is null
53.1
'CI' is null
|| !CI->isZero()) {
189 LLVM_DEBUG(dbgs() << "PHI value is not zero: "; CI->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "PHI value is not zero: "
; CI->dump(); } } while (false)
;
54
Assuming 'DebugFlag' is true
55
Assuming the condition is true
56
Taking true branch
57
Called C++ object pointer is null
190 return false;
191 }
192
193 LLVM_DEBUG(dbgs() << "Successfully found all loop components\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Successfully found all loop components\n"
; } } while (false)
;
194 return true;
195}
196
197static bool checkPHIs(FlattenInfo &FI, const TargetTransformInfo *TTI) {
198 // All PHIs in the inner and outer headers must either be:
199 // - The induction PHI, which we are going to rewrite as one induction in
200 // the new loop. This is already checked by findLoopComponents.
201 // - An outer header PHI with all incoming values from outside the loop.
202 // LoopSimplify guarantees we have a pre-header, so we don't need to
203 // worry about that here.
204 // - Pairs of PHIs in the inner and outer headers, which implement a
205 // loop-carried dependency that will still be valid in the new loop. To
206 // be valid, this variable must be modified only in the inner loop.
207
208 // The set of PHI nodes in the outer loop header that we know will still be
209 // valid after the transformation. These will not need to be modified (with
210 // the exception of the induction variable), but we do need to check that
211 // there are no unsafe PHI nodes.
212 SmallPtrSet<PHINode *, 4> SafeOuterPHIs;
213 SafeOuterPHIs.insert(FI.OuterInductionPHI);
214
215 // Check that all PHI nodes in the inner loop header match one of the valid
216 // patterns.
217 for (PHINode &InnerPHI : FI.InnerLoop->getHeader()->phis()) {
218 // The induction PHIs break these rules, and that's OK because we treat
219 // them specially when doing the transformation.
220 if (&InnerPHI == FI.InnerInductionPHI)
221 continue;
222
223 // Each inner loop PHI node must have two incoming values/blocks - one
224 // from the pre-header, and one from the latch.
225 assert(InnerPHI.getNumIncomingValues() == 2)(static_cast <bool> (InnerPHI.getNumIncomingValues() ==
2) ? void (0) : __assert_fail ("InnerPHI.getNumIncomingValues() == 2"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/LoopFlatten.cpp"
, 225, __extension__ __PRETTY_FUNCTION__))
;
226 Value *PreHeaderValue =
227 InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopPreheader());
228 Value *LatchValue =
229 InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopLatch());
230
231 // The incoming value from the outer loop must be the PHI node in the
232 // outer loop header, with no modifications made in the top of the outer
233 // loop.
234 PHINode *OuterPHI = dyn_cast<PHINode>(PreHeaderValue);
235 if (!OuterPHI || OuterPHI->getParent() != FI.OuterLoop->getHeader()) {
236 LLVM_DEBUG(dbgs() << "value modified in top of outer loop\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "value modified in top of outer loop\n"
; } } while (false)
;
237 return false;
238 }
239
240 // The other incoming value must come from the inner loop, without any
241 // modifications in the tail end of the outer loop. We are in LCSSA form,
242 // so this will actually be a PHI in the inner loop's exit block, which
243 // only uses values from inside the inner loop.
244 PHINode *LCSSAPHI = dyn_cast<PHINode>(
245 OuterPHI->getIncomingValueForBlock(FI.OuterLoop->getLoopLatch()));
246 if (!LCSSAPHI) {
247 LLVM_DEBUG(dbgs() << "could not find LCSSA PHI\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "could not find LCSSA PHI\n"
; } } while (false)
;
248 return false;
249 }
250
251 // The value used by the LCSSA PHI must be the same one that the inner
252 // loop's PHI uses.
253 if (LCSSAPHI->hasConstantValue() != LatchValue) {
254 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "LCSSA PHI incoming value does not match latch value\n"
; } } while (false)
255 dbgs() << "LCSSA PHI incoming value does not match latch value\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "LCSSA PHI incoming value does not match latch value\n"
; } } while (false)
;
256 return false;
257 }
258
259 LLVM_DEBUG(dbgs() << "PHI pair is safe:\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "PHI pair is safe:\n"; } }
while (false)
;
260 LLVM_DEBUG(dbgs() << " Inner: "; InnerPHI.dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << " Inner: "; InnerPHI.dump
(); } } while (false)
;
261 LLVM_DEBUG(dbgs() << " Outer: "; OuterPHI->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << " Outer: "; OuterPHI->
dump(); } } while (false)
;
262 SafeOuterPHIs.insert(OuterPHI);
263 FI.InnerPHIsToTransform.insert(&InnerPHI);
264 }
265
266 for (PHINode &OuterPHI : FI.OuterLoop->getHeader()->phis()) {
267 if (!SafeOuterPHIs.count(&OuterPHI)) {
268 LLVM_DEBUG(dbgs() << "found unsafe PHI in outer loop: "; OuterPHI.dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "found unsafe PHI in outer loop: "
; OuterPHI.dump(); } } while (false)
;
269 return false;
270 }
271 }
272
273 LLVM_DEBUG(dbgs() << "checkPHIs: OK\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkPHIs: OK\n"; } } while
(false)
;
274 return true;
275}
276
277static bool
278checkOuterLoopInsts(FlattenInfo &FI,
279 SmallPtrSetImpl<Instruction *> &IterationInstructions,
280 const TargetTransformInfo *TTI) {
281 // Check for instructions in the outer but not inner loop. If any of these
282 // have side-effects then this transformation is not legal, and if there is
283 // a significant amount of code here which can't be optimised out that it's
284 // not profitable (as these instructions would get executed for each
285 // iteration of the inner loop).
286 InstructionCost RepeatedInstrCost = 0;
287 for (auto *B : FI.OuterLoop->getBlocks()) {
288 if (FI.InnerLoop->contains(B))
289 continue;
290
291 for (auto &I : *B) {
292 if (!isa<PHINode>(&I) && !I.isTerminator() &&
293 !isSafeToSpeculativelyExecute(&I)) {
294 LLVM_DEBUG(dbgs() << "Cannot flatten because instruction may have "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Cannot flatten because instruction may have "
"side effects: "; I.dump(); } } while (false)
295 "side effects: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Cannot flatten because instruction may have "
"side effects: "; I.dump(); } } while (false)
296 I.dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Cannot flatten because instruction may have "
"side effects: "; I.dump(); } } while (false)
;
297 return false;
298 }
299 // The execution count of the outer loop's iteration instructions
300 // (increment, compare and branch) will be increased, but the
301 // equivalent instructions will be removed from the inner loop, so
302 // they make a net difference of zero.
303 if (IterationInstructions.count(&I))
304 continue;
305 // The uncoditional branch to the inner loop's header will turn into
306 // a fall-through, so adds no cost.
307 BranchInst *Br = dyn_cast<BranchInst>(&I);
308 if (Br && Br->isUnconditional() &&
309 Br->getSuccessor(0) == FI.InnerLoop->getHeader())
310 continue;
311 // Multiplies of the outer iteration variable and inner iteration
312 // count will be optimised out.
313 if (match(&I, m_c_Mul(m_Specific(FI.OuterInductionPHI),
314 m_Specific(FI.InnerLimit))))
315 continue;
316 InstructionCost Cost =
317 TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
318 LLVM_DEBUG(dbgs() << "Cost " << Cost << ": "; I.dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Cost " << Cost <<
": "; I.dump(); } } while (false)
;
319 RepeatedInstrCost += Cost;
320 }
321 }
322
323 LLVM_DEBUG(dbgs() << "Cost of instructions that will be repeated: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Cost of instructions that will be repeated: "
<< RepeatedInstrCost << "\n"; } } while (false)
324 << RepeatedInstrCost << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Cost of instructions that will be repeated: "
<< RepeatedInstrCost << "\n"; } } while (false)
;
325 // Bail out if flattening the loops would cause instructions in the outer
326 // loop but not in the inner loop to be executed extra times.
327 if (RepeatedInstrCost > RepeatedInstructionThreshold) {
328 LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: not profitable, bailing.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkOuterLoopInsts: not profitable, bailing.\n"
; } } while (false)
;
329 return false;
330 }
331
332 LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: OK\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkOuterLoopInsts: OK\n"
; } } while (false)
;
333 return true;
334}
335
336static bool checkIVUsers(FlattenInfo &FI) {
337 // We require all uses of both induction variables to match this pattern:
338 //
339 // (OuterPHI * InnerLimit) + InnerPHI
340 //
341 // Any uses of the induction variables not matching that pattern would
342 // require a div/mod to reconstruct in the flattened loop, so the
343 // transformation wouldn't be profitable.
344
345 Value *InnerLimit = FI.InnerLimit;
346 if (FI.Widened &&
347 (isa<SExtInst>(InnerLimit) || isa<ZExtInst>(InnerLimit)))
348 InnerLimit = cast<Instruction>(InnerLimit)->getOperand(0);
349
350 // Check that all uses of the inner loop's induction variable match the
351 // expected pattern, recording the uses of the outer IV.
352 SmallPtrSet<Value *, 4> ValidOuterPHIUses;
353 for (User *U : FI.InnerInductionPHI->users()) {
354 if (U == FI.InnerIncrement)
355 continue;
356
357 // After widening the IVs, a trunc instruction might have been introduced, so
358 // look through truncs.
359 if (isa<TruncInst>(U)) {
360 if (!U->hasOneUse())
361 return false;
362 U = *U->user_begin();
363 }
364
365 LLVM_DEBUG(dbgs() << "Found use of inner induction variable: "; U->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Found use of inner induction variable: "
; U->dump(); } } while (false)
;
366
367 Value *MatchedMul;
368 Value *MatchedItCount;
369 bool IsAdd = match(U, m_c_Add(m_Specific(FI.InnerInductionPHI),
370 m_Value(MatchedMul))) &&
371 match(MatchedMul, m_c_Mul(m_Specific(FI.OuterInductionPHI),
372 m_Value(MatchedItCount)));
373
374 // Matches the same pattern as above, except it also looks for truncs
375 // on the phi, which can be the result of widening the induction variables.
376 bool IsAddTrunc = match(U, m_c_Add(m_Trunc(m_Specific(FI.InnerInductionPHI)),
377 m_Value(MatchedMul))) &&
378 match(MatchedMul,
379 m_c_Mul(m_Trunc(m_Specific(FI.OuterInductionPHI)),
380 m_Value(MatchedItCount)));
381
382 if ((IsAdd || IsAddTrunc) && MatchedItCount == InnerLimit) {
383 LLVM_DEBUG(dbgs() << "Use is optimisable\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Use is optimisable\n"; }
} while (false)
;
384 ValidOuterPHIUses.insert(MatchedMul);
385 FI.LinearIVUses.insert(U);
386 } else {
387 LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Did not match expected pattern, bailing\n"
; } } while (false)
;
388 return false;
389 }
390 }
391
392 // Check that there are no uses of the outer IV other than the ones found
393 // as part of the pattern above.
394 for (User *U : FI.OuterInductionPHI->users()) {
395 if (U == FI.OuterIncrement)
396 continue;
397
398 auto IsValidOuterPHIUses = [&] (User *U) -> bool {
399 LLVM_DEBUG(dbgs() << "Found use of outer induction variable: "; U->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Found use of outer induction variable: "
; U->dump(); } } while (false)
;
400 if (!ValidOuterPHIUses.count(U)) {
401 LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Did not match expected pattern, bailing\n"
; } } while (false)
;
402 return false;
403 }
404 LLVM_DEBUG(dbgs() << "Use is optimisable\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Use is optimisable\n"; }
} while (false)
;
405 return true;
406 };
407
408 if (auto *V = dyn_cast<TruncInst>(U)) {
409 for (auto *K : V->users()) {
410 if (!IsValidOuterPHIUses(K))
411 return false;
412 }
413 continue;
414 }
415
416 if (!IsValidOuterPHIUses(U))
417 return false;
418 }
419
420 LLVM_DEBUG(dbgs() << "checkIVUsers: OK\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkIVUsers: OK\n"; dbgs
() << "Found " << FI.LinearIVUses.size() <<
" value(s) that can be replaced:\n"; for (Value *V : FI.LinearIVUses
) { dbgs() << " "; V->dump(); }; } } while (false)
421 dbgs() << "Found " << FI.LinearIVUses.size()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkIVUsers: OK\n"; dbgs
() << "Found " << FI.LinearIVUses.size() <<
" value(s) that can be replaced:\n"; for (Value *V : FI.LinearIVUses
) { dbgs() << " "; V->dump(); }; } } while (false)
422 << " value(s) that can be replaced:\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkIVUsers: OK\n"; dbgs
() << "Found " << FI.LinearIVUses.size() <<
" value(s) that can be replaced:\n"; for (Value *V : FI.LinearIVUses
) { dbgs() << " "; V->dump(); }; } } while (false)
423 for (Value *V : FI.LinearIVUses) {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkIVUsers: OK\n"; dbgs
() << "Found " << FI.LinearIVUses.size() <<
" value(s) that can be replaced:\n"; for (Value *V : FI.LinearIVUses
) { dbgs() << " "; V->dump(); }; } } while (false)
424 dbgs() << " ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkIVUsers: OK\n"; dbgs
() << "Found " << FI.LinearIVUses.size() <<
" value(s) that can be replaced:\n"; for (Value *V : FI.LinearIVUses
) { dbgs() << " "; V->dump(); }; } } while (false)
425 V->dump();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkIVUsers: OK\n"; dbgs
() << "Found " << FI.LinearIVUses.size() <<
" value(s) that can be replaced:\n"; for (Value *V : FI.LinearIVUses
) { dbgs() << " "; V->dump(); }; } } while (false)
426 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "checkIVUsers: OK\n"; dbgs
() << "Found " << FI.LinearIVUses.size() <<
" value(s) that can be replaced:\n"; for (Value *V : FI.LinearIVUses
) { dbgs() << " "; V->dump(); }; } } while (false)
;
427 return true;
428}
429
430// Return an OverflowResult dependant on if overflow of the multiplication of
431// InnerLimit and OuterLimit can be assumed not to happen.
432static OverflowResult checkOverflow(FlattenInfo &FI, DominatorTree *DT,
433 AssumptionCache *AC) {
434 Function *F = FI.OuterLoop->getHeader()->getParent();
435 const DataLayout &DL = F->getParent()->getDataLayout();
436
437 // For debugging/testing.
438 if (AssumeNoOverflow)
439 return OverflowResult::NeverOverflows;
440
441 // Check if the multiply could not overflow due to known ranges of the
442 // input values.
443 OverflowResult OR = computeOverflowForUnsignedMul(
444 FI.InnerLimit, FI.OuterLimit, DL, AC,
445 FI.OuterLoop->getLoopPreheader()->getTerminator(), DT);
446 if (OR != OverflowResult::MayOverflow)
447 return OR;
448
449 for (Value *V : FI.LinearIVUses) {
450 for (Value *U : V->users()) {
451 if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
452 // The IV is used as the operand of a GEP, and the IV is at least as
453 // wide as the address space of the GEP. In this case, the GEP would
454 // wrap around the address space before the IV increment wraps, which
455 // would be UB.
456 if (GEP->isInBounds() &&
457 V->getType()->getIntegerBitWidth() >=
458 DL.getPointerTypeSizeInBits(GEP->getType())) {
459 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "use of linear IV would be UB if overflow occurred: "
; GEP->dump(); } } while (false)
460 dbgs() << "use of linear IV would be UB if overflow occurred: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "use of linear IV would be UB if overflow occurred: "
; GEP->dump(); } } while (false)
461 GEP->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "use of linear IV would be UB if overflow occurred: "
; GEP->dump(); } } while (false)
;
462 return OverflowResult::NeverOverflows;
463 }
464 }
465 }
466 }
467
468 return OverflowResult::MayOverflow;
469}
470
471static bool CanFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
472 ScalarEvolution *SE, AssumptionCache *AC,
473 const TargetTransformInfo *TTI) {
474 SmallPtrSet<Instruction *, 8> IterationInstructions;
475 if (!findLoopComponents(FI.InnerLoop, IterationInstructions, FI.InnerInductionPHI,
476 FI.InnerLimit, FI.InnerIncrement, FI.InnerBranch, SE))
477 return false;
478 if (!findLoopComponents(FI.OuterLoop, IterationInstructions, FI.OuterInductionPHI,
479 FI.OuterLimit, FI.OuterIncrement, FI.OuterBranch, SE))
480 return false;
481
482 // Both of the loop limit values must be invariant in the outer loop
483 // (non-instructions are all inherently invariant).
484 if (!FI.OuterLoop->isLoopInvariant(FI.InnerLimit)) {
485 LLVM_DEBUG(dbgs() << "inner loop limit not invariant\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "inner loop limit not invariant\n"
; } } while (false)
;
486 return false;
487 }
488 if (!FI.OuterLoop->isLoopInvariant(FI.OuterLimit)) {
489 LLVM_DEBUG(dbgs() << "outer loop limit not invariant\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "outer loop limit not invariant\n"
; } } while (false)
;
490 return false;
491 }
492
493 if (!checkPHIs(FI, TTI))
494 return false;
495
496 // FIXME: it should be possible to handle different types correctly.
497 if (FI.InnerInductionPHI->getType() != FI.OuterInductionPHI->getType())
498 return false;
499
500 if (!checkOuterLoopInsts(FI, IterationInstructions, TTI))
501 return false;
502
503 // Find the values in the loop that can be replaced with the linearized
504 // induction variable, and check that there are no other uses of the inner
505 // or outer induction variable. If there were, we could still do this
506 // transformation, but we'd have to insert a div/mod to calculate the
507 // original IVs, so it wouldn't be profitable.
508 if (!checkIVUsers(FI))
509 return false;
510
511 LLVM_DEBUG(dbgs() << "CanFlattenLoopPair: OK\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "CanFlattenLoopPair: OK\n"
; } } while (false)
;
512 return true;
513}
514
515static bool DoFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
516 ScalarEvolution *SE, AssumptionCache *AC,
517 const TargetTransformInfo *TTI) {
518 Function *F = FI.OuterLoop->getHeader()->getParent();
519 LLVM_DEBUG(dbgs() << "Checks all passed, doing the transformation\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Checks all passed, doing the transformation\n"
; } } while (false)
;
520 {
521 using namespace ore;
522 OptimizationRemark Remark(DEBUG_TYPE"loop-flatten", "Flattened", FI.InnerLoop->getStartLoc(),
523 FI.InnerLoop->getHeader());
524 OptimizationRemarkEmitter ORE(F);
525 Remark << "Flattened into outer loop";
526 ORE.emit(Remark);
527 }
528
529 Value *NewTripCount =
530 BinaryOperator::CreateMul(FI.InnerLimit, FI.OuterLimit, "flatten.tripcount",
531 FI.OuterLoop->getLoopPreheader()->getTerminator());
532 LLVM_DEBUG(dbgs() << "Created new trip count in preheader: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Created new trip count in preheader: "
; NewTripCount->dump(); } } while (false)
533 NewTripCount->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Created new trip count in preheader: "
; NewTripCount->dump(); } } while (false)
;
534
535 // Fix up PHI nodes that take values from the inner loop back-edge, which
536 // we are about to remove.
537 FI.InnerInductionPHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());
538
539 // The old Phi will be optimised away later, but for now we can't leave
540 // leave it in an invalid state, so are updating them too.
541 for (PHINode *PHI : FI.InnerPHIsToTransform)
542 PHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());
543
544 // Modify the trip count of the outer loop to be the product of the two
545 // trip counts.
546 cast<User>(FI.OuterBranch->getCondition())->setOperand(1, NewTripCount);
547
548 // Replace the inner loop backedge with an unconditional branch to the exit.
549 BasicBlock *InnerExitBlock = FI.InnerLoop->getExitBlock();
550 BasicBlock *InnerExitingBlock = FI.InnerLoop->getExitingBlock();
551 InnerExitingBlock->getTerminator()->eraseFromParent();
552 BranchInst::Create(InnerExitBlock, InnerExitingBlock);
553 DT->deleteEdge(InnerExitingBlock, FI.InnerLoop->getHeader());
554
555 // Replace all uses of the polynomial calculated from the two induction
556 // variables with the one new one.
557 IRBuilder<> Builder(FI.OuterInductionPHI->getParent()->getTerminator());
558 for (Value *V : FI.LinearIVUses) {
559 Value *OuterValue = FI.OuterInductionPHI;
560 if (FI.Widened)
561 OuterValue = Builder.CreateTrunc(FI.OuterInductionPHI, V->getType(),
562 "flatten.trunciv");
563
564 LLVM_DEBUG(dbgs() << "Replacing: "; V->dump();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Replacing: "; V->dump
(); dbgs() << "with: "; OuterValue->dump(); } }
while (false)
565 dbgs() << "with: "; OuterValue->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Replacing: "; V->dump
(); dbgs() << "with: "; OuterValue->dump(); } }
while (false)
;
566 V->replaceAllUsesWith(OuterValue);
567 }
568
569 // Tell LoopInfo, SCEV and the pass manager that the inner loop has been
570 // deleted, and any information that have about the outer loop invalidated.
571 SE->forgetLoop(FI.OuterLoop);
572 SE->forgetLoop(FI.InnerLoop);
573 LI->erase(FI.InnerLoop);
574 return true;
575}
576
577static bool CanWidenIV(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
578 ScalarEvolution *SE, AssumptionCache *AC,
579 const TargetTransformInfo *TTI) {
580 if (!WidenIV) {
581 LLVM_DEBUG(dbgs() << "Widening the IVs is disabled\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Widening the IVs is disabled\n"
; } } while (false)
;
582 return false;
583 }
584
585 LLVM_DEBUG(dbgs() << "Try widening the IVs\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Try widening the IVs\n";
} } while (false)
;
586 Module *M = FI.InnerLoop->getHeader()->getParent()->getParent();
587 auto &DL = M->getDataLayout();
588 auto *InnerType = FI.InnerInductionPHI->getType();
589 auto *OuterType = FI.OuterInductionPHI->getType();
590 unsigned MaxLegalSize = DL.getLargestLegalIntTypeSizeInBits();
591 auto *MaxLegalType = DL.getLargestLegalIntType(M->getContext());
592
593 // If both induction types are less than the maximum legal integer width,
594 // promote both to the widest type available so we know calculating
595 // (OuterLimit * InnerLimit) as the new trip count is safe.
596 if (InnerType != OuterType ||
597 InnerType->getScalarSizeInBits() >= MaxLegalSize ||
598 MaxLegalType->getScalarSizeInBits() < InnerType->getScalarSizeInBits() * 2) {
599 LLVM_DEBUG(dbgs() << "Can't widen the IV\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Can't widen the IV\n"; }
} while (false)
;
600 return false;
601 }
602
603 SCEVExpander Rewriter(*SE, DL, "loopflatten");
604 SmallVector<WideIVInfo, 2> WideIVs;
605 SmallVector<WeakTrackingVH, 4> DeadInsts;
606 WideIVs.push_back( {FI.InnerInductionPHI, MaxLegalType, false });
607 WideIVs.push_back( {FI.OuterInductionPHI, MaxLegalType, false });
608 unsigned ElimExt = 0;
609 unsigned Widened = 0;
610
611 for (const auto &WideIV : WideIVs) {
612 PHINode *WidePhi = createWideIV(WideIV, LI, SE, Rewriter, DT, DeadInsts,
613 ElimExt, Widened, true /* HasGuards */,
614 true /* UsePostIncrementRanges */);
615 if (!WidePhi)
616 return false;
617 LLVM_DEBUG(dbgs() << "Created wide phi: "; WidePhi->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Created wide phi: "; WidePhi
->dump(); } } while (false)
;
618 LLVM_DEBUG(dbgs() << "Deleting old phi: "; WideIV.NarrowIV->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Deleting old phi: "; WideIV
.NarrowIV->dump(); } } while (false)
;
619 RecursivelyDeleteDeadPHINode(WideIV.NarrowIV);
620 }
621 // After widening, rediscover all the loop components.
622 assert(Widened && "Widened IV expected")(static_cast <bool> (Widened && "Widened IV expected"
) ? void (0) : __assert_fail ("Widened && \"Widened IV expected\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/LoopFlatten.cpp"
, 622, __extension__ __PRETTY_FUNCTION__))
;
623 FI.Widened = true;
624 return CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI);
625}
626
627static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
628 ScalarEvolution *SE, AssumptionCache *AC,
629 const TargetTransformInfo *TTI) {
630 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Loop flattening running on outer loop "
<< FI.OuterLoop->getHeader()->getName() <<
" and inner loop " << FI.InnerLoop->getHeader()->
getName() << " in " << FI.OuterLoop->getHeader
()->getParent()->getName() << "\n"; } } while (false
)
631 dbgs() << "Loop flattening running on outer loop "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Loop flattening running on outer loop "
<< FI.OuterLoop->getHeader()->getName() <<
" and inner loop " << FI.InnerLoop->getHeader()->
getName() << " in " << FI.OuterLoop->getHeader
()->getParent()->getName() << "\n"; } } while (false
)
632 << FI.OuterLoop->getHeader()->getName() << " and inner loop "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Loop flattening running on outer loop "
<< FI.OuterLoop->getHeader()->getName() <<
" and inner loop " << FI.InnerLoop->getHeader()->
getName() << " in " << FI.OuterLoop->getHeader
()->getParent()->getName() << "\n"; } } while (false
)
633 << FI.InnerLoop->getHeader()->getName() << " in "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Loop flattening running on outer loop "
<< FI.OuterLoop->getHeader()->getName() <<
" and inner loop " << FI.InnerLoop->getHeader()->
getName() << " in " << FI.OuterLoop->getHeader
()->getParent()->getName() << "\n"; } } while (false
)
634 << FI.OuterLoop->getHeader()->getParent()->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Loop flattening running on outer loop "
<< FI.OuterLoop->getHeader()->getName() <<
" and inner loop " << FI.InnerLoop->getHeader()->
getName() << " in " << FI.OuterLoop->getHeader
()->getParent()->getName() << "\n"; } } while (false
)
;
635
636 if (!CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI))
637 return false;
638
639 // Check if we can widen the induction variables to avoid overflow checks.
640 if (CanWidenIV(FI, DT, LI, SE, AC, TTI))
641 return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI);
642
643 // Check if the new iteration variable might overflow. In this case, we
644 // need to version the loop, and select the original version at runtime if
645 // the iteration space is too large.
646 // TODO: We currently don't version the loop.
647 OverflowResult OR = checkOverflow(FI, DT, AC);
648 if (OR == OverflowResult::AlwaysOverflowsHigh ||
649 OR == OverflowResult::AlwaysOverflowsLow) {
650 LLVM_DEBUG(dbgs() << "Multiply would always overflow, so not profitable\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Multiply would always overflow, so not profitable\n"
; } } while (false)
;
651 return false;
652 } else if (OR == OverflowResult::MayOverflow) {
653 LLVM_DEBUG(dbgs() << "Multiply might overflow, not flattening\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Multiply might overflow, not flattening\n"
; } } while (false)
;
654 return false;
655 }
656
657 LLVM_DEBUG(dbgs() << "Multiply cannot overflow, modifying loop in-place\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("loop-flatten")) { dbgs() << "Multiply cannot overflow, modifying loop in-place\n"
; } } while (false)
;
658 return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI);
659}
660
661bool Flatten(LoopNest &LN, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
662 AssumptionCache *AC, TargetTransformInfo *TTI) {
663 bool Changed = false;
664 for (Loop *InnerLoop : LN.getLoops()) {
665 auto *OuterLoop = InnerLoop->getParentLoop();
666 if (!OuterLoop)
667 continue;
668 FlattenInfo FI(OuterLoop, InnerLoop);
669 Changed |= FlattenLoopPair(FI, DT, LI, SE, AC, TTI);
670 }
671 return Changed;
672}
673
674PreservedAnalyses LoopFlattenPass::run(LoopNest &LN, LoopAnalysisManager &LAM,
675 LoopStandardAnalysisResults &AR,
676 LPMUpdater &U) {
677
678 bool Changed = false;
679
680 // The loop flattening pass requires loops to be
681 // in simplified form, and also needs LCSSA. Running
682 // this pass will simplify all loops that contain inner loops,
683 // regardless of whether anything ends up being flattened.
684 Changed |= Flatten(LN, &AR.DT, &AR.LI, &AR.SE, &AR.AC, &AR.TTI);
685
686 if (!Changed)
687 return PreservedAnalyses::all();
688
689 return PreservedAnalyses::none();
690}
691
692namespace {
693class LoopFlattenLegacyPass : public FunctionPass {
694public:
695 static char ID; // Pass ID, replacement for typeid
696 LoopFlattenLegacyPass() : FunctionPass(ID) {
697 initializeLoopFlattenLegacyPassPass(*PassRegistry::getPassRegistry());
698 }
699
700 // Possibly flatten loop L into its child.
701 bool runOnFunction(Function &F) override;
702
703 void getAnalysisUsage(AnalysisUsage &AU) const override {
704 getLoopAnalysisUsage(AU);
705 AU.addRequired<TargetTransformInfoWrapperPass>();
706 AU.addPreserved<TargetTransformInfoWrapperPass>();
707 AU.addRequired<AssumptionCacheTracker>();
708 AU.addPreserved<AssumptionCacheTracker>();
709 }
710};
711} // namespace
712
713char LoopFlattenLegacyPass::ID = 0;
714INITIALIZE_PASS_BEGIN(LoopFlattenLegacyPass, "loop-flatten", "Flattens loops",static void *initializeLoopFlattenLegacyPassPassOnce(PassRegistry
&Registry) {
715 false, false)static void *initializeLoopFlattenLegacyPassPassOnce(PassRegistry
&Registry) {
716INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)initializeTargetTransformInfoWrapperPassPass(Registry);
717INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)initializeAssumptionCacheTrackerPass(Registry);
718INITIALIZE_PASS_END(LoopFlattenLegacyPass, "loop-flatten", "Flattens loops",PassInfo *PI = new PassInfo( "Flattens loops", "loop-flatten"
, &LoopFlattenLegacyPass::ID, PassInfo::NormalCtor_t(callDefaultCtor
<LoopFlattenLegacyPass>), false, false); Registry.registerPass
(*PI, true); return PI; } static llvm::once_flag InitializeLoopFlattenLegacyPassPassFlag
; void llvm::initializeLoopFlattenLegacyPassPass(PassRegistry
&Registry) { llvm::call_once(InitializeLoopFlattenLegacyPassPassFlag
, initializeLoopFlattenLegacyPassPassOnce, std::ref(Registry)
); }
719 false, false)PassInfo *PI = new PassInfo( "Flattens loops", "loop-flatten"
, &LoopFlattenLegacyPass::ID, PassInfo::NormalCtor_t(callDefaultCtor
<LoopFlattenLegacyPass>), false, false); Registry.registerPass
(*PI, true); return PI; } static llvm::once_flag InitializeLoopFlattenLegacyPassPassFlag
; void llvm::initializeLoopFlattenLegacyPassPass(PassRegistry
&Registry) { llvm::call_once(InitializeLoopFlattenLegacyPassPassFlag
, initializeLoopFlattenLegacyPassPassOnce, std::ref(Registry)
); }
720
721FunctionPass *llvm::createLoopFlattenPass() { return new LoopFlattenLegacyPass(); }
722
723bool LoopFlattenLegacyPass::runOnFunction(Function &F) {
724 ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
725 LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
726 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
727 DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
728 auto &TTIP = getAnalysis<TargetTransformInfoWrapperPass>();
729 auto *TTI = &TTIP.getTTI(F);
730 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
731 bool Changed = false;
732 for (Loop *L : *LI) {
733 auto LN = LoopNest::getLoopNest(*L, *SE);
734 Changed |= Flatten(*LN, DT, LI, SE, AC, TTI);
735 }
736 return Changed;
737}

/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/DerivedTypes.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/InstrTypes.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/OperandTraits.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Use.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <iterator>
48
49namespace llvm {
50
51class APInt;
52class ConstantInt;
53class DataLayout;
54class LLVMContext;
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60/// an instruction to allocate memory on the stack
61class AllocaInst : public UnaryInstruction {
62 Type *AllocatedType;
63
64 using AlignmentField = AlignmentBitfieldElementT<0>;
65 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
66 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
67 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
68 SwiftErrorField>(),
69 "Bitfields must be contiguous");
70
71protected:
72 // Note: Instruction needs to be a friend here to call cloneImpl.
73 friend class Instruction;
74
75 AllocaInst *cloneImpl() const;
76
77public:
78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, Instruction *InsertBefore);
80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, BasicBlock *InsertAtEnd);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 Instruction *InsertBefore);
85 AllocaInst(Type *Ty, unsigned AddrSpace,
86 const Twine &Name, BasicBlock *InsertAtEnd);
87
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name = "", Instruction *InsertBefore = nullptr);
90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
91 const Twine &Name, BasicBlock *InsertAtEnd);
92
93 /// Return true if there is an allocation size parameter to the allocation
94 /// instruction that is not 1.
95 bool isArrayAllocation() const;
96
97 /// Get the number of elements allocated. For a simple allocation of a single
98 /// element, this will return a constant 1 value.
99 const Value *getArraySize() const { return getOperand(0); }
100 Value *getArraySize() { return getOperand(0); }
101
102 /// Overload to return most specific pointer type.
103 PointerType *getType() const {
104 return cast<PointerType>(Instruction::getType());
105 }
106
107 /// Get allocation size in bits. Returns None if size can't be determined,
108 /// e.g. in case of a VLA.
109 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
110
111 /// Return the type that is being allocated by the instruction.
112 Type *getAllocatedType() const { return AllocatedType; }
113 /// for use only in special circumstances that need to generically
114 /// transform a whole instruction (eg: IR linking and vectorization).
115 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
116
117 /// Return the alignment of the memory that is being allocated by the
118 /// instruction.
119 Align getAlign() const {
120 return Align(1ULL << getSubclassData<AlignmentField>());
121 }
122
123 void setAlignment(Align Align) {
124 setSubclassData<AlignmentField>(Log2(Align));
125 }
126
127 // FIXME: Remove this one transition to Align is over.
128 unsigned getAlignment() const { return getAlign().value(); }
129
130 /// Return true if this alloca is in the entry block of the function and is a
131 /// constant size. If so, the code generator will fold it into the
132 /// prolog/epilog code, so it is basically free.
133 bool isStaticAlloca() const;
134
135 /// Return true if this alloca is used as an inalloca argument to a call. Such
136 /// allocas are never considered static even if they are in the entry block.
137 bool isUsedWithInAlloca() const {
138 return getSubclassData<UsedWithInAllocaField>();
139 }
140
141 /// Specify whether this alloca is used to represent the arguments to a call.
142 void setUsedWithInAlloca(bool V) {
143 setSubclassData<UsedWithInAllocaField>(V);
144 }
145
146 /// Return true if this alloca is used as a swifterror argument to a call.
147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148 /// Specify whether this alloca is used to represent a swifterror.
149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 template <typename Bitfield>
163 void setSubclassData(typename Bitfield::Type Value) {
164 Instruction::setSubclassData<Bitfield>(Value);
165 }
166};
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
172/// An instruction for reading from memory. This uses the SubclassData field in
173/// Value to store whether or not the load is volatile.
174class LoadInst : public UnaryInstruction {
175 using VolatileField = BoolBitfieldElementT<0>;
176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
178 static_assert(
179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
180 "Bitfields must be contiguous");
181
182 void AssertOK();
183
184protected:
185 // Note: Instruction needs to be a friend here to call cloneImpl.
186 friend class Instruction;
187
188 LoadInst *cloneImpl() const;
189
190public:
191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
192 Instruction *InsertBefore);
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 BasicBlock *InsertAtEnd);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 Align Align, Instruction *InsertBefore = nullptr);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, BasicBlock *InsertAtEnd);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, AtomicOrdering Order,
204 SyncScope::ID SSID = SyncScope::System,
205 Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
208 BasicBlock *InsertAtEnd);
209
210 /// Return true if this is a load from a volatile memory location.
211 bool isVolatile() const { return getSubclassData<VolatileField>(); }
212
213 /// Specify whether this is a volatile load or not.
214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
215
216 /// Return the alignment of the access that is being performed.
217 /// FIXME: Remove this function once transition to Align is over.
218 /// Use getAlign() instead.
219 unsigned getAlignment() const { return getAlign().value(); }
220
221 /// Return the alignment of the access that is being performed.
222 Align getAlign() const {
223 return Align(1ULL << (getSubclassData<AlignmentField>()));
224 }
225
226 void setAlignment(Align Align) {
227 setSubclassData<AlignmentField>(Log2(Align));
228 }
229
230 /// Returns the ordering constraint of this load instruction.
231 AtomicOrdering getOrdering() const {
232 return getSubclassData<OrderingField>();
233 }
234 /// Sets the ordering constraint of this load instruction. May not be Release
235 /// or AcquireRelease.
236 void setOrdering(AtomicOrdering Ordering) {
237 setSubclassData<OrderingField>(Ordering);
238 }
239
240 /// Returns the synchronization scope ID of this load instruction.
241 SyncScope::ID getSyncScopeID() const {
242 return SSID;
243 }
244
245 /// Sets the synchronization scope ID of this load instruction.
246 void setSyncScopeID(SyncScope::ID SSID) {
247 this->SSID = SSID;
248 }
249
250 /// Sets the ordering constraint and the synchronization scope ID of this load
251 /// instruction.
252 void setAtomic(AtomicOrdering Ordering,
253 SyncScope::ID SSID = SyncScope::System) {
254 setOrdering(Ordering);
255 setSyncScopeID(SSID);
256 }
257
258 bool isSimple() const { return !isAtomic() && !isVolatile(); }
259
260 bool isUnordered() const {
261 return (getOrdering() == AtomicOrdering::NotAtomic ||
262 getOrdering() == AtomicOrdering::Unordered) &&
263 !isVolatile();
264 }
265
266 Value *getPointerOperand() { return getOperand(0); }
267 const Value *getPointerOperand() const { return getOperand(0); }
268 static unsigned getPointerOperandIndex() { return 0U; }
269 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
270
271 /// Returns the address space of the pointer operand.
272 unsigned getPointerAddressSpace() const {
273 return getPointerOperandType()->getPointerAddressSpace();
274 }
275
276 // Methods for support type inquiry through isa, cast, and dyn_cast:
277 static bool classof(const Instruction *I) {
278 return I->getOpcode() == Instruction::Load;
279 }
280 static bool classof(const Value *V) {
281 return isa<Instruction>(V) && classof(cast<Instruction>(V));
282 }
283
284private:
285 // Shadow Instruction::setInstructionSubclassData with a private forwarding
286 // method so that subclasses cannot accidentally use it.
287 template <typename Bitfield>
288 void setSubclassData(typename Bitfield::Type Value) {
289 Instruction::setSubclassData<Bitfield>(Value);
290 }
291
292 /// The synchronization scope ID of this load instruction. Not quite enough
293 /// room in SubClassData for everything, so synchronization scope ID gets its
294 /// own field.
295 SyncScope::ID SSID;
296};
297
298//===----------------------------------------------------------------------===//
299// StoreInst Class
300//===----------------------------------------------------------------------===//
301
302/// An instruction for storing to memory.
303class StoreInst : public Instruction {
304 using VolatileField = BoolBitfieldElementT<0>;
305 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
306 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
307 static_assert(
308 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
309 "Bitfields must be contiguous");
310
311 void AssertOK();
312
313protected:
314 // Note: Instruction needs to be a friend here to call cloneImpl.
315 friend class Instruction;
316
317 StoreInst *cloneImpl() const;
318
319public:
320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
325 Instruction *InsertBefore = nullptr);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 BasicBlock *InsertAtEnd);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
329 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
330 Instruction *InsertBefore = nullptr);
331 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
332 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
333
334 // allocate space for exactly two operands
335 void *operator new(size_t s) {
336 return User::operator new(s, 2);
337 }
338
339 /// Return true if this is a store to a volatile memory location.
340 bool isVolatile() const { return getSubclassData<VolatileField>(); }
341
342 /// Specify whether this is a volatile store or not.
343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344
345 /// Transparently provide more efficient getOperand methods.
346 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
347
348 /// Return the alignment of the access that is being performed
349 /// FIXME: Remove this function once transition to Align is over.
350 /// Use getAlign() instead.
351 unsigned getAlignment() const { return getAlign().value(); }
352
353 Align getAlign() const {
354 return Align(1ULL << (getSubclassData<AlignmentField>()));
355 }
356
357 void setAlignment(Align Align) {
358 setSubclassData<AlignmentField>(Log2(Align));
359 }
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return getSubclassData<OrderingField>();
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setSubclassData<OrderingField>(Ordering);
370 }
371
372 /// Returns the synchronization scope ID of this store instruction.
373 SyncScope::ID getSyncScopeID() const {
374 return SSID;
375 }
376
377 /// Sets the synchronization scope ID of this store instruction.
378 void setSyncScopeID(SyncScope::ID SSID) {
379 this->SSID = SSID;
380 }
381
382 /// Sets the ordering constraint and the synchronization scope ID of this
383 /// store instruction.
384 void setAtomic(AtomicOrdering Ordering,
385 SyncScope::ID SSID = SyncScope::System) {
386 setOrdering(Ordering);
387 setSyncScopeID(SSID);
388 }
389
390 bool isSimple() const { return !isAtomic() && !isVolatile(); }
391
392 bool isUnordered() const {
393 return (getOrdering() == AtomicOrdering::NotAtomic ||
394 getOrdering() == AtomicOrdering::Unordered) &&
395 !isVolatile();
396 }
397
398 Value *getValueOperand() { return getOperand(0); }
399 const Value *getValueOperand() const { return getOperand(0); }
400
401 Value *getPointerOperand() { return getOperand(1); }
402 const Value *getPointerOperand() const { return getOperand(1); }
403 static unsigned getPointerOperandIndex() { return 1U; }
404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405
406 /// Returns the address space of the pointer operand.
407 unsigned getPointerAddressSpace() const {
408 return getPointerOperandType()->getPointerAddressSpace();
409 }
410
411 // Methods for support type inquiry through isa, cast, and dyn_cast:
412 static bool classof(const Instruction *I) {
413 return I->getOpcode() == Instruction::Store;
414 }
415 static bool classof(const Value *V) {
416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
417 }
418
419private:
420 // Shadow Instruction::setInstructionSubclassData with a private forwarding
421 // method so that subclasses cannot accidentally use it.
422 template <typename Bitfield>
423 void setSubclassData(typename Bitfield::Type Value) {
424 Instruction::setSubclassData<Bitfield>(Value);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
// StoreInst has a fixed operand count of 2 (the value being stored and the
// pointer stored to), so its OperandTraits derive from FixedNumOperandTraits.
// The DEFINE_TRANSPARENT_OPERAND_ACCESSORS invocation below stamps out the
// standard op_begin/op_end/getOperand/setOperand/getNumOperands boilerplate;
// the text following the macro name is its preprocessor expansion as rendered
// by the analyzer.
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<StoreInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 437, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<StoreInst>::op_begin(const_cast
<StoreInst*>(this))[i_nocapture].get()); } void StoreInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<StoreInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 437, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
StoreInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned StoreInst::getNumOperands() const { return OperandTraits
<StoreInst>::operands(this); } template <int Idx_nocapture
> Use &StoreInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
StoreInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
// A fence takes no SSA operands: its ordering is packed into the shared
// Instruction subclass-data bitfield (OrderingField), and only the sync
// scope ID needs its own member (SSID) because the bitfield is full.
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t s) {
466 return User::operator new(s, 0);
467 }
468
469 /// Returns the ordering constraint of this fence instruction.
470 AtomicOrdering getOrdering() const {
471 return getSubclassData<OrderingField>();
472 }
473
474 /// Sets the ordering constraint of this fence instruction. May only be
475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476 void setOrdering(AtomicOrdering Ordering) {
477 setSubclassData<OrderingField>(Ordering);
478 }
479
480 /// Returns the synchronization scope ID of this fence instruction.
481 SyncScope::ID getSyncScopeID() const {
482 return SSID;
483 }
484
485 /// Sets the synchronization scope ID of this fence instruction.
486 void setSyncScopeID(SyncScope::ID SSID) {
487 this->SSID = SSID;
488 }
489
490 // Methods for support type inquiry through isa, cast, and dyn_cast:
491 static bool classof(const Instruction *I) {
492 return I->getOpcode() == Instruction::Fence;
493 }
494 static bool classof(const Value *V) {
495 return isa<Instruction>(V) && classof(cast<Instruction>(V));
496 }
497
498private:
499 // Shadow Instruction::setInstructionSubclassData with a private forwarding
500 // method so that subclasses cannot accidentally use it.
501 template <typename Bitfield>
502 void setSubclassData(typename Bitfield::Type Value) {
503 Instruction::setSubclassData<Bitfield>(Value);
504 }
505
506 /// The synchronization scope ID of this fence instruction. Not quite enough
507 /// room in SubClassData for everything, so synchronization scope ID gets its
508 /// own field.
509 SyncScope::ID SSID;
510};
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
516/// An instruction that atomically checks whether a
517/// specified value is in a memory location, and, if it is, stores a new value
518/// there. The value returned by this instruction is a pair containing the
519/// original value as first element, and an i1 indicating success (true) or
520/// failure (false) as second element.
521///
522class AtomicCmpXchgInst : public Instruction {
523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525 SyncScope::ID SSID);
526
527 template <unsigned Offset>
528 using AtomicOrderingBitfieldElement =
529 typename Bitfield::Element<AtomicOrdering, Offset, 3,
530 AtomicOrdering::LAST>;
531
532protected:
533 // Note: Instruction needs to be a friend here to call cloneImpl.
534 friend class Instruction;
535
536 AtomicCmpXchgInst *cloneImpl() const;
537
538public:
539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540 AtomicOrdering SuccessOrdering,
541 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542 Instruction *InsertBefore = nullptr);
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 BasicBlock *InsertAtEnd);
547
548 // allocate space for exactly three operands
549 void *operator new(size_t s) {
550 return User::operator new(s, 3);
551 }
552
// The declaration order below fixes the bit layout of the packed
// subclass-data word (each field starts at the previous field's NextBit);
// the static_assert enforces that no gaps were introduced.
553 using VolatileField = BoolBitfieldElementT<0>;
554 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
555 using SuccessOrderingField =
556 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
557 using FailureOrderingField =
558 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
559 using AlignmentField =
560 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
561 static_assert(
562 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
563 FailureOrderingField, AlignmentField>(),
564 "Bitfields must be contiguous");
565
566 /// Return the alignment of the memory that is being allocated by the
567 /// instruction.
568 Align getAlign() const {
569 return Align(1ULL << getSubclassData<AlignmentField>());
570 }
571
572 void setAlignment(Align Align) {
573 setSubclassData<AlignmentField>(Log2(Align));
574 }
575
576 /// Return true if this is a cmpxchg from a volatile memory
577 /// location.
578 ///
579 bool isVolatile() const { return getSubclassData<VolatileField>(); }
580
581 /// Specify whether this is a volatile cmpxchg.
582 ///
583 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
584
585 /// Return true if this cmpxchg may spuriously fail.
586 bool isWeak() const { return getSubclassData<WeakField>(); }
587
588 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
589
590 /// Transparently provide more efficient getOperand methods.
591 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
592
// NotAtomic/Unordered are never legal cmpxchg orderings; failure orderings
// additionally cannot carry release semantics (no store happens on failure).
593 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
594 return Ordering != AtomicOrdering::NotAtomic &&
595 Ordering != AtomicOrdering::Unordered;
596 }
597
598 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
599 return Ordering != AtomicOrdering::NotAtomic &&
600 Ordering != AtomicOrdering::Unordered &&
601 Ordering != AtomicOrdering::AcquireRelease &&
602 Ordering != AtomicOrdering::Release;
603 }
604
605 /// Returns the success ordering constraint of this cmpxchg instruction.
606 AtomicOrdering getSuccessOrdering() const {
607 return getSubclassData<SuccessOrderingField>();
608 }
609
610 /// Sets the success ordering constraint of this cmpxchg instruction.
611 void setSuccessOrdering(AtomicOrdering Ordering) {
612 assert(isValidSuccessOrdering(Ordering) &&(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 613, __extension__ __PRETTY_FUNCTION__))
613 "invalid CmpXchg success ordering")(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 613, __extension__ __PRETTY_FUNCTION__))
;
614 setSubclassData<SuccessOrderingField>(Ordering);
615 }
616
617 /// Returns the failure ordering constraint of this cmpxchg instruction.
618 AtomicOrdering getFailureOrdering() const {
619 return getSubclassData<FailureOrderingField>();
620 }
621
622 /// Sets the failure ordering constraint of this cmpxchg instruction.
623 void setFailureOrdering(AtomicOrdering Ordering) {
624 assert(isValidFailureOrdering(Ordering) &&(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 625, __extension__ __PRETTY_FUNCTION__))
625 "invalid CmpXchg failure ordering")(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 625, __extension__ __PRETTY_FUNCTION__))
;
626 setSubclassData<FailureOrderingField>(Ordering);
627 }
628
629 /// Returns a single ordering which is at least as strong as both the
630 /// success and failure orderings for this cmpxchg.
631 AtomicOrdering getMergedOrdering() const {
632 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
633 return AtomicOrdering::SequentiallyConsistent;
634 if (getFailureOrdering() == AtomicOrdering::Acquire) {
635 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
636 return AtomicOrdering::Acquire;
637 if (getSuccessOrdering() == AtomicOrdering::Release)
638 return AtomicOrdering::AcquireRelease;
639 }
// In all remaining cases the success ordering is at least as strong as
// the failure ordering, so it is the merged result.
640 return getSuccessOrdering();
641 }
642
643 /// Returns the synchronization scope ID of this cmpxchg instruction.
644 SyncScope::ID getSyncScopeID() const {
645 return SSID;
646 }
647
648 /// Sets the synchronization scope ID of this cmpxchg instruction.
649 void setSyncScopeID(SyncScope::ID SSID) {
650 this->SSID = SSID;
651 }
652
// Operand layout: 0 = pointer, 1 = expected (compare) value, 2 = new value.
653 Value *getPointerOperand() { return getOperand(0); }
654 const Value *getPointerOperand() const { return getOperand(0); }
655 static unsigned getPointerOperandIndex() { return 0U; }
656
657 Value *getCompareOperand() { return getOperand(1); }
658 const Value *getCompareOperand() const { return getOperand(1); }
659
660 Value *getNewValOperand() { return getOperand(2); }
661 const Value *getNewValOperand() const { return getOperand(2); }
662
663 /// Returns the address space of the pointer operand.
664 unsigned getPointerAddressSpace() const {
665 return getPointerOperand()->getType()->getPointerAddressSpace();
666 }
667
668 /// Returns the strongest permitted ordering on failure, given the
669 /// desired ordering on success.
670 ///
671 /// If the comparison in a cmpxchg operation fails, there is no atomic store
672 /// so release semantics cannot be provided. So this function drops explicit
673 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
674 /// operation would remain SequentiallyConsistent.
675 static AtomicOrdering
676 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
677 switch (SuccessOrdering) {
678 default:
679 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 679)
;
680 case AtomicOrdering::Release:
681 case AtomicOrdering::Monotonic:
682 return AtomicOrdering::Monotonic;
683 case AtomicOrdering::AcquireRelease:
684 case AtomicOrdering::Acquire:
685 return AtomicOrdering::Acquire;
686 case AtomicOrdering::SequentiallyConsistent:
687 return AtomicOrdering::SequentiallyConsistent;
688 }
689 }
690
691 // Methods for support type inquiry through isa, cast, and dyn_cast:
692 static bool classof(const Instruction *I) {
693 return I->getOpcode() == Instruction::AtomicCmpXchg;
694 }
695 static bool classof(const Value *V) {
696 return isa<Instruction>(V) && classof(cast<Instruction>(V));
697 }
698
699private:
700 // Shadow Instruction::setInstructionSubclassData with a private forwarding
701 // method so that subclasses cannot accidentally use it.
702 template <typename Bitfield>
703 void setSubclassData(typename Bitfield::Type Value) {
704 Instruction::setSubclassData<Bitfield>(Value);
705 }
706
707 /// The synchronization scope ID of this cmpxchg instruction. Not quite
708 /// enough room in SubClassData for everything, so synchronization scope ID
709 /// gets its own field.
710 SyncScope::ID SSID;
711};
712
// AtomicCmpXchgInst has a fixed operand count of 3 (pointer, compare value,
// new value). The accessor macro below generates the standard transparent
// operand accessors; the trailing text is its rendered preprocessor expansion.
713template <>
714struct OperandTraits<AtomicCmpXchgInst> :
715 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
716};
717
718DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 718, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicCmpXchgInst>::op_begin
(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture].get
()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 718, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicCmpXchgInst::getNumOperands() const { return
OperandTraits<AtomicCmpXchgInst>::operands(this); } template
<int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &AtomicCmpXchgInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
719
720//===----------------------------------------------------------------------===//
721// AtomicRMWInst Class
722//===----------------------------------------------------------------------===//
723
724/// an instruction that atomically reads a memory location,
725/// combines it with another value, and then stores the result back. Returns
726/// the old value.
727///
728class AtomicRMWInst : public Instruction {
729protected:
730 // Note: Instruction needs to be a friend here to call cloneImpl.
731 friend class Instruction;
732
733 AtomicRMWInst *cloneImpl() const;
734
735public:
736 /// This enumeration lists the possible modifications atomicrmw can make. In
737 /// the descriptions, 'p' is the pointer to the instruction's memory location,
738 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
739 /// instruction. These instructions always return 'old'.
740 enum BinOp : unsigned {
741 /// *p = v
742 Xchg,
743 /// *p = old + v
744 Add,
745 /// *p = old - v
746 Sub,
747 /// *p = old & v
748 And,
749 /// *p = ~(old & v)
750 Nand,
751 /// *p = old | v
752 Or,
753 /// *p = old ^ v
754 Xor,
755 /// *p = old >signed v ? old : v
756 Max,
757 /// *p = old <signed v ? old : v
758 Min,
759 /// *p = old >unsigned v ? old : v
760 UMax,
761 /// *p = old <unsigned v ? old : v
762 UMin,
763
764 /// *p = old + v
765 FAdd,
766
767 /// *p = old - v
768 FSub,
769
770 FIRST_BINOP = Xchg,
771 LAST_BINOP = FSub,
772 BAD_BINOP
773 };
774
775private:
776 template <unsigned Offset>
777 using AtomicOrderingBitfieldElement =
778 typename Bitfield::Element<AtomicOrdering, Offset, 3,
779 AtomicOrdering::LAST>;
780
// 4 bits are enough to encode every valid BinOp (LAST_BINOP == FSub).
781 template <unsigned Offset>
782 using BinOpBitfieldElement =
783 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
784
785public:
786 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
787 AtomicOrdering Ordering, SyncScope::ID SSID,
788 Instruction *InsertBefore = nullptr);
789 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
790 AtomicOrdering Ordering, SyncScope::ID SSID,
791 BasicBlock *InsertAtEnd);
792
793 // allocate space for exactly two operands
794 void *operator new(size_t s) {
795 return User::operator new(s, 2);
796 }
797
// Declaration order fixes the packed subclass-data bit layout; the
// static_assert guarantees the fields were chained without gaps.
798 using VolatileField = BoolBitfieldElementT<0>;
799 using AtomicOrderingField =
800 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
801 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
802 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
803 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
804 OperationField, AlignmentField>(),
805 "Bitfields must be contiguous");
806
807 BinOp getOperation() const { return getSubclassData<OperationField>(); }
808
809 static StringRef getOperationName(BinOp Op);
810
811 static bool isFPOperation(BinOp Op) {
812 switch (Op) {
813 case AtomicRMWInst::FAdd:
814 case AtomicRMWInst::FSub:
815 return true;
816 default:
817 return false;
818 }
819 }
820
821 void setOperation(BinOp Operation) {
822 setSubclassData<OperationField>(Operation);
823 }
824
825 /// Return the alignment of the memory that is being allocated by the
826 /// instruction.
827 Align getAlign() const {
828 return Align(1ULL << getSubclassData<AlignmentField>());
829 }
830
831 void setAlignment(Align Align) {
832 setSubclassData<AlignmentField>(Log2(Align));
833 }
834
835 /// Return true if this is a RMW on a volatile memory location.
836 ///
837 bool isVolatile() const { return getSubclassData<VolatileField>(); }
838
839 /// Specify whether this is a volatile RMW or not.
840 ///
841 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
842
843 /// Transparently provide more efficient getOperand methods.
844 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
845
846 /// Returns the ordering constraint of this rmw instruction.
847 AtomicOrdering getOrdering() const {
848 return getSubclassData<AtomicOrderingField>();
849 }
850
851 /// Sets the ordering constraint of this rmw instruction.
852 void setOrdering(AtomicOrdering Ordering) {
853 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 854, __extension__ __PRETTY_FUNCTION__))
854 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 854, __extension__ __PRETTY_FUNCTION__))
;
855 setSubclassData<AtomicOrderingField>(Ordering);
856 }
857
858 /// Returns the synchronization scope ID of this rmw instruction.
859 SyncScope::ID getSyncScopeID() const {
860 return SSID;
861 }
862
863 /// Sets the synchronization scope ID of this rmw instruction.
864 void setSyncScopeID(SyncScope::ID SSID) {
865 this->SSID = SSID;
866 }
867
// Operand layout: 0 = pointer, 1 = value combined with the old contents.
868 Value *getPointerOperand() { return getOperand(0); }
869 const Value *getPointerOperand() const { return getOperand(0); }
870 static unsigned getPointerOperandIndex() { return 0U; }
871
872 Value *getValOperand() { return getOperand(1); }
873 const Value *getValOperand() const { return getOperand(1); }
874
875 /// Returns the address space of the pointer operand.
876 unsigned getPointerAddressSpace() const {
877 return getPointerOperand()->getType()->getPointerAddressSpace();
878 }
879
880 bool isFloatingPointOperation() const {
881 return isFPOperation(getOperation());
882 }
883
884 // Methods for support type inquiry through isa, cast, and dyn_cast:
885 static bool classof(const Instruction *I) {
886 return I->getOpcode() == Instruction::AtomicRMW;
887 }
888 static bool classof(const Value *V) {
889 return isa<Instruction>(V) && classof(cast<Instruction>(V));
890 }
891
892private:
893 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
894 AtomicOrdering Ordering, SyncScope::ID SSID);
895
896 // Shadow Instruction::setInstructionSubclassData with a private forwarding
897 // method so that subclasses cannot accidentally use it.
898 template <typename Bitfield>
899 void setSubclassData(typename Bitfield::Type Value) {
900 Instruction::setSubclassData<Bitfield>(Value);
901 }
902
903 /// The synchronization scope ID of this rmw instruction. Not quite enough
904 /// room in SubClassData for everything, so synchronization scope ID gets its
905 /// own field.
906 SyncScope::ID SSID;
907};
908
// AtomicRMWInst has a fixed operand count of 2 (pointer, value). The accessor
// macro below generates the standard transparent operand accessors; the
// trailing text is its rendered preprocessor expansion.
909template <>
910struct OperandTraits<AtomicRMWInst>
911 : public FixedNumOperandTraits<AtomicRMWInst,2> {
912};
913
914DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 914, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicRMWInst>::op_begin(const_cast
<AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<AtomicRMWInst
>::operands(this) && "setOperand() out of range!")
? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 914, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicRMWInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicRMWInst::getNumOperands() const { return OperandTraits
<AtomicRMWInst>::operands(this); } template <int Idx_nocapture
> Use &AtomicRMWInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &AtomicRMWInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
915
916//===----------------------------------------------------------------------===//
917// GetElementPtrInst Class
918//===----------------------------------------------------------------------===//
919
920// checkGEPType - Simple wrapper function to give a better assertion failure
921// message on bad indexes for a gep instruction.
922//
923inline Type *checkGEPType(Type *Ty) {
924 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 924, __extension__ __PRETTY_FUNCTION__))
;
925 return Ty;
926}
927
928/// an instruction for type-safe pointer arithmetic to
929/// access elements of arrays and structs
930///
931class GetElementPtrInst : public Instruction {
932 Type *SourceElementType;
933 Type *ResultElementType;
934
935 GetElementPtrInst(const GetElementPtrInst &GEPI);
936
937 /// Constructors - Create a getelementptr instruction with a base pointer an
938 /// list of indices. The first ctor can optionally insert before an existing
939 /// instruction, the second appends the new instruction to the specified
940 /// BasicBlock.
941 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942 ArrayRef<Value *> IdxList, unsigned Values,
943 const Twine &NameStr, Instruction *InsertBefore);
944 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
945 ArrayRef<Value *> IdxList, unsigned Values,
946 const Twine &NameStr, BasicBlock *InsertAtEnd);
947
948 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
949
950protected:
951 // Note: Instruction needs to be a friend here to call cloneImpl.
952 friend class Instruction;
953
954 GetElementPtrInst *cloneImpl() const;
955
956public:
957 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
958 ArrayRef<Value *> IdxList,
959 const Twine &NameStr = "",
960 Instruction *InsertBefore = nullptr) {
961 unsigned Values = 1 + unsigned(IdxList.size());
962 if (!PointeeType) {
963 PointeeType =
964 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
965 } else {
966 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 967, __extension__ __PRETTY_FUNCTION__))
967 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 967, __extension__ __PRETTY_FUNCTION__))
;
968 }
969 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
970 NameStr, InsertBefore);
971 }
972
973 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
974 ArrayRef<Value *> IdxList,
975 const Twine &NameStr,
976 BasicBlock *InsertAtEnd) {
977 unsigned Values = 1 + unsigned(IdxList.size());
978 if (!PointeeType) {
979 PointeeType =
980 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
981 } else {
982 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 983, __extension__ __PRETTY_FUNCTION__))
983 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 983, __extension__ __PRETTY_FUNCTION__))
;
984 }
985 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
986 NameStr, InsertAtEnd);
987 }
988
989 /// Create an "inbounds" getelementptr. See the documentation for the
990 /// "inbounds" flag in LangRef.html for details.
991 static GetElementPtrInst *CreateInBounds(Value *Ptr,
992 ArrayRef<Value *> IdxList,
993 const Twine &NameStr = "",
994 Instruction *InsertBefore = nullptr){
995 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
996 }
997
998 static GetElementPtrInst *
999 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
1000 const Twine &NameStr = "",
1001 Instruction *InsertBefore = nullptr) {
1002 GetElementPtrInst *GEP =
1003 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1004 GEP->setIsInBounds(true);
1005 return GEP;
1006 }
1007
1008 static GetElementPtrInst *CreateInBounds(Value *Ptr,
1009 ArrayRef<Value *> IdxList,
1010 const Twine &NameStr,
1011 BasicBlock *InsertAtEnd) {
1012 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
1013 }
1014
1015 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1016 ArrayRef<Value *> IdxList,
1017 const Twine &NameStr,
1018 BasicBlock *InsertAtEnd) {
1019 GetElementPtrInst *GEP =
1020 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1021 GEP->setIsInBounds(true);
1022 return GEP;
1023 }
1024
1025 /// Transparently provide more efficient getOperand methods.
1026 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1027
1028 Type *getSourceElementType() const { return SourceElementType; }
1029
1030 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1031 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1032
1033 Type *getResultElementType() const {
1034 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1035, __extension__ __PRETTY_FUNCTION__))
1035 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1035, __extension__ __PRETTY_FUNCTION__))
;
1036 return ResultElementType;
1037 }
1038
1039 /// Returns the address space of this instruction's pointer type.
1040 unsigned getAddressSpace() const {
1041 // Note that this is always the same as the pointer operand's address space
1042 // and that is cheaper to compute, so cheat here.
1043 return getPointerAddressSpace();
1044 }
1045
1046 /// Returns the result type of a getelementptr with the given source
1047 /// element type and indexes.
1048 ///
1049 /// Null is returned if the indices are invalid for the specified
1050 /// source element type.
1051 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1052 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1053 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1054
1055 /// Return the type of the element at the given index of an indexable
1056 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1057 ///
1058 /// Returns null if the type can't be indexed, or the given index is not
1059 /// legal for the given type.
1060 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1061 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1062
1063 inline op_iterator idx_begin() { return op_begin()+1; }
1064 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1065 inline op_iterator idx_end() { return op_end(); }
1066 inline const_op_iterator idx_end() const { return op_end(); }
1067
1068 inline iterator_range<op_iterator> indices() {
1069 return make_range(idx_begin(), idx_end());
1070 }
1071
1072 inline iterator_range<const_op_iterator> indices() const {
1073 return make_range(idx_begin(), idx_end());
1074 }
1075
1076 Value *getPointerOperand() {
1077 return getOperand(0);
1078 }
1079 const Value *getPointerOperand() const {
1080 return getOperand(0);
1081 }
1082 static unsigned getPointerOperandIndex() {
1083 return 0U; // get index for modifying correct operand.
1084 }
1085
1086 /// Method to return the pointer operand as a
1087 /// PointerType.
1088 Type *getPointerOperandType() const {
1089 return getPointerOperand()->getType();
1090 }
1091
1092 /// Returns the address space of the pointer operand.
1093 unsigned getPointerAddressSpace() const {
1094 return getPointerOperandType()->getPointerAddressSpace();
1095 }
1096
1097 /// Returns the pointer type returned by the GEP
1098 /// instruction, which may be a vector of pointers.
1099 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1100 ArrayRef<Value *> IdxList) {
1101 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1102 Ptr->getType()->getPointerAddressSpace());
1103 // Vector GEP
1104 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1105 ElementCount EltCount = PtrVTy->getElementCount();
1106 return VectorType::get(PtrTy, EltCount);
1107 }
1108 for (Value *Index : IdxList)
1109 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1110 ElementCount EltCount = IndexVTy->getElementCount();
1111 return VectorType::get(PtrTy, EltCount);
1112 }
1113 // Scalar GEP
1114 return PtrTy;
1115 }
1116
1117 unsigned getNumIndices() const { // Note: always non-negative
1118 return getNumOperands() - 1;
1119 }
1120
1121 bool hasIndices() const {
1122 return getNumOperands() > 1;
1123 }
1124
1125 /// Return true if all of the indices of this GEP are
1126 /// zeros. If so, the result pointer and the first operand have the same
1127 /// value, just potentially different types.
1128 bool hasAllZeroIndices() const;
1129
1130 /// Return true if all of the indices of this GEP are
1131 /// constant integers. If so, the result pointer and the first operand have
1132 /// a constant offset between them.
1133 bool hasAllConstantIndices() const;
1134
1135 /// Set or clear the inbounds flag on this GEP instruction.
1136 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1137 void setIsInBounds(bool b = true);
1138
1139 /// Determine whether the GEP has the inbounds flag.
1140 bool isInBounds() const;
1141
1142 /// Accumulate the constant address offset of this GEP if possible.
1143 ///
1144 /// This routine accepts an APInt into which it will accumulate the constant
1145 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1146 /// all-constant, it returns false and the value of the offset APInt is
1147 /// undefined (it is *not* preserved!). The APInt passed into this routine
1148 /// must be at least as wide as the IntPtr type for the address space of
1149 /// the base GEP pointer.
1150 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1151
1152 // Methods for support type inquiry through isa, cast, and dyn_cast:
1153 static bool classof(const Instruction *I) {
1154 return (I->getOpcode() == Instruction::GetElementPtr);
1155 }
1156 static bool classof(const Value *V) {
1157 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1158 }
1159};
1160
1161template <>
1162struct OperandTraits<GetElementPtrInst> :
1163 public VariadicOperandTraits<GetElementPtrInst, 1> {
1164};
1165
1166GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1167 ArrayRef<Value *> IdxList, unsigned Values,
1168 const Twine &NameStr,
1169 Instruction *InsertBefore)
1170 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1171 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1172 Values, InsertBefore),
1173 SourceElementType(PointeeType),
1174 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1175 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1176, __extension__ __PRETTY_FUNCTION__))
1176 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1176, __extension__ __PRETTY_FUNCTION__))
;
1177 init(Ptr, IdxList, NameStr);
1178}
1179
1180GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1181 ArrayRef<Value *> IdxList, unsigned Values,
1182 const Twine &NameStr,
1183 BasicBlock *InsertAtEnd)
1184 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1185 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1186 Values, InsertAtEnd),
1187 SourceElementType(PointeeType),
1188 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1189 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1190, __extension__ __PRETTY_FUNCTION__))
1190 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1190, __extension__ __PRETTY_FUNCTION__))
;
1191 init(Ptr, IdxList, NameStr);
1192}
1193
// Out-of-line operand accessor definitions for GetElementPtrInst.
// (Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value),
// written out explicitly.)
GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
  return OperandTraits<GetElementPtrInst>::op_begin(this);
}
GetElementPtrInst::const_op_iterator GetElementPtrInst::op_begin() const {
  return OperandTraits<GetElementPtrInst>::op_begin(
      const_cast<GetElementPtrInst *>(this));
}
GetElementPtrInst::op_iterator GetElementPtrInst::op_end() {
  return OperandTraits<GetElementPtrInst>::op_end(this);
}
GetElementPtrInst::const_op_iterator GetElementPtrInst::op_end() const {
  return OperandTraits<GetElementPtrInst>::op_end(
      const_cast<GetElementPtrInst *>(this));
}
Value *GetElementPtrInst::getOperand(unsigned Idx) const {
  assert(Idx < OperandTraits<GetElementPtrInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<GetElementPtrInst>::op_begin(
          const_cast<GetElementPtrInst *>(this))[Idx].get());
}
void GetElementPtrInst::setOperand(unsigned Idx, Value *Val) {
  assert(Idx < OperandTraits<GetElementPtrInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<GetElementPtrInst>::op_begin(this)[Idx] = Val;
}
unsigned GetElementPtrInst::getNumOperands() const {
  return OperandTraits<GetElementPtrInst>::operands(this);
}
template <int Idx_nocapture> Use &GetElementPtrInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &GetElementPtrInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
1195
1196//===----------------------------------------------------------------------===//
1197// ICmpInst Class
1198//===----------------------------------------------------------------------===//
1199
1200/// This instruction compares its operands according to the predicate given
1201/// to the constructor. It only operates on integers or pointers. The operands
1202/// must be identical types.
1203/// Represent an integer comparison operator.
1204class ICmpInst: public CmpInst {
1205 void AssertOK() {
1206 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1207, __extension__ __PRETTY_FUNCTION__))
1207 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1207, __extension__ __PRETTY_FUNCTION__))
;
1208 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1209, __extension__ __PRETTY_FUNCTION__))
1209 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1209, __extension__ __PRETTY_FUNCTION__))
;
1210 // Check that the operands are the right type
1211 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
1212 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
1213 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
;
1214 }
1215
1216protected:
1217 // Note: Instruction needs to be a friend here to call cloneImpl.
1218 friend class Instruction;
1219
1220 /// Clone an identical ICmpInst
1221 ICmpInst *cloneImpl() const;
1222
1223public:
1224 /// Constructor with insert-before-instruction semantics.
1225 ICmpInst(
1226 Instruction *InsertBefore, ///< Where to insert
1227 Predicate pred, ///< The predicate to use for the comparison
1228 Value *LHS, ///< The left-hand-side of the expression
1229 Value *RHS, ///< The right-hand-side of the expression
1230 const Twine &NameStr = "" ///< Name of the instruction
1231 ) : CmpInst(makeCmpResultType(LHS->getType()),
1232 Instruction::ICmp, pred, LHS, RHS, NameStr,
1233 InsertBefore) {
1234#ifndef NDEBUG
1235 AssertOK();
1236#endif
1237 }
1238
1239 /// Constructor with insert-at-end semantics.
1240 ICmpInst(
1241 BasicBlock &InsertAtEnd, ///< Block to insert into.
1242 Predicate pred, ///< The predicate to use for the comparison
1243 Value *LHS, ///< The left-hand-side of the expression
1244 Value *RHS, ///< The right-hand-side of the expression
1245 const Twine &NameStr = "" ///< Name of the instruction
1246 ) : CmpInst(makeCmpResultType(LHS->getType()),
1247 Instruction::ICmp, pred, LHS, RHS, NameStr,
1248 &InsertAtEnd) {
1249#ifndef NDEBUG
1250 AssertOK();
1251#endif
1252 }
1253
1254 /// Constructor with no-insertion semantics
1255 ICmpInst(
1256 Predicate pred, ///< The predicate to use for the comparison
1257 Value *LHS, ///< The left-hand-side of the expression
1258 Value *RHS, ///< The right-hand-side of the expression
1259 const Twine &NameStr = "" ///< Name of the instruction
1260 ) : CmpInst(makeCmpResultType(LHS->getType()),
1261 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1262#ifndef NDEBUG
1263 AssertOK();
1264#endif
1265 }
1266
1267 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1268 /// @returns the predicate that would be the result if the operand were
1269 /// regarded as signed.
1270 /// Return the signed version of the predicate
1271 Predicate getSignedPredicate() const {
1272 return getSignedPredicate(getPredicate());
1273 }
1274
1275 /// This is a static version that you can use without an instruction.
1276 /// Return the signed version of the predicate.
1277 static Predicate getSignedPredicate(Predicate pred);
1278
1279 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1280 /// @returns the predicate that would be the result if the operand were
1281 /// regarded as unsigned.
1282 /// Return the unsigned version of the predicate
1283 Predicate getUnsignedPredicate() const {
1284 return getUnsignedPredicate(getPredicate());
1285 }
1286
1287 /// This is a static version that you can use without an instruction.
1288 /// Return the unsigned version of the predicate.
1289 static Predicate getUnsignedPredicate(Predicate pred);
1290
1291 /// Return true if this predicate is either EQ or NE. This also
1292 /// tests for commutativity.
1293 static bool isEquality(Predicate P) {
1294 return P == ICMP_EQ || P == ICMP_NE;
1295 }
1296
1297 /// Return true if this predicate is either EQ or NE. This also
1298 /// tests for commutativity.
1299 bool isEquality() const {
1300 return isEquality(getPredicate());
1301 }
1302
1303 /// @returns true if the predicate of this ICmpInst is commutative
1304 /// Determine if this relation is commutative.
1305 bool isCommutative() const { return isEquality(); }
1306
1307 /// Return true if the predicate is relational (not EQ or NE).
1308 ///
1309 bool isRelational() const {
1310 return !isEquality();
1311 }
1312
1313 /// Return true if the predicate is relational (not EQ or NE).
1314 ///
1315 static bool isRelational(Predicate P) {
1316 return !isEquality(P);
1317 }
1318
1319 /// Return true if the predicate is SGT or UGT.
1320 ///
1321 static bool isGT(Predicate P) {
1322 return P == ICMP_SGT || P == ICMP_UGT;
1323 }
1324
1325 /// Return true if the predicate is SLT or ULT.
1326 ///
1327 static bool isLT(Predicate P) {
1328 return P == ICMP_SLT || P == ICMP_ULT;
1329 }
1330
1331 /// Return true if the predicate is SGE or UGE.
1332 ///
1333 static bool isGE(Predicate P) {
1334 return P == ICMP_SGE || P == ICMP_UGE;
1335 }
1336
1337 /// Return true if the predicate is SLE or ULE.
1338 ///
1339 static bool isLE(Predicate P) {
1340 return P == ICMP_SLE || P == ICMP_ULE;
1341 }
1342
1343 /// Exchange the two operands to this instruction in such a way that it does
1344 /// not modify the semantics of the instruction. The predicate value may be
1345 /// changed to retain the same result if the predicate is order dependent
1346 /// (e.g. ult).
1347 /// Swap operands and adjust predicate.
1348 void swapOperands() {
1349 setPredicate(getSwappedPredicate());
1350 Op<0>().swap(Op<1>());
1351 }
1352
1353 // Methods for support type inquiry through isa, cast, and dyn_cast:
1354 static bool classof(const Instruction *I) {
1355 return I->getOpcode() == Instruction::ICmp;
1356 }
1357 static bool classof(const Value *V) {
1358 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1359 }
1360};
1361
1362//===----------------------------------------------------------------------===//
1363// FCmpInst Class
1364//===----------------------------------------------------------------------===//
1365
1366/// This instruction compares its operands according to the predicate given
1367/// to the constructor. It only operates on floating point values or packed
1368/// vectors of floating point values. The operands must be identical types.
1369/// Represents a floating point comparison operator.
1370class FCmpInst: public CmpInst {
1371 void AssertOK() {
1372 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1372, __extension__ __PRETTY_FUNCTION__))
;
1373 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1374, __extension__ __PRETTY_FUNCTION__))
1374 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1374, __extension__ __PRETTY_FUNCTION__))
;
1375 // Check that the operands are the right type
1376 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1377, __extension__ __PRETTY_FUNCTION__))
1377 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1377, __extension__ __PRETTY_FUNCTION__))
;
1378 }
1379
1380protected:
1381 // Note: Instruction needs to be a friend here to call cloneImpl.
1382 friend class Instruction;
1383
1384 /// Clone an identical FCmpInst
1385 FCmpInst *cloneImpl() const;
1386
1387public:
1388 /// Constructor with insert-before-instruction semantics.
1389 FCmpInst(
1390 Instruction *InsertBefore, ///< Where to insert
1391 Predicate pred, ///< The predicate to use for the comparison
1392 Value *LHS, ///< The left-hand-side of the expression
1393 Value *RHS, ///< The right-hand-side of the expression
1394 const Twine &NameStr = "" ///< Name of the instruction
1395 ) : CmpInst(makeCmpResultType(LHS->getType()),
1396 Instruction::FCmp, pred, LHS, RHS, NameStr,
1397 InsertBefore) {
1398 AssertOK();
1399 }
1400
1401 /// Constructor with insert-at-end semantics.
1402 FCmpInst(
1403 BasicBlock &InsertAtEnd, ///< Block to insert into.
1404 Predicate pred, ///< The predicate to use for the comparison
1405 Value *LHS, ///< The left-hand-side of the expression
1406 Value *RHS, ///< The right-hand-side of the expression
1407 const Twine &NameStr = "" ///< Name of the instruction
1408 ) : CmpInst(makeCmpResultType(LHS->getType()),
1409 Instruction::FCmp, pred, LHS, RHS, NameStr,
1410 &InsertAtEnd) {
1411 AssertOK();
1412 }
1413
1414 /// Constructor with no-insertion semantics
1415 FCmpInst(
1416 Predicate Pred, ///< The predicate to use for the comparison
1417 Value *LHS, ///< The left-hand-side of the expression
1418 Value *RHS, ///< The right-hand-side of the expression
1419 const Twine &NameStr = "", ///< Name of the instruction
1420 Instruction *FlagsSource = nullptr
1421 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1422 RHS, NameStr, nullptr, FlagsSource) {
1423 AssertOK();
1424 }
1425
1426 /// @returns true if the predicate of this instruction is EQ or NE.
1427 /// Determine if this is an equality predicate.
1428 static bool isEquality(Predicate Pred) {
1429 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1430 Pred == FCMP_UNE;
1431 }
1432
1433 /// @returns true if the predicate of this instruction is EQ or NE.
1434 /// Determine if this is an equality predicate.
1435 bool isEquality() const { return isEquality(getPredicate()); }
1436
1437 /// @returns true if the predicate of this instruction is commutative.
1438 /// Determine if this is a commutative predicate.
1439 bool isCommutative() const {
1440 return isEquality() ||
1441 getPredicate() == FCMP_FALSE ||
1442 getPredicate() == FCMP_TRUE ||
1443 getPredicate() == FCMP_ORD ||
1444 getPredicate() == FCMP_UNO;
1445 }
1446
1447 /// @returns true if the predicate is relational (not EQ or NE).
1448 /// Determine if this a relational predicate.
1449 bool isRelational() const { return !isEquality(); }
1450
1451 /// Exchange the two operands to this instruction in such a way that it does
1452 /// not modify the semantics of the instruction. The predicate value may be
1453 /// changed to retain the same result if the predicate is order dependent
1454 /// (e.g. ult).
1455 /// Swap operands and adjust predicate.
1456 void swapOperands() {
1457 setPredicate(getSwappedPredicate());
1458 Op<0>().swap(Op<1>());
1459 }
1460
1461 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1462 static bool classof(const Instruction *I) {
1463 return I->getOpcode() == Instruction::FCmp;
1464 }
1465 static bool classof(const Value *V) {
1466 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1467 }
1468};
1469
1470//===----------------------------------------------------------------------===//
1471/// This class represents a function call, abstracting a target
1472/// machine's calling convention. This class uses low bit of the SubClassData
1473/// field to indicate whether or not this is a tail call. The rest of the bits
1474/// hold the calling convention of the call.
1475///
1476class CallInst : public CallBase {
1477 CallInst(const CallInst &CI);
1478
1479 /// Construct a CallInst given a range of arguments.
1480 /// Construct a CallInst from a range of arguments
1481 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1482 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1483 Instruction *InsertBefore);
1484
1485 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1486 const Twine &NameStr, Instruction *InsertBefore)
1487 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1488
1489 /// Construct a CallInst given a range of arguments.
1490 /// Construct a CallInst from a range of arguments
1491 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1492 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1493 BasicBlock *InsertAtEnd);
1494
1495 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1496 Instruction *InsertBefore);
1497
1498 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1499 BasicBlock *InsertAtEnd);
1500
1501 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1502 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1503 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1504
1505 /// Compute the number of operands to allocate.
1506 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1507 // We need one operand for the called function, plus the input operand
1508 // counts provided.
1509 return 1 + NumArgs + NumBundleInputs;
1510 }
1511
1512protected:
1513 // Note: Instruction needs to be a friend here to call cloneImpl.
1514 friend class Instruction;
1515
1516 CallInst *cloneImpl() const;
1517
1518public:
1519 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1520 Instruction *InsertBefore = nullptr) {
1521 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1522 }
1523
1524 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1525 const Twine &NameStr,
1526 Instruction *InsertBefore = nullptr) {
1527 return new (ComputeNumOperands(Args.size()))
1528 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1529 }
1530
1531 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1532 ArrayRef<OperandBundleDef> Bundles = None,
1533 const Twine &NameStr = "",
1534 Instruction *InsertBefore = nullptr) {
1535 const int NumOperands =
1536 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1537 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1538
1539 return new (NumOperands, DescriptorBytes)
1540 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1541 }
1542
1543 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1544 BasicBlock *InsertAtEnd) {
1545 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1546 }
1547
1548 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1549 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1550 return new (ComputeNumOperands(Args.size()))
1551 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1552 }
1553
1554 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1555 ArrayRef<OperandBundleDef> Bundles,
1556 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1557 const int NumOperands =
1558 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1559 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1560
1561 return new (NumOperands, DescriptorBytes)
1562 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1563 }
1564
1565 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1566 Instruction *InsertBefore = nullptr) {
1567 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1568 InsertBefore);
1569 }
1570
1571 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1572 ArrayRef<OperandBundleDef> Bundles = None,
1573 const Twine &NameStr = "",
1574 Instruction *InsertBefore = nullptr) {
1575 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1576 NameStr, InsertBefore);
1577 }
1578
1579 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1580 const Twine &NameStr,
1581 Instruction *InsertBefore = nullptr) {
1582 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1583 InsertBefore);
1584 }
1585
1586 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1587 BasicBlock *InsertAtEnd) {
1588 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1589 InsertAtEnd);
1590 }
1591
1592 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1593 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1594 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1595 InsertAtEnd);
1596 }
1597
1598 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1599 ArrayRef<OperandBundleDef> Bundles,
1600 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1601 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1602 NameStr, InsertAtEnd);
1603 }
1604
1605 /// Create a clone of \p CI with a different set of operand bundles and
1606 /// insert it before \p InsertPt.
1607 ///
1608 /// The returned call instruction is identical \p CI in every way except that
1609 /// the operand bundles for the new instruction are set to the operand bundles
1610 /// in \p Bundles.
1611 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1612 Instruction *InsertPt = nullptr);
1613
  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  ///
  /// Four overloads: insertion point may be an Instruction (insert before) or
  /// a BasicBlock (insert at end), each with or without operand bundles.
  /// \p MallocF, when non-null, names the malloc function to call.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  /// Generate the IR for a call to the builtin free function.
  /// Overloads mirror CreateMalloc: insert before an Instruction or at the
  /// end of a BasicBlock, with or without operand bundles.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 BasicBlock *InsertAtEnd);
1651
  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    TCK_None = 0,     // No tail-call marker.
    TCK_Tail = 1,     // 'tail' marker.
    TCK_MustTail = 2, // 'musttail' marker.
    TCK_NoTail = 3,   // 'notail' marker.
    TCK_LAST = TCK_NoTail
  };
1660
  // The tail-call kind is stored in two bits of the instruction's subclass
  // data, packed immediately next to CallBase's calling-convention bits.
  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");
1665
  /// Return the tail-call marker of this call.
  TailCallKind getTailCallKind() const {
    return getSubclassData<TailCallKindField>();
  }
1669
1670 bool isTailCall() const {
1671 TailCallKind Kind = getTailCallKind();
1672 return Kind == TCK_Tail || Kind == TCK_MustTail;
1673 }
1674
  /// Return true if this call is marked 'musttail'.
  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }

  /// Return true if this call is marked 'notail'.
  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }

  /// Set the tail-call marker of this call.
  void setTailCallKind(TailCallKind TCK) {
    setSubclassData<TailCallKindField>(TCK);
  }
1682
1683 void setTailCall(bool IsTc = true) {
1684 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1685 }
1686
  /// Return true if the call can return twice
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  /// Add the ReturnsTwice function attribute to this call.
  void setCanReturnTwice() {
    addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
  }
1692
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  // Value-level overload: only Instructions with the Call opcode qualify.
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
1700
  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);
1703
private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
1711};
1712
// Out-of-line constructor: append the new call to the end of \p InsertAtEnd.
// The use list is co-allocated; operands (args, then bundle inputs, plus one
// extra operand — the callee) occupy the tail of the hung-off operand array,
// hence the op_end() - N arithmetic.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1723
// Out-of-line constructor: insert the new call before \p InsertBefore.
// Same operand layout as the InsertAtEnd overload: args + bundle inputs + 1
// (the callee) placed at the tail of the operand array.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1734
1735//===----------------------------------------------------------------------===//
1736// SelectInst Class
1737//===----------------------------------------------------------------------===//
1738
1739/// This class represents the LLVM 'select' instruction.
1740///
1741class SelectInst : public Instruction {
1742 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1743 Instruction *InsertBefore)
1744 : Instruction(S1->getType(), Instruction::Select,
1745 &Op<0>(), 3, InsertBefore) {
1746 init(C, S1, S2);
1747 setName(NameStr);
1748 }
1749
1750 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1751 BasicBlock *InsertAtEnd)
1752 : Instruction(S1->getType(), Instruction::Select,
1753 &Op<0>(), 3, InsertAtEnd) {
1754 init(C, S1, S2);
1755 setName(NameStr);
1756 }
1757
1758 void init(Value *C, Value *S1, Value *S2) {
1759 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1759, __extension__ __PRETTY_FUNCTION__))
;
1760 Op<0>() = C;
1761 Op<1>() = S1;
1762 Op<2>() = S2;
1763 }
1764
1765protected:
1766 // Note: Instruction needs to be a friend here to call cloneImpl.
1767 friend class Instruction;
1768
1769 SelectInst *cloneImpl() const;
1770
1771public:
1772 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1773 const Twine &NameStr = "",
1774 Instruction *InsertBefore = nullptr,
1775 Instruction *MDFrom = nullptr) {
1776 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1777 if (MDFrom)
1778 Sel->copyMetadata(*MDFrom);
1779 return Sel;
1780 }
1781
1782 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1783 const Twine &NameStr,
1784 BasicBlock *InsertAtEnd) {
1785 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1786 }
1787
1788 const Value *getCondition() const { return Op<0>(); }
1789 const Value *getTrueValue() const { return Op<1>(); }
1790 const Value *getFalseValue() const { return Op<2>(); }
1791 Value *getCondition() { return Op<0>(); }
1792 Value *getTrueValue() { return Op<1>(); }
1793 Value *getFalseValue() { return Op<2>(); }
1794
1795 void setCondition(Value *V) { Op<0>() = V; }
1796 void setTrueValue(Value *V) { Op<1>() = V; }
1797 void setFalseValue(Value *V) { Op<2>() = V; }
1798
1799 /// Swap the true and false values of the select instruction.
1800 /// This doesn't swap prof metadata.
1801 void swapValues() { Op<1>().swap(Op<2>()); }
1802
1803 /// Return a string if the specified operands are invalid
1804 /// for a select operation, otherwise return null.
1805 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1806
1807 /// Transparently provide more efficient getOperand methods.
1808 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1809
1810 OtherOps getOpcode() const {
1811 return static_cast<OtherOps>(Instruction::getOpcode());
1812 }
1813
1814 // Methods for support type inquiry through isa, cast, and dyn_cast:
1815 static bool classof(const Instruction *I) {
1816 return I->getOpcode() == Instruction::Select;
1817 }
1818 static bool classof(const Value *V) {
1819 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1820 }
1821};
1822
// SelectInst always has exactly three operands.
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};
1826
1827DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SelectInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1827, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SelectInst>::op_begin(const_cast
<SelectInst*>(this))[i_nocapture].get()); } void SelectInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SelectInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1827, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SelectInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SelectInst::getNumOperands() const { return OperandTraits
<SelectInst>::operands(this); } template <int Idx_nocapture
> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1828
1829//===----------------------------------------------------------------------===//
1830// VAArgInst Class
1831//===----------------------------------------------------------------------===//
1832
/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
      : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
      : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  /// Return the va_list operand (the instruction's only operand).
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1868
1869//===----------------------------------------------------------------------===//
1870// ExtractElementInst Class
1871//===----------------------------------------------------------------------===//
1872
1873/// This instruction extracts a single (scalar)
1874/// element from a VectorType value
1875///
1876class ExtractElementInst : public Instruction {
1877 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1878 Instruction *InsertBefore = nullptr);
1879 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1880 BasicBlock *InsertAtEnd);
1881
1882protected:
1883 // Note: Instruction needs to be a friend here to call cloneImpl.
1884 friend class Instruction;
1885
1886 ExtractElementInst *cloneImpl() const;
1887
1888public:
1889 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1890 const Twine &NameStr = "",
1891 Instruction *InsertBefore = nullptr) {
1892 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1893 }
1894
1895 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1896 const Twine &NameStr,
1897 BasicBlock *InsertAtEnd) {
1898 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1899 }
1900
1901 /// Return true if an extractelement instruction can be
1902 /// formed with the specified operands.
1903 static bool isValidOperands(const Value *Vec, const Value *Idx);
1904
1905 Value *getVectorOperand() { return Op<0>(); }
1906 Value *getIndexOperand() { return Op<1>(); }
1907 const Value *getVectorOperand() const { return Op<0>(); }
1908 const Value *getIndexOperand() const { return Op<1>(); }
1909
1910 VectorType *getVectorOperandType() const {
1911 return cast<VectorType>(getVectorOperand()->getType());
1912 }
1913
1914 /// Transparently provide more efficient getOperand methods.
1915 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1916
1917 // Methods for support type inquiry through isa, cast, and dyn_cast:
1918 static bool classof(const Instruction *I) {
1919 return I->getOpcode() == Instruction::ExtractElement;
1920 }
1921 static bool classof(const Value *V) {
1922 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1923 }
1924};
1925
// ExtractElementInst always has exactly two operands.
template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};
1930
1931DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast <bool> (i_nocapture < OperandTraits<
ExtractElementInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1931, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ExtractElementInst>::op_begin
(const_cast<ExtractElementInst*>(this))[i_nocapture].get
()); } void ExtractElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ExtractElementInst>::operands(this)
&& "setOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1931, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ExtractElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ExtractElementInst::getNumOperands() const { return
OperandTraits<ExtractElementInst>::operands(this); } template
<int Idx_nocapture> Use &ExtractElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ExtractElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1932
1933//===----------------------------------------------------------------------===//
1934// InsertElementInst Class
1935//===----------------------------------------------------------------------===//
1936
1937/// This instruction inserts a single (scalar)
1938/// element into a VectorType value
1939///
1940class InsertElementInst : public Instruction {
1941 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1942 const Twine &NameStr = "",
1943 Instruction *InsertBefore = nullptr);
1944 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1945 BasicBlock *InsertAtEnd);
1946
1947protected:
1948 // Note: Instruction needs to be a friend here to call cloneImpl.
1949 friend class Instruction;
1950
1951 InsertElementInst *cloneImpl() const;
1952
1953public:
1954 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1955 const Twine &NameStr = "",
1956 Instruction *InsertBefore = nullptr) {
1957 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1958 }
1959
1960 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1961 const Twine &NameStr,
1962 BasicBlock *InsertAtEnd) {
1963 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1964 }
1965
1966 /// Return true if an insertelement instruction can be
1967 /// formed with the specified operands.
1968 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1969 const Value *Idx);
1970
1971 /// Overload to return most specific vector type.
1972 ///
1973 VectorType *getType() const {
1974 return cast<VectorType>(Instruction::getType());
1975 }
1976
1977 /// Transparently provide more efficient getOperand methods.
1978 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1979
1980 // Methods for support type inquiry through isa, cast, and dyn_cast:
1981 static bool classof(const Instruction *I) {
1982 return I->getOpcode() == Instruction::InsertElement;
1983 }
1984 static bool classof(const Value *V) {
1985 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1986 }
1987};
1988
// InsertElementInst always has exactly three operands.
template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};
1993
1994DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1994, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertElementInst>::op_begin
(const_cast<InsertElementInst*>(this))[i_nocapture].get
()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1994, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertElementInst::getNumOperands() const { return
OperandTraits<InsertElementInst>::operands(this); } template
<int Idx_nocapture> Use &InsertElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &InsertElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1995
1996//===----------------------------------------------------------------------===//
1997// ShuffleVectorInst Class
1998//===----------------------------------------------------------------------===//
1999
/// Sentinel shuffle-mask value marking a result element as undefined.
constexpr int UndefMaskElem = -1;
2001
/// This instruction constructs a fixed permutation of two
/// input vectors.
///
/// For each element of the result vector, the shuffle mask selects an element
/// from one of the input vectors to copy to the result. Non-negative elements
/// in the mask represent an index into the concatenated pair of input vectors.
/// UndefMaskElem (-1) specifies that the result element is undefined.
///
/// For scalable vectors, all the elements of the mask must be 0 or -1. This
/// requirement may be relaxed in the future.
class ShuffleVectorInst : public Instruction {
  // The mask as integers; kept out of the operand list.
  SmallVector<int, 4> ShuffleMask;
  // Constant form of the mask, retained for bitcode emission.
  Constant *ShuffleMaskForBitcode;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ShuffleVectorInst *cloneImpl() const;

public:
  // Constructors accept the mask either as a Constant or as ArrayRef<int>,
  // inserting before an Instruction or at the end of a BasicBlock.
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr = "",
                    Instruction *InsertBefor = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);
  ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                    const Twine &NameStr = "",
                    Instruction *InsertBefor = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);
2033
  // A shufflevector has exactly two Use operands (the two input vectors).
  void *operator new(size_t s) { return User::operator new(s, 2); }

  /// Swap the operands and adjust the mask to preserve the semantics
  /// of the instruction.
  void commute();

  /// Return true if a shufflevector instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *V1, const Value *V2,
                              const Value *Mask);
  static bool isValidOperands(const Value *V1, const Value *V2,
                              ArrayRef<int> Mask);
2046
  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }
2052
2053 /// Transparently provide more efficient getOperand methods.
2054 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2055
  /// Return the shuffle mask value of this instruction for the given element
  /// index. Return UndefMaskElem if the element is undef.
  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2059
  /// Convert the input shuffle mask operand to a vector of integers. Undefined
  /// elements of the mask are returned as UndefMaskElem.
  static void getShuffleMask(const Constant *Mask,
                             SmallVectorImpl<int> &Result);
2064
  /// Return the mask for this instruction as a vector of integers. Undefined
  /// elements of the mask are returned as UndefMaskElem.
  void getShuffleMask(SmallVectorImpl<int> &Result) const {
    Result.assign(ShuffleMask.begin(), ShuffleMask.end());
  }
2070
  /// Return the mask for this instruction, for use in bitcode.
  ///
  /// TODO: This is temporary until we decide a new bitcode encoding for
  /// shufflevector.
  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }

  /// Build a Constant mask from an integer mask, for bitcode emission.
  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                Type *ResultTy);

  /// Replace this instruction's mask.
  void setShuffleMask(ArrayRef<int> Mask);

  /// Return the integer mask without copying.
  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2083
2084 /// Return true if this shuffle returns a vector with a different number of
2085 /// elements than its source vectors.
2086 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2087 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2088 bool changesLength() const {
2089 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2090 ->getElementCount()
2091 .getKnownMinValue();
2092 unsigned NumMaskElts = ShuffleMask.size();
2093 return NumSourceElts != NumMaskElts;
2094 }
2095
2096 /// Return true if this shuffle returns a vector with a greater number of
2097 /// elements than its source vectors.
2098 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2099 bool increasesLength() const {
2100 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2101 ->getElementCount()
2102 .getKnownMinValue();
2103 unsigned NumMaskElts = ShuffleMask.size();
2104 return NumSourceElts < NumMaskElts;
2105 }
2106
2107 /// Return true if this shuffle mask chooses elements from exactly one source
2108 /// vector.
2109 /// Example: <7,5,undef,7>
2110 /// This assumes that vector operands are the same length as the mask.
2111 static bool isSingleSourceMask(ArrayRef<int> Mask);
2112 static bool isSingleSourceMask(const Constant *Mask) {
2113 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2113, __extension__ __PRETTY_FUNCTION__))
;
2114 SmallVector<int, 16> MaskAsInts;
2115 getShuffleMask(Mask, MaskAsInts);
2116 return isSingleSourceMask(MaskAsInts);
2117 }
2118
2119 /// Return true if this shuffle chooses elements from exactly one source
2120 /// vector without changing the length of that vector.
2121 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2122 /// TODO: Optionally allow length-changing shuffles.
2123 bool isSingleSource() const {
2124 return !changesLength() && isSingleSourceMask(ShuffleMask);
2125 }
2126
2127 /// Return true if this shuffle mask chooses elements from exactly one source
2128 /// vector without lane crossings. A shuffle using this mask is not
2129 /// necessarily a no-op because it may change the number of elements from its
2130 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2131 /// Example: <undef,undef,2,3>
2132 static bool isIdentityMask(ArrayRef<int> Mask);
2133 static bool isIdentityMask(const Constant *Mask) {
2134 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2134, __extension__ __PRETTY_FUNCTION__))
;
2135 SmallVector<int, 16> MaskAsInts;
2136 getShuffleMask(Mask, MaskAsInts);
2137 return isIdentityMask(MaskAsInts);
2138 }
2139
2140 /// Return true if this shuffle chooses elements from exactly one source
2141 /// vector without lane crossings and does not change the number of elements
2142 /// from its input vectors.
2143 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2144 bool isIdentity() const {
2145 return !changesLength() && isIdentityMask(ShuffleMask);
2146 }
2147
  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its 2 source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;
2160
2161 /// Return true if this shuffle mask chooses elements from its source vectors
2162 /// without lane crossings. A shuffle using this mask would be
2163 /// equivalent to a vector select with a constant condition operand.
2164 /// Example: <4,1,6,undef>
2165 /// This returns false if the mask does not choose from both input vectors.
2166 /// In that case, the shuffle is better classified as an identity shuffle.
2167 /// This assumes that vector operands are the same length as the mask
2168 /// (a length-changing shuffle can never be equivalent to a vector select).
2169 static bool isSelectMask(ArrayRef<int> Mask);
2170 static bool isSelectMask(const Constant *Mask) {
2171 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2171, __extension__ __PRETTY_FUNCTION__))
;
2172 SmallVector<int, 16> MaskAsInts;
2173 getShuffleMask(Mask, MaskAsInts);
2174 return isSelectMask(MaskAsInts);
2175 }
2176
2177 /// Return true if this shuffle chooses elements from its source vectors
2178 /// without lane crossings and all operands have the same number of elements.
2179 /// In other words, this shuffle is equivalent to a vector select with a
2180 /// constant condition operand.
2181 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2182 /// This returns false if the mask does not choose from both input vectors.
2183 /// In that case, the shuffle is better classified as an identity shuffle.
2184 /// TODO: Optionally allow length-changing shuffles.
2185 bool isSelect() const {
2186 return !changesLength() && isSelectMask(ShuffleMask);
2187 }
2188
2189 /// Return true if this shuffle mask swaps the order of elements from exactly
2190 /// one source vector.
2191 /// Example: <7,6,undef,4>
2192 /// This assumes that vector operands are the same length as the mask.
2193 static bool isReverseMask(ArrayRef<int> Mask);
2194 static bool isReverseMask(const Constant *Mask) {
2195 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2195, __extension__ __PRETTY_FUNCTION__))
;
2196 SmallVector<int, 16> MaskAsInts;
2197 getShuffleMask(Mask, MaskAsInts);
2198 return isReverseMask(MaskAsInts);
2199 }
2200
2201 /// Return true if this shuffle swaps the order of elements from exactly
2202 /// one source vector.
2203 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2204 /// TODO: Optionally allow length-changing shuffles.
2205 bool isReverse() const {
2206 return !changesLength() && isReverseMask(ShuffleMask);
2207 }
2208
2209 /// Return true if this shuffle mask chooses all elements with the same value
2210 /// as the first element of exactly one source vector.
2211 /// Example: <4,undef,undef,4>
2212 /// This assumes that vector operands are the same length as the mask.
2213 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2214 static bool isZeroEltSplatMask(const Constant *Mask) {
2215 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2215, __extension__ __PRETTY_FUNCTION__))
;
2216 SmallVector<int, 16> MaskAsInts;
2217 getShuffleMask(Mask, MaskAsInts);
2218 return isZeroEltSplatMask(MaskAsInts);
2219 }
2220
2221 /// Return true if all elements of this shuffle are the same value as the
2222 /// first element of exactly one source vector without changing the length
2223 /// of that vector.
2224 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2225 /// TODO: Optionally allow length-changing shuffles.
2226 /// TODO: Optionally allow splats from other elements.
2227 bool isZeroEltSplat() const {
2228 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2229 }
2230
2231 /// Return true if this shuffle mask is a transpose mask.
2232 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2233 /// even- or odd-numbered vector elements from two n-dimensional source
2234 /// vectors and write each result into consecutive elements of an
2235 /// n-dimensional destination vector. Two shuffles are necessary to complete
2236 /// the transpose, one for the even elements and another for the odd elements.
2237 /// This description closely follows how the TRN1 and TRN2 AArch64
2238 /// instructions operate.
2239 ///
2240 /// For example, a simple 2x2 matrix can be transposed with:
2241 ///
2242 /// ; Original matrix
2243 /// m0 = < a, b >
2244 /// m1 = < c, d >
2245 ///
2246 /// ; Transposed matrix
2247 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2248 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2249 ///
2250 /// For matrices having greater than n columns, the resulting nx2 transposed
2251 /// matrix is stored in two result vectors such that one vector contains
2252 /// interleaved elements from all the even-numbered rows and the other vector
2253 /// contains interleaved elements from all the odd-numbered rows. For example,
2254 /// a 2x4 matrix can be transposed with:
2255 ///
2256 /// ; Original matrix
2257 /// m0 = < a, b, c, d >
2258 /// m1 = < e, f, g, h >
2259 ///
2260 /// ; Transposed matrix
2261 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2262 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2263 static bool isTransposeMask(ArrayRef<int> Mask);
2264 static bool isTransposeMask(const Constant *Mask) {
2265 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2265, __extension__ __PRETTY_FUNCTION__))
;
2266 SmallVector<int, 16> MaskAsInts;
2267 getShuffleMask(Mask, MaskAsInts);
2268 return isTransposeMask(MaskAsInts);
2269 }
2270
2271 /// Return true if this shuffle transposes the elements of its inputs without
2272 /// changing the length of the vectors. This operation may also be known as a
2273 /// merge or interleave. See the description for isTransposeMask() for the
2274 /// exact specification.
2275 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2276 bool isTranspose() const {
2277 return !changesLength() && isTransposeMask(ShuffleMask);
2278 }
2279
2280 /// Return true if this shuffle mask is an extract subvector mask.
2281 /// A valid extract subvector mask returns a smaller vector from a single
2282 /// source operand. The base extraction index is returned as well.
2283 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2284 int &Index);
2285 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2286 int &Index) {
2287 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2287, __extension__ __PRETTY_FUNCTION__))
;
2288 // Not possible to express a shuffle mask for a scalable vector for this
2289 // case.
2290 if (isa<ScalableVectorType>(Mask->getType()))
2291 return false;
2292 SmallVector<int, 16> MaskAsInts;
2293 getShuffleMask(Mask, MaskAsInts);
2294 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2295 }
2296
2297 /// Return true if this shuffle mask is an extract subvector mask.
2298 bool isExtractSubvectorMask(int &Index) const {
2299 // Not possible to express a shuffle mask for a scalable vector for this
2300 // case.
2301 if (isa<ScalableVectorType>(getType()))
2302 return false;
2303
2304 int NumSrcElts =
2305 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2306 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2307 }
2308
2309 /// Change values in a shuffle permute mask assuming the two vector operands
2310 /// of length InVecNumElts have swapped position.
2311 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2312 unsigned InVecNumElts) {
2313 for (int &Idx : Mask) {
2314 if (Idx == -1)
2315 continue;
2316 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2317 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2318, __extension__ __PRETTY_FUNCTION__))
2318 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2318, __extension__ __PRETTY_FUNCTION__))
;
2319 }
2320 }
2321
2322 // Methods for support type inquiry through isa, cast, and dyn_cast:
2323 static bool classof(const Instruction *I) {
2324 return I->getOpcode() == Instruction::ShuffleVector;
2325 }
2326 static bool classof(const Value *V) {
2327 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2328 }
2329};
2330
// ShuffleVectorInst stores exactly two operands (the two source vectors);
// the shuffle mask itself is kept out-of-line in the ShuffleMask member,
// not as an operand.
2331template <>
2332struct OperandTraits<ShuffleVectorInst>
2333 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2334
// Macro-generated operand accessors for ShuffleVectorInst (op_begin/op_end,
// range-checked getOperand/setOperand, getNumOperands, and the Op<N>()
// helpers). The text after the macro invocation is its preprocessor
// expansion as captured by the analyzer listing.
2335DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2335, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ShuffleVectorInst>::op_begin
(const_cast<ShuffleVectorInst*>(this))[i_nocapture].get
()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2335, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ShuffleVectorInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ShuffleVectorInst::getNumOperands() const { return
OperandTraits<ShuffleVectorInst>::operands(this); } template
<int Idx_nocapture> Use &ShuffleVectorInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ShuffleVectorInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2336
2337//===----------------------------------------------------------------------===//
2338// ExtractValueInst Class
2339//===----------------------------------------------------------------------===//
2340
2341/// This instruction extracts a struct member or array
2342/// element value from an aggregate value.
2343///
2344class ExtractValueInst : public UnaryInstruction {
 // The chain of constant indices identifying the extracted member; stored
 // out-of-line (not as operands) — the only operand is the aggregate.
2345 SmallVector<unsigned, 4> Indices;
2346
 // Copy constructor is private; cloning goes through cloneImpl() (friend
 // Instruction below).
2347 ExtractValueInst(const ExtractValueInst &EVI);
2348
2349 /// Constructors - Create a extractvalue instruction with a base aggregate
2350 /// value and a list of indices. The first ctor can optionally insert before
2351 /// an existing instruction, the second appends the new instruction to the
2352 /// specified BasicBlock.
2353 inline ExtractValueInst(Value *Agg,
2354 ArrayRef<unsigned> Idxs,
2355 const Twine &NameStr,
2356 Instruction *InsertBefore);
2357 inline ExtractValueInst(Value *Agg,
2358 ArrayRef<unsigned> Idxs,
2359 const Twine &NameStr, BasicBlock *InsertAtEnd);
2360
 // Shared ctor tail: records Idxs into Indices and sets the name (defined
 // out of line).
2361 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2362
2363protected:
2364 // Note: Instruction needs to be a friend here to call cloneImpl.
2365 friend class Instruction;
2366
2367 ExtractValueInst *cloneImpl() const;
2368
2369public:
 // Factory methods — the only public way to construct an ExtractValueInst.
2370 static ExtractValueInst *Create(Value *Agg,
2371 ArrayRef<unsigned> Idxs,
2372 const Twine &NameStr = "",
2373 Instruction *InsertBefore = nullptr) {
2374 return new
2375 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2376 }
2377
2378 static ExtractValueInst *Create(Value *Agg,
2379 ArrayRef<unsigned> Idxs,
2380 const Twine &NameStr,
2381 BasicBlock *InsertAtEnd) {
2382 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2383 }
2384
2385 /// Returns the type of the element that would be extracted
2386 /// with an extractvalue instruction with the specified parameters.
2387 ///
2388 /// Null is returned if the indices are invalid for the specified type.
2389 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2390
 // Iteration over the constant index list.
2391 using idx_iterator = const unsigned*;
2392
2393 inline idx_iterator idx_begin() const { return Indices.begin(); }
2394 inline idx_iterator idx_end() const { return Indices.end(); }
2395 inline iterator_range<idx_iterator> indices() const {
2396 return make_range(idx_begin(), idx_end());
2397 }
2398
 // Operand 0 is the aggregate being indexed into.
2399 Value *getAggregateOperand() {
2400 return getOperand(0);
2401 }
2402 const Value *getAggregateOperand() const {
2403 return getOperand(0);
2404 }
2405 static unsigned getAggregateOperandIndex() {
2406 return 0U; // get index for modifying correct operand
2407 }
2408
2409 ArrayRef<unsigned> getIndices() const {
2410 return Indices;
2411 }
2412
2413 unsigned getNumIndices() const {
2414 return (unsigned)Indices.size();
2415 }
2416
 // Unconditionally true: every extractvalue instruction carries an index
 // list (mirrors the generic Instruction query interface).
2417 bool hasIndices() const {
2418 return true;
2419 }
2420
2421 // Methods for support type inquiry through isa, cast, and dyn_cast:
2422 static bool classof(const Instruction *I) {
2423 return I->getOpcode() == Instruction::ExtractValue;
2424 }
2425 static bool classof(const Value *V) {
2426 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2427 }
2428};
2429
// Out-of-line body of the inline ctor declared in the class: the result type
// is computed from the aggregate type and the index list; checkGEPType
// presumably guards against a null (invalid-indices) result — confirm at its
// definition earlier in this header. init() then records the indices/name.
2430ExtractValueInst::ExtractValueInst(Value *Agg,
2431 ArrayRef<unsigned> Idxs,
2432 const Twine &NameStr,
2433 Instruction *InsertBefore)
2434 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2435 ExtractValue, Agg, InsertBefore) {
2436 init(Idxs, NameStr);
2437}
2438
// Same as the InsertBefore variant above, but appends the new instruction to
// the given basic block.
2439ExtractValueInst::ExtractValueInst(Value *Agg,
2440 ArrayRef<unsigned> Idxs,
2441 const Twine &NameStr,
2442 BasicBlock *InsertAtEnd)
2443 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2444 ExtractValue, Agg, InsertAtEnd) {
2445 init(Idxs, NameStr);
2446}
2447
2448//===----------------------------------------------------------------------===//
2449// InsertValueInst Class
2450//===----------------------------------------------------------------------===//
2451
2452/// This instruction inserts a struct field of array element
2453/// value into an aggregate value.
2454///
2455class InsertValueInst : public Instruction {
 // The chain of constant indices identifying the member being replaced;
 // stored out-of-line. Operands are the aggregate (0) and new value (1).
2456 SmallVector<unsigned, 4> Indices;
2457
 // Copy constructor is private; cloning goes through cloneImpl().
2458 InsertValueInst(const InsertValueInst &IVI);
2459
2460 /// Constructors - Create a insertvalue instruction with a base aggregate
2461 /// value, a value to insert, and a list of indices. The first ctor can
2462 /// optionally insert before an existing instruction, the second appends
2463 /// the new instruction to the specified BasicBlock.
2464 inline InsertValueInst(Value *Agg, Value *Val,
2465 ArrayRef<unsigned> Idxs,
2466 const Twine &NameStr,
2467 Instruction *InsertBefore);
2468 inline InsertValueInst(Value *Agg, Value *Val,
2469 ArrayRef<unsigned> Idxs,
2470 const Twine &NameStr, BasicBlock *InsertAtEnd);
2471
2472 /// Constructors - These two constructors are convenience methods because one
2473 /// and two index insertvalue instructions are so common.
2474 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2475 const Twine &NameStr = "",
2476 Instruction *InsertBefore = nullptr);
2477 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2478 BasicBlock *InsertAtEnd);
2479
 // Shared ctor tail (defined out of line).
2480 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2481 const Twine &NameStr);
2482
2483protected:
2484 // Note: Instruction needs to be a friend here to call cloneImpl.
2485 friend class Instruction;
2486
2487 InsertValueInst *cloneImpl() const;
2488
2489public:
2490 // allocate space for exactly two operands
2491 void *operator new(size_t s) {
2492 return User::operator new(s, 2);
2493 }
2494
 // Factory methods — the only public way to construct an InsertValueInst.
2495 static InsertValueInst *Create(Value *Agg, Value *Val,
2496 ArrayRef<unsigned> Idxs,
2497 const Twine &NameStr = "",
2498 Instruction *InsertBefore = nullptr) {
2499 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2500 }
2501
2502 static InsertValueInst *Create(Value *Agg, Value *Val,
2503 ArrayRef<unsigned> Idxs,
2504 const Twine &NameStr,
2505 BasicBlock *InsertAtEnd) {
2506 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2507 }
2508
2509 /// Transparently provide more efficient getOperand methods.
 // The text after the macro invocation is its preprocessor expansion as
 // captured by the analyzer listing.
2510 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2511
 // Iteration over the constant index list.
2512 using idx_iterator = const unsigned*;
2513
2514 inline idx_iterator idx_begin() const { return Indices.begin(); }
2515 inline idx_iterator idx_end() const { return Indices.end(); }
2516 inline iterator_range<idx_iterator> indices() const {
2517 return make_range(idx_begin(), idx_end());
2518 }
2519
 // Operand 0 is the aggregate being updated.
2520 Value *getAggregateOperand() {
2521 return getOperand(0);
2522 }
2523 const Value *getAggregateOperand() const {
2524 return getOperand(0);
2525 }
2526 static unsigned getAggregateOperandIndex() {
2527 return 0U; // get index for modifying correct operand
2528 }
2529
 // Operand 1 is the value being inserted at the indexed position.
2530 Value *getInsertedValueOperand() {
2531 return getOperand(1);
2532 }
2533 const Value *getInsertedValueOperand() const {
2534 return getOperand(1);
2535 }
2536 static unsigned getInsertedValueOperandIndex() {
2537 return 1U; // get index for modifying correct operand
2538 }
2539
2540 ArrayRef<unsigned> getIndices() const {
2541 return Indices;
2542 }
2543
2544 unsigned getNumIndices() const {
2545 return (unsigned)Indices.size();
2546 }
2547
 // Unconditionally true: every insertvalue instruction carries an index
 // list (mirrors the generic Instruction query interface).
2548 bool hasIndices() const {
2549 return true;
2550 }
2551
2552 // Methods for support type inquiry through isa, cast, and dyn_cast:
2553 static bool classof(const Instruction *I) {
2554 return I->getOpcode() == Instruction::InsertValue;
2555 }
2556 static bool classof(const Value *V) {
2557 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2558 }
2559};
2560
// InsertValueInst has exactly two operands: the aggregate (operand 0) and
// the value to insert (operand 1); the index list is not an operand.
2561template <>
2562struct OperandTraits<InsertValueInst> :
2563 public FixedNumOperandTraits<InsertValueInst, 2> {
2564};
2565
// Out-of-line body of the inline ctor declared in the class. An insertvalue
// produces a value of the same type as its aggregate operand; exactly two
// operand slots are allocated (see operator new / OperandTraits above).
2566InsertValueInst::InsertValueInst(Value *Agg,
2567 Value *Val,
2568 ArrayRef<unsigned> Idxs,
2569 const Twine &NameStr,
2570 Instruction *InsertBefore)
2571 : Instruction(Agg->getType(), InsertValue,
2572 OperandTraits<InsertValueInst>::op_begin(this),
2573 2, InsertBefore) {
2574 init(Agg, Val, Idxs, NameStr);
2575}
2576
// Same as the InsertBefore variant above, but appends the new instruction to
// the given basic block.
2577InsertValueInst::InsertValueInst(Value *Agg,
2578 Value *Val,
2579 ArrayRef<unsigned> Idxs,
2580 const Twine &NameStr,
2581 BasicBlock *InsertAtEnd)
2582 : Instruction(Agg->getType(), InsertValue,
2583 OperandTraits<InsertValueInst>::op_begin(this),
2584 2, InsertAtEnd) {
2585 init(Agg, Val, Idxs, NameStr);
2586}
2587
// Macro-generated operand accessors for InsertValueInst (op_begin/op_end,
// range-checked getOperand/setOperand, getNumOperands, and the Op<N>()
// helpers). The text after the macro invocation is its preprocessor
// expansion as captured by the analyzer listing.
2588DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InsertValueInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2588, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertValueInst>::op_begin
(const_cast<InsertValueInst*>(this))[i_nocapture].get()
); } void InsertValueInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<InsertValueInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2588, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertValueInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertValueInst::getNumOperands() const { return
OperandTraits<InsertValueInst>::operands(this); } template
<int Idx_nocapture> Use &InsertValueInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &InsertValueInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
2589
2590//===----------------------------------------------------------------------===//
2591// PHINode Class
2592//===----------------------------------------------------------------------===//
2593
2594// PHINode - The PHINode class is used to represent the magical mystical PHI
2595// node, that can not exist in nature, but can be synthesized in a computer
2596// scientist's overactive imagination.
2597//
2598class PHINode : public Instruction {
2599 /// The number of operands actually allocated. NumOperands is
2600 /// the number actually in use.
2601 unsigned ReservedSpace;
2602
2603 PHINode(const PHINode &PN);
2604
2605 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2606 const Twine &NameStr = "",
2607 Instruction *InsertBefore = nullptr)
2608 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2609 ReservedSpace(NumReservedValues) {
2610 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2610, __extension__ __PRETTY_FUNCTION__))
;
2611 setName(NameStr);
2612 allocHungoffUses(ReservedSpace);
2613 }
2614
2615 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2616 BasicBlock *InsertAtEnd)
2617 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2618 ReservedSpace(NumReservedValues) {
2619 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2619, __extension__ __PRETTY_FUNCTION__))
;
2620 setName(NameStr);
2621 allocHungoffUses(ReservedSpace);
2622 }
2623
2624protected:
2625 // Note: Instruction needs to be a friend here to call cloneImpl.
2626 friend class Instruction;
2627
2628 PHINode *cloneImpl() const;
2629
2630 // allocHungoffUses - this is more complicated than the generic
2631 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2632 // values and pointers to the incoming blocks, all in one allocation.
2633 void allocHungoffUses(unsigned N) {
2634 User::allocHungoffUses(N, /* IsPhi */ true);
2635 }
2636
2637public:
2638 /// Constructors - NumReservedValues is a hint for the number of incoming
2639 /// edges that this phi node will have (use 0 if you really have no idea).
2640 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2641 const Twine &NameStr = "",
2642 Instruction *InsertBefore = nullptr) {
2643 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2644 }
2645
2646 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2647 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2648 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2649 }
2650
2651 /// Provide fast operand accessors
2652 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2653
2654 // Block iterator interface. This provides access to the list of incoming
2655 // basic blocks, which parallels the list of incoming values.
2656
2657 using block_iterator = BasicBlock **;
2658 using const_block_iterator = BasicBlock * const *;
2659
2660 block_iterator block_begin() {
2661 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2662 }
2663
2664 const_block_iterator block_begin() const {
2665 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2666 }
2667
2668 block_iterator block_end() {
2669 return block_begin() + getNumOperands();
2670 }
2671
2672 const_block_iterator block_end() const {
2673 return block_begin() + getNumOperands();
2674 }
2675
2676 iterator_range<block_iterator> blocks() {
2677 return make_range(block_begin(), block_end());
2678 }
2679
2680 iterator_range<const_block_iterator> blocks() const {
2681 return make_range(block_begin(), block_end());
2682 }
2683
2684 op_range incoming_values() { return operands(); }
2685
2686 const_op_range incoming_values() const { return operands(); }
2687
2688 /// Return the number of incoming edges
2689 ///
2690 unsigned getNumIncomingValues() const { return getNumOperands(); }
2691
2692 /// Return incoming value number x
2693 ///
2694 Value *getIncomingValue(unsigned i) const {
2695 return getOperand(i);
2696 }
2697 void setIncomingValue(unsigned i, Value *V) {
2698 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2698, __extension__ __PRETTY_FUNCTION__))
;
2699 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2700, __extension__ __PRETTY_FUNCTION__))
2700 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2700, __extension__ __PRETTY_FUNCTION__))
;
2701 setOperand(i, V);
2702 }
2703
2704 static unsigned getOperandNumForIncomingValue(unsigned i) {
2705 return i;
2706 }
2707
2708 static unsigned getIncomingValueNumForOperand(unsigned i) {
2709 return i;
2710 }
2711
2712 /// Return incoming basic block number @p i.
2713 ///
2714 BasicBlock *getIncomingBlock(unsigned i) const {
2715 return block_begin()[i];
2716 }
2717
2718 /// Return incoming basic block corresponding
2719 /// to an operand of the PHI.
2720 ///
2721 BasicBlock *getIncomingBlock(const Use &U) const {
2722 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2722, __extension__ __PRETTY_FUNCTION__))
;
2723 return getIncomingBlock(unsigned(&U - op_begin()));
2724 }
2725
2726 /// Return incoming basic block corresponding
2727 /// to value use iterator.
2728 ///
2729 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2730 return getIncomingBlock(I.getUse());
2731 }
2732
2733 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2734 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2734, __extension__ __PRETTY_FUNCTION__))
;
2735 block_begin()[i] = BB;
2736 }
2737
2738 /// Replace every incoming basic block \p Old to basic block \p New.
2739 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2740 assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2740, __extension__ __PRETTY_FUNCTION__))
;
2741 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2742 if (getIncomingBlock(Op) == Old)
2743 setIncomingBlock(Op, New);
2744 }
2745
2746 /// Add an incoming value to the end of the PHI list
2747 ///
2748 void addIncoming(Value *V, BasicBlock *BB) {
2749 if (getNumOperands() == ReservedSpace)
2750 growOperands(); // Get more space!
2751 // Initialize some new operands.
2752 setNumHungOffUseOperands(getNumOperands() + 1);
2753 setIncomingValue(getNumOperands() - 1, V);
2754 setIncomingBlock(getNumOperands() - 1, BB);
2755 }
2756
2757 /// Remove an incoming value. This is useful if a
2758 /// predecessor basic block is deleted. The value removed is returned.
2759 ///
2760 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2761 /// is true), the PHI node is destroyed and any uses of it are replaced with
2762 /// dummy values. The only time there should be zero incoming values to a PHI
2763 /// node is when the block is dead, so this strategy is sound.
2764 ///
2765 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2766
2767 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2768 int Idx = getBasicBlockIndex(BB);
2769 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2769, __extension__ __PRETTY_FUNCTION__))
;
2770 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2771 }
2772
2773 /// Return the first index of the specified basic
2774 /// block in the value list for this PHI. Returns -1 if no instance.
2775 ///
2776 int getBasicBlockIndex(const BasicBlock *BB) const {
2777 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2778 if (block_begin()[i] == BB)
2779 return i;
2780 return -1;
2781 }
2782
2783 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2784 int Idx = getBasicBlockIndex(BB);
2785 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2785, __extension__ __PRETTY_FUNCTION__))
;
2786 return getIncomingValue(Idx);
2787 }
2788
2789 /// Set every incoming value(s) for block \p BB to \p V.
2790 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2791 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2791, __extension__ __PRETTY_FUNCTION__))
;
2792 bool Found = false;
2793 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2794 if (getIncomingBlock(Op) == BB) {
2795 Found = true;
2796 setIncomingValue(Op, V);
2797 }
2798 (void)Found;
2799 assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!"
) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2799, __extension__ __PRETTY_FUNCTION__))
;
2800 }
2801
2802 /// If the specified PHI node always merges together the
2803 /// same value, return the value, otherwise return null.
// Declaration only; the definition is out of line (not in this view).
2804 Value *hasConstantValue() const;
2805
2806 /// Whether the specified PHI node always merges
2807 /// together the same value, assuming undefs are equal to a unique
2808 /// non-undef value.
// Declaration only; the definition is out of line (not in this view).
2809 bool hasConstantOrUndefValue() const;
2810
2811 /// If the PHI node is complete which means all of its parent's predecessors
2812 /// have incoming value in this PHI, return true, otherwise return false.
// Checks that every predecessor of the containing block has at least one
// entry in this PHI; getBasicBlockIndex() returns -1 exactly when a block
// is absent from the incoming list.
2813 bool isComplete() const {
2814 return llvm::all_of(predecessors(getParent()),
2815 [this](const BasicBlock *Pred) {
2816 return getBasicBlockIndex(Pred) >= 0;
2817 });
2818 }
2819
2820 /// Methods for support type inquiry through isa, cast, and dyn_cast:
// A PHI is identified purely by its opcode.
2821 static bool classof(const Instruction *I) {
2822 return I->getOpcode() == Instruction::PHI;
2823 }
// The Value* overload guards with isa<Instruction> and then funnels
// through the Instruction* overload above.
2824 static bool classof(const Value *V) {
2825 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2826 }
2827
2828private:
2829 void growOperands();
2830};
2831
// Operand storage for PHINode is "hung off" the instruction (allocated
// separately from the User object) so the incoming list can grow.
// NOTE(review): the <2> template argument presumably is the initial
// reservation hint -- confirm against HungoffOperandTraits.
2832template <>
2833struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2834};
2835
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS: provides the
// op_begin/op_end/getOperand/setOperand/getNumOperands/Op<> definitions
// whose declarations live in the class body (above this view). The
// getOperand/setOperand bodies bounds-check via OperandTraits<PHINode>::
// operands() in +Asserts builds.
2836DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<PHINode>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2836, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<PHINode>::op_begin(const_cast
<PHINode*>(this))[i_nocapture].get()); } void PHINode::
setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<PHINode>::
operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2836, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
PHINode>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
PHINode::getNumOperands() const { return OperandTraits<PHINode
>::operands(this); } template <int Idx_nocapture> Use
&PHINode::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
PHINode::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2837
2838//===----------------------------------------------------------------------===//
2839// LandingPadInst Class
2840//===----------------------------------------------------------------------===//
2841
2842//===---------------------------------------------------------------------------
2843/// The landingpad instruction holds all of the information
2844/// necessary to generate correct exception handling. The landingpad instruction
2845/// cannot be moved from the top of a landing pad block, which itself is
2846/// accessible only from the 'unwind' edge of an invoke. This uses the
2847/// SubclassData field in Value to store whether or not the landingpad is a
2848/// cleanup.
2849///
2850class LandingPadInst : public Instruction {
// Whether this landingpad is a cleanup is packed into Value's subclass
// data as a one-bit field (see isCleanup/setCleanup below).
2851 using CleanupField = BoolBitfieldElementT<0>;
2852
2853 /// The number of operands actually allocated. NumOperands is
2854 /// the number actually in use.
2855 unsigned ReservedSpace;
2856
2857 LandingPadInst(const LandingPadInst &LP);
2858
2859public:
2860 enum ClauseType { Catch, Filter };
2861
2862private:
// Constructors are private; use the static Create() factories below.
2863 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2864 const Twine &NameStr, Instruction *InsertBefore);
2865 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2866 const Twine &NameStr, BasicBlock *InsertAtEnd);
2867
2868 // Allocate space for exactly zero operands.
// Clause operands are hung off separately (see OperandTraits below) and
// grown on demand via growOperands().
2869 void *operator new(size_t s) {
2870 return User::operator new(s);
2871 }
2872
2873 void growOperands(unsigned Size);
2874 void init(unsigned NumReservedValues, const Twine &NameStr);
2875
2876protected:
2877 // Note: Instruction needs to be a friend here to call cloneImpl.
2878 friend class Instruction;
2879
2880 LandingPadInst *cloneImpl() const;
2881
2882public:
2883 /// Constructors - NumReservedClauses is a hint for the number of incoming
2884 /// clauses that this landingpad will have (use 0 if you really have no idea).
2885 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2886 const Twine &NameStr = "",
2887 Instruction *InsertBefore = nullptr);
2888 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2889 const Twine &NameStr, BasicBlock *InsertAtEnd);
2890
2891 /// Provide fast operand accessors
2892 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2893
2894 /// Return 'true' if this landingpad instruction is a
2895 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2896 /// doesn't catch the exception.
2897 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2898
2899 /// Indicate that this landingpad instruction is a cleanup.
2900 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2901
2902 /// Add a catch or filter clause to the landing pad.
2903 void addClause(Constant *ClauseVal);
2904
2905 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2906 /// determine what type of clause this is.
2907 Constant *getClause(unsigned Idx) const {
2908 return cast<Constant>(getOperandList()[Idx]);
2909 }
2910
// Clause encoding used by the two predicates below: a filter clause is an
// operand of array type; a catch clause is any non-array operand.
2911 /// Return 'true' if the clause and index Idx is a catch clause.
2912 bool isCatch(unsigned Idx) const {
2913 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2914 }
2915
2916 /// Return 'true' if the clause and index Idx is a filter clause.
2917 bool isFilter(unsigned Idx) const {
2918 return isa<ArrayType>(getOperandList()[Idx]->getType());
2919 }
2920
2921 /// Get the number of clauses for this landing pad.
// Every operand is a clause, so the clause count is the operand count.
2922 unsigned getNumClauses() const { return getNumOperands(); }
2923
2924 /// Grow the size of the operand list to accommodate the new
2925 /// number of clauses.
2926 void reserveClauses(unsigned Size) { growOperands(Size); }
2927
2928 // Methods for support type inquiry through isa, cast, and dyn_cast:
2929 static bool classof(const Instruction *I) {
2930 return I->getOpcode() == Instruction::LandingPad;
2931 }
2932 static bool classof(const Value *V) {
2933 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2934 }
2935};
2936
// LandingPadInst clauses are hung-off operands (allocated separately and
// growable); mirrors the zero-operand operator new in the class above.
2937template <>
2938struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2939};
2940
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS for LandingPadInst:
// out-of-line operand accessor definitions matching the DECLARE_ macro in
// the class body above; getOperand/setOperand bounds-check in +Asserts
// builds.
2941DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2941, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<LandingPadInst>::op_begin(
const_cast<LandingPadInst*>(this))[i_nocapture].get());
} void LandingPadInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<LandingPadInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2941, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
LandingPadInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned LandingPadInst::getNumOperands() const { return OperandTraits
<LandingPadInst>::operands(this); } template <int Idx_nocapture
> Use &LandingPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &LandingPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2942
2943//===----------------------------------------------------------------------===//
2944// ReturnInst Class
2945//===----------------------------------------------------------------------===//
2946
2947//===---------------------------------------------------------------------------
2948/// Return a value (possibly void), from a function. Execution
2949/// does not continue in this function any longer.
2950///
2951class ReturnInst : public Instruction {
2952 ReturnInst(const ReturnInst &RI);
2953
2954private:
2955 // ReturnInst constructors:
2956 // ReturnInst() - 'ret void' instruction
2957 // ReturnInst( null) - 'ret void' instruction
2958 // ReturnInst(Value* X) - 'ret X' instruction
2959 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2960 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2961 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2962 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2963 //
2964 // NOTE: If the Value* passed is of type void then the constructor behaves as
2965 // if it was passed NULL.
2966 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2967 Instruction *InsertBefore = nullptr);
2968 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2969 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2970
2971protected:
2972 // Note: Instruction needs to be a friend here to call cloneImpl.
2973 friend class Instruction;
2974
2975 ReturnInst *cloneImpl() const;
2976
2977public:
// !!retVal is 1 when a return value is present and 0 for 'ret void', so
// the placement-new argument sizes the operand list to exactly 0 or 1.
2978 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2979 Instruction *InsertBefore = nullptr) {
2980 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2981 }
2982
2983 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2984 BasicBlock *InsertAtEnd) {
2985 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2986 }
2987
// 'ret void' form: zero operands.
2988 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2989 return new(0) ReturnInst(C, InsertAtEnd);
2990 }
2991
2992 /// Provide fast operand accessors
2993 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2994
2995 /// Convenience accessor. Returns null if there is no return value.
// Operand count encodes void-ness: 0 operands means 'ret void'.
2996 Value *getReturnValue() const {
2997 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2998 }
2999
// A return terminator never has successors.
3000 unsigned getNumSuccessors() const { return 0; }
3001
3002 // Methods for support type inquiry through isa, cast, and dyn_cast:
3003 static bool classof(const Instruction *I) {
3004 return (I->getOpcode() == Instruction::Ret);
3005 }
3006 static bool classof(const Value *V) {
3007 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3008 }
3009
3010private:
// Private successor accessors exist only to satisfy generic terminator
// code; calling either is a hard error (llvm_unreachable).
3011 BasicBlock *getSuccessor(unsigned idx) const {
3012 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3012)
;
3013 }
3014
3015 void setSuccessor(unsigned idx, BasicBlock *B) {
3016 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3016)
;
3017 }
3018};
3019
// ReturnInst has a variable (0 or 1) operand count fixed at construction,
// hence VariadicOperandTraits rather than a fixed-arity traits class.
3020template <>
3021struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3022};
3023
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS for ReturnInst:
// out-of-line operand accessor definitions matching the DECLARE_ macro in
// the class body above.
3024DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ReturnInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3024, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ReturnInst>::op_begin(const_cast
<ReturnInst*>(this))[i_nocapture].get()); } void ReturnInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<ReturnInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3024, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned ReturnInst::getNumOperands() const { return OperandTraits
<ReturnInst>::operands(this); } template <int Idx_nocapture
> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3025
3026//===----------------------------------------------------------------------===//
3027// BranchInst Class
3028//===----------------------------------------------------------------------===//
3029
3030//===---------------------------------------------------------------------------
3031/// Conditional or Unconditional Branch instruction.
3032///
3033class BranchInst : public Instruction {
3034 /// Ops list - Branches are strange. The operands are ordered:
3035 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3036 /// they don't have to check for cond/uncond branchness. These are mostly
3037 /// accessed relative from op_end().
3038 BranchInst(const BranchInst &BI);
3039 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3040 // BranchInst(BB *B) - 'br B'
3041 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3042 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3043 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3044 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3045 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3046 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3047 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3048 Instruction *InsertBefore = nullptr);
3049 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3050 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3051 BasicBlock *InsertAtEnd);
3052
3053 void AssertOK();
3054
3055protected:
3056 // Note: Instruction needs to be a friend here to call cloneImpl.
3057 friend class Instruction;
3058
3059 BranchInst *cloneImpl() const;
3060
3061public:
3062 /// Iterator type that casts an operand to a basic block.
3063 ///
3064 /// This only makes sense because the successors are stored as adjacent
3065 /// operands for branch instructions.
3066 struct succ_op_iterator
3067 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3068 std::random_access_iterator_tag, BasicBlock *,
3069 ptrdiff_t, BasicBlock *, BasicBlock *> {
3070 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3071
3072 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3073 BasicBlock *operator->() const { return operator*(); }
3074 };
3075
3076 /// The const version of `succ_op_iterator`.
3077 struct const_succ_op_iterator
3078 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3079 std::random_access_iterator_tag,
3080 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3081 const BasicBlock *> {
3082 explicit const_succ_op_iterator(const_value_op_iterator I)
3083 : iterator_adaptor_base(I) {}
3084
3085 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3086 const BasicBlock *operator->() const { return operator*(); }
3087 };
3088
// Factories: the placement-new argument (1 or 3) sizes the operand list
// for the unconditional vs. conditional form.
3089 static BranchInst *Create(BasicBlock *IfTrue,
3090 Instruction *InsertBefore = nullptr) {
3091 return new(1) BranchInst(IfTrue, InsertBefore);
3092 }
3093
3094 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3095 Value *Cond, Instruction *InsertBefore = nullptr) {
3096 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3097 }
3098
3099 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3100 return new(1) BranchInst(IfTrue, InsertAtEnd);
3101 }
3102
3103 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3104 Value *Cond, BasicBlock *InsertAtEnd) {
3105 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3106 }
3107
3108 /// Transparently provide more efficient getOperand methods.
3109 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3110
// The operand count encodes the branch kind: 1 operand (TrueDest only)
// for 'br B', 3 operands (Cond, FalseDest, TrueDest) for 'br C, T, F'.
3111 bool isUnconditional() const { return getNumOperands() == 1; }
3112 bool isConditional() const { return getNumOperands() == 3; }
9
Assuming the condition is true
10
Returning the value 1, which participates in a condition later
3113
// Op<-3>() addresses the condition from op_end(); only valid on a
// conditional branch (asserted below).
3114 Value *getCondition() const {
3115 assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3115, __extension__ __PRETTY_FUNCTION__))
;
3116 return Op<-3>();
3117 }
3118
3119 void setCondition(Value *V) {
3120 assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3120, __extension__ __PRETTY_FUNCTION__))
;
3121 Op<-3>() = V;
3122 }
3123
// 1 successor when unconditional, 2 when conditional.
3124 unsigned getNumSuccessors() const { return 1+isConditional(); }
3125
// Successors sit at the end of the operand list in reverse order:
// Op<-1>() is successor 0 (TrueDest), Op<-2>() is successor 1 (FalseDest).
3126 BasicBlock *getSuccessor(unsigned i) const {
3127 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3127, __extension__ __PRETTY_FUNCTION__))
;
3128 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3129 }
3130
3131 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3132 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3132, __extension__ __PRETTY_FUNCTION__))
;
3133 *(&Op<-1>() - idx) = NewSucc;
3134 }
3135
3136 /// Swap the successors of this branch instruction.
3137 ///
3138 /// Swaps the successors of the branch instruction. This also swaps any
3139 /// branch weight metadata associated with the instruction so that it
3140 /// continues to map correctly to each operand.
3141 void swapSuccessors();
3142
// Iterate the successor operands, skipping the leading condition operand
// when the branch is conditional.
3143 iterator_range<succ_op_iterator> successors() {
3144 return make_range(
3145 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3146 succ_op_iterator(value_op_end()));
3147 }
3148
3149 iterator_range<const_succ_op_iterator> successors() const {
3150 return make_range(const_succ_op_iterator(
3151 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3152 const_succ_op_iterator(value_op_end()));
3153 }
3154
3155 // Methods for support type inquiry through isa, cast, and dyn_cast:
3156 static bool classof(const Instruction *I) {
3157 return (I->getOpcode() == Instruction::Br);
3158 }
3159 static bool classof(const Value *V) {
3160 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3161 }
3162};
3163
// BranchInst has a variable operand count (1 or 3) fixed at construction;
// NOTE(review): the trailing 1 presumably is the minimum arity -- confirm
// against VariadicOperandTraits.
3164template <>
3165struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3166};
3167
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS for BranchInst:
// out-of-line operand accessor definitions matching the DECLARE_ macro in
// the class body above.
3168DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<BranchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3168, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<BranchInst>::op_begin(const_cast
<BranchInst*>(this))[i_nocapture].get()); } void BranchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<BranchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3168, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
BranchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned BranchInst::getNumOperands() const { return OperandTraits
<BranchInst>::operands(this); } template <int Idx_nocapture
> Use &BranchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
BranchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3169
3170//===----------------------------------------------------------------------===//
3171// SwitchInst Class
3172//===----------------------------------------------------------------------===//
3173
3174//===---------------------------------------------------------------------------
3175/// Multiway switch
3176///
3177class SwitchInst : public Instruction {
3178 unsigned ReservedSpace;
3179
3180 // Operand[0] = Value to switch on
3181 // Operand[1] = Default basic block destination
3182 // Operand[2n ] = Value to match
3183 // Operand[2n+1] = BasicBlock to go to on match
3184 SwitchInst(const SwitchInst &SI);
3185
3186 /// Create a new switch instruction, specifying a value to switch on and a
3187 /// default destination. The number of additional cases can be specified here
3188 /// to make memory allocation more efficient. This constructor can also
3189 /// auto-insert before another instruction.
3190 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3191 Instruction *InsertBefore);
3192
3193 /// Create a new switch instruction, specifying a value to switch on and a
3194 /// default destination. The number of additional cases can be specified here
3195 /// to make memory allocation more efficient. This constructor also
3196 /// auto-inserts at the end of the specified BasicBlock.
3197 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3198 BasicBlock *InsertAtEnd);
3199
3200 // allocate space for exactly zero operands
3201 void *operator new(size_t s) {
3202 return User::operator new(s);
3203 }
3204
3205 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3206 void growOperands();
3207
3208protected:
3209 // Note: Instruction needs to be a friend here to call cloneImpl.
3210 friend class Instruction;
3211
3212 SwitchInst *cloneImpl() const;
3213
3214public:
3215 // -2
3216 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3217
3218 template <typename CaseHandleT> class CaseIteratorImpl;
3219
3220 /// A handle to a particular switch case. It exposes a convenient interface
3221 /// to both the case value and the successor block.
3222 ///
3223 /// We define this as a template and instantiate it to form both a const and
3224 /// non-const handle.
3225 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3226 class CaseHandleImpl {
3227 // Directly befriend both const and non-const iterators.
3228 friend class SwitchInst::CaseIteratorImpl<
3229 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3230
3231 protected:
3232 // Expose the switch type we're parameterized with to the iterator.
3233 using SwitchInstType = SwitchInstT;
3234
3235 SwitchInstT *SI;
3236 ptrdiff_t Index;
3237
3238 CaseHandleImpl() = default;
3239 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3240
3241 public:
3242 /// Resolves case value for current case.
3243 ConstantIntT *getCaseValue() const {
3244 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3245, __extension__ __PRETTY_FUNCTION__))
3245 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3245, __extension__ __PRETTY_FUNCTION__))
;
3246 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3247 }
3248
3249 /// Resolves successor for current case.
3250 BasicBlockT *getCaseSuccessor() const {
3251 assert(((unsigned)Index < SI->getNumCases() ||(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3253, __extension__ __PRETTY_FUNCTION__))
3252 (unsigned)Index == DefaultPseudoIndex) &&(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3253, __extension__ __PRETTY_FUNCTION__))
3253 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3253, __extension__ __PRETTY_FUNCTION__))
;
3254 return SI->getSuccessor(getSuccessorIndex());
3255 }
3256
3257 /// Returns number of current case.
3258 unsigned getCaseIndex() const { return Index; }
3259
3260 /// Returns successor index for current case successor.
3261 unsigned getSuccessorIndex() const {
3262 assert(((unsigned)Index == DefaultPseudoIndex ||(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3264, __extension__ __PRETTY_FUNCTION__))
3263 (unsigned)Index < SI->getNumCases()) &&(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3264, __extension__ __PRETTY_FUNCTION__))
3264 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3264, __extension__ __PRETTY_FUNCTION__))
;
3265 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3266 }
3267
3268 bool operator==(const CaseHandleImpl &RHS) const {
3269 assert(SI == RHS.SI && "Incompatible operators.")(static_cast <bool> (SI == RHS.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3269, __extension__ __PRETTY_FUNCTION__))
;
3270 return Index == RHS.Index;
3271 }
3272 };
3273
3274 using ConstCaseHandle =
3275 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3276
3277 class CaseHandle
3278 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3279 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3280
3281 public:
3282 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3283
3284 /// Sets the new value for current case.
3285 void setValue(ConstantInt *V) {
3286 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3287, __extension__ __PRETTY_FUNCTION__))
3287 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3287, __extension__ __PRETTY_FUNCTION__))
;
3288 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3289 }
3290
3291 /// Sets the new successor for current case.
3292 void setSuccessor(BasicBlock *S) {
3293 SI->setSuccessor(getSuccessorIndex(), S);
3294 }
3295 };
3296
3297 template <typename CaseHandleT>
3298 class CaseIteratorImpl
3299 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3300 std::random_access_iterator_tag,
3301 CaseHandleT> {
3302 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3303
3304 CaseHandleT Case;
3305
3306 public:
3307 /// Default constructed iterator is in an invalid state until assigned to
3308 /// a case for a particular switch.
3309 CaseIteratorImpl() = default;
3310
3311 /// Initializes case iterator for given SwitchInst and for given
3312 /// case number.
3313 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3314
3315 /// Initializes case iterator for given SwitchInst and for given
3316 /// successor index.
3317 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3318 unsigned SuccessorIndex) {
3319 assert(SuccessorIndex < SI->getNumSuccessors() &&(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3320, __extension__ __PRETTY_FUNCTION__))
3320 "Successor index # out of range!")(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3320, __extension__ __PRETTY_FUNCTION__))
;
3321 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3322 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3323 }
3324
3325 /// Support converting to the const variant. This will be a no-op for const
3326 /// variant.
3327 operator CaseIteratorImpl<ConstCaseHandle>() const {
3328 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3329 }
3330
3331 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3332 // Check index correctness after addition.
3333 // Note: Index == getNumCases() means end().
3334 assert(Case.Index + N >= 0 &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3336, __extension__ __PRETTY_FUNCTION__))
3335 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3336, __extension__ __PRETTY_FUNCTION__))
3336 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3336, __extension__ __PRETTY_FUNCTION__))
;
3337 Case.Index += N;
3338 return *this;
3339 }
3340 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3341 // Check index correctness after subtraction.
3342 // Note: Case.Index == getNumCases() means end().
3343 assert(Case.Index - N >= 0 &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3345, __extension__ __PRETTY_FUNCTION__))
3344 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3345, __extension__ __PRETTY_FUNCTION__))
3345 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3345, __extension__ __PRETTY_FUNCTION__))
;
3346 Case.Index -= N;
3347 return *this;
3348 }
3349 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3350 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3350, __extension__ __PRETTY_FUNCTION__))
;
3351 return Case.Index - RHS.Case.Index;
3352 }
3353 bool operator==(const CaseIteratorImpl &RHS) const {
3354 return Case == RHS.Case;
3355 }
3356 bool operator<(const CaseIteratorImpl &RHS) const {
3357 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3357, __extension__ __PRETTY_FUNCTION__))
;
3358 return Case.Index < RHS.Case.Index;
3359 }
3360 CaseHandleT &operator*() { return Case; }
3361 const CaseHandleT &operator*() const { return Case; }
3362 };
3363
3364 using CaseIt = CaseIteratorImpl<CaseHandle>;
3365 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3366
3367 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3368 unsigned NumCases,
3369 Instruction *InsertBefore = nullptr) {
3370 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3371 }
3372
3373 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3374 unsigned NumCases, BasicBlock *InsertAtEnd) {
3375 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3376 }
3377
3378 /// Provide fast operand accessors
3379 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3380
3381 // Accessor Methods for Switch stmt
3382 Value *getCondition() const { return getOperand(0); }
3383 void setCondition(Value *V) { setOperand(0, V); }
3384
3385 BasicBlock *getDefaultDest() const {
3386 return cast<BasicBlock>(getOperand(1));
3387 }
3388
3389 void setDefaultDest(BasicBlock *DefaultCase) {
3390 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3391 }
3392
3393 /// Return the number of 'cases' in this switch instruction, excluding the
3394 /// default case.
3395 unsigned getNumCases() const {
3396 return getNumOperands()/2 - 1;
3397 }
3398
3399 /// Returns a read/write iterator that points to the first case in the
3400 /// SwitchInst.
3401 CaseIt case_begin() {
3402 return CaseIt(this, 0);
3403 }
3404
3405 /// Returns a read-only iterator that points to the first case in the
3406 /// SwitchInst.
3407 ConstCaseIt case_begin() const {
3408 return ConstCaseIt(this, 0);
3409 }
3410
3411 /// Returns a read/write iterator that points one past the last in the
3412 /// SwitchInst.
3413 CaseIt case_end() {
3414 return CaseIt(this, getNumCases());
3415 }
3416
3417 /// Returns a read-only iterator that points one past the last in the
3418 /// SwitchInst.
3419 ConstCaseIt case_end() const {
3420 return ConstCaseIt(this, getNumCases());
3421 }
3422
3423 /// Iteration adapter for range-for loops.
3424 iterator_range<CaseIt> cases() {
3425 return make_range(case_begin(), case_end());
3426 }
3427
3428 /// Constant iteration adapter for range-for loops.
3429 iterator_range<ConstCaseIt> cases() const {
3430 return make_range(case_begin(), case_end());
3431 }
3432
3433 /// Returns an iterator that points to the default case.
3434 /// Note: this iterator allows to resolve successor only. Attempt
3435 /// to resolve case value causes an assertion.
3436 /// Also note, that increment and decrement also causes an assertion and
3437 /// makes iterator invalid.
3438 CaseIt case_default() {
3439 return CaseIt(this, DefaultPseudoIndex);
3440 }
3441 ConstCaseIt case_default() const {
3442 return ConstCaseIt(this, DefaultPseudoIndex);
3443 }
3444
3445 /// Search all of the case values for the specified constant. If it is
3446 /// explicitly handled, return the case iterator of it, otherwise return
3447 /// default case iterator to indicate that it is handled by the default
3448 /// handler.
3449 CaseIt findCaseValue(const ConstantInt *C) {
3450 CaseIt I = llvm::find_if(
3451 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3452 if (I != case_end())
3453 return I;
3454
3455 return case_default();
3456 }
3457 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3458 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3459 return Case.getCaseValue() == C;
3460 });
3461 if (I != case_end())
3462 return I;
3463
3464 return case_default();
3465 }
3466
3467 /// Finds the unique case value for a given successor. Returns null if the
3468 /// successor is not found, not unique, or is the default case.
3469 ConstantInt *findCaseDest(BasicBlock *BB) {
3470 if (BB == getDefaultDest())
3471 return nullptr;
3472
3473 ConstantInt *CI = nullptr;
3474 for (auto Case : cases()) {
3475 if (Case.getCaseSuccessor() != BB)
3476 continue;
3477
3478 if (CI)
3479 return nullptr; // Multiple cases lead to BB.
3480
3481 CI = Case.getCaseValue();
3482 }
3483
3484 return CI;
3485 }
3486
3487 /// Add an entry to the switch instruction.
3488 /// Note:
3489 /// This action invalidates case_end(). Old case_end() iterator will
3490 /// point to the added case.
3491 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3492
3493 /// This method removes the specified case and its successor from the switch
3494 /// instruction. Note that this operation may reorder the remaining cases at
3495 /// index idx and above.
3496 /// Note:
3497 /// This action invalidates iterators for all cases following the one removed,
3498 /// including the case_end() iterator. It returns an iterator for the next
3499 /// case.
3500 CaseIt removeCase(CaseIt I);
3501
3502 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3503 BasicBlock *getSuccessor(unsigned idx) const {
3504 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor idx out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3504, __extension__ __PRETTY_FUNCTION__))
;
3505 return cast<BasicBlock>(getOperand(idx*2+1));
3506 }
3507 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3508 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for switch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3508, __extension__ __PRETTY_FUNCTION__))
;
3509 setOperand(idx * 2 + 1, NewSucc);
3510 }
3511
3512 // Methods for support type inquiry through isa, cast, and dyn_cast:
3513 static bool classof(const Instruction *I) {
3514 return I->getOpcode() == Instruction::Switch;
3515 }
3516 static bool classof(const Value *V) {
3517 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3518 }
3519};
3520
3521/// A wrapper class to simplify modification of SwitchInst cases along with
3522/// their prof branch_weights metadata.
3523class SwitchInstProfUpdateWrapper {
3524 SwitchInst &SI;
3525 Optional<SmallVector<uint32_t, 8> > Weights = None;
3526 bool Changed = false;
3527
3528protected:
3529 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3530
3531 MDNode *buildProfBranchWeightsMD();
3532
3533 void init();
3534
3535public:
3536 using CaseWeightOpt = Optional<uint32_t>;
3537 SwitchInst *operator->() { return &SI; }
3538 SwitchInst &operator*() { return SI; }
3539 operator SwitchInst *() { return &SI; }
3540
3541 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3542
3543 ~SwitchInstProfUpdateWrapper() {
3544 if (Changed)
3545 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3546 }
3547
3548 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3549 /// correspondent branch weight.
3550 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3551
3552 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3553 /// specified branch weight for the added case.
3554 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3555
3556 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3557 /// this object to not touch the underlying SwitchInst in destructor.
3558 SymbolTableList<Instruction>::iterator eraseFromParent();
3559
3560 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3561 CaseWeightOpt getSuccessorWeight(unsigned idx);
3562
3563 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3564};
3565
3566template <>
3567struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3568};
3569
3570DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits
<SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator
SwitchInst::op_begin() const { return OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst
::op_iterator SwitchInst::op_end() { return OperandTraits<
SwitchInst>::op_end(this); } SwitchInst::const_op_iterator
SwitchInst::op_end() const { return OperandTraits<SwitchInst
>::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SwitchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3570, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SwitchInst>::op_begin(const_cast
<SwitchInst*>(this))[i_nocapture].get()); } void SwitchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SwitchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3570, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SwitchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SwitchInst::getNumOperands() const { return OperandTraits
<SwitchInst>::operands(this); } template <int Idx_nocapture
> Use &SwitchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SwitchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3571
3572//===----------------------------------------------------------------------===//
3573// IndirectBrInst Class
3574//===----------------------------------------------------------------------===//
3575
3576//===---------------------------------------------------------------------------
3577/// Indirect Branch Instruction.
3578///
3579class IndirectBrInst : public Instruction {
3580 unsigned ReservedSpace;
3581
3582 // Operand[0] = Address to jump to
3583 // Operand[n+1] = n-th destination
3584 IndirectBrInst(const IndirectBrInst &IBI);
3585
3586 /// Create a new indirectbr instruction, specifying an
3587 /// Address to jump to. The number of expected destinations can be specified
3588 /// here to make memory allocation more efficient. This constructor can also
3589 /// autoinsert before another instruction.
3590 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3591
3592 /// Create a new indirectbr instruction, specifying an
3593 /// Address to jump to. The number of expected destinations can be specified
3594 /// here to make memory allocation more efficient. This constructor also
3595 /// autoinserts at the end of the specified BasicBlock.
3596 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3597
3598 // allocate space for exactly zero operands
3599 void *operator new(size_t s) {
3600 return User::operator new(s);
3601 }
3602
3603 void init(Value *Address, unsigned NumDests);
3604 void growOperands();
3605
3606protected:
3607 // Note: Instruction needs to be a friend here to call cloneImpl.
3608 friend class Instruction;
3609
3610 IndirectBrInst *cloneImpl() const;
3611
3612public:
3613 /// Iterator type that casts an operand to a basic block.
3614 ///
3615 /// This only makes sense because the successors are stored as adjacent
3616 /// operands for indirectbr instructions.
3617 struct succ_op_iterator
3618 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3619 std::random_access_iterator_tag, BasicBlock *,
3620 ptrdiff_t, BasicBlock *, BasicBlock *> {
3621 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3622
3623 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3624 BasicBlock *operator->() const { return operator*(); }
3625 };
3626
3627 /// The const version of `succ_op_iterator`.
3628 struct const_succ_op_iterator
3629 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3630 std::random_access_iterator_tag,
3631 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3632 const BasicBlock *> {
3633 explicit const_succ_op_iterator(const_value_op_iterator I)
3634 : iterator_adaptor_base(I) {}
3635
3636 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3637 const BasicBlock *operator->() const { return operator*(); }
3638 };
3639
3640 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3641 Instruction *InsertBefore = nullptr) {
3642 return new IndirectBrInst(Address, NumDests, InsertBefore);
3643 }
3644
3645 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3646 BasicBlock *InsertAtEnd) {
3647 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3648 }
3649
3650 /// Provide fast operand accessors.
3651 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3652
3653 // Accessor Methods for IndirectBrInst instruction.
3654 Value *getAddress() { return getOperand(0); }
3655 const Value *getAddress() const { return getOperand(0); }
3656 void setAddress(Value *V) { setOperand(0, V); }
3657
3658 /// return the number of possible destinations in this
3659 /// indirectbr instruction.
3660 unsigned getNumDestinations() const { return getNumOperands()-1; }
3661
3662 /// Return the specified destination.
3663 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3664 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3665
3666 /// Add a destination.
3667 ///
3668 void addDestination(BasicBlock *Dest);
3669
3670 /// This method removes the specified successor from the
3671 /// indirectbr instruction.
3672 void removeDestination(unsigned i);
3673
3674 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3675 BasicBlock *getSuccessor(unsigned i) const {
3676 return cast<BasicBlock>(getOperand(i+1));
3677 }
3678 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3679 setOperand(i + 1, NewSucc);
3680 }
3681
3682 iterator_range<succ_op_iterator> successors() {
3683 return make_range(succ_op_iterator(std::next(value_op_begin())),
3684 succ_op_iterator(value_op_end()));
3685 }
3686
3687 iterator_range<const_succ_op_iterator> successors() const {
3688 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3689 const_succ_op_iterator(value_op_end()));
3690 }
3691
3692 // Methods for support type inquiry through isa, cast, and dyn_cast:
3693 static bool classof(const Instruction *I) {
3694 return I->getOpcode() == Instruction::IndirectBr;
3695 }
3696 static bool classof(const Value *V) {
3697 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3698 }
3699};
3700
3701template <>
3702struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3703};
3704
// NOTE(review): this is the macro-expanded form of
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS for IndirectBrInst, wrapped across
// continuation lines by the analyzer dump. It defines the out-of-line
// op_begin/op_end iterators, bounds-checked getOperand/setOperand,
// getNumOperands, and the Op<Idx>() helpers, all delegating to
// OperandTraits<IndirectBrInst> (hung-off uses, per the trait on the
// preceding lines).
3705DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return
OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst
::const_op_iterator IndirectBrInst::op_begin() const { return
OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst
::op_end() { return OperandTraits<IndirectBrInst>::op_end
(this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end
() const { return OperandTraits<IndirectBrInst>::op_end
(const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<IndirectBrInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3705, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<IndirectBrInst>::op_begin(
const_cast<IndirectBrInst*>(this))[i_nocapture].get());
} void IndirectBrInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<IndirectBrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3705, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
IndirectBrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned IndirectBrInst::getNumOperands() const { return OperandTraits
<IndirectBrInst>::operands(this); } template <int Idx_nocapture
> Use &IndirectBrInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &IndirectBrInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
3706
3707//===----------------------------------------------------------------------===//
3708// InvokeInst Class
3709//===----------------------------------------------------------------------===//
3710
3711/// Invoke instruction. The SubclassData field is used to hold the
3712/// calling convention of the call.
3713///
3714class InvokeInst : public CallBase {
// NOTE(review): operand layout implied by the end-relative indices below:
// [args..., bundle operands..., normal dest (end-3), unwind dest (end-2),
// and one more trailing operand at end-1 — presumably the callee, per
// CallBase; confirm against CallBase's operand layout].
3715 /// The number of operands for this call beyond the called function,
3716 /// arguments, and operand bundles.
3717 static constexpr int NumExtraOperands = 2;
3718
3719 /// The index from the end of the operand array to the normal destination.
3720 static constexpr int NormalDestOpEndIdx = -3;
3721
3722 /// The index from the end of the operand array to the unwind destination.
3723 static constexpr int UnwindDestOpEndIdx = -2;
3724
// Copy construction is private; clones are made via cloneImpl().
3725 InvokeInst(const InvokeInst &BI);
3726
3727 /// Construct an InvokeInst given a range of arguments.
3728 ///
3729 /// Construct an InvokeInst from a range of arguments
3730 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3731 BasicBlock *IfException, ArrayRef<Value *> Args,
3732 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3733 const Twine &NameStr, Instruction *InsertBefore);
3734
3735 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3736 BasicBlock *IfException, ArrayRef<Value *> Args,
3737 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3738 const Twine &NameStr, BasicBlock *InsertAtEnd);
3739
// Shared post-construction initialization used by both constructors.
3740 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3741 BasicBlock *IfException, ArrayRef<Value *> Args,
3742 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3743
3744 /// Compute the number of operands to allocate.
3745 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3746 // We need one operand for the called function, plus our extra operands and
3747 // the input operand counts provided.
3748 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3749 }
3750
3751protected:
3752 // Note: Instruction needs to be a friend here to call cloneImpl.
3753 friend class Instruction;
3754
3755 InvokeInst *cloneImpl() const;
3756
3757public:
// The Create() overloads below all use placement new with the operand
// count (and descriptor bytes for bundles) so operands are co-allocated
// with the instruction object.
3758 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3759 BasicBlock *IfException, ArrayRef<Value *> Args,
3760 const Twine &NameStr,
3761 Instruction *InsertBefore = nullptr) {
3762 int NumOperands = ComputeNumOperands(Args.size());
3763 return new (NumOperands)
3764 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3765 NameStr, InsertBefore);
3766 }
3767
3768 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3769 BasicBlock *IfException, ArrayRef<Value *> Args,
3770 ArrayRef<OperandBundleDef> Bundles = None,
3771 const Twine &NameStr = "",
3772 Instruction *InsertBefore = nullptr) {
3773 int NumOperands =
3774 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
// Extra storage for per-bundle bookkeeping records.
3775 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3776
3777 return new (NumOperands, DescriptorBytes)
3778 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3779 NameStr, InsertBefore);
3780 }
3781
3782 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3783 BasicBlock *IfException, ArrayRef<Value *> Args,
3784 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3785 int NumOperands = ComputeNumOperands(Args.size());
3786 return new (NumOperands)
3787 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3788 NameStr, InsertAtEnd);
3789 }
3790
3791 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3792 BasicBlock *IfException, ArrayRef<Value *> Args,
3793 ArrayRef<OperandBundleDef> Bundles,
3794 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3795 int NumOperands =
3796 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3797 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3798
3799 return new (NumOperands, DescriptorBytes)
3800 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3801 NameStr, InsertAtEnd);
3802 }
3803
// FunctionCallee convenience overloads: unpack the (type, callee) pair and
// forward to the FunctionType* overloads above.
3804 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3805 BasicBlock *IfException, ArrayRef<Value *> Args,
3806 const Twine &NameStr,
3807 Instruction *InsertBefore = nullptr) {
3808 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3809 IfException, Args, None, NameStr, InsertBefore);
3810 }
3811
3812 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3813 BasicBlock *IfException, ArrayRef<Value *> Args,
3814 ArrayRef<OperandBundleDef> Bundles = None,
3815 const Twine &NameStr = "",
3816 Instruction *InsertBefore = nullptr) {
3817 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3818 IfException, Args, Bundles, NameStr, InsertBefore);
3819 }
3820
3821 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3822 BasicBlock *IfException, ArrayRef<Value *> Args,
3823 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3824 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3825 IfException, Args, NameStr, InsertAtEnd);
3826 }
3827
3828 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3829 BasicBlock *IfException, ArrayRef<Value *> Args,
3830 ArrayRef<OperandBundleDef> Bundles,
3831 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3832 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3833 IfException, Args, Bundles, NameStr, InsertAtEnd);
3834 }
3835
3836 /// Create a clone of \p II with a different set of operand bundles and
3837 /// insert it before \p InsertPt.
3838 ///
3839 /// The returned invoke instruction is identical to \p II in every way except
3840 /// that the operand bundles for the new instruction are set to the operand
3841 /// bundles in \p Bundles.
3842 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3843 Instruction *InsertPt = nullptr);
3844
3845 // get*Dest - Return the destination basic blocks...
3846 BasicBlock *getNormalDest() const {
3847 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3848 }
3849 BasicBlock *getUnwindDest() const {
3850 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3851 }
// NOTE(review): destinations are stored through reinterpret_cast rather than
// an implicit BasicBlock* -> Value* conversion — presumably because
// BasicBlock may be incomplete at this point in the header; confirm.
3852 void setNormalDest(BasicBlock *B) {
3853 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3854 }
3855 void setUnwindDest(BasicBlock *B) {
3856 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3857 }
3858
3859 /// Get the landingpad instruction from the landing pad
3860 /// block (the unwind destination).
3861 LandingPadInst *getLandingPadInst() const;
3862
// Successor 0 is the normal destination; successor 1 is the unwind
// destination. (Assert below appears macro-expanded by the analyzer dump.)
3863 BasicBlock *getSuccessor(unsigned i) const {
3864 assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3864, __extension__ __PRETTY_FUNCTION__))
;
3865 return i == 0 ? getNormalDest() : getUnwindDest();
3866 }
3867
3868 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3869 assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3869, __extension__ __PRETTY_FUNCTION__))
;
3870 if (i == 0)
3871 setNormalDest(NewSucc);
3872 else
3873 setUnwindDest(NewSucc);
3874 }
3875
// An invoke always has exactly two successors (normal + unwind).
3876 unsigned getNumSuccessors() const { return 2; }
3877
3878 // Methods for support type inquiry through isa, cast, and dyn_cast:
3879 static bool classof(const Instruction *I) {
3880 return (I->getOpcode() == Instruction::Invoke);
3881 }
3882 static bool classof(const Value *V) {
3883 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3884 }
3885
3886private:
3887 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3888 // method so that subclasses cannot accidentally use it.
3889 template <typename Bitfield>
3890 void setSubclassData(typename Bitfield::Type Value) {
3891 Instruction::setSubclassData<Bitfield>(Value);
3892 }
3893};
3894
// Inline constructor (insert-before form). Operands are co-allocated in
// front of the object, so the use list starts NumOperands slots before
// op_end(); init() then fills in callee, destinations, args and bundles.
3895InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3896 BasicBlock *IfException, ArrayRef<Value *> Args,
3897 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3898 const Twine &NameStr, Instruction *InsertBefore)
3899 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3900 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3901 InsertBefore) {
3902 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3903}
3904
// Inline constructor (insert-at-block-end form). Identical to the
// insert-before overload except for where the instruction is placed.
3905InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3906 BasicBlock *IfException, ArrayRef<Value *> Args,
3907 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3908 const Twine &NameStr, BasicBlock *InsertAtEnd)
3909 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3910 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3911 InsertAtEnd) {
3912 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3913}
3914
3915//===----------------------------------------------------------------------===//
3916// CallBrInst Class
3917//===----------------------------------------------------------------------===//
3918
3919/// CallBr instruction, tracking function calls that may not return control but
3920/// instead transfer it to a third location. The SubclassData field is used to
3921/// hold the calling convention of the call.
3922///
3923class CallBrInst : public CallBase {
3924
// Number of indirect destination labels; cached here because the operand
// array mixes args, bundles and block operands.
3925 unsigned NumIndirectDests;
3926
// Copy construction is private; clones are made via cloneImpl().
3927 CallBrInst(const CallBrInst &BI);
3928
3929 /// Construct a CallBrInst given a range of arguments.
3930 ///
3931 /// Construct a CallBrInst from a range of arguments
3932 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3933 ArrayRef<BasicBlock *> IndirectDests,
3934 ArrayRef<Value *> Args,
3935 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3936 const Twine &NameStr, Instruction *InsertBefore);
3937
3938 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3939 ArrayRef<BasicBlock *> IndirectDests,
3940 ArrayRef<Value *> Args,
3941 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3942 const Twine &NameStr, BasicBlock *InsertAtEnd);
3943
// Shared post-construction initialization used by both constructors.
3944 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3945 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3946 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3947
3948 /// Should the Indirect Destinations change, scan + update the Arg list.
3949 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3950
3951 /// Compute the number of operands to allocate.
// The leading 2 covers the callee plus the default destination; indirect
// destinations, args and bundle inputs are added on top.
3952 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3953 int NumBundleInputs = 0) {
3954 // We need one operand for the called function, plus our extra operands and
3955 // the input operand counts provided.
3956 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3957 }
3958
3959protected:
3960 // Note: Instruction needs to be a friend here to call cloneImpl.
3961 friend class Instruction;
3962
3963 CallBrInst *cloneImpl() const;
3964
3965public:
// The Create() overloads below use placement new with the operand count
// (and descriptor bytes for bundles) so operands are co-allocated with the
// instruction object.
3966 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3967 BasicBlock *DefaultDest,
3968 ArrayRef<BasicBlock *> IndirectDests,
3969 ArrayRef<Value *> Args, const Twine &NameStr,
3970 Instruction *InsertBefore = nullptr) {
3971 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3972 return new (NumOperands)
3973 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3974 NumOperands, NameStr, InsertBefore);
3975 }
3976
3977 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3978 BasicBlock *DefaultDest,
3979 ArrayRef<BasicBlock *> IndirectDests,
3980 ArrayRef<Value *> Args,
3981 ArrayRef<OperandBundleDef> Bundles = None,
3982 const Twine &NameStr = "",
3983 Instruction *InsertBefore = nullptr) {
3984 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3985 CountBundleInputs(Bundles));
// Extra storage for per-bundle bookkeeping records.
3986 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3987
3988 return new (NumOperands, DescriptorBytes)
3989 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3990 NumOperands, NameStr, InsertBefore);
3991 }
3992
3993 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3994 BasicBlock *DefaultDest,
3995 ArrayRef<BasicBlock *> IndirectDests,
3996 ArrayRef<Value *> Args, const Twine &NameStr,
3997 BasicBlock *InsertAtEnd) {
3998 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3999 return new (NumOperands)
4000 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4001 NumOperands, NameStr, InsertAtEnd);
4002 }
4003
4004 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4005 BasicBlock *DefaultDest,
4006 ArrayRef<BasicBlock *> IndirectDests,
4007 ArrayRef<Value *> Args,
4008 ArrayRef<OperandBundleDef> Bundles,
4009 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4010 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4011 CountBundleInputs(Bundles));
4012 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4013
4014 return new (NumOperands, DescriptorBytes)
4015 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4016 NumOperands, NameStr, InsertAtEnd);
4017 }
4018
// FunctionCallee convenience overloads: unpack the (type, callee) pair and
// forward to the FunctionType* overloads above.
4019 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4020 ArrayRef<BasicBlock *> IndirectDests,
4021 ArrayRef<Value *> Args, const Twine &NameStr,
4022 Instruction *InsertBefore = nullptr) {
4023 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4024 IndirectDests, Args, NameStr, InsertBefore);
4025 }
4026
4027 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4028 ArrayRef<BasicBlock *> IndirectDests,
4029 ArrayRef<Value *> Args,
4030 ArrayRef<OperandBundleDef> Bundles = None,
4031 const Twine &NameStr = "",
4032 Instruction *InsertBefore = nullptr) {
4033 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4034 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4035 }
4036
4037 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4038 ArrayRef<BasicBlock *> IndirectDests,
4039 ArrayRef<Value *> Args, const Twine &NameStr,
4040 BasicBlock *InsertAtEnd) {
4041 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4042 IndirectDests, Args, NameStr, InsertAtEnd);
4043 }
4044
4045 static CallBrInst *Create(FunctionCallee Func,
4046 BasicBlock *DefaultDest,
4047 ArrayRef<BasicBlock *> IndirectDests,
4048 ArrayRef<Value *> Args,
4049 ArrayRef<OperandBundleDef> Bundles,
4050 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4051 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4052 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4053 }
4054
4055 /// Create a clone of \p CBI with a different set of operand bundles and
4056 /// insert it before \p InsertPt.
4057 ///
4058 /// The returned callbr instruction is identical to \p CBI in every way
4059 /// except that the operand bundles for the new instruction are set to the
4060 /// operand bundles in \p Bundles.
4061 static CallBrInst *Create(CallBrInst *CBI,
4062 ArrayRef<OperandBundleDef> Bundles,
4063 Instruction *InsertPt = nullptr);
4064
4065 /// Return the number of callbr indirect dest labels.
4066 ///
4067 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4068
4069 /// getIndirectDestLabel - Return the i-th indirect dest label.
4070 ///
// Labels live after the args and bundle operands; the +1 skips one
// intervening operand (NOTE(review): presumably the default destination —
// confirm against init()'s operand ordering).
4071 Value *getIndirectDestLabel(unsigned i) const {
4072 assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4072, __extension__ __PRETTY_FUNCTION__))
;
4073 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() +
4074 1);
4075 }
4076
// Same as getIndirectDestLabel but returns the Use rather than the Value.
4077 Value *getIndirectDestLabelUse(unsigned i) const {
4078 assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4078, __extension__ __PRETTY_FUNCTION__))
;
4079 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() +
4080 1);
4081 }
4082
4083 // Return the destination basic blocks...
// End-relative addressing: Op<-1>() is the last operand; the default dest
// sits NumIndirectDests+1 slots before it, followed by the indirect dests.
4084 BasicBlock *getDefaultDest() const {
4085 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4086 }
// cast_or_null: an indirect destination slot may legitimately be null.
4087 BasicBlock *getIndirectDest(unsigned i) const {
4088 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4089 }
4090 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4091 SmallVector<BasicBlock *, 16> IndirectDests;
4092 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4093 IndirectDests.push_back(getIndirectDest(i));
4094 return IndirectDests;
4095 }
4096 void setDefaultDest(BasicBlock *B) {
4097 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4098 }
// Also rewrites any blockaddress arguments referring to the old block so
// the argument list stays consistent with the destination list.
4099 void setIndirectDest(unsigned i, BasicBlock *B) {
4100 updateArgBlockAddresses(i, B);
4101 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4102 }
4103
// Successor 0 is the default destination; successors 1..N are the indirect
// destinations. (Asserts below appear macro-expanded by the analyzer dump.)
4104 BasicBlock *getSuccessor(unsigned i) const {
4105 assert(i < getNumSuccessors() + 1 &&(static_cast <bool> (i < getNumSuccessors() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4106, __extension__ __PRETTY_FUNCTION__))
4106 "Successor # out of range for callbr!")(static_cast <bool> (i < getNumSuccessors() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4106, __extension__ __PRETTY_FUNCTION__))
;
4107 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4108 }
4109
4110 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4111 assert(i < getNumIndirectDests() + 1 &&(static_cast <bool> (i < getNumIndirectDests() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4112, __extension__ __PRETTY_FUNCTION__))
4112 "Successor # out of range for callbr!")(static_cast <bool> (i < getNumIndirectDests() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4112, __extension__ __PRETTY_FUNCTION__))
;
4113 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4114 }
4115
4116 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4117
4118 // Methods for support type inquiry through isa, cast, and dyn_cast:
4119 static bool classof(const Instruction *I) {
4120 return (I->getOpcode() == Instruction::CallBr);
4121 }
4122 static bool classof(const Value *V) {
4123 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4124 }
4125
4126private:
4127 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4128 // method so that subclasses cannot accidentally use it.
4129 template <typename Bitfield>
4130 void setSubclassData(typename Bitfield::Type Value) {
4131 Instruction::setSubclassData<Bitfield>(Value);
4132 }
4133};
4134
// Inline constructor (insert-before form). Operands are co-allocated in
// front of the object, so the use list starts NumOperands slots before
// op_end(); init() then fills in callee, destinations, args and bundles.
4135CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4136 ArrayRef<BasicBlock *> IndirectDests,
4137 ArrayRef<Value *> Args,
4138 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4139 const Twine &NameStr, Instruction *InsertBefore)
4140 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4141 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4142 InsertBefore) {
4143 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4144}
4145
// Inline constructor (insert-at-block-end form). Identical to the
// insert-before overload except for where the instruction is placed.
4146CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4147 ArrayRef<BasicBlock *> IndirectDests,
4148 ArrayRef<Value *> Args,
4149 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4150 const Twine &NameStr, BasicBlock *InsertAtEnd)
4151 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4152 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4153 InsertAtEnd) {
4154 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4155}
4156
4157//===----------------------------------------------------------------------===//
4158// ResumeInst Class
4159//===----------------------------------------------------------------------===//
4160
4161//===---------------------------------------------------------------------------
4162/// Resume the propagation of an exception.
4163///
4164class ResumeInst : public Instruction {
// Copy construction is private; clones are made via cloneImpl().
4165 ResumeInst(const ResumeInst &RI);
4166
4167 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4168 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4169
4170protected:
4171 // Note: Instruction needs to be a friend here to call cloneImpl.
4172 friend class Instruction;
4173
4174 ResumeInst *cloneImpl() const;
4175
4176public:
// Placement new with operand count 1: the single operand (the exception
// value) is co-allocated with the instruction.
4177 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4178 return new(1) ResumeInst(Exn, InsertBefore);
4179 }
4180
4181 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4182 return new(1) ResumeInst(Exn, InsertAtEnd);
4183 }
4184
4185 /// Provide fast operand accessors
4186 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4187
4188 /// Convenience accessor.
// Returns the sole operand: the exception value being resumed.
4189 Value *getValue() const { return Op<0>(); }
4190
// A resume terminates the block without branching anywhere.
4191 unsigned getNumSuccessors() const { return 0; }
4192
4193 // Methods for support type inquiry through isa, cast, and dyn_cast:
4194 static bool classof(const Instruction *I) {
4195 return I->getOpcode() == Instruction::Resume;
4196 }
4197 static bool classof(const Value *V) {
4198 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4199 }
4200
4201private:
// Successor accessors are private and unconditionally unreachable: callers
// must never index successors of a resume. (llvm_unreachable appears
// macro-expanded by the analyzer dump.)
4202 BasicBlock *getSuccessor(unsigned idx) const {
4203 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4203)
;
4204 }
4205
4206 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4207 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4207)
;
4208 }
4209};
4210
// ResumeInst stores exactly one operand (the exception value) in a
// fixed-size co-allocated operand array.
4211template <>
4212struct OperandTraits<ResumeInst> :
4213 public FixedNumOperandTraits<ResumeInst, 1> {
4214};
4215
// NOTE(review): macro-expanded form of DEFINE_TRANSPARENT_OPERAND_ACCESSORS
// for ResumeInst, wrapped across continuation lines by the analyzer dump.
// Defines op_begin/op_end iterators, bounds-checked getOperand/setOperand,
// getNumOperands, and the Op<Idx>() helpers, all delegating to
// OperandTraits<ResumeInst> (fixed single operand).
4216DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits
<ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator
ResumeInst::op_begin() const { return OperandTraits<ResumeInst
>::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst
::op_iterator ResumeInst::op_end() { return OperandTraits<
ResumeInst>::op_end(this); } ResumeInst::const_op_iterator
ResumeInst::op_end() const { return OperandTraits<ResumeInst
>::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ResumeInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4216, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ResumeInst>::op_begin(const_cast
<ResumeInst*>(this))[i_nocapture].get()); } void ResumeInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<ResumeInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4216, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ResumeInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned ResumeInst::getNumOperands() const { return OperandTraits
<ResumeInst>::operands(this); } template <int Idx_nocapture
> Use &ResumeInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ResumeInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
4217
4218//===----------------------------------------------------------------------===//
4219// CatchSwitchInst Class
4220//===----------------------------------------------------------------------===//
4221class CatchSwitchInst : public Instruction {
4222 using UnwindDestField = BoolBitfieldElementT<0>;
4223
4224 /// The number of operands actually allocated. NumOperands is
4225 /// the number actually in use.
4226 unsigned ReservedSpace;
4227
4228 // Operand[0] = Outer scope
4229 // Operand[1] = Unwind block destination
4230 // Operand[n] = BasicBlock to go to on match
4231 CatchSwitchInst(const CatchSwitchInst &CSI);
4232
4233 /// Create a new switch instruction, specifying a
4234 /// default destination. The number of additional handlers can be specified
4235 /// here to make memory allocation more efficient.
4236 /// This constructor can also autoinsert before another instruction.
4237 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4238 unsigned NumHandlers, const Twine &NameStr,
4239 Instruction *InsertBefore);
4240
4241 /// Create a new switch instruction, specifying a
4242 /// default destination. The number of additional handlers can be specified
4243 /// here to make memory allocation more efficient.
4244 /// This constructor also autoinserts at the end of the specified BasicBlock.
4245 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4246 unsigned NumHandlers, const Twine &NameStr,
4247 BasicBlock *InsertAtEnd);
4248
4249 // allocate space for exactly zero operands
4250 void *operator new(size_t s) { return User::operator new(s); }
4251
4252 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4253 void growOperands(unsigned Size);
4254
4255protected:
4256 // Note: Instruction needs to be a friend here to call cloneImpl.
4257 friend class Instruction;
4258
4259 CatchSwitchInst *cloneImpl() const;
4260
4261public:
4262 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4263 unsigned NumHandlers,
4264 const Twine &NameStr = "",
4265 Instruction *InsertBefore = nullptr) {
4266 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4267 InsertBefore);
4268 }
4269
4270 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4271 unsigned NumHandlers, const Twine &NameStr,
4272 BasicBlock *InsertAtEnd) {
4273 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4274 InsertAtEnd);
4275 }
4276
4277 /// Provide fast operand accessors
4278 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4279
4280 // Accessor Methods for CatchSwitch stmt
4281 Value *getParentPad() const { return getOperand(0); }
4282 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4283
4284 // Accessor Methods for CatchSwitch stmt
4285 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4286 bool unwindsToCaller() const { return !hasUnwindDest(); }
4287 BasicBlock *getUnwindDest() const {
4288 if (hasUnwindDest())
4289 return cast<BasicBlock>(getOperand(1));
4290 return nullptr;
4291 }
4292 void setUnwindDest(BasicBlock *UnwindDest) {
4293 assert(UnwindDest)(static_cast <bool> (UnwindDest) ? void (0) : __assert_fail
("UnwindDest", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4293, __extension__ __PRETTY_FUNCTION__))
;
4294 assert(hasUnwindDest())(static_cast <bool> (hasUnwindDest()) ? void (0) : __assert_fail
("hasUnwindDest()", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4294, __extension__ __PRETTY_FUNCTION__))
;
4295 setOperand(1, UnwindDest);
4296 }
4297
4298 /// return the number of 'handlers' in this catchswitch
4299 /// instruction, except the default handler
4300 unsigned getNumHandlers() const {
4301 if (hasUnwindDest())
4302 return getNumOperands() - 2;
4303 return getNumOperands() - 1;
4304 }
4305
4306private:
4307 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4308 static const BasicBlock *handler_helper(const Value *V) {
4309 return cast<BasicBlock>(V);
4310 }
4311
4312public:
4313 using DerefFnTy = BasicBlock *(*)(Value *);
4314 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4315 using handler_range = iterator_range<handler_iterator>;
4316 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4317 using const_handler_iterator =
4318 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4319 using const_handler_range = iterator_range<const_handler_iterator>;
4320
4321 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4322 handler_iterator handler_begin() {
4323 op_iterator It = op_begin() + 1;
4324 if (hasUnwindDest())
4325 ++It;
4326 return handler_iterator(It, DerefFnTy(handler_helper));
4327 }
4328
4329 /// Returns an iterator that points to the first handler in the
4330 /// CatchSwitchInst.
4331 const_handler_iterator handler_begin() const {
4332 const_op_iterator It = op_begin() + 1;
4333 if (hasUnwindDest())
4334 ++It;
4335 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4336 }
4337
4338 /// Returns a read-only iterator that points one past the last
4339 /// handler in the CatchSwitchInst.
4340 handler_iterator handler_end() {
4341 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4342 }
4343
4344 /// Returns an iterator that points one past the last handler in the
4345 /// CatchSwitchInst.
4346 const_handler_iterator handler_end() const {
4347 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4348 }
4349
4350 /// iteration adapter for range-for loops.
4351 handler_range handlers() {
4352 return make_range(handler_begin(), handler_end());
4353 }
4354
4355 /// iteration adapter for range-for loops.
4356 const_handler_range handlers() const {
4357 return make_range(handler_begin(), handler_end());
4358 }
4359
4360 /// Add an entry to the switch instruction...
4361 /// Note:
4362 /// This action invalidates handler_end(). Old handler_end() iterator will
4363 /// point to the added handler.
4364 void addHandler(BasicBlock *Dest);
4365
  /// Remove the handler that \p HI points to (defined out of line).
4366 void removeHandler(handler_iterator HI);
4367
4368 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4369 BasicBlock *getSuccessor(unsigned Idx) const {
4370 assert(Idx < getNumSuccessors() &&(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4371, __extension__ __PRETTY_FUNCTION__))
4371 "Successor # out of range for catchswitch!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4371, __extension__ __PRETTY_FUNCTION__))
;
4372 return cast<BasicBlock>(getOperand(Idx + 1));
4373 }
4374 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4375 assert(Idx < getNumSuccessors() &&(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4376, __extension__ __PRETTY_FUNCTION__))
4376 "Successor # out of range for catchswitch!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4376, __extension__ __PRETTY_FUNCTION__))
;
4377 setOperand(Idx + 1, NewSucc);
4378 }
4379
4380 // Methods for support type inquiry through isa, cast, and dyn_cast:
4381 static bool classof(const Instruction *I) {
4382 return I->getOpcode() == Instruction::CatchSwitch;
4383 }
4384 static bool classof(const Value *V) {
4385 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4386 }
4387};
4388
// CatchSwitchInst stores its operands "hung off" (separately allocated) so
// the handler list can grow via addHandler. The template argument (2) is
// presumably the minimum operand count -- TODO confirm in OperandTraits.h.
4389template <>
4390struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4391
// Macro invocation (expansion shown inline by the analyzer dump): defines the
// out-of-class operand accessors declared by DECLARE_TRANSPARENT_OPERAND_ACCESSORS
// -- op_begin/op_end (const and non-const), getOperand/setOperand with range
// asserts, getNumOperands, and the templated Op<Idx>() accessors, all routed
// through OperandTraits<CatchSwitchInst>.
4392DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return
 OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst
::const_op_iterator CatchSwitchInst::op_begin() const { return
 OperandTraits<CatchSwitchInst>::op_begin(const_cast<
CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst
::op_end() { return OperandTraits<CatchSwitchInst>::op_end
(this); } CatchSwitchInst::const_op_iterator CatchSwitchInst::
op_end() const { return OperandTraits<CatchSwitchInst>::
op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<CatchSwitchInst>::
operands(this) && "getOperand() out of range!") ? void
 (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4392, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<CatchSwitchInst>::op_begin
(const_cast<CatchSwitchInst*>(this))[i_nocapture].get()
); } void CatchSwitchInst::setOperand(unsigned i_nocapture, Value
 *Val_nocapture) { (static_cast <bool> (i_nocapture <
 OperandTraits<CatchSwitchInst>::operands(this) &&
 "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4392, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
CatchSwitchInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned CatchSwitchInst::getNumOperands() const { return
 OperandTraits<CatchSwitchInst>::operands(this); } template
 <int Idx_nocapture> Use &CatchSwitchInst::Op() { return
 this->OpFrom<Idx_nocapture>(this); } template <int
 Idx_nocapture> const Use &CatchSwitchInst::Op() const
 { return this->OpFrom<Idx_nocapture>(this); }
4393
4394//===----------------------------------------------------------------------===//
4395// CleanupPadInst Class
4396//===----------------------------------------------------------------------===//
4397class CleanupPadInst : public FuncletPadInst {
4398private:
4399 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4400 unsigned Values, const Twine &NameStr,
4401 Instruction *InsertBefore)
4402 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4403 NameStr, InsertBefore) {}
4404 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4405 unsigned Values, const Twine &NameStr,
4406 BasicBlock *InsertAtEnd)
4407 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4408 NameStr, InsertAtEnd) {}
4409
4410public:
4411 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4412 const Twine &NameStr = "",
4413 Instruction *InsertBefore = nullptr) {
4414 unsigned Values = 1 + Args.size();
4415 return new (Values)
4416 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4417 }
4418
4419 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4420 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4421 unsigned Values = 1 + Args.size();
4422 return new (Values)
4423 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4424 }
4425
4426 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4427 static bool classof(const Instruction *I) {
4428 return I->getOpcode() == Instruction::CleanupPad;
4429 }
4430 static bool classof(const Value *V) {
4431 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4432 }
4433};
4434
4435//===----------------------------------------------------------------------===//
4436// CatchPadInst Class
4437//===----------------------------------------------------------------------===//
4438class CatchPadInst : public FuncletPadInst {
4439private:
4440 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4441 unsigned Values, const Twine &NameStr,
4442 Instruction *InsertBefore)
4443 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4444 NameStr, InsertBefore) {}
4445 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4446 unsigned Values, const Twine &NameStr,
4447 BasicBlock *InsertAtEnd)
4448 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4449 NameStr, InsertAtEnd) {}
4450
4451public:
4452 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4453 const Twine &NameStr = "",
4454 Instruction *InsertBefore = nullptr) {
4455 unsigned Values = 1 + Args.size();
4456 return new (Values)
4457 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4458 }
4459
4460 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4461 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4462 unsigned Values = 1 + Args.size();
4463 return new (Values)
4464 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4465 }
4466
4467 /// Convenience accessors
4468 CatchSwitchInst *getCatchSwitch() const {
4469 return cast<CatchSwitchInst>(Op<-1>());
4470 }
4471 void setCatchSwitch(Value *CatchSwitch) {
4472 assert(CatchSwitch)(static_cast <bool> (CatchSwitch) ? void (0) : __assert_fail
("CatchSwitch", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4472, __extension__ __PRETTY_FUNCTION__))
;
4473 Op<-1>() = CatchSwitch;
4474 }
4475
4476 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4477 static bool classof(const Instruction *I) {
4478 return I->getOpcode() == Instruction::CatchPad;
4479 }
4480 static bool classof(const Value *V) {
4481 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4482 }
4483};
4484
4485//===----------------------------------------------------------------------===//
4486// CatchReturnInst Class
4487//===----------------------------------------------------------------------===//
4488
4489class CatchReturnInst : public Instruction {
4490 CatchReturnInst(const CatchReturnInst &RI);
4491 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4492 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4493
4494 void init(Value *CatchPad, BasicBlock *BB);
4495
4496protected:
4497 // Note: Instruction needs to be a friend here to call cloneImpl.
4498 friend class Instruction;
4499
4500 CatchReturnInst *cloneImpl() const;
4501
4502public:
4503 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4504 Instruction *InsertBefore = nullptr) {
4505 assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail
("CatchPad", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4505, __extension__ __PRETTY_FUNCTION__))
;
4506 assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4506, __extension__ __PRETTY_FUNCTION__))
;
4507 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4508 }
4509
4510 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4511 BasicBlock *InsertAtEnd) {
4512 assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail
("CatchPad", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4512, __extension__ __PRETTY_FUNCTION__))
;
4513 assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 4513, __extension__ __PRETTY_FUNCTION__))
;
4514 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4515 }
4516
4517 /// Provide fast operand accessors
4518 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); t