Bug Summary

File: llvm/lib/Target/X86/X86PartialReduction.cpp
Warning: line 238, column 26
Division by zero

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86PartialReduction.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/X86 -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include 
-internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-07-26-235520-9401-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/X86/X86PartialReduction.cpp

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/X86/X86PartialReduction.cpp

1//===-- X86PartialReduction.cpp -------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass looks for add instructions used by a horizontal reduction to see
10// if we might be able to use pmaddwd or psadbw. Some cases of this require
11// cross basic block knowledge and can't be done in SelectionDAG.
12//
13//===----------------------------------------------------------------------===//
14
15#include "X86.h"
16#include "llvm/Analysis/ValueTracking.h"
17#include "llvm/CodeGen/TargetPassConfig.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/Instructions.h"
20#include "llvm/IR/IntrinsicsX86.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/Operator.h"
23#include "llvm/Pass.h"
24#include "X86TargetMachine.h"
25
26using namespace llvm;
27
#define DEBUG_TYPE "x86-partial-reduction"
29
30namespace {
31
32class X86PartialReduction : public FunctionPass {
33 const DataLayout *DL;
34 const X86Subtarget *ST;
35
36public:
37 static char ID; // Pass identification, replacement for typeid.
38
39 X86PartialReduction() : FunctionPass(ID) { }
40
41 bool runOnFunction(Function &Fn) override;
42
43 void getAnalysisUsage(AnalysisUsage &AU) const override {
44 AU.setPreservesCFG();
45 }
46
47 StringRef getPassName() const override {
48 return "X86 Partial Reduction";
49 }
50
51private:
52 bool tryMAddReplacement(Instruction *Op);
53 bool trySADReplacement(Instruction *Op);
54};
55}
56
57FunctionPass *llvm::createX86PartialReductionPass() {
58 return new X86PartialReduction();
59}
60
61char X86PartialReduction::ID = 0;
62
63INITIALIZE_PASS(X86PartialReduction, DEBUG_TYPE,static void *initializeX86PartialReductionPassOnce(PassRegistry
&Registry) { PassInfo *PI = new PassInfo( "X86 Partial Reduction"
, "x86-partial-reduction", &X86PartialReduction::ID, PassInfo
::NormalCtor_t(callDefaultCtor<X86PartialReduction>), false
, false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeX86PartialReductionPassFlag; void llvm
::initializeX86PartialReductionPass(PassRegistry &Registry
) { llvm::call_once(InitializeX86PartialReductionPassFlag, initializeX86PartialReductionPassOnce
, std::ref(Registry)); }
64 "X86 Partial Reduction", false, false)static void *initializeX86PartialReductionPassOnce(PassRegistry
&Registry) { PassInfo *PI = new PassInfo( "X86 Partial Reduction"
, "x86-partial-reduction", &X86PartialReduction::ID, PassInfo
::NormalCtor_t(callDefaultCtor<X86PartialReduction>), false
, false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeX86PartialReductionPassFlag; void llvm
::initializeX86PartialReductionPass(PassRegistry &Registry
) { llvm::call_once(InitializeX86PartialReductionPassFlag, initializeX86PartialReductionPassOnce
, std::ref(Registry)); }
65
66bool X86PartialReduction::tryMAddReplacement(Instruction *Op) {
67 if (!ST->hasSSE2())
68 return false;
69
70 // Need at least 8 elements.
71 if (cast<FixedVectorType>(Op->getType())->getNumElements() < 8)
72 return false;
73
74 // Element type should be i32.
75 if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
76 return false;
77
78 auto *Mul = dyn_cast<BinaryOperator>(Op);
79 if (!Mul || Mul->getOpcode() != Instruction::Mul)
80 return false;
81
82 Value *LHS = Mul->getOperand(0);
83 Value *RHS = Mul->getOperand(1);
84
85 // LHS and RHS should be only used once or if they are the same then only
86 // used twice. Only check this when SSE4.1 is enabled and we have zext/sext
87 // instructions, otherwise we use punpck to emulate zero extend in stages. The
88 // trunc/ we need to do likely won't introduce new instructions in that case.
89 if (ST->hasSSE41()) {
90 if (LHS == RHS) {
91 if (!isa<Constant>(LHS) && !LHS->hasNUses(2))
92 return false;
93 } else {
94 if (!isa<Constant>(LHS) && !LHS->hasOneUse())
95 return false;
96 if (!isa<Constant>(RHS) && !RHS->hasOneUse())
97 return false;
98 }
99 }
100
101 auto CanShrinkOp = [&](Value *Op) {
102 auto IsFreeTruncation = [&](Value *Op) {
103 if (auto *Cast = dyn_cast<CastInst>(Op)) {
104 if (Cast->getParent() == Mul->getParent() &&
105 (Cast->getOpcode() == Instruction::SExt ||
106 Cast->getOpcode() == Instruction::ZExt) &&
107 Cast->getOperand(0)->getType()->getScalarSizeInBits() <= 16)
108 return true;
109 }
110
111 return isa<Constant>(Op);
112 };
113
114 // If the operation can be freely truncated and has enough sign bits we
115 // can shrink.
116 if (IsFreeTruncation(Op) &&
117 ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
118 return true;
119
120 // SelectionDAG has limited support for truncating through an add or sub if
121 // the inputs are freely truncatable.
122 if (auto *BO = dyn_cast<BinaryOperator>(Op)) {
123 if (BO->getParent() == Mul->getParent() &&
124 IsFreeTruncation(BO->getOperand(0)) &&
125 IsFreeTruncation(BO->getOperand(1)) &&
126 ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
127 return true;
128 }
129
130 return false;
131 };
132
133 // Both Ops need to be shrinkable.
134 if (!CanShrinkOp(LHS) && !CanShrinkOp(RHS))
135 return false;
136
137 IRBuilder<> Builder(Mul);
138
139 auto *MulTy = cast<FixedVectorType>(Op->getType());
140 unsigned NumElts = MulTy->getNumElements();
141
142 // Extract even elements and odd elements and add them together. This will
143 // be pattern matched by SelectionDAG to pmaddwd. This instruction will be
144 // half the original width.
145 SmallVector<int, 16> EvenMask(NumElts / 2);
146 SmallVector<int, 16> OddMask(NumElts / 2);
147 for (int i = 0, e = NumElts / 2; i != e; ++i) {
148 EvenMask[i] = i * 2;
149 OddMask[i] = i * 2 + 1;
150 }
151 // Creating a new mul so the replaceAllUsesWith below doesn't replace the
152 // uses in the shuffles we're creating.
153 Value *NewMul = Builder.CreateMul(Mul->getOperand(0), Mul->getOperand(1));
154 Value *EvenElts = Builder.CreateShuffleVector(NewMul, NewMul, EvenMask);
155 Value *OddElts = Builder.CreateShuffleVector(NewMul, NewMul, OddMask);
156 Value *MAdd = Builder.CreateAdd(EvenElts, OddElts);
157
158 // Concatenate zeroes to extend back to the original type.
159 SmallVector<int, 32> ConcatMask(NumElts);
160 std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
161 Value *Zero = Constant::getNullValue(MAdd->getType());
162 Value *Concat = Builder.CreateShuffleVector(MAdd, Zero, ConcatMask);
163
164 Mul->replaceAllUsesWith(Concat);
165 Mul->eraseFromParent();
166
167 return true;
168}
169
170bool X86PartialReduction::trySADReplacement(Instruction *Op) {
171 if (!ST->hasSSE2())
12
Calling 'X86Subtarget::hasSSE2'
14
Returning from 'X86Subtarget::hasSSE2'
15
Taking false branch
172 return false;
173
174 // TODO: There's nothing special about i32, any integer type above i16 should
175 // work just as well.
176 if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
16
The object is a 'VectorType'
17
Assuming the condition is false
18
Taking false branch
177 return false;
178
179 // Operand should be a select.
180 auto *SI = dyn_cast<SelectInst>(Op);
19
Assuming 'Op' is a 'SelectInst'
181 if (!SI
19.1
'SI' is non-null
19.1
'SI' is non-null
19.1
'SI' is non-null
)
20
Taking false branch
182 return false;
183
184 // Select needs to implement absolute value.
185 Value *LHS, *RHS;
186 auto SPR = matchSelectPattern(SI, LHS, RHS);
187 if (SPR.Flavor != SPF_ABS)
21
Assuming field 'Flavor' is equal to SPF_ABS
22
Taking false branch
188 return false;
189
190 // Need a subtract of two values.
191 auto *Sub = dyn_cast<BinaryOperator>(LHS);
23
Assuming 'LHS' is a 'BinaryOperator'
192 if (!Sub
23.1
'Sub' is non-null
23.1
'Sub' is non-null
23.1
'Sub' is non-null
|| Sub->getOpcode() != Instruction::Sub)
24
Assuming the condition is false
25
Taking false branch
193 return false;
194
195 // Look for zero extend from i8.
196 auto getZeroExtendedVal = [](Value *Op) -> Value * {
197 if (auto *ZExt = dyn_cast<ZExtInst>(Op))
27
Assuming 'ZExt' is non-null
28
Taking true branch
40
Assuming 'ZExt' is non-null
41
Taking true branch
198 if (cast<VectorType>(ZExt->getOperand(0)->getType())
29
The object is a 'VectorType'
30
Assuming the condition is true
31
Taking true branch
42
The object is a 'VectorType'
43
Assuming the condition is true
44
Taking true branch
199 ->getElementType()
200 ->isIntegerTy(8))
201 return ZExt->getOperand(0);
32
Calling 'UnaryInstruction::getOperand'
36
Returning from 'UnaryInstruction::getOperand'
37
Returning pointer, which participates in a condition later
45
Calling 'UnaryInstruction::getOperand'
49
Returning from 'UnaryInstruction::getOperand'
50
Returning pointer, which participates in a condition later
202
203 return nullptr;
204 };
205
206 // Both operands of the subtract should be extends from vXi8.
207 Value *Op0 = getZeroExtendedVal(Sub->getOperand(0));
26
Calling 'operator()'
38
Returning from 'operator()'
208 Value *Op1 = getZeroExtendedVal(Sub->getOperand(1));
39
Calling 'operator()'
51
Returning from 'operator()'
209 if (!Op0
51.1
'Op0' is non-null
51.1
'Op0' is non-null
51.1
'Op0' is non-null
|| !Op1
51.2
'Op1' is non-null
51.2
'Op1' is non-null
51.2
'Op1' is non-null
)
52
Taking false branch
210 return false;
211
212 IRBuilder<> Builder(SI);
213
214 auto *OpTy = cast<FixedVectorType>(Op->getType());
53
The object is a 'FixedVectorType'
215 unsigned NumElts = OpTy->getNumElements();
54
'NumElts' initialized here
216
217 unsigned IntrinsicNumElts;
218 Intrinsic::ID IID;
219 if (ST->hasBWI() && NumElts >= 64) {
55
Assuming the condition is false
220 IID = Intrinsic::x86_avx512_psad_bw_512;
221 IntrinsicNumElts = 64;
222 } else if (ST->hasAVX2() && NumElts >= 32) {
223 IID = Intrinsic::x86_avx2_psad_bw;
224 IntrinsicNumElts = 32;
225 } else {
226 IID = Intrinsic::x86_sse2_psad_bw;
227 IntrinsicNumElts = 16;
228 }
229
230 Function *PSADBWFn = Intrinsic::getDeclaration(SI->getModule(), IID);
231
232 if (NumElts < 16) {
56
Assuming 'NumElts' is < 16
57
Taking true branch
233 // Pad input with zeroes.
234 SmallVector<int, 32> ConcatMask(16);
235 for (unsigned i = 0; i != NumElts; ++i)
58
Assuming 'i' is equal to 'NumElts'
59
Loop condition is false. Execution continues on line 237
236 ConcatMask[i] = i;
237 for (unsigned i = NumElts; i != 16; ++i)
60
Loop condition is true. Entering loop body
238 ConcatMask[i] = (i % NumElts) + NumElts;
61
Division by zero
239
240 Value *Zero = Constant::getNullValue(Op0->getType());
241 Op0 = Builder.CreateShuffleVector(Op0, Zero, ConcatMask);
242 Op1 = Builder.CreateShuffleVector(Op1, Zero, ConcatMask);
243 NumElts = 16;
244 }
245
246 // Intrinsics produce vXi64 and need to be casted to vXi32.
247 auto *I32Ty =
248 FixedVectorType::get(Builder.getInt32Ty(), IntrinsicNumElts / 4);
249
250 assert(NumElts % IntrinsicNumElts == 0 && "Unexpected number of elements!")(static_cast <bool> (NumElts % IntrinsicNumElts == 0 &&
"Unexpected number of elements!") ? void (0) : __assert_fail
("NumElts % IntrinsicNumElts == 0 && \"Unexpected number of elements!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/X86/X86PartialReduction.cpp"
, 250, __extension__ __PRETTY_FUNCTION__))
;
251 unsigned NumSplits = NumElts / IntrinsicNumElts;
252
253 // First collect the pieces we need.
254 SmallVector<Value *, 4> Ops(NumSplits);
255 for (unsigned i = 0; i != NumSplits; ++i) {
256 SmallVector<int, 64> ExtractMask(IntrinsicNumElts);
257 std::iota(ExtractMask.begin(), ExtractMask.end(), i * IntrinsicNumElts);
258 Value *ExtractOp0 = Builder.CreateShuffleVector(Op0, Op0, ExtractMask);
259 Value *ExtractOp1 = Builder.CreateShuffleVector(Op1, Op0, ExtractMask);
260 Ops[i] = Builder.CreateCall(PSADBWFn, {ExtractOp0, ExtractOp1});
261 Ops[i] = Builder.CreateBitCast(Ops[i], I32Ty);
262 }
263
264 assert(isPowerOf2_32(NumSplits) && "Expected power of 2 splits")(static_cast <bool> (isPowerOf2_32(NumSplits) &&
"Expected power of 2 splits") ? void (0) : __assert_fail ("isPowerOf2_32(NumSplits) && \"Expected power of 2 splits\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/X86/X86PartialReduction.cpp"
, 264, __extension__ __PRETTY_FUNCTION__))
;
265 unsigned Stages = Log2_32(NumSplits);
266 for (unsigned s = Stages; s > 0; --s) {
267 unsigned NumConcatElts =
268 cast<FixedVectorType>(Ops[0]->getType())->getNumElements() * 2;
269 for (unsigned i = 0; i != 1U << (s - 1); ++i) {
270 SmallVector<int, 64> ConcatMask(NumConcatElts);
271 std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
272 Ops[i] = Builder.CreateShuffleVector(Ops[i*2], Ops[i*2+1], ConcatMask);
273 }
274 }
275
276 // At this point the final value should be in Ops[0]. Now we need to adjust
277 // it to the final original type.
278 NumElts = cast<FixedVectorType>(OpTy)->getNumElements();
279 if (NumElts == 2) {
280 // Extract down to 2 elements.
281 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{0, 1});
282 } else if (NumElts >= 8) {
283 SmallVector<int, 32> ConcatMask(NumElts);
284 unsigned SubElts =
285 cast<FixedVectorType>(Ops[0]->getType())->getNumElements();
286 for (unsigned i = 0; i != SubElts; ++i)
287 ConcatMask[i] = i;
288 for (unsigned i = SubElts; i != NumElts; ++i)
289 ConcatMask[i] = (i % SubElts) + SubElts;
290
291 Value *Zero = Constant::getNullValue(Ops[0]->getType());
292 Ops[0] = Builder.CreateShuffleVector(Ops[0], Zero, ConcatMask);
293 }
294
295 SI->replaceAllUsesWith(Ops[0]);
296 SI->eraseFromParent();
297
298 return true;
299}
300
301// Walk backwards from the ExtractElementInst and determine if it is the end of
302// a horizontal reduction. Return the input to the reduction if we find one.
303static Value *matchAddReduction(const ExtractElementInst &EE) {
304 // Make sure we're extracting index 0.
305 auto *Index = dyn_cast<ConstantInt>(EE.getIndexOperand());
306 if (!Index || !Index->isNullValue())
307 return nullptr;
308
309 const auto *BO = dyn_cast<BinaryOperator>(EE.getVectorOperand());
310 if (!BO || BO->getOpcode() != Instruction::Add || !BO->hasOneUse())
311 return nullptr;
312
313 unsigned NumElems = cast<FixedVectorType>(BO->getType())->getNumElements();
314 // Ensure the reduction size is a power of 2.
315 if (!isPowerOf2_32(NumElems))
316 return nullptr;
317
318 const Value *Op = BO;
319 unsigned Stages = Log2_32(NumElems);
320 for (unsigned i = 0; i != Stages; ++i) {
321 const auto *BO = dyn_cast<BinaryOperator>(Op);
322 if (!BO || BO->getOpcode() != Instruction::Add)
323 return nullptr;
324
325 // If this isn't the first add, then it should only have 2 users, the
326 // shuffle and another add which we checked in the previous iteration.
327 if (i != 0 && !BO->hasNUses(2))
328 return nullptr;
329
330 Value *LHS = BO->getOperand(0);
331 Value *RHS = BO->getOperand(1);
332
333 auto *Shuffle = dyn_cast<ShuffleVectorInst>(LHS);
334 if (Shuffle) {
335 Op = RHS;
336 } else {
337 Shuffle = dyn_cast<ShuffleVectorInst>(RHS);
338 Op = LHS;
339 }
340
341 // The first operand of the shuffle should be the same as the other operand
342 // of the bin op.
343 if (!Shuffle || Shuffle->getOperand(0) != Op)
344 return nullptr;
345
346 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
347 unsigned MaskEnd = 1 << i;
348 for (unsigned Index = 0; Index < MaskEnd; ++Index)
349 if (Shuffle->getMaskValue(Index) != (int)(MaskEnd + Index))
350 return nullptr;
351 }
352
353 return const_cast<Value *>(Op);
354}
355
356// See if this BO is reachable from this Phi by walking forward through single
357// use BinaryOperators with the same opcode. If we get back then we know we've
358// found a loop and it is safe to step through this Add to find more leaves.
359static bool isReachableFromPHI(PHINode *Phi, BinaryOperator *BO) {
360 // The PHI itself should only have one use.
361 if (!Phi->hasOneUse())
362 return false;
363
364 Instruction *U = cast<Instruction>(*Phi->user_begin());
365 if (U == BO)
366 return true;
367
368 while (U->hasOneUse() && U->getOpcode() == BO->getOpcode())
369 U = cast<Instruction>(*U->user_begin());
370
371 return U == BO;
372}
373
374// Collect all the leaves of the tree of adds that feeds into the horizontal
375// reduction. Root is the Value that is used by the horizontal reduction.
376// We look through single use phis, single use adds, or adds that are used by
377// a phi that forms a loop with the add.
378static void collectLeaves(Value *Root, SmallVectorImpl<Instruction *> &Leaves) {
379 SmallPtrSet<Value *, 8> Visited;
380 SmallVector<Value *, 8> Worklist;
381 Worklist.push_back(Root);
382
383 while (!Worklist.empty()) {
384 Value *V = Worklist.pop_back_val();
385 if (!Visited.insert(V).second)
386 continue;
387
388 if (auto *PN = dyn_cast<PHINode>(V)) {
389 // PHI node should have single use unless it is the root node, then it
390 // has 2 uses.
391 if (!PN->hasNUses(PN == Root ? 2 : 1))
392 break;
393
394 // Push incoming values to the worklist.
395 append_range(Worklist, PN->incoming_values());
396
397 continue;
398 }
399
400 if (auto *BO = dyn_cast<BinaryOperator>(V)) {
401 if (BO->getOpcode() == Instruction::Add) {
402 // Simple case. Single use, just push its operands to the worklist.
403 if (BO->hasNUses(BO == Root ? 2 : 1)) {
404 append_range(Worklist, BO->operands());
405 continue;
406 }
407
408 // If there is additional use, make sure it is an unvisited phi that
409 // gets us back to this node.
410 if (BO->hasNUses(BO == Root ? 3 : 2)) {
411 PHINode *PN = nullptr;
412 for (auto *U : Root->users())
413 if (auto *P = dyn_cast<PHINode>(U))
414 if (!Visited.count(P))
415 PN = P;
416
417 // If we didn't find a 2-input PHI then this isn't a case we can
418 // handle.
419 if (!PN || PN->getNumIncomingValues() != 2)
420 continue;
421
422 // Walk forward from this phi to see if it reaches back to this add.
423 if (!isReachableFromPHI(PN, BO))
424 continue;
425
426 // The phi forms a loop with this Add, push its operands.
427 append_range(Worklist, BO->operands());
428 }
429 }
430 }
431
432 // Not an add or phi, make it a leaf.
433 if (auto *I = dyn_cast<Instruction>(V)) {
434 if (!V->hasNUses(I == Root ? 2 : 1))
435 continue;
436
437 // Add this as a leaf.
438 Leaves.push_back(I);
439 }
440 }
441}
442
443bool X86PartialReduction::runOnFunction(Function &F) {
444 if (skipFunction(F))
1
Assuming the condition is false
2
Taking false branch
445 return false;
446
447 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
448 if (!TPC)
3
Assuming 'TPC' is non-null
4
Taking false branch
449 return false;
450
451 auto &TM = TPC->getTM<X86TargetMachine>();
452 ST = TM.getSubtargetImpl(F);
453
454 DL = &F.getParent()->getDataLayout();
455
456 bool MadeChange = false;
457 for (auto &BB : F) {
458 for (auto &I : BB) {
459 auto *EE = dyn_cast<ExtractElementInst>(&I);
5
Assuming the object is a 'ExtractElementInst'
460 if (!EE
5.1
'EE' is non-null
5.1
'EE' is non-null
5.1
'EE' is non-null
)
6
Taking false branch
461 continue;
462
463 // First find a reduction tree.
464 // FIXME: Do we need to handle other opcodes than Add?
465 Value *Root = matchAddReduction(*EE);
466 if (!Root
6.1
'Root' is non-null
6.1
'Root' is non-null
6.1
'Root' is non-null
)
7
Taking false branch
467 continue;
468
469 SmallVector<Instruction *, 8> Leaves;
470 collectLeaves(Root, Leaves);
471
472 for (Instruction *I : Leaves) {
8
Assuming '__begin3' is not equal to '__end3'
473 if (tryMAddReplacement(I)) {
9
Taking false branch
474 MadeChange = true;
475 continue;
476 }
477
478 // Don't do SAD matching on the root node. SelectionDAG already
479 // has support for that and currently generates better code.
480 if (I != Root && trySADReplacement(I))
10
Assuming 'I' is not equal to 'Root'
11
Calling 'X86PartialReduction::trySADReplacement'
481 MadeChange = true;
482 }
483 }
484 }
485
486 return MadeChange;
487}

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/X86/X86Subtarget.h

1//===-- X86Subtarget.h - Define Subtarget for the X86 ----------*- C++ -*--===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the X86 specific subclass of TargetSubtargetInfo.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_X86_X86SUBTARGET_H
14#define LLVM_LIB_TARGET_X86_X86SUBTARGET_H
15
16#include "X86FrameLowering.h"
17#include "X86ISelLowering.h"
18#include "X86InstrInfo.h"
19#include "X86SelectionDAGInfo.h"
20#include "llvm/ADT/Triple.h"
21#include "llvm/CodeGen/TargetSubtargetInfo.h"
22#include "llvm/IR/CallingConv.h"
23#include <climits>
24#include <memory>
25
26#define GET_SUBTARGETINFO_HEADER
27#include "X86GenSubtargetInfo.inc"
28
29namespace llvm {
30
31class CallLowering;
32class GlobalValue;
33class InstructionSelector;
34class LegalizerInfo;
35class RegisterBankInfo;
36class StringRef;
37class TargetMachine;
38
39/// The X86 backend supports a number of different styles of PIC.
40///
41namespace PICStyles {
42
43enum class Style {
44 StubPIC, // Used on i386-darwin in pic mode.
45 GOT, // Used on 32 bit elf on when in pic mode.
46 RIPRel, // Used on X86-64 when in pic mode.
47 None // Set when not in pic mode.
48};
49
50} // end namespace PICStyles
51
52class X86Subtarget final : public X86GenSubtargetInfo {
53 // NOTE: Do not add anything new to this list. Coarse, CPU name based flags
54 // are not a good idea. We should be migrating away from these.
55 enum X86ProcFamilyEnum {
56 Others,
57 IntelAtom,
58 IntelSLM
59 };
60
61 enum X86SSEEnum {
62 NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
63 };
64
65 enum X863DNowEnum {
66 NoThreeDNow, MMX, ThreeDNow, ThreeDNowA
67 };
68
69 /// X86 processor family: Intel Atom, and others
70 X86ProcFamilyEnum X86ProcFamily = Others;
71
72 /// Which PIC style to use
73 PICStyles::Style PICStyle;
74
75 const TargetMachine &TM;
76
77 /// SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, or none supported.
78 X86SSEEnum X86SSELevel = NoSSE;
79
80 /// MMX, 3DNow, 3DNow Athlon, or none supported.
81 X863DNowEnum X863DNowLevel = NoThreeDNow;
82
83 /// True if the processor supports X87 instructions.
84 bool HasX87 = false;
85
86 /// True if the processor supports CMPXCHG8B.
87 bool HasCmpxchg8b = false;
88
89 /// True if this processor has NOPL instruction
90 /// (generally pentium pro+).
91 bool HasNOPL = false;
92
93 /// True if this processor has conditional move instructions
94 /// (generally pentium pro+).
95 bool HasCMov = false;
96
97 /// True if the processor supports X86-64 instructions.
98 bool HasX86_64 = false;
99
100 /// True if the processor supports POPCNT.
101 bool HasPOPCNT = false;
102
103 /// True if the processor supports SSE4A instructions.
104 bool HasSSE4A = false;
105
106 /// Target has AES instructions
107 bool HasAES = false;
108 bool HasVAES = false;
109
110 /// Target has FXSAVE/FXRESTOR instructions
111 bool HasFXSR = false;
112
113 /// Target has XSAVE instructions
114 bool HasXSAVE = false;
115
116 /// Target has XSAVEOPT instructions
117 bool HasXSAVEOPT = false;
118
119 /// Target has XSAVEC instructions
120 bool HasXSAVEC = false;
121
122 /// Target has XSAVES instructions
123 bool HasXSAVES = false;
124
125 /// Target has carry-less multiplication
126 bool HasPCLMUL = false;
127 bool HasVPCLMULQDQ = false;
128
129 /// Target has Galois Field Arithmetic instructions
130 bool HasGFNI = false;
131
132 /// Target has 3-operand fused multiply-add
133 bool HasFMA = false;
134
135 /// Target has 4-operand fused multiply-add
136 bool HasFMA4 = false;
137
138 /// Target has XOP instructions
139 bool HasXOP = false;
140
141 /// Target has TBM instructions.
142 bool HasTBM = false;
143
144 /// Target has LWP instructions
145 bool HasLWP = false;
146
147 /// True if the processor has the MOVBE instruction.
148 bool HasMOVBE = false;
149
150 /// True if the processor has the RDRAND instruction.
151 bool HasRDRAND = false;
152
153 /// Processor has 16-bit floating point conversion instructions.
154 bool HasF16C = false;
155
156 /// Processor has FS/GS base insturctions.
157 bool HasFSGSBase = false;
158
159 /// Processor has LZCNT instruction.
160 bool HasLZCNT = false;
161
162 /// Processor has BMI1 instructions.
163 bool HasBMI = false;
164
165 /// Processor has BMI2 instructions.
166 bool HasBMI2 = false;
167
168 /// Processor has VBMI instructions.
169 bool HasVBMI = false;
170
171 /// Processor has VBMI2 instructions.
172 bool HasVBMI2 = false;
173
174 /// Processor has Integer Fused Multiply Add
175 bool HasIFMA = false;
176
177 /// Processor has RTM instructions.
178 bool HasRTM = false;
179
180 /// Processor has ADX instructions.
181 bool HasADX = false;
182
183 /// Processor has SHA instructions.
184 bool HasSHA = false;
185
186 /// Processor has PRFCHW instructions.
187 bool HasPRFCHW = false;
188
189 /// Processor has RDSEED instructions.
190 bool HasRDSEED = false;
191
192 /// Processor has LAHF/SAHF instructions in 64-bit mode.
193 bool HasLAHFSAHF64 = false;
194
195 /// Processor has MONITORX/MWAITX instructions.
196 bool HasMWAITX = false;
197
198 /// Processor has Cache Line Zero instruction
199 bool HasCLZERO = false;
200
201 /// Processor has Cache Line Demote instruction
202 bool HasCLDEMOTE = false;
203
204 /// Processor has MOVDIRI instruction (direct store integer).
205 bool HasMOVDIRI = false;
206
207 /// Processor has MOVDIR64B instruction (direct store 64 bytes).
208 bool HasMOVDIR64B = false;
209
210 /// Processor has ptwrite instruction.
211 bool HasPTWRITE = false;
212
213 /// Processor has Prefetch with intent to Write instruction
214 bool HasPREFETCHWT1 = false;
215
216 /// True if SHLD instructions are slow.
217 bool IsSHLDSlow = false;
218
219 /// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
220 // PMULUDQ.
221 bool IsPMULLDSlow = false;
222
223 /// True if the PMADDWD instruction is slow compared to PMULLD.
224 bool IsPMADDWDSlow = false;
225
226 /// True if unaligned memory accesses of 16-bytes are slow.
227 bool IsUAMem16Slow = false;
228
229 /// True if unaligned memory accesses of 32-bytes are slow.
230 bool IsUAMem32Slow = false;
231
232 /// True if SSE operations can have unaligned memory operands.
233 /// This may require setting a configuration bit in the processor.
234 bool HasSSEUnalignedMem = false;
235
236 /// True if this processor has the CMPXCHG16B instruction;
237 /// this is true for most x86-64 chips, but not the first AMD chips.
238 bool HasCmpxchg16b = false;
239
240 /// True if the LEA instruction should be used for adjusting
241 /// the stack pointer. This is an optimization for Intel Atom processors.
242 bool UseLeaForSP = false;
243
244 /// True if POPCNT instruction has a false dependency on the destination register.
245 bool HasPOPCNTFalseDeps = false;
246
247 /// True if LZCNT/TZCNT instructions have a false dependency on the destination register.
248 bool HasLZCNTFalseDeps = false;
249
250 /// True if its preferable to combine to a single cross-lane shuffle
251 /// using a variable mask over multiple fixed shuffles.
252 bool HasFastVariableCrossLaneShuffle = false;
253
254 /// True if its preferable to combine to a single per-lane shuffle
255 /// using a variable mask over multiple fixed shuffles.
256 bool HasFastVariablePerLaneShuffle = false;
257
258 /// True if vzeroupper instructions should be inserted after code that uses
259 /// ymm or zmm registers.
260 bool InsertVZEROUPPER = false;
261
262 /// True if there is no performance penalty for writing NOPs with up to
263 /// 7 bytes.
264 bool HasFast7ByteNOP = false;
265
266 /// True if there is no performance penalty for writing NOPs with up to
267 /// 11 bytes.
268 bool HasFast11ByteNOP = false;
269
270 /// True if there is no performance penalty for writing NOPs with up to
271 /// 15 bytes.
272 bool HasFast15ByteNOP = false;
273
274 /// True if gather is reasonably fast. This is true for Skylake client and
275 /// all AVX-512 CPUs.
276 bool HasFastGather = false;
277
278 /// True if hardware SQRTSS instruction is at least as fast (latency) as
279 /// RSQRTSS followed by a Newton-Raphson iteration.
280 bool HasFastScalarFSQRT = false;
281
282 /// True if hardware SQRTPS/VSQRTPS instructions are at least as fast
283 /// (throughput) as RSQRTPS/VRSQRTPS followed by a Newton-Raphson iteration.
284 bool HasFastVectorFSQRT = false;
285
286 /// True if 8-bit divisions are significantly faster than
287 /// 32-bit divisions and should be used when possible.
288 bool HasSlowDivide32 = false;
289
290 /// True if 32-bit divides are significantly faster than
291 /// 64-bit divisions and should be used when possible.
292 bool HasSlowDivide64 = false;
293
294 /// True if LZCNT instruction is fast.
295 bool HasFastLZCNT = false;
296
297 /// True if SHLD based rotate is fast.
298 bool HasFastSHLDRotate = false;
299
300 /// True if the processor supports macrofusion.
301 bool HasMacroFusion = false;
302
303 /// True if the processor supports branch fusion.
304 bool HasBranchFusion = false;
305
306 /// True if the processor has enhanced REP MOVSB/STOSB.
307 bool HasERMSB = false;
308
309 /// True if the processor has fast short REP MOV.
310 bool HasFSRM = false;
311
312 /// True if the short functions should be padded to prevent
313 /// a stall when returning too early.
314 bool PadShortFunctions = false;
315
316 /// True if two memory operand instructions should use a temporary register
317 /// instead.
318 bool SlowTwoMemOps = false;
319
320 /// True if the LEA instruction inputs have to be ready at address generation
321 /// (AG) time.
322 bool LEAUsesAG = false;
323
324 /// True if the LEA instruction with certain arguments is slow
325 bool SlowLEA = false;
326
327 /// True if the LEA instruction has all three source operands: base, index,
328 /// and offset or if the LEA instruction uses base and index registers where
329 /// the base is EBP, RBP,or R13
330 bool Slow3OpsLEA = false;
331
332 /// True if INC and DEC instructions are slow when writing to flags
333 bool SlowIncDec = false;
334
335 /// Processor has AVX-512 PreFetch Instructions
336 bool HasPFI = false;
337
338 /// Processor has AVX-512 Exponential and Reciprocal Instructions
339 bool HasERI = false;
340
341 /// Processor has AVX-512 Conflict Detection Instructions
342 bool HasCDI = false;
343
344 /// Processor has AVX-512 population count Instructions
345 bool HasVPOPCNTDQ = false;
346
347 /// Processor has AVX-512 Doubleword and Quadword instructions
348 bool HasDQI = false;
349
350 /// Processor has AVX-512 Byte and Word instructions
351 bool HasBWI = false;
352
353 /// Processor has AVX-512 Vector Length eXtenstions
354 bool HasVLX = false;
355
356 /// Processor has PKU extenstions
357 bool HasPKU = false;
358
359 /// Processor has AVX-512 Vector Neural Network Instructions
360 bool HasVNNI = false;
361
362 /// Processor has AVX Vector Neural Network Instructions
363 bool HasAVXVNNI = false;
364
365 /// Processor has AVX-512 bfloat16 floating-point extensions
366 bool HasBF16 = false;
367
368 /// Processor supports ENQCMD instructions
369 bool HasENQCMD = false;
370
371 /// Processor has AVX-512 Bit Algorithms instructions
372 bool HasBITALG = false;
373
374 /// Processor has AVX-512 vp2intersect instructions
375 bool HasVP2INTERSECT = false;
376
377 /// Processor supports CET SHSTK - Control-Flow Enforcement Technology
378 /// using Shadow Stack
379 bool HasSHSTK = false;
380
381 /// Processor supports Invalidate Process-Context Identifier
382 bool HasINVPCID = false;
383
384 /// Processor has Software Guard Extensions
385 bool HasSGX = false;
386
387 /// Processor supports Flush Cache Line instruction
388 bool HasCLFLUSHOPT = false;
389
390 /// Processor supports Cache Line Write Back instruction
391 bool HasCLWB = false;
392
393 /// Processor supports Write Back No Invalidate instruction
394 bool HasWBNOINVD = false;
395
396 /// Processor support RDPID instruction
397 bool HasRDPID = false;
398
399 /// Processor supports WaitPKG instructions
400 bool HasWAITPKG = false;
401
402 /// Processor supports PCONFIG instruction
403 bool HasPCONFIG = false;
404
405 /// Processor support key locker instructions
406 bool HasKL = false;
407
408 /// Processor support key locker wide instructions
409 bool HasWIDEKL = false;
410
411 /// Processor supports HRESET instruction
412 bool HasHRESET = false;
413
414 /// Processor supports SERIALIZE instruction
415 bool HasSERIALIZE = false;
416
417 /// Processor supports TSXLDTRK instruction
418 bool HasTSXLDTRK = false;
419
420 /// Processor has AMX support
421 bool HasAMXTILE = false;
422 bool HasAMXBF16 = false;
423 bool HasAMXINT8 = false;
424
425 /// Processor supports User Level Interrupt instructions
426 bool HasUINTR = false;
427
428 /// Processor has a single uop BEXTR implementation.
429 bool HasFastBEXTR = false;
430
431 /// Try harder to combine to horizontal vector ops if they are fast.
432 bool HasFastHorizontalOps = false;
433
434 /// Prefer a left/right scalar logical shifts pair over a shift+and pair.
435 bool HasFastScalarShiftMasks = false;
436
437 /// Prefer a left/right vector logical shifts pair over a shift+and pair.
438 bool HasFastVectorShiftMasks = false;
439
440 /// Prefer a movbe over a single-use load + bswap / single-use bswap + store.
441 bool HasFastMOVBE = false;
442
443 /// Use a retpoline thunk rather than indirect calls to block speculative
444 /// execution.
445 bool UseRetpolineIndirectCalls = false;
446
447 /// Use a retpoline thunk or remove any indirect branch to block speculative
448 /// execution.
449 bool UseRetpolineIndirectBranches = false;
450
451 /// Deprecated flag, query `UseRetpolineIndirectCalls` and
452 /// `UseRetpolineIndirectBranches` instead.
453 bool DeprecatedUseRetpoline = false;
454
455 /// When using a retpoline thunk, call an externally provided thunk rather
456 /// than emitting one inside the compiler.
457 bool UseRetpolineExternalThunk = false;
458
459 /// Prevent generation of indirect call/branch instructions from memory,
460 /// and force all indirect call/branch instructions from a register to be
461 /// preceded by an LFENCE. Also decompose RET instructions into a
462 /// POP+LFENCE+JMP sequence.
463 bool UseLVIControlFlowIntegrity = false;
464
465 /// Enable Speculative Execution Side Effect Suppression
466 bool UseSpeculativeExecutionSideEffectSuppression = false;
467
468 /// Insert LFENCE instructions to prevent data speculatively injected into
469 /// loads from being used maliciously.
470 bool UseLVILoadHardening = false;
471
472 /// Use software floating point for code generation.
473 bool UseSoftFloat = false;
474
475 /// Use alias analysis during code generation.
476 bool UseAA = false;
477
478 /// The minimum alignment known to hold of the stack frame on
479 /// entry to the function and which must be maintained by every function.
480 Align stackAlignment = Align(4);
481
482 Align TileConfigAlignment = Align(4);
483
484 /// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
485 ///
486 // FIXME: this is a known good value for Yonah. How about others?
487 unsigned MaxInlineSizeThreshold = 128;
488
489 /// Indicates target prefers 128 bit instructions.
490 bool Prefer128Bit = false;
491
492 /// Indicates target prefers 256 bit instructions.
493 bool Prefer256Bit = false;
494
495 /// Indicates target prefers AVX512 mask registers.
496 bool PreferMaskRegisters = false;
497
498 /// Use Goldmont specific floating point div/sqrt costs.
499 bool UseGLMDivSqrtCosts = false;
500
501 /// What processor and OS we're targeting.
502 Triple TargetTriple;
503
504 /// GlobalISel related APIs.
505 std::unique_ptr<CallLowering> CallLoweringInfo;
506 std::unique_ptr<LegalizerInfo> Legalizer;
507 std::unique_ptr<RegisterBankInfo> RegBankInfo;
508 std::unique_ptr<InstructionSelector> InstSelector;
509
510private:
511 /// Override the stack alignment.
512 MaybeAlign StackAlignOverride;
513
514 /// Preferred vector width from function attribute.
515 unsigned PreferVectorWidthOverride;
516
517 /// Resolved preferred vector width from function attribute and subtarget
518 /// features.
519 unsigned PreferVectorWidth = UINT32_MAX(4294967295U);
520
521 /// Required vector width from function attribute.
522 unsigned RequiredVectorWidth;
523
524 /// True if compiling for 64-bit, false for 16-bit or 32-bit.
525 bool In64BitMode = false;
526
527 /// True if compiling for 32-bit, false for 16-bit or 64-bit.
528 bool In32BitMode = false;
529
530 /// True if compiling for 16-bit, false for 32-bit or 64-bit.
531 bool In16BitMode = false;
532
533 X86SelectionDAGInfo TSInfo;
534 // Ordering here is important. X86InstrInfo initializes X86RegisterInfo which
535 // X86TargetLowering needs.
536 X86InstrInfo InstrInfo;
537 X86TargetLowering TLInfo;
538 X86FrameLowering FrameLowering;
539
540public:
541 /// This constructor initializes the data members to match that
542 /// of the specified triple.
543 ///
544 X86Subtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS,
545 const X86TargetMachine &TM, MaybeAlign StackAlignOverride,
546 unsigned PreferVectorWidthOverride,
547 unsigned RequiredVectorWidth);
548
549 const X86TargetLowering *getTargetLowering() const override {
550 return &TLInfo;
551 }
552
553 const X86InstrInfo *getInstrInfo() const override { return &InstrInfo; }
554
555 const X86FrameLowering *getFrameLowering() const override {
556 return &FrameLowering;
557 }
558
559 const X86SelectionDAGInfo *getSelectionDAGInfo() const override {
560 return &TSInfo;
561 }
562
563 const X86RegisterInfo *getRegisterInfo() const override {
564 return &getInstrInfo()->getRegisterInfo();
565 }
566
567 unsigned getTileConfigSize() const { return 64; }
568 Align getTileConfigAlignment() const { return TileConfigAlignment; }
569
570 /// Returns the minimum alignment known to hold of the
571 /// stack frame on entry to the function and which must be maintained by every
572 /// function for this subtarget.
573 Align getStackAlignment() const { return stackAlignment; }
574
575 /// Returns the maximum memset / memcpy size
576 /// that still makes it profitable to inline the call.
577 unsigned getMaxInlineSizeThreshold() const { return MaxInlineSizeThreshold; }
578
579 /// ParseSubtargetFeatures - Parses features string setting specified
580 /// subtarget options. Definition of function is auto generated by tblgen.
581 void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
582
583 /// Methods used by Global ISel
584 const CallLowering *getCallLowering() const override;
585 InstructionSelector *getInstructionSelector() const override;
586 const LegalizerInfo *getLegalizerInfo() const override;
587 const RegisterBankInfo *getRegBankInfo() const override;
588
589private:
590 /// Initialize the full set of dependencies so we can use an initializer
591 /// list for X86Subtarget.
592 X86Subtarget &initializeSubtargetDependencies(StringRef CPU,
593 StringRef TuneCPU,
594 StringRef FS);
595 void initSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
596
597public:
598 /// Is this x86_64? (disregarding specific ABI / programming model)
599 bool is64Bit() const {
600 return In64BitMode;
601 }
602
603 bool is32Bit() const {
604 return In32BitMode;
605 }
606
607 bool is16Bit() const {
608 return In16BitMode;
609 }
610
611 /// Is this x86_64 with the ILP32 programming model (x32 ABI)?
612 bool isTarget64BitILP32() const {
613 return In64BitMode && (TargetTriple.isX32() || TargetTriple.isOSNaCl());
614 }
615
616 /// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
617 bool isTarget64BitLP64() const {
618 return In64BitMode && (!TargetTriple.isX32() && !TargetTriple.isOSNaCl());
619 }
620
621 PICStyles::Style getPICStyle() const { return PICStyle; }
622 void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
623
624 bool hasX87() const { return HasX87; }
625 bool hasCmpxchg8b() const { return HasCmpxchg8b; }
626 bool hasNOPL() const { return HasNOPL; }
627 // SSE codegen depends on cmovs, and all SSE1+ processors support them.
628 // All 64-bit processors support cmov.
629 bool hasCMov() const { return HasCMov || X86SSELevel >= SSE1 || is64Bit(); }
630 bool hasSSE1() const { return X86SSELevel >= SSE1; }
631 bool hasSSE2() const { return X86SSELevel
12.1
Field 'X86SSELevel' is >= SSE2
12.1
Field 'X86SSELevel' is >= SSE2
12.1
Field 'X86SSELevel' is >= SSE2
>= SSE2
; }
13
Returning the value 1, which participates in a condition later
632 bool hasSSE3() const { return X86SSELevel >= SSE3; }
633 bool hasSSSE3() const { return X86SSELevel >= SSSE3; }
634 bool hasSSE41() const { return X86SSELevel >= SSE41; }
635 bool hasSSE42() const { return X86SSELevel >= SSE42; }
636 bool hasAVX() const { return X86SSELevel >= AVX; }
637 bool hasAVX2() const { return X86SSELevel >= AVX2; }
638 bool hasAVX512() const { return X86SSELevel >= AVX512F; }
639 bool hasInt256() const { return hasAVX2(); }
640 bool hasSSE4A() const { return HasSSE4A; }
641 bool hasMMX() const { return X863DNowLevel >= MMX; }
642 bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
643 bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
644 bool hasPOPCNT() const { return HasPOPCNT; }
645 bool hasAES() const { return HasAES; }
646 bool hasVAES() const { return HasVAES; }
647 bool hasFXSR() const { return HasFXSR; }
648 bool hasXSAVE() const { return HasXSAVE; }
649 bool hasXSAVEOPT() const { return HasXSAVEOPT; }
650 bool hasXSAVEC() const { return HasXSAVEC; }
651 bool hasXSAVES() const { return HasXSAVES; }
652 bool hasPCLMUL() const { return HasPCLMUL; }
653 bool hasVPCLMULQDQ() const { return HasVPCLMULQDQ; }
654 bool hasGFNI() const { return HasGFNI; }
655 // Prefer FMA4 to FMA - its better for commutation/memory folding and
656 // has equal or better performance on all supported targets.
657 bool hasFMA() const { return HasFMA; }
658 bool hasFMA4() const { return HasFMA4; }
659 bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
660 bool hasXOP() const { return HasXOP; }
661 bool hasTBM() const { return HasTBM; }
662 bool hasLWP() const { return HasLWP; }
663 bool hasMOVBE() const { return HasMOVBE; }
664 bool hasRDRAND() const { return HasRDRAND; }
665 bool hasF16C() const { return HasF16C; }
666 bool hasFSGSBase() const { return HasFSGSBase; }
667 bool hasLZCNT() const { return HasLZCNT; }
668 bool hasBMI() const { return HasBMI; }
669 bool hasBMI2() const { return HasBMI2; }
670 bool hasVBMI() const { return HasVBMI; }
671 bool hasVBMI2() const { return HasVBMI2; }
672 bool hasIFMA() const { return HasIFMA; }
673 bool hasRTM() const { return HasRTM; }
674 bool hasADX() const { return HasADX; }
675 bool hasSHA() const { return HasSHA; }
676 bool hasPRFCHW() const { return HasPRFCHW; }
677 bool hasPREFETCHWT1() const { return HasPREFETCHWT1; }
678 bool hasPrefetchW() const {
679 // The PREFETCHW instruction was added with 3DNow but later CPUs gave it
680 // its own CPUID bit as part of deprecating 3DNow. Intel eventually added
681 // it and KNL has another that prefetches to L2 cache. We assume the
682 // L1 version exists if the L2 version does.
683 return has3DNow() || hasPRFCHW() || hasPREFETCHWT1();
684 }
685 bool hasSSEPrefetch() const {
686 // We implicitly enable these when we have a write prefix supporting cache
687 // level OR if we have prfchw, but don't already have a read prefetch from
688 // 3dnow.
689 return hasSSE1() || (hasPRFCHW() && !has3DNow()) || hasPREFETCHWT1();
690 }
691 bool hasRDSEED() const { return HasRDSEED; }
692 bool hasLAHFSAHF() const { return HasLAHFSAHF64 || !is64Bit(); }
693 bool hasMWAITX() const { return HasMWAITX; }
694 bool hasCLZERO() const { return HasCLZERO; }
695 bool hasCLDEMOTE() const { return HasCLDEMOTE; }
696 bool hasMOVDIRI() const { return HasMOVDIRI; }
697 bool hasMOVDIR64B() const { return HasMOVDIR64B; }
698 bool hasPTWRITE() const { return HasPTWRITE; }
699 bool isSHLDSlow() const { return IsSHLDSlow; }
700 bool isPMULLDSlow() const { return IsPMULLDSlow; }
701 bool isPMADDWDSlow() const { return IsPMADDWDSlow; }
702 bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
703 bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
704 bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
705 bool hasCmpxchg16b() const { return HasCmpxchg16b && is64Bit(); }
706 bool useLeaForSP() const { return UseLeaForSP; }
707 bool hasPOPCNTFalseDeps() const { return HasPOPCNTFalseDeps; }
708 bool hasLZCNTFalseDeps() const { return HasLZCNTFalseDeps; }
709 bool hasFastVariableCrossLaneShuffle() const {
710 return HasFastVariableCrossLaneShuffle;
711 }
712 bool hasFastVariablePerLaneShuffle() const {
713 return HasFastVariablePerLaneShuffle;
714 }
715 bool insertVZEROUPPER() const { return InsertVZEROUPPER; }
716 bool hasFastGather() const { return HasFastGather; }
717 bool hasFastScalarFSQRT() const { return HasFastScalarFSQRT; }
718 bool hasFastVectorFSQRT() const { return HasFastVectorFSQRT; }
719 bool hasFastLZCNT() const { return HasFastLZCNT; }
720 bool hasFastSHLDRotate() const { return HasFastSHLDRotate; }
721 bool hasFastBEXTR() const { return HasFastBEXTR; }
722 bool hasFastHorizontalOps() const { return HasFastHorizontalOps; }
723 bool hasFastScalarShiftMasks() const { return HasFastScalarShiftMasks; }
724 bool hasFastVectorShiftMasks() const { return HasFastVectorShiftMasks; }
725 bool hasFastMOVBE() const { return HasFastMOVBE; }
726 bool hasMacroFusion() const { return HasMacroFusion; }
727 bool hasBranchFusion() const { return HasBranchFusion; }
728 bool hasERMSB() const { return HasERMSB; }
729 bool hasFSRM() const { return HasFSRM; }
730 bool hasSlowDivide32() const { return HasSlowDivide32; }
731 bool hasSlowDivide64() const { return HasSlowDivide64; }
732 bool padShortFunctions() const { return PadShortFunctions; }
733 bool slowTwoMemOps() const { return SlowTwoMemOps; }
734 bool LEAusesAG() const { return LEAUsesAG; }
735 bool slowLEA() const { return SlowLEA; }
736 bool slow3OpsLEA() const { return Slow3OpsLEA; }
737 bool slowIncDec() const { return SlowIncDec; }
738 bool hasCDI() const { return HasCDI; }
739 bool hasVPOPCNTDQ() const { return HasVPOPCNTDQ; }
740 bool hasPFI() const { return HasPFI; }
741 bool hasERI() const { return HasERI; }
742 bool hasDQI() const { return HasDQI; }
743 bool hasBWI() const { return HasBWI; }
744 bool hasVLX() const { return HasVLX; }
745 bool hasPKU() const { return HasPKU; }
746 bool hasVNNI() const { return HasVNNI; }
747 bool hasBF16() const { return HasBF16; }
748 bool hasVP2INTERSECT() const { return HasVP2INTERSECT; }
749 bool hasBITALG() const { return HasBITALG; }
750 bool hasSHSTK() const { return HasSHSTK; }
751 bool hasCLFLUSHOPT() const { return HasCLFLUSHOPT; }
752 bool hasCLWB() const { return HasCLWB; }
753 bool hasWBNOINVD() const { return HasWBNOINVD; }
754 bool hasRDPID() const { return HasRDPID; }
755 bool hasWAITPKG() const { return HasWAITPKG; }
756 bool hasPCONFIG() const { return HasPCONFIG; }
757 bool hasSGX() const { return HasSGX; }
758 bool hasINVPCID() const { return HasINVPCID; }
759 bool hasENQCMD() const { return HasENQCMD; }
760 bool hasKL() const { return HasKL; }
761 bool hasWIDEKL() const { return HasWIDEKL; }
762 bool hasHRESET() const { return HasHRESET; }
763 bool hasSERIALIZE() const { return HasSERIALIZE; }
764 bool hasTSXLDTRK() const { return HasTSXLDTRK; }
765 bool hasUINTR() const { return HasUINTR; }
766 bool useRetpolineIndirectCalls() const { return UseRetpolineIndirectCalls; }
767 bool useRetpolineIndirectBranches() const {
768 return UseRetpolineIndirectBranches;
769 }
770 bool hasAVXVNNI() const { return HasAVXVNNI; }
771 bool hasAMXTILE() const { return HasAMXTILE; }
772 bool hasAMXBF16() const { return HasAMXBF16; }
773 bool hasAMXINT8() const { return HasAMXINT8; }
774 bool useRetpolineExternalThunk() const { return UseRetpolineExternalThunk; }
775
776 // These are generic getters that OR together all of the thunk types
777 // supported by the subtarget. Therefore useIndirectThunk*() will return true
778 // if any respective thunk feature is enabled.
779 bool useIndirectThunkCalls() const {
780 return useRetpolineIndirectCalls() || useLVIControlFlowIntegrity();
781 }
782 bool useIndirectThunkBranches() const {
783 return useRetpolineIndirectBranches() || useLVIControlFlowIntegrity();
784 }
785
786 bool preferMaskRegisters() const { return PreferMaskRegisters; }
787 bool useGLMDivSqrtCosts() const { return UseGLMDivSqrtCosts; }
788 bool useLVIControlFlowIntegrity() const { return UseLVIControlFlowIntegrity; }
789 bool useLVILoadHardening() const { return UseLVILoadHardening; }
790 bool useSpeculativeExecutionSideEffectSuppression() const {
791 return UseSpeculativeExecutionSideEffectSuppression;
792 }
793
794 unsigned getPreferVectorWidth() const { return PreferVectorWidth; }
795 unsigned getRequiredVectorWidth() const { return RequiredVectorWidth; }
796
797 // Helper functions to determine when we should allow widening to 512-bit
798 // during codegen.
799 // TODO: Currently we're always allowing widening on CPUs without VLX,
800 // because for many cases we don't have a better option.
801 bool canExtendTo512DQ() const {
802 return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
803 }
804 bool canExtendTo512BW() const {
805 return hasBWI() && canExtendTo512DQ();
806 }
807
808 // If there are no 512-bit vectors and we prefer not to use 512-bit registers,
809 // disable them in the legalizer.
810 bool useAVX512Regs() const {
811 return hasAVX512() && (canExtendTo512DQ() || RequiredVectorWidth > 256);
812 }
813
814 bool useBWIRegs() const {
815 return hasBWI() && useAVX512Regs();
816 }
817
818 bool isXRaySupported() const override { return is64Bit(); }
819
820 /// TODO: to be removed later and replaced with suitable properties
821 bool isAtom() const { return X86ProcFamily == IntelAtom; }
822 bool isSLM() const { return X86ProcFamily == IntelSLM; }
823 bool useSoftFloat() const { return UseSoftFloat; }
824 bool useAA() const override { return UseAA; }
825
826 /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
827 /// no-sse2). There isn't any reason to disable it if the target processor
828 /// supports it.
829 bool hasMFence() const { return hasSSE2() || is64Bit(); }
830
831 const Triple &getTargetTriple() const { return TargetTriple; }
832
833 bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
834 bool isTargetFreeBSD() const { return TargetTriple.isOSFreeBSD(); }
835 bool isTargetDragonFly() const { return TargetTriple.isOSDragonFly(); }
836 bool isTargetSolaris() const { return TargetTriple.isOSSolaris(); }
837 bool isTargetPS4() const { return TargetTriple.isPS4CPU(); }
838
839 bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
840 bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
841 bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
842
843 bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
844 bool isTargetKFreeBSD() const { return TargetTriple.isOSKFreeBSD(); }
845 bool isTargetGlibc() const { return TargetTriple.isOSGlibc(); }
846 bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
847 bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
848 bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
849 bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
850 bool isTargetMCU() const { return TargetTriple.isOSIAMCU(); }
851 bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }
852
853 bool isTargetWindowsMSVC() const {
854 return TargetTriple.isWindowsMSVCEnvironment();
855 }
856
857 bool isTargetWindowsCoreCLR() const {
858 return TargetTriple.isWindowsCoreCLREnvironment();
859 }
860
861 bool isTargetWindowsCygwin() const {
862 return TargetTriple.isWindowsCygwinEnvironment();
863 }
864
865 bool isTargetWindowsGNU() const {
866 return TargetTriple.isWindowsGNUEnvironment();
867 }
868
869 bool isTargetWindowsItanium() const {
870 return TargetTriple.isWindowsItaniumEnvironment();
871 }
872
873 bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
874
875 bool isOSWindows() const { return TargetTriple.isOSWindows(); }
876
877 bool isTargetWin64() const { return In64BitMode && isOSWindows(); }
878
879 bool isTargetWin32() const { return !In64BitMode && isOSWindows(); }
880
881 bool isPICStyleGOT() const { return PICStyle == PICStyles::Style::GOT; }
882 bool isPICStyleRIPRel() const { return PICStyle == PICStyles::Style::RIPRel; }
883
884 bool isPICStyleStubPIC() const {
885 return PICStyle == PICStyles::Style::StubPIC;
886 }
887
888 bool isPositionIndependent() const;
889
890 bool isCallingConvWin64(CallingConv::ID CC) const {
891 switch (CC) {
892 // On Win64, all these conventions just use the default convention.
893 case CallingConv::C:
894 case CallingConv::Fast:
895 case CallingConv::Tail:
896 case CallingConv::Swift:
897 case CallingConv::SwiftTail:
898 case CallingConv::X86_FastCall:
899 case CallingConv::X86_StdCall:
900 case CallingConv::X86_ThisCall:
901 case CallingConv::X86_VectorCall:
902 case CallingConv::Intel_OCL_BI:
903 return isTargetWin64();
904 // This convention allows using the Win64 convention on other targets.
905 case CallingConv::Win64:
906 return true;
907 // This convention allows using the SysV convention on Windows targets.
908 case CallingConv::X86_64_SysV:
909 return false;
910 // Otherwise, who knows what this is.
911 default:
912 return false;
913 }
914 }
915
916 /// Classify a global variable reference for the current subtarget according
917 /// to how we should reference it in a non-pcrel context.
918 unsigned char classifyLocalReference(const GlobalValue *GV) const;
919
920 unsigned char classifyGlobalReference(const GlobalValue *GV,
921 const Module &M) const;
922 unsigned char classifyGlobalReference(const GlobalValue *GV) const;
923
924 /// Classify a global function reference for the current subtarget.
925 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
926 const Module &M) const;
927 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;
928
929 /// Classify a blockaddress reference for the current subtarget according to
930 /// how we should reference it in a non-pcrel context.
931 unsigned char classifyBlockAddressReference() const;
932
933 /// Return true if the subtarget allows calls to immediate address.
934 bool isLegalToCallImmediateAddr() const;
935
936 /// If we are using indirect thunks, we need to expand indirectbr to avoid it
937 /// lowering to an actual indirect jump.
938 bool enableIndirectBrExpand() const override {
939 return useIndirectThunkBranches();
940 }
941
942 /// Enable the MachineScheduler pass for all X86 subtargets.
943 bool enableMachineScheduler() const override { return true; }
944
945 bool enableEarlyIfConversion() const override;
946
947 void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
948 &Mutations) const override;
949
950 AntiDepBreakMode getAntiDepBreakMode() const override {
951 return TargetSubtargetInfo::ANTIDEP_CRITICAL;
952 }
953
954 bool enableAdvancedRASplitCost() const override { return false; }
955};
956
957} // end namespace llvm
958
959#endif // LLVM_LIB_TARGET_X86_X86SUBTARGET_H

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h

1//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines various meta classes of instructions that exist in the VM
10// representation. Specific concrete subclasses of these may be found in the
11// i*.h files...
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRTYPES_H
16#define LLVM_IR_INSTRTYPES_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/OperandTraits.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/User.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/Casting.h"
38#include "llvm/Support/ErrorHandling.h"
39#include <algorithm>
40#include <cassert>
41#include <cstddef>
42#include <cstdint>
43#include <iterator>
44#include <string>
45#include <vector>
46
47namespace llvm {
48
49namespace Intrinsic {
50typedef unsigned ID;
51}
52
53//===----------------------------------------------------------------------===//
54// UnaryInstruction Class
55//===----------------------------------------------------------------------===//
56
57class UnaryInstruction : public Instruction {
58protected:
59 UnaryInstruction(Type *Ty, unsigned iType, Value *V,
60 Instruction *IB = nullptr)
61 : Instruction(Ty, iType, &Op<0>(), 1, IB) {
62 Op<0>() = V;
63 }
64 UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
65 : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
66 Op<0>() = V;
67 }
68
69public:
70 // allocate space for exactly one operand
71 void *operator new(size_t S) { return User::operator new(S, 1); }
72 void operator delete(void *Ptr) { User::operator delete(Ptr); }
73
74 /// Transparently provide more efficient getOperand methods.
75 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
76
77 // Methods for support type inquiry through isa, cast, and dyn_cast:
78 static bool classof(const Instruction *I) {
79 return I->isUnaryOp() ||
80 I->getOpcode() == Instruction::Alloca ||
81 I->getOpcode() == Instruction::Load ||
82 I->getOpcode() == Instruction::VAArg ||
83 I->getOpcode() == Instruction::ExtractValue ||
84 (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
85 }
86 static bool classof(const Value *V) {
87 return isa<Instruction>(V) && classof(cast<Instruction>(V));
88 }
89};
90
91template <>
92struct OperandTraits<UnaryInstruction> :
93 public FixedNumOperandTraits<UnaryInstruction, 1> {
94};
95
// Macro invocation whose expansion the analyzer report has rendered inline
// as run-on text: it defines UnaryInstruction::op_begin/op_end (const and
// non-const), range-asserting getOperand/setOperand, getNumOperands, and the
// Op<Idx>() accessors, all delegating to OperandTraits<UnaryInstruction>.
96DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)UnaryInstruction::op_iterator UnaryInstruction::op_begin() { return
OperandTraits<UnaryInstruction>::op_begin(this); } UnaryInstruction
::const_op_iterator UnaryInstruction::op_begin() const { return
OperandTraits<UnaryInstruction>::op_begin(const_cast<
UnaryInstruction*>(this)); } UnaryInstruction::op_iterator
UnaryInstruction::op_end() { return OperandTraits<UnaryInstruction
>::op_end(this); } UnaryInstruction::const_op_iterator UnaryInstruction
::op_end() const { return OperandTraits<UnaryInstruction>
::op_end(const_cast<UnaryInstruction*>(this)); } Value *
UnaryInstruction::getOperand(unsigned i_nocapture) const { (static_cast
<bool> (i_nocapture < OperandTraits<UnaryInstruction
>::operands(this) && "getOperand() out of range!")
? void (0) : __assert_fail ("i_nocapture < OperandTraits<UnaryInstruction>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 96, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<UnaryInstruction>::op_begin
(const_cast<UnaryInstruction*>(this))[i_nocapture].get(
)); } void UnaryInstruction::setOperand(unsigned i_nocapture,
Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<UnaryInstruction>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<UnaryInstruction>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 96, __extension__ __PRETTY_FUNCTION__)); OperandTraits<UnaryInstruction
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
UnaryInstruction::getNumOperands() const { return OperandTraits
<UnaryInstruction>::operands(this); } template <int Idx_nocapture
> Use &UnaryInstruction::Op() { return this->OpFrom
<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &UnaryInstruction::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
Analyzer path note 33: the ternary ('?') condition evaluated to true.
Analyzer path note 34: the object is a 'Value'.
Analyzer path note 35: returning a pointer, which participates in a condition later.
Analyzer path note 46: the ternary ('?') condition evaluated to true.
Analyzer path note 47: the object is a 'Value'.
Analyzer path note 48: returning a pointer, which participates in a condition later.
97
98//===----------------------------------------------------------------------===//
99// UnaryOperator Class
100//===----------------------------------------------------------------------===//
101
// Concrete single-operand arithmetic instruction (currently FNeg in LLVM IR).
// Construction goes through the static Create* factories; constructors are
// protected.
102class UnaryOperator : public UnaryInstruction {
103 void AssertOK();
104
105protected:
106 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
107 const Twine &Name, Instruction *InsertBefore);
108 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
109 const Twine &Name, BasicBlock *InsertAtEnd);
110
111 // Note: Instruction needs to be a friend here to call cloneImpl.
112 friend class Instruction;
113
114 UnaryOperator *cloneImpl() const;
115
116public:
117
118 /// Construct a unary instruction, given the opcode and an operand.
119 /// Optionally (if InstBefore is specified) insert the instruction
120 /// into a BasicBlock right before the specified instruction. The specified
121 /// Instruction is allowed to be a dereferenced end iterator.
122 ///
123 static UnaryOperator *Create(UnaryOps Op, Value *S,
124 const Twine &Name = Twine(),
125 Instruction *InsertBefore = nullptr);
126
127 /// Construct a unary instruction, given the opcode and an operand.
128 /// Also automatically insert this instruction to the end of the
129 /// BasicBlock specified.
130 ///
131 static UnaryOperator *Create(UnaryOps Op, Value *S,
132 const Twine &Name,
133 BasicBlock *InsertAtEnd);
134
135 /// These methods just forward to Create, and are useful when you
136 /// statically know what type of instruction you're going to create. These
137 /// helpers just save some typing.
// The HANDLE_UNARY_INST macro is defined three times, each followed by an
// include of Instruction.def, stamping out per-opcode Create##OPC overloads
// for the three insertion modes (no insertion point / end of block / before
// an instruction). Instruction.def #undefs the macro after each expansion.
138#define HANDLE_UNARY_INST(N, OPC, CLASS) \
139 static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
140 return Create(Instruction::OPC, V, Name);\
141 }
142#include "llvm/IR/Instruction.def"
143#define HANDLE_UNARY_INST(N, OPC, CLASS) \
144 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
145 BasicBlock *BB) {\
146 return Create(Instruction::OPC, V, Name, BB);\
147 }
148#include "llvm/IR/Instruction.def"
149#define HANDLE_UNARY_INST(N, OPC, CLASS) \
150 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
151 Instruction *I) {\
152 return Create(Instruction::OPC, V, Name, I);\
153 }
154#include "llvm/IR/Instruction.def"
155
// Create, then copy IR-level flags (e.g. fast-math flags) from CopyO onto
// the new instruction.
156 static UnaryOperator *
157 CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO,
158 const Twine &Name = "",
159 Instruction *InsertBefore = nullptr) {
160 UnaryOperator *UO = Create(Opc, V, Name, InsertBefore);
161 UO->copyIRFlags(CopyO);
162 return UO;
163 }
164
// Convenience wrapper: FNeg that inherits fast-math flags from FMFSource.
165 static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
166 const Twine &Name = "",
167 Instruction *InsertBefore = nullptr) {
168 return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name,
169 InsertBefore);
170 }
171
// Narrow the generic opcode to the UnaryOps enum for this subclass.
172 UnaryOps getOpcode() const {
173 return static_cast<UnaryOps>(Instruction::getOpcode());
174 }
175
176 // Methods for support type inquiry through isa, cast, and dyn_cast:
177 static bool classof(const Instruction *I) {
178 return I->isUnaryOp();
179 }
180 static bool classof(const Value *V) {
181 return isa<Instruction>(V) && classof(cast<Instruction>(V));
182 }
183};
184
185//===----------------------------------------------------------------------===//
186// BinaryOperator Class
187//===----------------------------------------------------------------------===//
188
// Concrete two-operand instruction (add, sub, mul, div, shifts, bitwise ops,
// and their FP counterparts). Construction goes through the static Create*
// factories; constructors are protected.
189class BinaryOperator : public Instruction {
190 void AssertOK();
191
192protected:
193 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
194 const Twine &Name, Instruction *InsertBefore);
195 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
196 const Twine &Name, BasicBlock *InsertAtEnd);
197
198 // Note: Instruction needs to be a friend here to call cloneImpl.
199 friend class Instruction;
200
201 BinaryOperator *cloneImpl() const;
202
203public:
204 // allocate space for exactly two operands
205 void *operator new(size_t S) { return User::operator new(S, 2); }
206 void operator delete(void *Ptr) { User::operator delete(Ptr); }
207
208 /// Transparently provide more efficient getOperand methods.
// The macro below (shown expanded by the analyzer report) declares the
// inline operand accessors defined out-of-line after the class.
209 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
210
211 /// Construct a binary instruction, given the opcode and the two
212 /// operands. Optionally (if InstBefore is specified) insert the instruction
213 /// into a BasicBlock right before the specified instruction. The specified
214 /// Instruction is allowed to be a dereferenced end iterator.
215 ///
216 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
217 const Twine &Name = Twine(),
218 Instruction *InsertBefore = nullptr);
219
220 /// Construct a binary instruction, given the opcode and the two
221 /// operands. Also automatically insert this instruction to the end of the
222 /// BasicBlock specified.
223 ///
224 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
225 const Twine &Name, BasicBlock *InsertAtEnd);
226
227 /// These methods just forward to Create, and are useful when you
228 /// statically know what type of instruction you're going to create. These
229 /// helpers just save some typing.
// Three successive definitions of HANDLE_BINARY_INST, each expanded via
// Instruction.def, stamp out per-opcode Create##OPC overloads for the three
// insertion modes (none / end of block / before an instruction).
230#define HANDLE_BINARY_INST(N, OPC, CLASS) \
231 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
232 const Twine &Name = "") {\
233 return Create(Instruction::OPC, V1, V2, Name);\
234 }
235#include "llvm/IR/Instruction.def"
236#define HANDLE_BINARY_INST(N, OPC, CLASS) \
237 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
238 const Twine &Name, BasicBlock *BB) {\
239 return Create(Instruction::OPC, V1, V2, Name, BB);\
240 }
241#include "llvm/IR/Instruction.def"
242#define HANDLE_BINARY_INST(N, OPC, CLASS) \
243 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
244 const Twine &Name, Instruction *I) {\
245 return Create(Instruction::OPC, V1, V2, Name, I);\
246 }
247#include "llvm/IR/Instruction.def"
248
// Create, then copy IR-level flags (nsw/nuw/exact/fast-math) from CopyO.
249 static BinaryOperator *
250 CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Instruction *CopyO,
251 const Twine &Name = "",
252 Instruction *InsertBefore = nullptr) {
253 BinaryOperator *BO = Create(Opc, V1, V2, Name, InsertBefore);
254 BO->copyIRFlags(CopyO);
255 return BO;
256 }
257
// FP helpers: create the op and inherit fast-math flags from FMFSource.
258 static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
259 Instruction *FMFSource,
260 const Twine &Name = "") {
261 return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
262 }
263 static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
264 Instruction *FMFSource,
265 const Twine &Name = "") {
266 return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
267 }
268 static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
269 Instruction *FMFSource,
270 const Twine &Name = "") {
271 return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
272 }
273 static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
274 Instruction *FMFSource,
275 const Twine &Name = "") {
276 return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
277 }
278 static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
279 Instruction *FMFSource,
280 const Twine &Name = "") {
281 return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
282 }
283
// CreateNSW/CreateNUW/CreateExact: create the op, then set the
// corresponding poison-generating flag (no-signed-wrap, no-unsigned-wrap,
// or exact). Each comes in the usual three insertion-mode overloads.
284 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
285 const Twine &Name = "") {
286 BinaryOperator *BO = Create(Opc, V1, V2, Name);
287 BO->setHasNoSignedWrap(true);
288 return BO;
289 }
290 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
291 const Twine &Name, BasicBlock *BB) {
292 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
293 BO->setHasNoSignedWrap(true);
294 return BO;
295 }
296 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
297 const Twine &Name, Instruction *I) {
298 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
299 BO->setHasNoSignedWrap(true);
300 return BO;
301 }
302
303 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
304 const Twine &Name = "") {
305 BinaryOperator *BO = Create(Opc, V1, V2, Name);
306 BO->setHasNoUnsignedWrap(true);
307 return BO;
308 }
309 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
310 const Twine &Name, BasicBlock *BB) {
311 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
312 BO->setHasNoUnsignedWrap(true);
313 return BO;
314 }
315 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
316 const Twine &Name, Instruction *I) {
317 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
318 BO->setHasNoUnsignedWrap(true);
319 return BO;
320 }
321
322 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
323 const Twine &Name = "") {
324 BinaryOperator *BO = Create(Opc, V1, V2, Name);
325 BO->setIsExact(true);
326 return BO;
327 }
328 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
329 const Twine &Name, BasicBlock *BB) {
330 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
331 BO->setIsExact(true);
332 return BO;
333 }
334 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
335 const Twine &Name, Instruction *I) {
336 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
337 BO->setIsExact(true);
338 return BO;
339 }
340
// DEFINE_HELPERS stamps out opcode-specific flag-setting factories such as
// CreateNSWAdd/CreateExactSDiv by pasting Create##NUWNSWEXACT##OPC.
341#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
342 static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
343 const Twine &Name = "") { \
344 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
345 } \
346 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
347 Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
348 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
349 } \
350 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
351 Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
352 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
353 }
354
355 DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
356 DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
357 DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
358 DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
359 DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
360 DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
361 DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
362 DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
363
364 DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
365 DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
366 DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
367 DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
368
369#undef DEFINE_HELPERS
370
371 /// Helper functions to construct and inspect unary operations (NEG and NOT)
372 /// via binary operators SUB and XOR:
373 ///
374 /// Create the NEG and NOT instructions out of SUB and XOR instructions.
375 ///
376 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
377 Instruction *InsertBefore = nullptr);
378 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
379 BasicBlock *InsertAtEnd);
380 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
381 Instruction *InsertBefore = nullptr);
382 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
383 BasicBlock *InsertAtEnd);
384 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
385 Instruction *InsertBefore = nullptr);
386 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
387 BasicBlock *InsertAtEnd);
388 static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
389 Instruction *InsertBefore = nullptr);
390 static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
391 BasicBlock *InsertAtEnd);
392
// Narrow the generic opcode to the BinaryOps enum for this subclass.
393 BinaryOps getOpcode() const {
394 return static_cast<BinaryOps>(Instruction::getOpcode());
395 }
396
397 /// Exchange the two operands to this instruction.
398 /// This instruction is safe to use on any binary instruction and
399 /// does not modify the semantics of the instruction. If the instruction
400 /// cannot be reversed (ie, it's a Div), then return true.
401 ///
402 bool swapOperands();
403
404 // Methods for support type inquiry through isa, cast, and dyn_cast:
405 static bool classof(const Instruction *I) {
406 return I->isBinaryOp();
407 }
408 static bool classof(const Value *V) {
409 return isa<Instruction>(V) && classof(cast<Instruction>(V));
410 }
411};
412
// OperandTraits specialization: a BinaryOperator stores exactly two operands,
// matching the operator new(S, 2) allocation in the class above.
413template <>
414struct OperandTraits<BinaryOperator> :
415 public FixedNumOperandTraits<BinaryOperator, 2> {
416};
417
// Macro invocation (expansion rendered inline by the analyzer report):
// defines BinaryOperator's op_begin/op_end, range-asserting
// getOperand/setOperand, getNumOperands, and Op<Idx>() accessors, all
// delegating to OperandTraits<BinaryOperator>.
418DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)BinaryOperator::op_iterator BinaryOperator::op_begin() { return
OperandTraits<BinaryOperator>::op_begin(this); } BinaryOperator
::const_op_iterator BinaryOperator::op_begin() const { return
OperandTraits<BinaryOperator>::op_begin(const_cast<
BinaryOperator*>(this)); } BinaryOperator::op_iterator BinaryOperator
::op_end() { return OperandTraits<BinaryOperator>::op_end
(this); } BinaryOperator::const_op_iterator BinaryOperator::op_end
() const { return OperandTraits<BinaryOperator>::op_end
(const_cast<BinaryOperator*>(this)); } Value *BinaryOperator
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<BinaryOperator>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<BinaryOperator>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 418, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<BinaryOperator>::op_begin(
const_cast<BinaryOperator*>(this))[i_nocapture].get());
} void BinaryOperator::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<BinaryOperator>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<BinaryOperator>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 418, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
BinaryOperator>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned BinaryOperator::getNumOperands() const { return OperandTraits
<BinaryOperator>::operands(this); } template <int Idx_nocapture
> Use &BinaryOperator::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &BinaryOperator::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
419
420//===----------------------------------------------------------------------===//
421// CastInst Class
422//===----------------------------------------------------------------------===//
423
424/// This is the base class for all instructions that perform data
425/// casts. It is simply provided so that instruction category testing
426/// can be performed with code like:
427///
428/// if (isa<CastInst>(Instr)) { ... }
429/// Base class of casting instructions.
// Abstract base for all cast instructions (trunc, zext, sext, bitcast,
// ptrtoint, inttoptr, addrspacecast, fp casts, ...). Provides opcode-driven
// Create* factories and static validity/classification queries; subclasses
// supply the concrete opcode.
430class CastInst : public UnaryInstruction {
431protected:
432 /// Constructor with insert-before-instruction semantics for subclasses
433 CastInst(Type *Ty, unsigned iType, Value *S,
434 const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
435 : UnaryInstruction(Ty, iType, S, InsertBefore) {
436 setName(NameStr);
437 }
438 /// Constructor with insert-at-end-of-block semantics for subclasses
439 CastInst(Type *Ty, unsigned iType, Value *S,
440 const Twine &NameStr, BasicBlock *InsertAtEnd)
441 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
442 setName(NameStr);
443 }
444
445public:
446 /// Provides a way to construct any of the CastInst subclasses using an
447 /// opcode instead of the subclass's constructor. The opcode must be in the
448 /// CastOps category (Instruction::isCast(opcode) returns true). This
449 /// constructor has insert-before-instruction semantics to automatically
450 /// insert the new CastInst before InsertBefore (if it is non-null).
451 /// Construct any of the CastInst subclasses
452 static CastInst *Create(
453 Instruction::CastOps, ///< The opcode of the cast instruction
454 Value *S, ///< The value to be casted (operand 0)
455 Type *Ty, ///< The type to which cast should be made
456 const Twine &Name = "", ///< Name for the instruction
457 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
458 );
459 /// Provides a way to construct any of the CastInst subclasses using an
460 /// opcode instead of the subclass's constructor. The opcode must be in the
461 /// CastOps category. This constructor has insert-at-end-of-block semantics
462 /// to automatically insert the new CastInst at the end of InsertAtEnd (if
463 /// its non-null).
464 /// Construct any of the CastInst subclasses
465 static CastInst *Create(
466 Instruction::CastOps, ///< The opcode for the cast instruction
467 Value *S, ///< The value to be casted (operand 0)
468 Type *Ty, ///< The type to which operand is casted
469 const Twine &Name, ///< The name for the instruction
470 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
471 );
472
// The Create*OrBitCast / CreatePointerCast / CreateIntegerCast / CreateFPCast
// families below pick the correct cast opcode from the source and
// destination types, each offered in insert-before and insert-at-end forms.
473 /// Create a ZExt or BitCast cast instruction
474 static CastInst *CreateZExtOrBitCast(
475 Value *S, ///< The value to be casted (operand 0)
476 Type *Ty, ///< The type to which cast should be made
477 const Twine &Name = "", ///< Name for the instruction
478 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
479 );
480
481 /// Create a ZExt or BitCast cast instruction
482 static CastInst *CreateZExtOrBitCast(
483 Value *S, ///< The value to be casted (operand 0)
484 Type *Ty, ///< The type to which operand is casted
485 const Twine &Name, ///< The name for the instruction
486 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
487 );
488
489 /// Create a SExt or BitCast cast instruction
490 static CastInst *CreateSExtOrBitCast(
491 Value *S, ///< The value to be casted (operand 0)
492 Type *Ty, ///< The type to which cast should be made
493 const Twine &Name = "", ///< Name for the instruction
494 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
495 );
496
497 /// Create a SExt or BitCast cast instruction
498 static CastInst *CreateSExtOrBitCast(
499 Value *S, ///< The value to be casted (operand 0)
500 Type *Ty, ///< The type to which operand is casted
501 const Twine &Name, ///< The name for the instruction
502 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
503 );
504
505 /// Create a BitCast AddrSpaceCast, or a PtrToInt cast instruction.
506 static CastInst *CreatePointerCast(
507 Value *S, ///< The pointer value to be casted (operand 0)
508 Type *Ty, ///< The type to which operand is casted
509 const Twine &Name, ///< The name for the instruction
510 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
511 );
512
513 /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
514 static CastInst *CreatePointerCast(
515 Value *S, ///< The pointer value to be casted (operand 0)
516 Type *Ty, ///< The type to which cast should be made
517 const Twine &Name = "", ///< Name for the instruction
518 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
519 );
520
521 /// Create a BitCast or an AddrSpaceCast cast instruction.
522 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
523 Value *S, ///< The pointer value to be casted (operand 0)
524 Type *Ty, ///< The type to which operand is casted
525 const Twine &Name, ///< The name for the instruction
526 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
527 );
528
529 /// Create a BitCast or an AddrSpaceCast cast instruction.
530 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
531 Value *S, ///< The pointer value to be casted (operand 0)
532 Type *Ty, ///< The type to which cast should be made
533 const Twine &Name = "", ///< Name for the instruction
534 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
535 );
536
537 /// Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
538 ///
539 /// If the value is a pointer type and the destination an integer type,
540 /// creates a PtrToInt cast. If the value is an integer type and the
541 /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
542 /// a bitcast.
543 static CastInst *CreateBitOrPointerCast(
544 Value *S, ///< The pointer value to be casted (operand 0)
545 Type *Ty, ///< The type to which cast should be made
546 const Twine &Name = "", ///< Name for the instruction
547 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
548 );
549
550 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
551 static CastInst *CreateIntegerCast(
552 Value *S, ///< The pointer value to be casted (operand 0)
553 Type *Ty, ///< The type to which cast should be made
554 bool isSigned, ///< Whether to regard S as signed or not
555 const Twine &Name = "", ///< Name for the instruction
556 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
557 );
558
559 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
560 static CastInst *CreateIntegerCast(
561 Value *S, ///< The integer value to be casted (operand 0)
562 Type *Ty, ///< The integer type to which operand is casted
563 bool isSigned, ///< Whether to regard S as signed or not
564 const Twine &Name, ///< The name for the instruction
565 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
566 );
567
568 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
569 static CastInst *CreateFPCast(
570 Value *S, ///< The floating point value to be casted
571 Type *Ty, ///< The floating point type to cast to
572 const Twine &Name = "", ///< Name for the instruction
573 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
574 );
575
576 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
577 static CastInst *CreateFPCast(
578 Value *S, ///< The floating point value to be casted
579 Type *Ty, ///< The floating point type to cast to
580 const Twine &Name, ///< The name for the instruction
581 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
582 );
583
584 /// Create a Trunc or BitCast cast instruction
585 static CastInst *CreateTruncOrBitCast(
586 Value *S, ///< The value to be casted (operand 0)
587 Type *Ty, ///< The type to which cast should be made
588 const Twine &Name = "", ///< Name for the instruction
589 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
590 );
591
592 /// Create a Trunc or BitCast cast instruction
593 static CastInst *CreateTruncOrBitCast(
594 Value *S, ///< The value to be casted (operand 0)
595 Type *Ty, ///< The type to which operand is casted
596 const Twine &Name, ///< The name for the instruction
597 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
598 );
599
600 /// Check whether a bitcast between these types is valid
601 static bool isBitCastable(
602 Type *SrcTy, ///< The Type from which the value should be cast.
603 Type *DestTy ///< The Type to which the value should be cast.
604 );
605
606 /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
607 /// types is valid and a no-op.
608 ///
609 /// This ensures that any pointer<->integer cast has enough bits in the
610 /// integer and any other cast is a bitcast.
611 static bool isBitOrNoopPointerCastable(
612 Type *SrcTy, ///< The Type from which the value should be cast.
613 Type *DestTy, ///< The Type to which the value should be cast.
614 const DataLayout &DL);
615
616 /// Returns the opcode necessary to cast Val into Ty using usual casting
617 /// rules.
618 /// Infer the opcode for cast operand and type
619 static Instruction::CastOps getCastOpcode(
620 const Value *Val, ///< The value to cast
621 bool SrcIsSigned, ///< Whether to treat the source as signed
622 Type *Ty, ///< The Type to which the value should be casted
623 bool DstIsSigned ///< Whether to treate the dest. as signed
624 );
625
626 /// There are several places where we need to know if a cast instruction
627 /// only deals with integer source and destination types. To simplify that
628 /// logic, this method is provided.
629 /// @returns true iff the cast has only integral typed operand and dest type.
630 /// Determine if this is an integer-only cast.
631 bool isIntegerCast() const;
632
633 /// A lossless cast is one that does not alter the basic value. It implies
634 /// a no-op cast but is more stringent, preventing things like int->float,
635 /// long->double, or int->ptr.
636 /// @returns true iff the cast is lossless.
637 /// Determine if this is a lossless cast.
638 bool isLosslessCast() const;
639
640 /// A no-op cast is one that can be effected without changing any bits.
641 /// It implies that the source and destination types are the same size. The
642 /// DataLayout argument is to determine the pointer size when examining casts
643 /// involving Integer and Pointer types. They are no-op casts if the integer
644 /// is the same size as the pointer. However, pointer size varies with
645 /// platform. Note that a precondition of this method is that the cast is
646 /// legal - i.e. the instruction formed with these operands would verify.
647 static bool isNoopCast(
648 Instruction::CastOps Opcode, ///< Opcode of cast
649 Type *SrcTy, ///< SrcTy of cast
650 Type *DstTy, ///< DstTy of cast
651 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
652 );
653
654 /// Determine if this cast is a no-op cast.
655 ///
656 /// \param DL is the DataLayout to determine pointer size.
657 bool isNoopCast(const DataLayout &DL) const;
658
659 /// Determine how a pair of casts can be eliminated, if they can be at all.
660 /// This is a helper function for both CastInst and ConstantExpr.
661 /// @returns 0 if the CastInst pair can't be eliminated, otherwise
662 /// returns Instruction::CastOps value for a cast that can replace
663 /// the pair, casting SrcTy to DstTy.
664 /// Determine if a cast pair is eliminable
665 static unsigned isEliminableCastPair(
666 Instruction::CastOps firstOpcode, ///< Opcode of first cast
667 Instruction::CastOps secondOpcode, ///< Opcode of second cast
668 Type *SrcTy, ///< SrcTy of 1st cast
669 Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
670 Type *DstTy, ///< DstTy of 2nd cast
671 Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
672 Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
673 Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
674 );
675
676 /// Return the opcode of this CastInst
677 Instruction::CastOps getOpcode() const {
678 return Instruction::CastOps(Instruction::getOpcode());
679 }
680
681 /// Return the source type, as a convenience
682 Type* getSrcTy() const { return getOperand(0)->getType(); }
683 /// Return the destination type, as a convenience
684 Type* getDestTy() const { return getType(); }
685
686 /// This method can be used to determine if a cast from SrcTy to DstTy using
687 /// Opcode op is valid or not.
688 /// @returns true iff the proposed cast is valid.
689 /// Determine if a cast is valid without creating one.
690 static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy);
691 static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
692 return castIsValid(op, S->getType(), DstTy);
693 }
694
695 /// Methods for support type inquiry through isa, cast, and dyn_cast:
696 static bool classof(const Instruction *I) {
697 return I->isCast();
698 }
699 static bool classof(const Value *V) {
700 return isa<Instruction>(V) && classof(cast<Instruction>(V));
701 }
702};
703
704//===----------------------------------------------------------------------===//
705// CmpInst Class
706//===----------------------------------------------------------------------===//
707
708/// This class is the base class for the comparison instructions.
709/// Abstract base class of comparison instructions.
710class CmpInst : public Instruction {
711public:
712 /// This enumeration lists the possible predicates for CmpInst subclasses.
713 /// Values in the range 0-31 are reserved for FCmpInst, while values in the
714 /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
715 /// predicate values are not overlapping between the classes.
716 ///
717 /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
718 /// FCMP_* values. Changing the bit patterns requires a potential change to
719 /// those passes.
720 enum Predicate : unsigned {
721 // Opcode U L G E Intuitive operation
722 FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
723 FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
724 FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
725 FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
726 FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
727 FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
728 FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
729 FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
730 FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
731 FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
732 FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
733 FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
734 FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
735 FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
736 FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
737 FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
738 FIRST_FCMP_PREDICATE = FCMP_FALSE,
739 LAST_FCMP_PREDICATE = FCMP_TRUE,
740 BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
741 ICMP_EQ = 32, ///< equal
742 ICMP_NE = 33, ///< not equal
743 ICMP_UGT = 34, ///< unsigned greater than
744 ICMP_UGE = 35, ///< unsigned greater or equal
745 ICMP_ULT = 36, ///< unsigned less than
746 ICMP_ULE = 37, ///< unsigned less or equal
747 ICMP_SGT = 38, ///< signed greater than
748 ICMP_SGE = 39, ///< signed greater or equal
749 ICMP_SLT = 40, ///< signed less than
750 ICMP_SLE = 41, ///< signed less or equal
751 FIRST_ICMP_PREDICATE = ICMP_EQ,
752 LAST_ICMP_PREDICATE = ICMP_SLE,
753 BAD_ICMP_PREDICATE = ICMP_SLE + 1
754 };
755 using PredicateField =
756 Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>;
757
758protected:
759 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
760 Value *LHS, Value *RHS, const Twine &Name = "",
761 Instruction *InsertBefore = nullptr,
762 Instruction *FlagsSource = nullptr);
763
764 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
765 Value *LHS, Value *RHS, const Twine &Name,
766 BasicBlock *InsertAtEnd);
767
768public:
769 // allocate space for exactly two operands
770 void *operator new(size_t S) { return User::operator new(S, 2); }
771 void operator delete(void *Ptr) { User::operator delete(Ptr); }
772
773 /// Construct a compare instruction, given the opcode, the predicate and
774 /// the two operands. Optionally (if InstBefore is specified) insert the
775 /// instruction into a BasicBlock right before the specified instruction.
776 /// The specified Instruction is allowed to be a dereferenced end iterator.
777 /// Create a CmpInst
778 static CmpInst *Create(OtherOps Op,
779 Predicate predicate, Value *S1,
780 Value *S2, const Twine &Name = "",
781 Instruction *InsertBefore = nullptr);
782
783 /// Construct a compare instruction, given the opcode, the predicate and the
784 /// two operands. Also automatically insert this instruction to the end of
785 /// the BasicBlock specified.
786 /// Create a CmpInst
787 static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
788 Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
789
790 /// Get the opcode casted to the right type
791 OtherOps getOpcode() const {
792 return static_cast<OtherOps>(Instruction::getOpcode());
793 }
794
795 /// Return the predicate for this instruction.
796 Predicate getPredicate() const { return getSubclassData<PredicateField>(); }
797
798 /// Set the predicate for this instruction to the specified value.
799 void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }
800
 /// True iff \p P is a floating-point predicate, i.e. falls in the
 /// inclusive range [FIRST_FCMP_PREDICATE, LAST_FCMP_PREDICATE] (0..15).
 /// The static_assert lets the lower-bound check be omitted.
801 static bool isFPPredicate(Predicate P) {
802 static_assert(FIRST_FCMP_PREDICATE == 0,
803 "FIRST_FCMP_PREDICATE is required to be 0");
804 return P <= LAST_FCMP_PREDICATE;
805 }
806
 /// True iff \p P is an integer predicate, i.e. falls in the inclusive
 /// range [FIRST_ICMP_PREDICATE, LAST_ICMP_PREDICATE] (ICMP_EQ..ICMP_SLE).
807 static bool isIntPredicate(Predicate P) {
808 return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
809 }
810
811 static StringRef getPredicateName(Predicate P);
812
813 bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
814 bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
815
816 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
817 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
818 /// @returns the inverse predicate for the instruction's current predicate.
819 /// Return the inverse of the instruction's predicate.
820 Predicate getInversePredicate() const {
821 return getInversePredicate(getPredicate());
822 }
823
824 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
825 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
826 /// @returns the inverse predicate for predicate provided in \p pred.
827 /// Return the inverse of a given predicate
828 static Predicate getInversePredicate(Predicate pred);
829
830 /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
831 /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
832 /// @returns the predicate that would be the result of exchanging the two
833 /// operands of the CmpInst instruction without changing the result
834 /// produced.
835 /// Return the predicate as if the operands were swapped
836 Predicate getSwappedPredicate() const {
837 return getSwappedPredicate(getPredicate());
838 }
839
840 /// This is a static version that you can use without an instruction
841 /// available.
842 /// Return the predicate as if the operands were swapped.
843 static Predicate getSwappedPredicate(Predicate pred);
844
845 /// This is a static version that you can use without an instruction
846 /// available.
847 /// @returns true if the comparison predicate is strict, false otherwise.
848 static bool isStrictPredicate(Predicate predicate);
849
850 /// @returns true if the comparison predicate is strict, false otherwise.
851 /// Determine if this instruction is using an strict comparison predicate.
852 bool isStrictPredicate() const { return isStrictPredicate(getPredicate()); }
853
854 /// This is a static version that you can use without an instruction
855 /// available.
856 /// @returns true if the comparison predicate is non-strict, false otherwise.
857 static bool isNonStrictPredicate(Predicate predicate);
858
859 /// @returns true if the comparison predicate is non-strict, false otherwise.
860 /// Determine if this instruction is using an non-strict comparison predicate.
861 bool isNonStrictPredicate() const {
862 return isNonStrictPredicate(getPredicate());
863 }
864
865 /// For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
866 /// Returns the strict version of non-strict comparisons.
867 Predicate getStrictPredicate() const {
868 return getStrictPredicate(getPredicate());
869 }
870
871 /// This is a static version that you can use without an instruction
872 /// available.
873 /// @returns the strict version of comparison provided in \p pred.
874 /// If \p pred is not a strict comparison predicate, returns \p pred.
875 /// Returns the strict version of non-strict comparisons.
876 static Predicate getStrictPredicate(Predicate pred);
877
878 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
879 /// Returns the non-strict version of strict comparisons.
880 Predicate getNonStrictPredicate() const {
881 return getNonStrictPredicate(getPredicate());
882 }
883
884 /// This is a static version that you can use without an instruction
885 /// available.
886 /// @returns the non-strict version of comparison provided in \p pred.
887 /// If \p pred is not a strict comparison predicate, returns \p pred.
888 /// Returns the non-strict version of strict comparisons.
889 static Predicate getNonStrictPredicate(Predicate pred);
890
891 /// This is a static version that you can use without an instruction
892 /// available.
893 /// Return the flipped strictness of predicate
894 static Predicate getFlippedStrictnessPredicate(Predicate pred);
895
896 /// For predicate of kind "is X or equal to 0" returns the predicate "is X".
897 /// For predicate of kind "is X" returns the predicate "is X or equal to 0".
898 /// does not support other kind of predicates.
899 /// @returns the predicate that does not contains is equal to zero if
900 /// it had and vice versa.
901 /// Return the flipped strictness of predicate
902 Predicate getFlippedStrictnessPredicate() const {
903 return getFlippedStrictnessPredicate(getPredicate());
904 }
905
906 /// Provide more efficient getOperand methods.
907 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
908
909 /// This is just a convenience that dispatches to the subclasses.
910 /// Swap the operands and adjust predicate accordingly to retain
911 /// the same comparison.
912 void swapOperands();
913
914 /// This is just a convenience that dispatches to the subclasses.
915 /// Determine if this CmpInst is commutative.
916 bool isCommutative() const;
917
918 /// Determine if this is an equals/not equals predicate.
919 /// This is a static version that you can use without an instruction
920 /// available.
921 static bool isEquality(Predicate pred);
922
923 /// Determine if this is an equals/not equals predicate.
924 bool isEquality() const { return isEquality(getPredicate()); }
925
926 /// Return true if the predicate is relational (not EQ or NE).
927 static bool isRelational(Predicate P) { return !isEquality(P); }
928
929 /// Return true if the predicate is relational (not EQ or NE).
930 bool isRelational() const { return !isEquality(); }
931
932 /// @returns true if the comparison is signed, false otherwise.
933 /// Determine if this instruction is using a signed comparison.
934 bool isSigned() const {
935 return isSigned(getPredicate());
936 }
937
938 /// @returns true if the comparison is unsigned, false otherwise.
939 /// Determine if this instruction is using an unsigned comparison.
940 bool isUnsigned() const {
941 return isUnsigned(getPredicate());
942 }
943
944 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
945 /// @returns the signed version of the unsigned predicate pred.
946 /// return the signed version of a predicate
947 static Predicate getSignedPredicate(Predicate pred);
948
949 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
950 /// @returns the signed version of the predicate for this instruction (which
951 /// has to be an unsigned predicate).
952 /// return the signed version of a predicate
953 Predicate getSignedPredicate() {
954 return getSignedPredicate(getPredicate());
955 }
956
957 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
958 /// @returns the unsigned version of the signed predicate pred.
959 static Predicate getUnsignedPredicate(Predicate pred);
960
961 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
962 /// @returns the unsigned version of the predicate for this instruction (which
963 /// has to be an signed predicate).
964 /// return the unsigned version of a predicate
965 Predicate getUnsignedPredicate() {
966 return getUnsignedPredicate(getPredicate());
967 }
968
969 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
970 /// @returns the unsigned version of the signed predicate pred or
971 /// the signed version of the signed predicate pred.
972 static Predicate getFlippedSignednessPredicate(Predicate pred);
973
974 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
975 /// @returns the unsigned version of the signed predicate pred or
976 /// the signed version of the signed predicate pred.
977 Predicate getFlippedSignednessPredicate() {
978 return getFlippedSignednessPredicate(getPredicate());
979 }
980
981 /// This is just a convenience.
982 /// Determine if this is true when both operands are the same.
983 bool isTrueWhenEqual() const {
984 return isTrueWhenEqual(getPredicate());
985 }
986
987 /// This is just a convenience.
988 /// Determine if this is false when both operands are the same.
989 bool isFalseWhenEqual() const {
990 return isFalseWhenEqual(getPredicate());
991 }
992
993 /// @returns true if the predicate is unsigned, false otherwise.
994 /// Determine if the predicate is an unsigned operation.
995 static bool isUnsigned(Predicate predicate);
996
997 /// @returns true if the predicate is signed, false otherwise.
998 /// Determine if the predicate is an signed operation.
999 static bool isSigned(Predicate predicate);
1000
1001 /// Determine if the predicate is an ordered operation.
1002 static bool isOrdered(Predicate predicate);
1003
1004 /// Determine if the predicate is an unordered operation.
1005 static bool isUnordered(Predicate predicate);
1006
1007 /// Determine if the predicate is true when comparing a value with itself.
1008 static bool isTrueWhenEqual(Predicate predicate);
1009
1010 /// Determine if the predicate is false when comparing a value with itself.
1011 static bool isFalseWhenEqual(Predicate predicate);
1012
1013 /// Determine if Pred1 implies Pred2 is true when two compares have matching
1014 /// operands.
1015 static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
1016
1017 /// Determine if Pred1 implies Pred2 is false when two compares have matching
1018 /// operands.
1019 static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
1020
1021 /// Methods for support type inquiry through isa, cast, and dyn_cast:
 /// A CmpInst is exactly an ICmp or FCmp instruction — nothing else.
1022 static bool classof(const Instruction *I) {
1023 return I->getOpcode() == Instruction::ICmp ||
1024 I->getOpcode() == Instruction::FCmp;
1025 }
 /// Value overload: defers to the Instruction overload after checking the
 /// Value is an Instruction at all.
1026 static bool classof(const Value *V) {
1027 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1028 }
1029
1030 /// Create a result type for fcmp/icmp
 /// Scalar operand types yield i1; vector operand types yield a vector of
 /// i1 with the same element count as the operand type.
1031 static Type* makeCmpResultType(Type* opnd_type) {
1032 if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
1033 return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
1034 vt->getElementCount());
1035 }
1036 return Type::getInt1Ty(opnd_type->getContext());
1037 }
1038
1039private:
1040 // Shadow Value::setValueSubclassData with a private forwarding method so that
1041 // subclasses cannot accidentally use it.
1042 void setValueSubclassData(unsigned short D) {
1043 Value::setValueSubclassData(D);
1044 }
1045};
1046
1047// FIXME: these are redundant if CmpInst < BinaryOperator
// OperandTraits specialization: a CmpInst always has exactly two operands
// (LHS and RHS), so it uses the fixed-operand-count traits with N == 2.
// This matches the `operator new(S, 2)` allocation above.
1048template <>
1049struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
1050};
1051
1052DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)CmpInst::op_iterator CmpInst::op_begin() { return OperandTraits
<CmpInst>::op_begin(this); } CmpInst::const_op_iterator
CmpInst::op_begin() const { return OperandTraits<CmpInst>
::op_begin(const_cast<CmpInst*>(this)); } CmpInst::op_iterator
CmpInst::op_end() { return OperandTraits<CmpInst>::op_end
(this); } CmpInst::const_op_iterator CmpInst::op_end() const {
return OperandTraits<CmpInst>::op_end(const_cast<CmpInst
*>(this)); } Value *CmpInst::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<CmpInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CmpInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1052, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<CmpInst>::op_begin(const_cast
<CmpInst*>(this))[i_nocapture].get()); } void CmpInst::
setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<CmpInst>::
operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<CmpInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1052, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
CmpInst>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CmpInst::getNumOperands() const { return OperandTraits<CmpInst
>::operands(this); } template <int Idx_nocapture> Use
&CmpInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
CmpInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1053
1054/// A lightweight accessor for an operand bundle meant to be passed
1055/// around by value.
1056struct OperandBundleUse {
 /// Non-owning view of the bundle's operand uses; the underlying Use
 /// objects live in the call instruction this bundle was read from.
1057 ArrayRef<Use> Inputs;
1058
1059 OperandBundleUse() = default;
1060 explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
1061 : Inputs(Inputs), Tag(Tag) {}
1062
1063 /// Return true if the operand at index \p Idx in this operand bundle
1064 /// has the attribute A.
 /// Only "deopt" bundles report attributes here: their pointer-typed
 /// operands are treated as ReadOnly and NoCapture; everything else
 /// conservatively answers false.
1065 bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
1066 if (isDeoptOperandBundle())
1067 if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
1068 return Inputs[Idx]->getType()->isPointerTy();
1069
1070 // Conservative answer: no operands have any attributes.
1071 return false;
1072 }
1073
1074 /// Return the tag of this operand bundle as a string.
1075 StringRef getTagName() const {
1076 return Tag->getKey();
1077 }
1078
1079 /// Return the tag of this operand bundle as an integer.
1080 ///
1081 /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
1082 /// and this function returns the unique integer getOrInsertBundleTag
1083 /// associated the tag of this operand bundle to.
1084 uint32_t getTagID() const {
1085 return Tag->getValue();
1086 }
1087
1088 /// Return true if this is a "deopt" operand bundle.
1089 bool isDeoptOperandBundle() const {
1090 return getTagID() == LLVMContext::OB_deopt;
1091 }
1092
1093 /// Return true if this is a "funclet" operand bundle.
1094 bool isFuncletOperandBundle() const {
1095 return getTagID() == LLVMContext::OB_funclet;
1096 }
1097
1098 /// Return true if this is a "cfguardtarget" operand bundle.
1099 bool isCFGuardTargetOperandBundle() const {
1100 return getTagID() == LLVMContext::OB_cfguardtarget;
1101 }
1102
1103private:
1104 /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
 /// The StringMapEntry key is the textual tag name; its value is the
 /// interned integer ID compared against the LLVMContext::OB_* constants.
1105 StringMapEntry<uint32_t> *Tag;
1106};
1107
1108/// A container for an operand bundle being viewed as a set of values
1109/// rather than a set of uses.
1110///
1111/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
1112/// so it is possible to create and pass around "self-contained" instances of
1113/// OperandBundleDef and ConstOperandBundleDef.
1114template <typename InputTy> class OperandBundleDefT {
 // Owned copies of the tag string and the input values.
1115 std::string Tag;
1116 std::vector<InputTy> Inputs;
1117
1118public:
 /// Take ownership of an existing tag/input-vector pair (moves both in).
1119 explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
1120 : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
 /// Copy the inputs out of a non-owning ArrayRef view.
1121 explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
1122 : Tag(std::move(Tag)), Inputs(Inputs) {}
1123
 /// Deep-copy conversion from a transient OperandBundleUse: both the tag
 /// name and the operand values are copied into owned storage.
1124 explicit OperandBundleDefT(const OperandBundleUse &OBU) {
1125 Tag = std::string(OBU.getTagName());
1126 llvm::append_range(Inputs, OBU.Inputs);
1127 }
1128
1129 ArrayRef<InputTy> inputs() const { return Inputs; }
1130
1131 using input_iterator = typename std::vector<InputTy>::const_iterator;
1132
 // Read-only access to the owned inputs.
1133 size_t input_size() const { return Inputs.size(); }
1134 input_iterator input_begin() const { return Inputs.begin(); }
1135 input_iterator input_end() const { return Inputs.end(); }
1136
1137 StringRef getTag() const { return Tag; }
1138};
1139
// Convenience instantiations: bundles of mutable vs. const Value pointers.
1140using OperandBundleDef = OperandBundleDefT<Value *>;
1141using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
1142
1143//===----------------------------------------------------------------------===//
1144// CallBase Class
1145//===----------------------------------------------------------------------===//
1146
1147/// Base class for all callable instructions (InvokeInst and CallInst)
1148/// Holds everything related to calling a function.
1149///
1150/// All call-like instructions are required to use a common operand layout:
1151/// - Zero or more arguments to the call,
1152/// - Zero or more operand bundles with zero or more operand inputs each
1153/// bundle,
1154/// - Zero or more subclass controlled operands
1155/// - The called function.
1156///
1157/// This allows this base class to easily access the called function and the
1158/// start of the arguments without knowing how many other operands a particular
1159/// subclass requires. Note that accessing the end of the argument list isn't
1160/// as cheap as most other operations on the base class.
1161class CallBase : public Instruction {
1162protected:
1163 // The first two bits are reserved by CallInst for fast retrieval,
1164 using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>;
1165 using CallingConvField =
1166 Bitfield::Element<CallingConv::ID, CallInstReservedField::NextBit, 10,
1167 CallingConv::MaxID>;
1168 static_assert(
1169 Bitfield::areContiguous<CallInstReservedField, CallingConvField>(),
1170 "Bitfields must be contiguous");
1171
1172 /// The last operand is the called operand.
1173 static constexpr int CalledOperandOpEndIdx = -1;
1174
1175 AttributeList Attrs; ///< parameter attributes for callable
1176 FunctionType *FTy;
1177
1178 template <class... ArgsTy>
1179 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1180 : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1181
1182 using Instruction::Instruction;
1183
1184 bool hasDescriptor() const { return Value::HasDescriptor; }
1185
1186 unsigned getNumSubclassExtraOperands() const {
1187 switch (getOpcode()) {
1188 case Instruction::Call:
1189 return 0;
1190 case Instruction::Invoke:
1191 return 2;
1192 case Instruction::CallBr:
1193 return getNumSubclassExtraOperandsDynamic();
1194 }
1195 llvm_unreachable("Invalid opcode!")::llvm::llvm_unreachable_internal("Invalid opcode!", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1195)
;
1196 }
1197
1198 /// Get the number of extra operands for instructions that don't have a fixed
1199 /// number of extra operands.
1200 unsigned getNumSubclassExtraOperandsDynamic() const;
1201
1202public:
1203 using Instruction::getContext;
1204
1205 /// Create a clone of \p CB with a different set of operand bundles and
1206 /// insert it before \p InsertPt.
1207 ///
1208 /// The returned call instruction is identical \p CB in every way except that
1209 /// the operand bundles for the new instruction are set to the operand bundles
1210 /// in \p Bundles.
1211 static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
1212 Instruction *InsertPt = nullptr);
1213
1214 /// Create a clone of \p CB with the operand bundle with the tag matching
1215 /// \p Bundle's tag replaced with Bundle, and insert it before \p InsertPt.
1216 ///
1217 /// The returned call instruction is identical \p CI in every way except that
1218 /// the specified operand bundle has been replaced.
1219 static CallBase *Create(CallBase *CB,
1220 OperandBundleDef Bundle,
1221 Instruction *InsertPt = nullptr);
1222
1223 /// Create a clone of \p CB with operand bundle \p OB added.
1224 static CallBase *addOperandBundle(CallBase *CB, uint32_t ID,
1225 OperandBundleDef OB,
1226 Instruction *InsertPt = nullptr);
1227
1228 /// Create a clone of \p CB with operand bundle \p ID removed.
1229 static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
1230 Instruction *InsertPt = nullptr);
1231
1232 static bool classof(const Instruction *I) {
1233 return I->getOpcode() == Instruction::Call ||
1234 I->getOpcode() == Instruction::Invoke ||
1235 I->getOpcode() == Instruction::CallBr;
1236 }
1237 static bool classof(const Value *V) {
1238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1239 }
1240
1241 FunctionType *getFunctionType() const { return FTy; }
1242
1243 void mutateFunctionType(FunctionType *FTy) {
1244 Value::mutateType(FTy->getReturnType());
1245 this->FTy = FTy;
1246 }
1247
1248 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1249
1250 /// data_operands_begin/data_operands_end - Return iterators iterating over
1251 /// the call / invoke argument list and bundle operands. For invokes, this is
1252 /// the set of instruction operands except the invoke target and the two
1253 /// successor blocks; and for calls this is the set of instruction operands
1254 /// except the call target.
1255 User::op_iterator data_operands_begin() { return op_begin(); }
1256 User::const_op_iterator data_operands_begin() const {
1257 return const_cast<CallBase *>(this)->data_operands_begin();
1258 }
1259 User::op_iterator data_operands_end() {
1260 // Walk from the end of the operands over the called operand and any
1261 // subclass operands.
1262 return op_end() - getNumSubclassExtraOperands() - 1;
1263 }
1264 User::const_op_iterator data_operands_end() const {
1265 return const_cast<CallBase *>(this)->data_operands_end();
1266 }
1267 iterator_range<User::op_iterator> data_ops() {
1268 return make_range(data_operands_begin(), data_operands_end());
1269 }
1270 iterator_range<User::const_op_iterator> data_ops() const {
1271 return make_range(data_operands_begin(), data_operands_end());
1272 }
1273 bool data_operands_empty() const {
1274 return data_operands_end() == data_operands_begin();
1275 }
1276 unsigned data_operands_size() const {
1277 return std::distance(data_operands_begin(), data_operands_end());
1278 }
1279
1280 bool isDataOperand(const Use *U) const {
1281 assert(this == U->getUser() &&(static_cast <bool> (this == U->getUser() &&
"Only valid to query with a use of this instruction!") ? void
(0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1282, __extension__ __PRETTY_FUNCTION__))
1282 "Only valid to query with a use of this instruction!")(static_cast <bool> (this == U->getUser() &&
"Only valid to query with a use of this instruction!") ? void
(0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1282, __extension__ __PRETTY_FUNCTION__))
;
1283 return data_operands_begin() <= U && U < data_operands_end();
1284 }
1285 bool isDataOperand(Value::const_user_iterator UI) const {
1286 return isDataOperand(&UI.getUse());
1287 }
1288
1289 /// Given a value use iterator, return the data operand corresponding to it.
1290 /// Iterator must actually correspond to a data operand.
1291 unsigned getDataOperandNo(Value::const_user_iterator UI) const {
1292 return getDataOperandNo(&UI.getUse());
1293 }
1294
1295 /// Given a use for a data operand, get the data operand number that
1296 /// corresponds to it.
1297 unsigned getDataOperandNo(const Use *U) const {
1298 assert(isDataOperand(U) && "Data operand # out of range!")(static_cast <bool> (isDataOperand(U) && "Data operand # out of range!"
) ? void (0) : __assert_fail ("isDataOperand(U) && \"Data operand # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1298, __extension__ __PRETTY_FUNCTION__))
;
1299 return U - data_operands_begin();
1300 }
1301
1302 /// Return the iterator pointing to the beginning of the argument list.
1303 User::op_iterator arg_begin() { return op_begin(); }
1304 User::const_op_iterator arg_begin() const {
1305 return const_cast<CallBase *>(this)->arg_begin();
1306 }
1307
1308 /// Return the iterator pointing to the end of the argument list.
1309 User::op_iterator arg_end() {
1310 // From the end of the data operands, walk backwards past the bundle
1311 // operands.
1312 return data_operands_end() - getNumTotalBundleOperands();
1313 }
1314 User::const_op_iterator arg_end() const {
1315 return const_cast<CallBase *>(this)->arg_end();
1316 }
1317
1318 /// Iteration adapter for range-for loops.
1319 iterator_range<User::op_iterator> args() {
1320 return make_range(arg_begin(), arg_end());
1321 }
1322 iterator_range<User::const_op_iterator> args() const {
1323 return make_range(arg_begin(), arg_end());
1324 }
1325 bool arg_empty() const { return arg_end() == arg_begin(); }
1326 unsigned arg_size() const { return arg_end() - arg_begin(); }
1327
1328 // Legacy API names that duplicate the above and will be removed once users
1329 // are migrated.
1330 iterator_range<User::op_iterator> arg_operands() {
1331 return make_range(arg_begin(), arg_end());
1332 }
1333 iterator_range<User::const_op_iterator> arg_operands() const {
1334 return make_range(arg_begin(), arg_end());
1335 }
1336 unsigned getNumArgOperands() const { return arg_size(); }
1337
1338 Value *getArgOperand(unsigned i) const {
1339 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1339, __extension__ __PRETTY_FUNCTION__))
;
1340 return getOperand(i);
1341 }
1342
1343 void setArgOperand(unsigned i, Value *v) {
1344 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1344, __extension__ __PRETTY_FUNCTION__))
;
1345 setOperand(i, v);
1346 }
1347
1348 /// Wrappers for getting the \c Use of a call argument.
1349 const Use &getArgOperandUse(unsigned i) const {
1350 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1350, __extension__ __PRETTY_FUNCTION__))
;
1351 return User::getOperandUse(i);
1352 }
1353 Use &getArgOperandUse(unsigned i) {
1354 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast <bool> (i < getNumArgOperands() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1354, __extension__ __PRETTY_FUNCTION__))
;
1355 return User::getOperandUse(i);
1356 }
1357
1358 bool isArgOperand(const Use *U) const {
1359 assert(this == U->getUser() &&(static_cast <bool> (this == U->getUser() &&
"Only valid to query with a use of this instruction!") ? void
(0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1360, __extension__ __PRETTY_FUNCTION__))
1360 "Only valid to query with a use of this instruction!")(static_cast <bool> (this == U->getUser() &&
"Only valid to query with a use of this instruction!") ? void
(0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1360, __extension__ __PRETTY_FUNCTION__))
;
1361 return arg_begin() <= U && U < arg_end();
1362 }
1363 bool isArgOperand(Value::const_user_iterator UI) const {
1364 return isArgOperand(&UI.getUse());
1365 }
1366
1367 /// Given a use for a arg operand, get the arg operand number that
1368 /// corresponds to it.
1369 unsigned getArgOperandNo(const Use *U) const {
1370 assert(isArgOperand(U) && "Arg operand # out of range!")(static_cast <bool> (isArgOperand(U) && "Arg operand # out of range!"
) ? void (0) : __assert_fail ("isArgOperand(U) && \"Arg operand # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1370, __extension__ __PRETTY_FUNCTION__))
;
1371 return U - arg_begin();
1372 }
1373
1374 /// Given a value use iterator, return the arg operand number corresponding to
1375 /// it. Iterator must actually correspond to a data operand.
1376 unsigned getArgOperandNo(Value::const_user_iterator UI) const {
1377 return getArgOperandNo(&UI.getUse());
1378 }
1379
1380 /// Returns true if this CallSite passes the given Value* as an argument to
1381 /// the called function.
1382 bool hasArgument(const Value *V) const {
1383 return llvm::is_contained(args(), V);
1384 }
1385
1386 Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
1387
1388 const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
1389 Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
1390
1391 /// Returns the function called, or null if this is an
1392 /// indirect function invocation.
1393 Function *getCalledFunction() const {
1394 return dyn_cast_or_null<Function>(getCalledOperand());
1395 }
1396
1397 /// Return true if the callsite is an indirect call.
1398 bool isIndirectCall() const;
1399
1400 /// Determine whether the passed iterator points to the callee operand's Use.
1401 bool isCallee(Value::const_user_iterator UI) const {
1402 return isCallee(&UI.getUse());
1403 }
1404
1405 /// Determine whether this Use is the callee operand's Use.
1406 bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
1407
1408 /// Helper to get the caller (the parent function).
1409 Function *getCaller();
1410 const Function *getCaller() const {
1411 return const_cast<CallBase *>(this)->getCaller();
1412 }
1413
1414 /// Tests if this call site must be tail call optimized. Only a CallInst can
1415 /// be tail call optimized.
1416 bool isMustTailCall() const;
1417
1418 /// Tests if this call site is marked as a tail call.
1419 bool isTailCall() const;
1420
1421 /// Returns the intrinsic ID of the intrinsic called or
1422 /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
1423 /// this is an indirect call.
1424 Intrinsic::ID getIntrinsicID() const;
1425
1426 void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
1427
1428 /// Sets the function called, including updating the function type.
1429 void setCalledFunction(Function *Fn) {
1430 setCalledFunction(Fn->getFunctionType(), Fn);
1431 }
1432
1433 /// Sets the function called, including updating the function type.
1434 void setCalledFunction(FunctionCallee Fn) {
1435 setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
1436 }
1437
1438 /// Sets the function called, including updating to the specified function
1439 /// type.
1440 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1441 this->FTy = FTy;
1442 assert(cast<PointerType>(Fn->getType())->isOpaqueOrPointeeTypeMatches(FTy))(static_cast <bool> (cast<PointerType>(Fn->getType
())->isOpaqueOrPointeeTypeMatches(FTy)) ? void (0) : __assert_fail
("cast<PointerType>(Fn->getType())->isOpaqueOrPointeeTypeMatches(FTy)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1442, __extension__ __PRETTY_FUNCTION__))
;
1443 // This function doesn't mutate the return type, only the function
1444 // type. Seems broken, but I'm just gonna stick an assert in for now.
1445 assert(getType() == FTy->getReturnType())(static_cast <bool> (getType() == FTy->getReturnType
()) ? void (0) : __assert_fail ("getType() == FTy->getReturnType()"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1445, __extension__ __PRETTY_FUNCTION__))
;
1446 setCalledOperand(Fn);
1447 }
1448
1449 CallingConv::ID getCallingConv() const {
1450 return getSubclassData<CallingConvField>();
1451 }
1452
1453 void setCallingConv(CallingConv::ID CC) {
1454 setSubclassData<CallingConvField>(CC);
1455 }
1456
1457 /// Check if this call is an inline asm statement.
1458 bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
1459
1460 /// \name Attribute API
1461 ///
1462 /// These methods access and modify attributes on this call (including
1463 /// looking through to the attributes on the called function when necessary).
1464 ///@{
1465
1466 /// Return the parameter attributes for this call.
1467 ///
1468 AttributeList getAttributes() const { return Attrs; }
1469
1470 /// Set the parameter attributes for this call.
1471 ///
1472 void setAttributes(AttributeList A) { Attrs = A; }
1473
1474 /// Determine whether this call has the given attribute. If it does not
1475 /// then determine if the called function has the attribute, but only if
1476 /// the attribute is allowed for the call.
1477 bool hasFnAttr(Attribute::AttrKind Kind) const {
1478 assert(Kind != Attribute::NoBuiltin &&(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1479, __extension__ __PRETTY_FUNCTION__))
1479 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin")(static_cast <bool> (Kind != Attribute::NoBuiltin &&
"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? void (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1479, __extension__ __PRETTY_FUNCTION__))
;
1480 return hasFnAttrImpl(Kind);
1481 }
1482
1483 /// Determine whether this call has the given attribute. If it does not
1484 /// then determine if the called function has the attribute, but only if
1485 /// the attribute is allowed for the call.
1486 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1487
1488 /// adds the attribute to the list of attributes.
1489 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1490 AttributeList PAL = getAttributes();
1491 PAL = PAL.addAttribute(getContext(), i, Kind);
1492 setAttributes(PAL);
1493 }
1494
1495 /// adds the attribute to the list of attributes.
1496 void addAttribute(unsigned i, Attribute Attr) {
1497 AttributeList PAL = getAttributes();
1498 PAL = PAL.addAttribute(getContext(), i, Attr);
1499 setAttributes(PAL);
1500 }
1501
1502 /// Adds the attribute to the indicated argument
1503 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1504 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1504, __extension__ __PRETTY_FUNCTION__))
;
1505 AttributeList PAL = getAttributes();
1506 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1507 setAttributes(PAL);
1508 }
1509
1510 /// Adds the attribute to the indicated argument
1511 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1512 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1512, __extension__ __PRETTY_FUNCTION__))
;
1513 AttributeList PAL = getAttributes();
1514 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1515 setAttributes(PAL);
1516 }
1517
1518 /// removes the attribute from the list of attributes.
1519 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1520 AttributeList PAL = getAttributes();
1521 PAL = PAL.removeAttribute(getContext(), i, Kind);
1522 setAttributes(PAL);
1523 }
1524
1525 /// removes the attribute from the list of attributes.
1526 void removeAttribute(unsigned i, StringRef Kind) {
1527 AttributeList PAL = getAttributes();
1528 PAL = PAL.removeAttribute(getContext(), i, Kind);
1529 setAttributes(PAL);
1530 }
1531
1532 void removeAttributes(unsigned i, const AttrBuilder &Attrs) {
1533 AttributeList PAL = getAttributes();
1534 PAL = PAL.removeAttributes(getContext(), i, Attrs);
1535 setAttributes(PAL);
1536 }
1537
1538 /// Removes the attribute from the given argument
1539 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1540 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1540, __extension__ __PRETTY_FUNCTION__))
;
1541 AttributeList PAL = getAttributes();
1542 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1543 setAttributes(PAL);
1544 }
1545
1546 /// Removes the attribute from the given argument
1547 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1548 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1548, __extension__ __PRETTY_FUNCTION__))
;
1549 AttributeList PAL = getAttributes();
1550 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1551 setAttributes(PAL);
1552 }
1553
1554 /// Removes the attributes from the given argument
1555 void removeParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
1556 AttributeList PAL = getAttributes();
1557 PAL = PAL.removeParamAttributes(getContext(), ArgNo, Attrs);
1558 setAttributes(PAL);
1559 }
1560
1561 /// adds the dereferenceable attribute to the list of attributes.
1562 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1563 AttributeList PAL = getAttributes();
1564 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1565 setAttributes(PAL);
1566 }
1567
1568 /// adds the dereferenceable_or_null attribute to the list of
1569 /// attributes.
1570 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1571 AttributeList PAL = getAttributes();
1572 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1573 setAttributes(PAL);
1574 }
1575
1576 /// Determine whether the return value has the given attribute.
1577 bool hasRetAttr(Attribute::AttrKind Kind) const {
1578 return hasRetAttrImpl(Kind);
1579 }
1580 /// Determine whether the return value has the given attribute.
1581 bool hasRetAttr(StringRef Kind) const { return hasRetAttrImpl(Kind); }
1582
1583 /// Determine whether the argument or parameter has the given attribute.
1584 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1585
1586 /// Get the attribute of a given kind at a position.
1587 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1588 return getAttributes().getAttribute(i, Kind);
1589 }
1590
1591 /// Get the attribute of a given kind at a position.
1592 Attribute getAttribute(unsigned i, StringRef Kind) const {
1593 return getAttributes().getAttribute(i, Kind);
1594 }
1595
1596 /// Get the attribute of a given kind from a given arg
1597 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1598 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1598, __extension__ __PRETTY_FUNCTION__))
;
1599 return getAttributes().getParamAttr(ArgNo, Kind);
1600 }
1601
1602 /// Get the attribute of a given kind from a given arg
1603 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1604 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast <bool> (ArgNo < getNumArgOperands() &&
"Out of bounds") ? void (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1604, __extension__ __PRETTY_FUNCTION__))
;
1605 return getAttributes().getParamAttr(ArgNo, Kind);
1606 }
1607
1608 /// Return true if the data operand at index \p i has the attribute \p
1609 /// A.
1610 ///
1611 /// Data operands include call arguments and values used in operand bundles,
1612 /// but does not include the callee operand. This routine dispatches to the
1613 /// underlying AttributeList or the OperandBundleUser as appropriate.
1614 ///
1615 /// The index \p i is interpreted as
1616 ///
1617 /// \p i == Attribute::ReturnIndex -> the return value
1618 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1619 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1620 /// (\p i - 1) in the operand list.
1621 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1622 // Note that we have to add one because `i` isn't zero-indexed.
1623 assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&(static_cast <bool> (i < (getNumArgOperands() + getNumTotalBundleOperands
() + 1) && "Data operand index out of bounds!") ? void
(0) : __assert_fail ("i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1624, __extension__ __PRETTY_FUNCTION__))
1624 "Data operand index out of bounds!")(static_cast <bool> (i < (getNumArgOperands() + getNumTotalBundleOperands
() + 1) && "Data operand index out of bounds!") ? void
(0) : __assert_fail ("i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1624, __extension__ __PRETTY_FUNCTION__))
;
1625
1626 // The attribute A can either be directly specified, if the operand in
1627 // question is a call argument; or be indirectly implied by the kind of its
1628 // containing operand bundle, if the operand is a bundle operand.
1629
1630 if (i == AttributeList::ReturnIndex)
1631 return hasRetAttr(Kind);
1632
1633 // FIXME: Avoid these i - 1 calculations and update the API to use
1634 // zero-based indices.
1635 if (i < (getNumArgOperands() + 1))
1636 return paramHasAttr(i - 1, Kind);
1637
1638 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&(static_cast <bool> (hasOperandBundles() && i >=
(getBundleOperandsStartIndex() + 1) && "Must be either a call argument or an operand bundle!"
) ? void (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1639, __extension__ __PRETTY_FUNCTION__))
1639 "Must be either a call argument or an operand bundle!")(static_cast <bool> (hasOperandBundles() && i >=
(getBundleOperandsStartIndex() + 1) && "Must be either a call argument or an operand bundle!"
) ? void (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1639, __extension__ __PRETTY_FUNCTION__))
;
1640 return bundleOperandHasAttr(i - 1, Kind);
1641 }
1642
1643 /// Determine whether this data operand is not captured.
1644 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1645 // better indicate that this may return a conservative answer.
1646 bool doesNotCapture(unsigned OpNo) const {
1647 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
1648 }
1649
1650 /// Determine whether this argument is passed by value.
1651 bool isByValArgument(unsigned ArgNo) const {
1652 return paramHasAttr(ArgNo, Attribute::ByVal);
1653 }
1654
1655 /// Determine whether this argument is passed in an alloca.
1656 bool isInAllocaArgument(unsigned ArgNo) const {
1657 return paramHasAttr(ArgNo, Attribute::InAlloca);
1658 }
1659
1660 /// Determine whether this argument is passed by value, in an alloca, or is
1661 /// preallocated.
1662 bool isPassPointeeByValueArgument(unsigned ArgNo) const {
1663 return paramHasAttr(ArgNo, Attribute::ByVal) ||
1664 paramHasAttr(ArgNo, Attribute::InAlloca) ||
1665 paramHasAttr(ArgNo, Attribute::Preallocated);
1666 }
1667
1668 /// Determine whether passing undef to this argument is undefined behavior.
1669 /// If passing undef to this argument is UB, passing poison is UB as well
1670 /// because poison is more undefined than undef.
1671 bool isPassingUndefUB(unsigned ArgNo) const {
1672 return paramHasAttr(ArgNo, Attribute::NoUndef) ||
1673 // dereferenceable implies noundef.
1674 paramHasAttr(ArgNo, Attribute::Dereferenceable) ||
1675 // dereferenceable implies noundef, and null is a well-defined value.
1676 paramHasAttr(ArgNo, Attribute::DereferenceableOrNull);
1677 }
1678
1679 /// Determine if there are is an inalloca argument. Only the last argument can
1680 /// have the inalloca attribute.
1681 bool hasInAllocaArgument() const {
1682 return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
1683 }
1684
1685 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1686 // better indicate that this may return a conservative answer.
1687 bool doesNotAccessMemory(unsigned OpNo) const {
1688 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1689 }
1690
1691 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1692 // better indicate that this may return a conservative answer.
1693 bool onlyReadsMemory(unsigned OpNo) const {
1694 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
1695 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1696 }
1697
1698 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1699 // better indicate that this may return a conservative answer.
1700 bool doesNotReadMemory(unsigned OpNo) const {
1701 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
1702 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1703 }
1704
1705 /// Extract the alignment of the return value.
1706 MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
1707
1708 /// Extract the alignment for a call or parameter (0=unknown).
1709 MaybeAlign getParamAlign(unsigned ArgNo) const {
1710 return Attrs.getParamAlignment(ArgNo);
1711 }
1712
1713 MaybeAlign getParamStackAlign(unsigned ArgNo) const {
1714 return Attrs.getParamStackAlignment(ArgNo);
1715 }
1716
1717 /// Extract the byval type for a call or parameter.
1718 Type *getParamByValType(unsigned ArgNo) const {
1719 if (auto *Ty = Attrs.getParamByValType(ArgNo))
1720 return Ty;
1721 if (const Function *F = getCalledFunction())
1722 return F->getAttributes().getParamByValType(ArgNo);
1723 return nullptr;
1724 }
1725
1726 /// Extract the preallocated type for a call or parameter.
1727 Type *getParamPreallocatedType(unsigned ArgNo) const {
1728 if (auto *Ty = Attrs.getParamPreallocatedType(ArgNo))
1729 return Ty;
1730 if (const Function *F = getCalledFunction())
1731 return F->getAttributes().getParamPreallocatedType(ArgNo);
1732 return nullptr;
1733 }
1734
1735 /// Extract the preallocated type for a call or parameter.
1736 Type *getParamInAllocaType(unsigned ArgNo) const {
1737 if (auto *Ty = Attrs.getParamInAllocaType(ArgNo))
1738 return Ty;
1739 if (const Function *F = getCalledFunction())
1740 return F->getAttributes().getParamInAllocaType(ArgNo);
1741 return nullptr;
1742 }
1743
1744 /// Extract the number of dereferenceable bytes for a call or
1745 /// parameter (0=unknown).
1746 uint64_t getDereferenceableBytes(unsigned i) const {
1747 return Attrs.getDereferenceableBytes(i);
1748 }
1749
1750 /// Extract the number of dereferenceable_or_null bytes for a call or
1751 /// parameter (0=unknown).
1752 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1753 return Attrs.getDereferenceableOrNullBytes(i);
1754 }
1755
1756 /// Return true if the return value is known to be not null.
1757 /// This may be because it has the nonnull attribute, or because at least
1758 /// one byte is dereferenceable and the pointer is in addrspace(0).
1759 bool isReturnNonNull() const;
1760
1761 /// Determine if the return value is marked with NoAlias attribute.
1762 bool returnDoesNotAlias() const {
1763 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1764 }
1765
1766 /// If one of the arguments has the 'returned' attribute, returns its
1767 /// operand value. Otherwise, return nullptr.
1768 Value *getReturnedArgOperand() const;
1769
1770 /// Return true if the call should not be treated as a call to a
1771 /// builtin.
1772 bool isNoBuiltin() const {
1773 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1774 !hasFnAttrImpl(Attribute::Builtin);
1775 }
1776
1777 /// Determine if the call requires strict floating point semantics.
1778 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1779
1780 /// Return true if the call should not be inlined.
1781 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1782 void setIsNoInline() {
1783 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1784 }
1785 /// Determine if the call does not access memory.
1786 bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
1787 void setDoesNotAccessMemory() {
1788 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1789 }
1790
1791 /// Determine if the call does not access or only reads memory.
1792 bool onlyReadsMemory() const {
1793 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1794 }
1795
1796 void setOnlyReadsMemory() {
1797 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1798 }
1799
1800 /// Determine if the call does not access or only writes memory.
1801 bool doesNotReadMemory() const {
1802 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1803 }
1804 void setDoesNotReadMemory() {
1805 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1806 }
1807
1808 /// Determine if the call can access memmory only using pointers based
1809 /// on its arguments.
1810 bool onlyAccessesArgMemory() const {
1811 return hasFnAttr(Attribute::ArgMemOnly);
1812 }
1813 void setOnlyAccessesArgMemory() {
1814 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1815 }
1816
1817 /// Determine if the function may only access memory that is
1818 /// inaccessible from the IR.
1819 bool onlyAccessesInaccessibleMemory() const {
1820 return hasFnAttr(Attribute::InaccessibleMemOnly);
1821 }
1822 void setOnlyAccessesInaccessibleMemory() {
1823 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1824 }
1825
1826 /// Determine if the function may only access memory that is
1827 /// either inaccessible from the IR or pointed to by its arguments.
1828 bool onlyAccessesInaccessibleMemOrArgMem() const {
1829 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1830 }
1831 void setOnlyAccessesInaccessibleMemOrArgMem() {
1832 addAttribute(AttributeList::FunctionIndex,
1833 Attribute::InaccessibleMemOrArgMemOnly);
1834 }
1835 /// Determine if the call cannot return.
1836 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1837 void setDoesNotReturn() {
1838 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1839 }
1840
1841 /// Determine if the call should not perform indirect branch tracking.
1842 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1843
1844 /// Determine if the call cannot unwind.
1845 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1846 void setDoesNotThrow() {
1847 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1848 }
1849
1850 /// Determine if the invoke cannot be duplicated.
1851 bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
1852 void setCannotDuplicate() {
1853 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1854 }
1855
1856 /// Determine if the call cannot be tail merged.
1857 bool cannotMerge() const { return hasFnAttr(Attribute::NoMerge); }
1858 void setCannotMerge() {
1859 addAttribute(AttributeList::FunctionIndex, Attribute::NoMerge);
1860 }
1861
1862 /// Determine if the invoke is convergent
1863 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1864 void setConvergent() {
1865 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1866 }
1867 void setNotConvergent() {
1868 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1869 }
1870
1871 /// Determine if the call returns a structure through first
1872 /// pointer argument.
1873 bool hasStructRetAttr() const {
1874 if (getNumArgOperands() == 0)
1875 return false;
1876
1877 // Be friendly and also check the callee.
1878 return paramHasAttr(0, Attribute::StructRet);
1879 }
1880
1881 /// Determine if any call argument is an aggregate passed by value.
1882 bool hasByValArgument() const {
1883 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1884 }
1885
1886 ///@{
1887 // End of attribute API.
1888
1889 /// \name Operand Bundle API
1890 ///
1891 /// This group of methods provides the API to access and manipulate operand
1892 /// bundles on this call.
1893 /// @{
1894
1895 /// Return the number of operand bundles associated with this User.
1896 unsigned getNumOperandBundles() const {
1897 return std::distance(bundle_op_info_begin(), bundle_op_info_end());
1898 }
1899
1900 /// Return true if this User has any operand bundles.
1901 bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
1902
1903 /// Return the index of the first bundle operand in the Use array.
1904 unsigned getBundleOperandsStartIndex() const {
1905 assert(hasOperandBundles() && "Don't call otherwise!")(static_cast <bool> (hasOperandBundles() && "Don't call otherwise!"
) ? void (0) : __assert_fail ("hasOperandBundles() && \"Don't call otherwise!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1905, __extension__ __PRETTY_FUNCTION__))
;
1906 return bundle_op_info_begin()->Begin;
1907 }
1908
1909 /// Return the index of the last bundle operand in the Use array.
1910 unsigned getBundleOperandsEndIndex() const {
1911 assert(hasOperandBundles() && "Don't call otherwise!")(static_cast <bool> (hasOperandBundles() && "Don't call otherwise!"
) ? void (0) : __assert_fail ("hasOperandBundles() && \"Don't call otherwise!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1911, __extension__ __PRETTY_FUNCTION__))
;
1912 return bundle_op_info_end()[-1].End;
1913 }
1914
1915 /// Return true if the operand at index \p Idx is a bundle operand.
1916 bool isBundleOperand(unsigned Idx) const {
1917 return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
1918 Idx < getBundleOperandsEndIndex();
1919 }
1920
1921 /// Returns true if the use is a bundle operand.
1922 bool isBundleOperand(const Use *U) const {
1923 assert(this == U->getUser() &&(static_cast <bool> (this == U->getUser() &&
"Only valid to query with a use of this instruction!") ? void
(0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1924, __extension__ __PRETTY_FUNCTION__))
1924 "Only valid to query with a use of this instruction!")(static_cast <bool> (this == U->getUser() &&
"Only valid to query with a use of this instruction!") ? void
(0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1924, __extension__ __PRETTY_FUNCTION__))
;
1925 return hasOperandBundles() && isBundleOperand(U - op_begin());
1926 }
1927 bool isBundleOperand(Value::const_user_iterator UI) const {
1928 return isBundleOperand(&UI.getUse());
1929 }
1930
1931 /// Return the total number operands (not operand bundles) used by
1932 /// every operand bundle in this OperandBundleUser.
1933 unsigned getNumTotalBundleOperands() const {
1934 if (!hasOperandBundles())
1935 return 0;
1936
1937 unsigned Begin = getBundleOperandsStartIndex();
1938 unsigned End = getBundleOperandsEndIndex();
1939
1940 assert(Begin <= End && "Should be!")(static_cast <bool> (Begin <= End && "Should be!"
) ? void (0) : __assert_fail ("Begin <= End && \"Should be!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1940, __extension__ __PRETTY_FUNCTION__))
;
1941 return End - Begin;
1942 }
1943
1944 /// Return the operand bundle at a specific index.
1945 OperandBundleUse getOperandBundleAt(unsigned Index) const {
1946 assert(Index < getNumOperandBundles() && "Index out of bounds!")(static_cast <bool> (Index < getNumOperandBundles() &&
"Index out of bounds!") ? void (0) : __assert_fail ("Index < getNumOperandBundles() && \"Index out of bounds!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1946, __extension__ __PRETTY_FUNCTION__))
;
1947 return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
1948 }
1949
1950 /// Return the number of operand bundles with the tag Name attached to
1951 /// this instruction.
1952 unsigned countOperandBundlesOfType(StringRef Name) const {
1953 unsigned Count = 0;
1954 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1955 if (getOperandBundleAt(i).getTagName() == Name)
1956 Count++;
1957
1958 return Count;
1959 }
1960
1961 /// Return the number of operand bundles with the tag ID attached to
1962 /// this instruction.
1963 unsigned countOperandBundlesOfType(uint32_t ID) const {
1964 unsigned Count = 0;
1965 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1966 if (getOperandBundleAt(i).getTagID() == ID)
1967 Count++;
1968
1969 return Count;
1970 }
1971
1972 /// Return an operand bundle by name, if present.
1973 ///
1974 /// It is an error to call this for operand bundle types that may have
1975 /// multiple instances of them on the same instruction.
1976 Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
1977 assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!")(static_cast <bool> (countOperandBundlesOfType(Name) <
2 && "Precondition violated!") ? void (0) : __assert_fail
("countOperandBundlesOfType(Name) < 2 && \"Precondition violated!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1977, __extension__ __PRETTY_FUNCTION__))
;
1978
1979 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1980 OperandBundleUse U = getOperandBundleAt(i);
1981 if (U.getTagName() == Name)
1982 return U;
1983 }
1984
1985 return None;
1986 }
1987
1988 /// Return an operand bundle by tag ID, if present.
1989 ///
1990 /// It is an error to call this for operand bundle types that may have
1991 /// multiple instances of them on the same instruction.
1992 Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
1993 assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!")(static_cast <bool> (countOperandBundlesOfType(ID) <
2 && "Precondition violated!") ? void (0) : __assert_fail
("countOperandBundlesOfType(ID) < 2 && \"Precondition violated!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 1993, __extension__ __PRETTY_FUNCTION__))
;
1994
1995 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1996 OperandBundleUse U = getOperandBundleAt(i);
1997 if (U.getTagID() == ID)
1998 return U;
1999 }
2000
2001 return None;
2002 }
2003
2004 /// Return the list of operand bundles attached to this instruction as
2005 /// a vector of OperandBundleDefs.
2006 ///
2007 /// This function copies the OperandBundeUse instances associated with this
2008 /// OperandBundleUser to a vector of OperandBundleDefs. Note:
2009 /// OperandBundeUses and OperandBundleDefs are non-trivially *different*
2010 /// representations of operand bundles (see documentation above).
2011 void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const;
2012
2013 /// Return the operand bundle for the operand at index OpIdx.
2014 ///
2015 /// It is an error to call this with an OpIdx that does not correspond to an
2016 /// bundle operand.
2017 OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
2018 return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
2019 }
2020
2021 /// Return true if this operand bundle user has operand bundles that
2022 /// may read from the heap.
2023 bool hasReadingOperandBundles() const;
2024
2025 /// Return true if this operand bundle user has operand bundles that
2026 /// may write to the heap.
2027 bool hasClobberingOperandBundles() const {
2028 for (auto &BOI : bundle_op_infos()) {
2029 if (BOI.Tag->second == LLVMContext::OB_deopt ||
2030 BOI.Tag->second == LLVMContext::OB_funclet)
2031 continue;
2032
2033 // This instruction has an operand bundle that is not known to us.
2034 // Assume the worst.
2035 return true;
2036 }
2037
2038 return false;
2039 }
2040
2041 /// Return true if the bundle operand at index \p OpIdx has the
2042 /// attribute \p A.
2043 bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
2044 auto &BOI = getBundleOpInfoForOperand(OpIdx);
2045 auto OBU = operandBundleFromBundleOpInfo(BOI);
2046 return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
2047 }
2048
2049 /// Return true if \p Other has the same sequence of operand bundle
2050 /// tags with the same number of operands on each one of them as this
2051 /// OperandBundleUser.
2052 bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
2053 if (getNumOperandBundles() != Other.getNumOperandBundles())
2054 return false;
2055
2056 return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
2057 Other.bundle_op_info_begin());
2058 }
2059
2060 /// Return true if this operand bundle user contains operand bundles
2061 /// with tags other than those specified in \p IDs.
2062 bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
2063 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
2064 uint32_t ID = getOperandBundleAt(i).getTagID();
2065 if (!is_contained(IDs, ID))
2066 return true;
2067 }
2068 return false;
2069 }
2070
2071 /// Is the function attribute S disallowed by some operand bundle on
2072 /// this operand bundle user?
2073 bool isFnAttrDisallowedByOpBundle(StringRef S) const {
2074 // Operand bundles only possibly disallow readnone, readonly and argmemonly
2075 // attributes. All String attributes are fine.
2076 return false;
2077 }
2078
2079 /// Is the function attribute A disallowed by some operand bundle on
2080 /// this operand bundle user?
2081 bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
2082 switch (A) {
2083 default:
2084 return false;
2085
2086 case Attribute::InaccessibleMemOrArgMemOnly:
2087 return hasReadingOperandBundles();
2088
2089 case Attribute::InaccessibleMemOnly:
2090 return hasReadingOperandBundles();
2091
2092 case Attribute::ArgMemOnly:
2093 return hasReadingOperandBundles();
2094
2095 case Attribute::ReadNone:
2096 return hasReadingOperandBundles();
2097
2098 case Attribute::ReadOnly:
2099 return hasClobberingOperandBundles();
2100 }
2101
2102 llvm_unreachable("switch has a default case!")::llvm::llvm_unreachable_internal("switch has a default case!"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 2102)
;
2103 }
2104
2105 /// Used to keep track of an operand bundle. See the main comment on
2106 /// OperandBundleUser above.
2107 struct BundleOpInfo {
2108 /// The operand bundle tag, interned by
2109 /// LLVMContextImpl::getOrInsertBundleTag.
2110 StringMapEntry<uint32_t> *Tag;
2111
2112 /// The index in the Use& vector where operands for this operand
2113 /// bundle starts.
2114 uint32_t Begin;
2115
2116 /// The index in the Use& vector where operands for this operand
2117 /// bundle ends.
2118 uint32_t End;
2119
// Two BundleOpInfos are equal iff they carry the same interned tag and
// describe the same [Begin, End) operand range.
2120 bool operator==(const BundleOpInfo &Other) const {
2121 return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
2122 }
2123 };
2124
2125 /// Simple helper function to map a BundleOpInfo to an
2126 /// OperandBundleUse.
2127 OperandBundleUse
2128 operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
2129 auto begin = op_begin();
2130 ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
2131 return OperandBundleUse(BOI.Tag, Inputs);
2132 }
2133
// BundleOpInfo records are laid out contiguously in the co-allocated
// descriptor (see bundle_op_info_begin/end below), so plain pointers
// serve as iterators over them.
2134 using bundle_op_iterator = BundleOpInfo *;
2135 using const_bundle_op_iterator = const BundleOpInfo *;
2136
2137 /// Return the start of the list of BundleOpInfo instances associated
2138 /// with this OperandBundleUser.
2139 ///
2140 /// OperandBundleUser uses the descriptor area co-allocated with the host User
2141 /// to store some meta information about which operands are "normal" operands,
2142 /// and which ones belong to some operand bundle.
2143 ///
2144 /// The layout of an operand bundle user is
2145 ///
2146 /// +-----------uint32_t End-------------------------------------+
2147 /// | |
2148 /// | +--------uint32_t Begin--------------------+ |
2149 /// | | | |
2150 /// ^ ^ v v
2151 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2152 /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
2153 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2154 /// v v ^ ^
2155 /// | | | |
2156 /// | +--------uint32_t Begin------------+ |
2157 /// | |
2158 /// +-----------uint32_t End-----------------------------+
2159 ///
2160 ///
2161 /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
2162 /// list. These descriptions are installed and managed by this class, and
2163 /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
2164 ///
2165 /// DU is an additional descriptor installed by User's 'operator new' to keep
2166 /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
2167 /// access or modify DU in any way, it's an implementation detail private to
2168 /// User.
2169 ///
2170 /// The regular Use& vector for the User starts at U0. The operand bundle
2171 /// uses are part of the Use& vector, just like normal uses. In the diagram
2172 /// above, the operand bundle uses start at BOI0_U0. Each instance of
2173 /// BundleOpInfo has information about a contiguous set of uses constituting
2174 /// an operand bundle, and the total set of operand bundle uses themselves
2175 /// form a contiguous set of uses (i.e. there are no gaps between uses
2176 /// corresponding to individual operand bundles).
2177 ///
2178 /// This class does not know the location of the set of operand bundle uses
2179 /// within the use list -- that is decided by the User using this class via
2180 /// the BeginIdx argument in populateBundleOperandInfos.
2181 ///
2182 /// Currently operand bundle users with hung-off operands are not supported.
2183 bundle_op_iterator bundle_op_info_begin() {
2184 if (!hasDescriptor())
2185 return nullptr;
2186
2187 uint8_t *BytesBegin = getDescriptor().begin();
2188 return reinterpret_cast<bundle_op_iterator>(BytesBegin);
2189 }
2190
2191 /// Return the start of the list of BundleOpInfo instances associated
2192 /// with this OperandBundleUser.
2193 const_bundle_op_iterator bundle_op_info_begin() const {
2194 auto *NonConstThis = const_cast<CallBase *>(this);
2195 return NonConstThis->bundle_op_info_begin();
2196 }
2197
2198 /// Return the end of the list of BundleOpInfo instances associated
2199 /// with this OperandBundleUser.
2200 bundle_op_iterator bundle_op_info_end() {
2201 if (!hasDescriptor())
2202 return nullptr;
2203
2204 uint8_t *BytesEnd = getDescriptor().end();
2205 return reinterpret_cast<bundle_op_iterator>(BytesEnd);
2206 }
2207
2208 /// Return the end of the list of BundleOpInfo instances associated
2209 /// with this OperandBundleUser.
2210 const_bundle_op_iterator bundle_op_info_end() const {
2211 auto *NonConstThis = const_cast<CallBase *>(this);
2212 return NonConstThis->bundle_op_info_end();
2213 }
2214
2215 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2216 iterator_range<bundle_op_iterator> bundle_op_infos() {
2217 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2218 }
2219
2220 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2221 iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
2222 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2223 }
2224
2225 /// Populate the BundleOpInfo instances and the Use& vector from \p
2226 /// Bundles. Return the op_iterator pointing to the Use& one past the
2227 /// last bundle operand use.
2228 ///
2229 /// Each \p OperandBundleDef instance is tracked by an OperandBundleInfo
2230 /// instance allocated in this User's descriptor.
2231 op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
2232 const unsigned BeginIndex);
2233
2234public:
2235 /// Return the BundleOpInfo for the operand at index OpIdx.
2236 ///
2237 /// It is an error to call this with an OpIdx that does not correspond to a
2238 /// bundle operand.
2239 BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx);
// Const overload: delegates to the non-const version, which does not
// mutate the object.
2240 const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
2241 return const_cast<CallBase *>(this)->getBundleOpInfoForOperand(OpIdx);
2242 }
2243
2244protected:
2245 /// Return the total number of values used in \p Bundles.
2246 static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
2247 unsigned Total = 0;
2248 for (auto &B : Bundles)
2249 Total += B.input_size();
2250 return Total;
2251 }
2252
2253 /// @}
2254 // End of operand bundle API.
2255
2256private:
// Out-of-line helpers that check whether the *callee* carries the given
// function attribute; used as the fallback path in hasFnAttrImpl below.
2257 bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
2258 bool hasFnAttrOnCalledFunction(StringRef Kind) const;
2259
2260 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
2261 if (Attrs.hasFnAttribute(Kind))
2262 return true;
2263
2264 // Operand bundles override attributes on the called function, but don't
2265 // override attributes directly present on the call instruction.
2266 if (isFnAttrDisallowedByOpBundle(Kind))
2267 return false;
2268
2269 return hasFnAttrOnCalledFunction(Kind);
2270 }
2271
2272 /// Determine whether the return value has the given attribute. Supports
2273 /// Attribute::AttrKind and StringRef as \p AttrKind types.
2274 template <typename AttrKind> bool hasRetAttrImpl(AttrKind Kind) const {
2275 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
2276 return true;
2277
2278 // Look at the callee, if available.
2279 if (const Function *F = getCalledFunction())
2280 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
2281 return false;
2282 }
2283};
2284
// CallBase stores a variadic operand list with a minimum arity of 1.
2285template <>
2286struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
2287
// Expands to the inline definitions of CallBase's transparent operand
// accessors (op_begin/op_end, range-checked getOperand/setOperand,
// getNumOperands, and the templated Op<>() helpers); the text following the
// macro invocation below is the analyzer's inlined expansion of it.
2288DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)CallBase::op_iterator CallBase::op_begin() { return OperandTraits
<CallBase>::op_begin(this); } CallBase::const_op_iterator
CallBase::op_begin() const { return OperandTraits<CallBase
>::op_begin(const_cast<CallBase*>(this)); } CallBase
::op_iterator CallBase::op_end() { return OperandTraits<CallBase
>::op_end(this); } CallBase::const_op_iterator CallBase::op_end
() const { return OperandTraits<CallBase>::op_end(const_cast
<CallBase*>(this)); } Value *CallBase::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<CallBase>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 2288, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<CallBase>::op_begin(const_cast
<CallBase*>(this))[i_nocapture].get()); } void CallBase
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<CallBase>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 2288, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
CallBase>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
CallBase::getNumOperands() const { return OperandTraits<CallBase
>::operands(this); } template <int Idx_nocapture> Use
&CallBase::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
CallBase::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2289
2290//===----------------------------------------------------------------------===//
2291// FuncletPadInst Class
2292//===----------------------------------------------------------------------===//
2293class FuncletPadInst : public Instruction {
2294private:
// Private copy constructor: only the befriended classes below can copy a
// FuncletPadInst (presumably via their cloneImpl machinery — confirm at the
// out-of-line definition).
2295 FuncletPadInst(const FuncletPadInst &CPI);
2296
2297 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2298 ArrayRef<Value *> Args, unsigned Values,
2299 const Twine &NameStr, Instruction *InsertBefore);
2300 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2301 ArrayRef<Value *> Args, unsigned Values,
2302 const Twine &NameStr, BasicBlock *InsertAtEnd);
2303
2304 void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
2305
2306protected:
2307 // Note: Instruction needs to be a friend here to call cloneImpl.
2308 friend class Instruction;
2309 friend class CatchPadInst;
2310 friend class CleanupPadInst;
2311
2312 FuncletPadInst *cloneImpl() const;
2313
2314public:
2315 /// Provide fast operand accessors
2316 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2317
2318 /// getNumArgOperands - Return the number of funcletpad arguments.
2319 ///
// The last operand is the parent pad (see getParentPad below), so the
// argument count is one less than the operand count.
2320 unsigned getNumArgOperands() const { return getNumOperands() - 1; }
2321
2322 /// Convenience accessors
2323
2324 /// Return the outer EH-pad this funclet is nested within.
2325 ///
2326 /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
2327 /// is a CatchPadInst.
// The parent pad is stored as the last operand (Op<-1>).
2328 Value *getParentPad() const { return Op<-1>(); }
2329 void setParentPad(Value *ParentPad) {
2330 assert(ParentPad)(static_cast <bool> (ParentPad) ? void (0) : __assert_fail
("ParentPad", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 2330, __extension__ __PRETTY_FUNCTION__))
;
2331 Op<-1>() = ParentPad;
2332 }
2333
2334 /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
2335 ///
2336 Value *getArgOperand(unsigned i) const { return getOperand(i); }
2337 void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
2338
2339 /// arg_operands - iteration adapter for range-for loops.
// Excludes the trailing parent-pad operand (op_end() - 1).
2340 op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
2341
2342 /// arg_operands - iteration adapter for range-for loops.
2343 const_op_range arg_operands() const {
2344 return const_op_range(op_begin(), op_end() - 1);
2345 }
2346
2347 // Methods for support type inquiry through isa, cast, and dyn_cast:
2348 static bool classof(const Instruction *I) { return I->isFuncletPad(); }
2349 static bool classof(const Value *V) {
2350 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2351 }
2352};
2353
// FuncletPadInst stores a variadic operand list with a minimum arity of 1
// (the parent pad).
2354template <>
2355struct OperandTraits<FuncletPadInst>
2356 : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
2357
// Expands to the inline definitions of FuncletPadInst's transparent operand
// accessors; the text following the macro invocation below is the analyzer's
// inlined expansion of it.
2358DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)FuncletPadInst::op_iterator FuncletPadInst::op_begin() { return
OperandTraits<FuncletPadInst>::op_begin(this); } FuncletPadInst
::const_op_iterator FuncletPadInst::op_begin() const { return
OperandTraits<FuncletPadInst>::op_begin(const_cast<
FuncletPadInst*>(this)); } FuncletPadInst::op_iterator FuncletPadInst
::op_end() { return OperandTraits<FuncletPadInst>::op_end
(this); } FuncletPadInst::const_op_iterator FuncletPadInst::op_end
() const { return OperandTraits<FuncletPadInst>::op_end
(const_cast<FuncletPadInst*>(this)); } Value *FuncletPadInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<FuncletPadInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<FuncletPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 2358, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<FuncletPadInst>::op_begin(
const_cast<FuncletPadInst*>(this))[i_nocapture].get());
} void FuncletPadInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<FuncletPadInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<FuncletPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/InstrTypes.h"
, 2358, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
FuncletPadInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned FuncletPadInst::getNumOperands() const { return OperandTraits
<FuncletPadInst>::operands(this); } template <int Idx_nocapture
> Use &FuncletPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &FuncletPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2359
2360} // end namespace llvm
2361
2362#endif // LLVM_IR_INSTRTYPES_H