Bug Summary

File: llvm/lib/Target/X86/X86PartialReduction.cpp
Warning: line 289, column 26
Division by zero
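
On the path below, the analyzer assumes the copy loop at line 286 exits
immediately (step 71: 'i' is equal to 'SubElts' while i is still 0), which
constrains SubElts to 0, so the modulo at line 289 divides by zero. A minimal
self-contained sketch of the flagged pattern (a hypothetical reduction of the
code, not a quote from the file):

  #include <vector>

  // Builds a mask that pads a SubElts-wide vector out to NumElts lanes.
  // If SubElts can be 0 -- the analyzer does not know getNumElements()
  // returns a nonzero count here -- the modulo below divides by zero.
  std::vector<int> buildConcatMask(unsigned SubElts, unsigned NumElts) {
    std::vector<int> Mask(NumElts);
    for (unsigned i = 0; i != SubElts; ++i)
      Mask[i] = i;
    for (unsigned i = SubElts; i != NumElts; ++i)
      Mask[i] = (i % SubElts) + SubElts; // UB when SubElts == 0
    return Mask;
  }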

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86PartialReduction.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2021-01-24-223304-31662-1 -x c++ /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/X86/X86PartialReduction.cpp

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/X86/X86PartialReduction.cpp

1//===-- X86PartialReduction.cpp -------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass looks for add instructions used by a horizontal reduction to see
10// if we might be able to use pmaddwd or psadbw. Some cases of this require
11// cross basic block knowledge and can't be done in SelectionDAG.
12//
13//===----------------------------------------------------------------------===//
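// For orientation (a sketch, not part of the original file): with 16-bit
// inputs, the vectorized form of a loop such as
//
//   int dotProduct(const short *A, const short *B, int N) {
//     int Sum = 0;
//     for (int I = 0; I < N; ++I)
//       Sum += A[I] * B[I]; // widened to i32, then horizontally reduced
//     return Sum;
//   }
//
// multiplies extended i16 values and horizontally adds the lanes; that is
// the shape tryMAddReplacement() rewrites for pmaddwd, while
// trySADReplacement() handles the abs(sub(zext i8, zext i8)) shape for
// psadbw.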
14
15#include "X86.h"
16#include "llvm/Analysis/ValueTracking.h"
17#include "llvm/CodeGen/TargetPassConfig.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/Instructions.h"
20#include "llvm/IR/IntrinsicsX86.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/Operator.h"
23#include "llvm/Pass.h"
24#include "X86TargetMachine.h"
25
26using namespace llvm;
27
28#define DEBUG_TYPE "x86-partial-reduction"
29
30namespace {
31
32class X86PartialReduction : public FunctionPass {
33 const DataLayout *DL;
34 const X86Subtarget *ST;
35
36public:
37 static char ID; // Pass identification, replacement for typeid.
38
39 X86PartialReduction() : FunctionPass(ID) { }
40
41 bool runOnFunction(Function &Fn) override;
42
43 void getAnalysisUsage(AnalysisUsage &AU) const override {
44 AU.setPreservesCFG();
45 }
46
47 StringRef getPassName() const override {
48 return "X86 Partial Reduction";
49 }
50
51private:
52 bool tryMAddReplacement(Instruction *Op);
53 bool trySADReplacement(Instruction *Op);
54};
55}
56
57FunctionPass *llvm::createX86PartialReductionPass() {
58 return new X86PartialReduction();
59}
60
61char X86PartialReduction::ID = 0;
62
63INITIALIZE_PASS(X86PartialReduction, DEBUG_TYPE,
64                "X86 Partial Reduction", false, false)
65
66bool X86PartialReduction::tryMAddReplacement(Instruction *Op) {
67 if (!ST->hasSSE2())
68 return false;
69
70 // Need at least 8 elements.
71 if (cast<FixedVectorType>(Op->getType())->getNumElements() < 8)
72 return false;
73
74 // Element type should be i32.
75 if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
76 return false;
77
78 auto *Mul = dyn_cast<BinaryOperator>(Op);
79 if (!Mul || Mul->getOpcode() != Instruction::Mul)
80 return false;
81
82 Value *LHS = Mul->getOperand(0);
83 Value *RHS = Mul->getOperand(1);
84
85 // LHS and RHS should be only used once or if they are the same then only
86 // used twice. Only check this when SSE4.1 is enabled and we have zext/sext
87 // instructions, otherwise we use punpck to emulate zero extend in stages. The
88 // trunc we need to do likely won't introduce new instructions in that case.
89 if (ST->hasSSE41()) {
90 if (LHS == RHS) {
91 if (!isa<Constant>(LHS) && !LHS->hasNUses(2))
92 return false;
93 } else {
94 if (!isa<Constant>(LHS) && !LHS->hasOneUse())
95 return false;
96 if (!isa<Constant>(RHS) && !RHS->hasOneUse())
97 return false;
98 }
99 }
100
101 auto CanShrinkOp = [&](Value *Op) {
102 auto IsFreeTruncation = [&](Value *Op) {
103 if (auto *Cast = dyn_cast<CastInst>(Op)) {
104 if (Cast->getParent() == Mul->getParent() &&
105 (Cast->getOpcode() == Instruction::SExt ||
106 Cast->getOpcode() == Instruction::ZExt) &&
107 Cast->getOperand(0)->getType()->getScalarSizeInBits() <= 16)
108 return true;
109 }
110
111 return isa<Constant>(Op);
112 };
113
114 // If the operation can be freely truncated and has enough sign bits we
115 // can shrink.
116 if (IsFreeTruncation(Op) &&
117 ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
118 return true;
119
120 // SelectionDAG has limited support for truncating through an add or sub if
121 // the inputs are freely truncatable.
122 if (auto *BO = dyn_cast<BinaryOperator>(Op)) {
123 if (BO->getParent() == Mul->getParent() &&
124 IsFreeTruncation(BO->getOperand(0)) &&
125 IsFreeTruncation(BO->getOperand(1)) &&
126 ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
127 return true;
128 }
129
130 return false;
131 };
132
133 // Both Ops need to be shrinkable.
134 if (!CanShrinkOp(LHS) && !CanShrinkOp(RHS))
135 return false;
136
137 IRBuilder<> Builder(Mul);
138
139 auto *MulTy = cast<FixedVectorType>(Op->getType());
140 unsigned NumElts = MulTy->getNumElements();
141
142 // Extract even elements and odd elements and add them together. This will
143 // be pattern matched by SelectionDAG to pmaddwd. This instruction will be
144 // half the original width.
145 SmallVector<int, 16> EvenMask(NumElts / 2);
146 SmallVector<int, 16> OddMask(NumElts / 2);
147 for (int i = 0, e = NumElts / 2; i != e; ++i) {
148 EvenMask[i] = i * 2;
149 OddMask[i] = i * 2 + 1;
150 }
151 // Creating a new mul so the replaceAllUsesWith below doesn't replace the
152 // uses in the shuffles we're creating.
153 Value *NewMul = Builder.CreateMul(Mul->getOperand(0), Mul->getOperand(1));
154 Value *EvenElts = Builder.CreateShuffleVector(NewMul, NewMul, EvenMask);
155 Value *OddElts = Builder.CreateShuffleVector(NewMul, NewMul, OddMask);
156 Value *MAdd = Builder.CreateAdd(EvenElts, OddElts);
157
158 // Concatenate zeroes to extend back to the original type.
159 SmallVector<int, 32> ConcatMask(NumElts);
160 std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
161 Value *Zero = Constant::getNullValue(MAdd->getType());
162 Value *Concat = Builder.CreateShuffleVector(MAdd, Zero, ConcatMask);
163
164 Mul->replaceAllUsesWith(Concat);
165 Mul->eraseFromParent();
166
167 return true;
168}
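// For illustration (a sketch, not part of the original file): with
// NumElts == 8 the masks built above are EvenMask = {0,2,4,6} and
// OddMask = {1,3,5,7}; add(EvenElts, OddElts) is the 4-lane sum of adjacent
// products that SelectionDAG matches to pmaddwd, and ConcatMask =
// {0,1,2,3,4,5,6,7} then pads the 4-lane result back to 8 lanes, taking the
// upper 4 lanes from the zero vector.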
169
170bool X86PartialReduction::trySADReplacement(Instruction *Op) {
171 if (!ST->hasSSE2())
12. Calling 'X86Subtarget::hasSSE2'
14. Returning from 'X86Subtarget::hasSSE2'
15. Taking false branch
172 return false;
173
174 // TODO: There's nothing special about i32, any integer type above i16 should
175 // work just as well.
176 if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
16. The object is a 'VectorType'
17. Assuming the condition is false
18. Taking false branch
177 return false;
178
179 // Operand should be a select.
180 auto *SI = dyn_cast<SelectInst>(Op);
19. Assuming 'Op' is a 'SelectInst'
181 if (!SI)
19.1. 'SI' is non-null
20. Taking false branch
182 return false;
183
184 // Select needs to implement absolute value.
185 Value *LHS, *RHS;
186 auto SPR = matchSelectPattern(SI, LHS, RHS);
187 if (SPR.Flavor != SPF_ABS)
21. Assuming field 'Flavor' is equal to SPF_ABS
22. Taking false branch
188 return false;
189
190 // Need a subtract of two values.
191 auto *Sub = dyn_cast<BinaryOperator>(LHS);
23. Assuming 'LHS' is a 'BinaryOperator'
192 if (!Sub || Sub->getOpcode() != Instruction::Sub)
23.1. 'Sub' is non-null
24. Assuming the condition is false
25. Taking false branch
193 return false;
194
195 // Look for zero extend from i8.
196 auto getZeroExtendedVal = [](Value *Op) -> Value * {
197 if (auto *ZExt = dyn_cast<ZExtInst>(Op))
27. Assuming 'ZExt' is non-null
28. Taking true branch
40. Assuming 'ZExt' is non-null
41. Taking true branch
198 if (cast<VectorType>(ZExt->getOperand(0)->getType())
29. The object is a 'VectorType'
30. Assuming the condition is true
31. Taking true branch
42. The object is a 'VectorType'
43. Assuming the condition is true
44. Taking true branch
199 ->getElementType()
200 ->isIntegerTy(8))
201 return ZExt->getOperand(0);
32. Calling 'UnaryInstruction::getOperand'
36. Returning from 'UnaryInstruction::getOperand'
37. Returning pointer, which participates in a condition later
45. Calling 'UnaryInstruction::getOperand'
49. Returning from 'UnaryInstruction::getOperand'
50. Returning pointer, which participates in a condition later
202
203 return nullptr;
204 };
205
206 // Both operands of the subtract should be extends from vXi8.
207 Value *Op0 = getZeroExtendedVal(Sub->getOperand(0));
26. Calling 'operator()'
38. Returning from 'operator()'
208 Value *Op1 = getZeroExtendedVal(Sub->getOperand(1));
39. Calling 'operator()'
51. Returning from 'operator()'
209 if (!Op0 || !Op1)
51.1. 'Op0' is non-null
51.2. 'Op1' is non-null
52. Taking false branch
210 return false;
211
212 IRBuilder<> Builder(SI);
213
214 auto *OpTy = cast<FixedVectorType>(Op->getType());
53. The object is a 'FixedVectorType'
215 unsigned NumElts = OpTy->getNumElements();
216
217 unsigned IntrinsicNumElts;
218 Intrinsic::ID IID;
219 if (ST->hasBWI() && NumElts >= 64) {
54. Assuming the condition is false
220 IID = Intrinsic::x86_avx512_psad_bw_512;
221 IntrinsicNumElts = 64;
222 } else if (ST->hasAVX2() && NumElts >= 32) {
223 IID = Intrinsic::x86_avx2_psad_bw;
224 IntrinsicNumElts = 32;
225 } else {
226 IID = Intrinsic::x86_sse2_psad_bw;
227 IntrinsicNumElts = 16;
228 }
229
230 Function *PSADBWFn = Intrinsic::getDeclaration(SI->getModule(), IID);
231
232 if (NumElts < 16) {
55. Assuming 'NumElts' is >= 16
56. Taking false branch
233 // Pad input with zeroes.
234 SmallVector<int, 32> ConcatMask(16);
235 for (unsigned i = 0; i != NumElts; ++i)
236 ConcatMask[i] = i;
237 for (unsigned i = NumElts; i != 16; ++i)
238 ConcatMask[i] = (i % NumElts) + NumElts;
239
240 Value *Zero = Constant::getNullValue(Op0->getType());
241 Op0 = Builder.CreateShuffleVector(Op0, Zero, ConcatMask);
242 Op1 = Builder.CreateShuffleVector(Op1, Zero, ConcatMask);
243 NumElts = 16;
244 }
245
246 // Intrinsics produce vXi64 and need to be casted to vXi32.
247 auto *I32Ty =
248 FixedVectorType::get(Builder.getInt32Ty(), IntrinsicNumElts / 4);
249
250 assert(NumElts % IntrinsicNumElts == 0 && "Unexpected number of elements!");
57. Assuming the condition is true
58. '?' condition is true
251 unsigned NumSplits = NumElts / IntrinsicNumElts;
252
253 // First collect the pieces we need.
254 SmallVector<Value *, 4> Ops(NumSplits);
255 for (unsigned i = 0; i != NumSplits; ++i) {
59. Assuming 'i' is not equal to 'NumSplits'
60. Loop condition is true. Entering loop body
61. Assuming 'i' is equal to 'NumSplits'
62. Loop condition is false. Execution continues on line 264
256 SmallVector<int, 64> ExtractMask(IntrinsicNumElts);
257 std::iota(ExtractMask.begin(), ExtractMask.end(), i * IntrinsicNumElts);
258 Value *ExtractOp0 = Builder.CreateShuffleVector(Op0, Op0, ExtractMask);
259 Value *ExtractOp1 = Builder.CreateShuffleVector(Op1, Op0, ExtractMask);
260 Ops[i] = Builder.CreateCall(PSADBWFn, {ExtractOp0, ExtractOp1});
261 Ops[i] = Builder.CreateBitCast(Ops[i], I32Ty);
262 }
263
264 assert(isPowerOf2_32(NumSplits) && "Expected power of 2 splits");
63. '?' condition is true
265 unsigned Stages = Log2_32(NumSplits);
266 for (unsigned s = Stages; s > 0; --s) {
64. Assuming 's' is <= 0
65. Loop condition is false. Execution continues on line 278
267 unsigned NumConcatElts =
268 cast<FixedVectorType>(Ops[0]->getType())->getNumElements() * 2;
269 for (unsigned i = 0; i != 1U << (s - 1); ++i) {
270 SmallVector<int, 64> ConcatMask(NumConcatElts);
271 std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
272 Ops[i] = Builder.CreateShuffleVector(Ops[i*2], Ops[i*2+1], ConcatMask);
273 }
274 }
275
276 // At this point the final value should be in Ops[0]. Now we need to adjust
277 // it to the final original type.
278 NumElts = cast<FixedVectorType>(OpTy)->getNumElements();
66. 'OpTy' is a 'FixedVectorType'
279 if (NumElts == 2) {
66.1. 'NumElts' is not equal to 2
67. Taking false branch
280 // Extract down to 2 elements.
281 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{0, 1});
282 } else if (NumElts >= 8) {
67.1. 'NumElts' is >= 8
68. Taking true branch
283 SmallVector<int, 32> ConcatMask(NumElts);
284 unsigned SubElts =
285 cast<FixedVectorType>(Ops[0]->getType())->getNumElements();
69. The object is a 'FixedVectorType'
70. 'SubElts' initialized here
286 for (unsigned i = 0; i != SubElts; ++i)
71. Assuming 'i' is equal to 'SubElts'
72. Loop condition is false. Execution continues on line 288
287 ConcatMask[i] = i;
288 for (unsigned i = SubElts; i != NumElts; ++i)
72.1. 'i' is not equal to 'NumElts'
73. Loop condition is true. Entering loop body
289 ConcatMask[i] = (i % SubElts) + SubElts;
74. Division by zero
290
291 Value *Zero = Constant::getNullValue(Ops[0]->getType());
292 Ops[0] = Builder.CreateShuffleVector(Ops[0], Zero, ConcatMask);
293 }
294
295 SI->replaceAllUsesWith(Ops[0]);
296 SI->eraseFromParent();
297
298 return true;
299}
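// For illustration (a sketch, not part of the original file): psadbw computes
// the sum of absolute differences of unsigned bytes, i.e. the vector form of
//
//   unsigned Sum = 0;
//   for (int I = 0; I < N; ++I)
//     Sum += std::abs(A[I] - B[I]); // A, B point to unsigned 8-bit data
//
// which is why the matching above insists on a select implementing abs over
// a sub of two values zero-extended from vXi8.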
300
301// Walk backwards from the ExtractElementInst and determine if it is the end of
302// a horizontal reduction. Return the input to the reduction if we find one.
303static Value *matchAddReduction(const ExtractElementInst &EE) {
304 // Make sure we're extracting index 0.
305 auto *Index = dyn_cast<ConstantInt>(EE.getIndexOperand());
306 if (!Index || !Index->isNullValue())
307 return nullptr;
308
309 const auto *BO = dyn_cast<BinaryOperator>(EE.getVectorOperand());
310 if (!BO || BO->getOpcode() != Instruction::Add || !BO->hasOneUse())
311 return nullptr;
312
313 unsigned NumElems = cast<FixedVectorType>(BO->getType())->getNumElements();
314 // Ensure the reduction size is a power of 2.
315 if (!isPowerOf2_32(NumElems))
316 return nullptr;
317
318 const Value *Op = BO;
319 unsigned Stages = Log2_32(NumElems);
320 for (unsigned i = 0; i != Stages; ++i) {
321 const auto *BO = dyn_cast<BinaryOperator>(Op);
322 if (!BO || BO->getOpcode() != Instruction::Add)
323 return nullptr;
324
325 // If this isn't the first add, then it should only have 2 users, the
326 // shuffle and another add which we checked in the previous iteration.
327 if (i != 0 && !BO->hasNUses(2))
328 return nullptr;
329
330 Value *LHS = BO->getOperand(0);
331 Value *RHS = BO->getOperand(1);
332
333 auto *Shuffle = dyn_cast<ShuffleVectorInst>(LHS);
334 if (Shuffle) {
335 Op = RHS;
336 } else {
337 Shuffle = dyn_cast<ShuffleVectorInst>(RHS);
338 Op = LHS;
339 }
340
341 // The first operand of the shuffle should be the same as the other operand
342 // of the bin op.
343 if (!Shuffle || Shuffle->getOperand(0) != Op)
344 return nullptr;
345
346 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
347 unsigned MaskEnd = 1 << i;
348 for (unsigned Index = 0; Index < MaskEnd; ++Index)
349 if (Shuffle->getMaskValue(Index) != (int)(MaskEnd + Index))
350 return nullptr;
351 }
352
353 return const_cast<Value *>(Op);
354}
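// For illustration (a sketch, not part of the original file): for a <4 x i32>
// input %v, the matched pyramid is
//
//   %s2 = shufflevector %v,  undef, <2, 3, undef, undef>
//   %a2 = add %v, %s2
//   %s1 = shufflevector %a2, undef, <1, undef, undef, undef>
//   %a1 = add %a2, %s1
//   %r  = extractelement %a1, 0
//
// Walking backwards from the extract, stage i checks a shuffle whose first
// 1 << i lanes are MaskEnd + Index, and the function returns %v as the
// reduction input.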
355
356// See if this BO is reachable from this Phi by walking forward through single
357// use BinaryOperators with the same opcode. If we get back then we know we've
358// found a loop and it is safe to step through this Add to find more leaves.
359static bool isReachableFromPHI(PHINode *Phi, BinaryOperator *BO) {
360 // The PHI itself should only have one use.
361 if (!Phi->hasOneUse())
362 return false;
363
364 Instruction *U = cast<Instruction>(*Phi->user_begin());
365 if (U == BO)
366 return true;
367
368 while (U->hasOneUse() && U->getOpcode() == BO->getOpcode())
369 U = cast<Instruction>(*U->user_begin());
370
371 return U == BO;
372}
373
374// Collect all the leaves of the tree of adds that feeds into the horizontal
375// reduction. Root is the Value that is used by the horizontal reduction.
376// We look through single use phis, single use adds, or adds that are used by
377// a phi that forms a loop with the add.
378static void collectLeaves(Value *Root, SmallVectorImpl<Instruction *> &Leaves) {
379 SmallPtrSet<Value *, 8> Visited;
380 SmallVector<Value *, 8> Worklist;
381 Worklist.push_back(Root);
382
383 while (!Worklist.empty()) {
384 Value *V = Worklist.pop_back_val();
385 if (!Visited.insert(V).second)
386 continue;
387
388 if (auto *PN = dyn_cast<PHINode>(V)) {
389 // PHI node should have single use unless it is the root node, then it
390 // has 2 uses.
391 if (!PN->hasNUses(PN == Root ? 2 : 1))
392 break;
393
394 // Push incoming values to the worklist.
395 append_range(Worklist, PN->incoming_values());
396
397 continue;
398 }
399
400 if (auto *BO = dyn_cast<BinaryOperator>(V)) {
401 if (BO->getOpcode() == Instruction::Add) {
402 // Simple case. Single use, just push its operands to the worklist.
403 if (BO->hasNUses(BO == Root ? 2 : 1)) {
404 append_range(Worklist, BO->operands());
405 continue;
406 }
407
408 // If there is additional use, make sure it is an unvisited phi that
409 // gets us back to this node.
410 if (BO->hasNUses(BO == Root ? 3 : 2)) {
411 PHINode *PN = nullptr;
412 for (auto *U : Root->users())
413 if (auto *P = dyn_cast<PHINode>(U))
414 if (!Visited.count(P))
415 PN = P;
416
417 // If we didn't find a 2-input PHI then this isn't a case we can
418 // handle.
419 if (!PN || PN->getNumIncomingValues() != 2)
420 continue;
421
422 // Walk forward from this phi to see if it reaches back to this add.
423 if (!isReachableFromPHI(PN, BO))
424 continue;
425
426 // The phi forms a loop with this Add, push its operands.
427 append_range(Worklist, BO->operands());
428 }
429 }
430 }
431
432 // Not an add or phi, make it a leaf.
433 if (auto *I = dyn_cast<Instruction>(V)) {
434 if (!V->hasNUses(I == Root ? 2 : 1))
435 continue;
436
437 // Add this as a leaf.
438 Leaves.push_back(I);
439 }
440 }
441}
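// For illustration (a sketch, not part of the original file): the extra-use
// case above covers loop-carried reductions of the form
//
//   loop:
//     %sum = phi <4 x i32> [ zeroinitializer, %entry ], [ %sum.next, %loop ]
//     %sum.next = add <4 x i32> %sum, %leaf
//     br i1 %cond, label %loop, label %exit
//
// where %sum.next has a second use in the phi that feeds back to it;
// isReachableFromPHI() walks forward from the phi to confirm that the extra
// use closes the loop before the add's operands are pushed as leaves.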
442
443bool X86PartialReduction::runOnFunction(Function &F) {
444 if (skipFunction(F))
1. Assuming the condition is false
2. Taking false branch
445 return false;
446
447 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
448 if (!TPC)
3. Assuming 'TPC' is non-null
4. Taking false branch
449 return false;
450
451 auto &TM = TPC->getTM<X86TargetMachine>();
452 ST = TM.getSubtargetImpl(F);
453
454 DL = &F.getParent()->getDataLayout();
455
456 bool MadeChange = false;
457 for (auto &BB : F) {
458 for (auto &I : BB) {
459 auto *EE = dyn_cast<ExtractElementInst>(&I);
5. Assuming the object is a 'ExtractElementInst'
460 if (!EE)
5.1. 'EE' is non-null
6. Taking false branch
461 continue;
462
463 // First find a reduction tree.
464 // FIXME: Do we need to handle other opcodes than Add?
465 Value *Root = matchAddReduction(*EE);
466 if (!Root)
6.1. 'Root' is non-null
7. Taking false branch
467 continue;
468
469 SmallVector<Instruction *, 8> Leaves;
470 collectLeaves(Root, Leaves);
471
472 for (Instruction *I : Leaves) {
8. Assuming '__begin3' is not equal to '__end3'
473 if (tryMAddReplacement(I)) {
9. Taking false branch
474 MadeChange = true;
475 continue;
476 }
477
478 // Don't do SAD matching on the root node. SelectionDAG already
479 // has support for that and currently generates better code.
480 if (I != Root && trySADReplacement(I))
10. Assuming 'I' is not equal to 'Root'
11. Calling 'X86PartialReduction::trySADReplacement'
481 MadeChange = true;
482 }
483 }
484 }
485
486 return MadeChange;
487}

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/X86/X86Subtarget.h

1//===-- X86Subtarget.h - Define Subtarget for the X86 ----------*- C++ -*--===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the X86 specific subclass of TargetSubtargetInfo.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_X86_X86SUBTARGET_H
14#define LLVM_LIB_TARGET_X86_X86SUBTARGET_H
15
16#include "X86FrameLowering.h"
17#include "X86ISelLowering.h"
18#include "X86InstrInfo.h"
19#include "X86SelectionDAGInfo.h"
20#include "llvm/ADT/Triple.h"
21#include "llvm/CodeGen/TargetSubtargetInfo.h"
22#include "llvm/IR/CallingConv.h"
23#include <climits>
24#include <memory>
25
26#define GET_SUBTARGETINFO_HEADER
27#include "X86GenSubtargetInfo.inc"
28
29namespace llvm {
30
31class CallLowering;
32class GlobalValue;
33class InstructionSelector;
34class LegalizerInfo;
35class RegisterBankInfo;
36class StringRef;
37class TargetMachine;
38
39/// The X86 backend supports a number of different styles of PIC.
40///
41namespace PICStyles {
42
43enum class Style {
44 StubPIC, // Used on i386-darwin in pic mode.
45 GOT, // Used on 32 bit elf when in pic mode.
46 RIPRel, // Used on X86-64 when in pic mode.
47 None // Set when not in pic mode.
48};
49
50} // end namespace PICStyles
51
52class X86Subtarget final : public X86GenSubtargetInfo {
53 // NOTE: Do not add anything new to this list. Coarse, CPU name based flags
54 // are not a good idea. We should be migrating away from these.
55 enum X86ProcFamilyEnum {
56 Others,
57 IntelAtom,
58 IntelSLM
59 };
60
61 enum X86SSEEnum {
62 NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
63 };
64
65 enum X863DNowEnum {
66 NoThreeDNow, MMX, ThreeDNow, ThreeDNowA
67 };
68
69 /// X86 processor family: Intel Atom, and others
70 X86ProcFamilyEnum X86ProcFamily = Others;
71
72 /// Which PIC style to use
73 PICStyles::Style PICStyle;
74
75 const TargetMachine &TM;
76
77 /// SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, or none supported.
78 X86SSEEnum X86SSELevel = NoSSE;
79
80 /// MMX, 3DNow, 3DNow Athlon, or none supported.
81 X863DNowEnum X863DNowLevel = NoThreeDNow;
82
83 /// True if the processor supports X87 instructions.
84 bool HasX87 = false;
85
86 /// True if the processor supports CMPXCHG8B.
87 bool HasCmpxchg8b = false;
88
89 /// True if this processor has NOPL instruction
90 /// (generally pentium pro+).
91 bool HasNOPL = false;
92
93 /// True if this processor has conditional move instructions
94 /// (generally pentium pro+).
95 bool HasCMov = false;
96
97 /// True if the processor supports X86-64 instructions.
98 bool HasX86_64 = false;
99
100 /// True if the processor supports POPCNT.
101 bool HasPOPCNT = false;
102
103 /// True if the processor supports SSE4A instructions.
104 bool HasSSE4A = false;
105
106 /// Target has AES instructions
107 bool HasAES = false;
108 bool HasVAES = false;
109
110 /// Target has FXSAVE/FXRESTOR instructions
111 bool HasFXSR = false;
112
113 /// Target has XSAVE instructions
114 bool HasXSAVE = false;
115
116 /// Target has XSAVEOPT instructions
117 bool HasXSAVEOPT = false;
118
119 /// Target has XSAVEC instructions
120 bool HasXSAVEC = false;
121
122 /// Target has XSAVES instructions
123 bool HasXSAVES = false;
124
125 /// Target has carry-less multiplication
126 bool HasPCLMUL = false;
127 bool HasVPCLMULQDQ = false;
128
129 /// Target has Galois Field Arithmetic instructions
130 bool HasGFNI = false;
131
132 /// Target has 3-operand fused multiply-add
133 bool HasFMA = false;
134
135 /// Target has 4-operand fused multiply-add
136 bool HasFMA4 = false;
137
138 /// Target has XOP instructions
139 bool HasXOP = false;
140
141 /// Target has TBM instructions.
142 bool HasTBM = false;
143
144 /// Target has LWP instructions
145 bool HasLWP = false;
146
147 /// True if the processor has the MOVBE instruction.
148 bool HasMOVBE = false;
149
150 /// True if the processor has the RDRAND instruction.
151 bool HasRDRAND = false;
152
153 /// Processor has 16-bit floating point conversion instructions.
154 bool HasF16C = false;
155
156 /// Processor has FS/GS base instructions.
157 bool HasFSGSBase = false;
158
159 /// Processor has LZCNT instruction.
160 bool HasLZCNT = false;
161
162 /// Processor has BMI1 instructions.
163 bool HasBMI = false;
164
165 /// Processor has BMI2 instructions.
166 bool HasBMI2 = false;
167
168 /// Processor has VBMI instructions.
169 bool HasVBMI = false;
170
171 /// Processor has VBMI2 instructions.
172 bool HasVBMI2 = false;
173
174 /// Processor has Integer Fused Multiply Add
175 bool HasIFMA = false;
176
177 /// Processor has RTM instructions.
178 bool HasRTM = false;
179
180 /// Processor has ADX instructions.
181 bool HasADX = false;
182
183 /// Processor has SHA instructions.
184 bool HasSHA = false;
185
186 /// Processor has PRFCHW instructions.
187 bool HasPRFCHW = false;
188
189 /// Processor has RDSEED instructions.
190 bool HasRDSEED = false;
191
192 /// Processor has LAHF/SAHF instructions in 64-bit mode.
193 bool HasLAHFSAHF64 = false;
194
195 /// Processor has MONITORX/MWAITX instructions.
196 bool HasMWAITX = false;
197
198 /// Processor has Cache Line Zero instruction
199 bool HasCLZERO = false;
200
201 /// Processor has Cache Line Demote instruction
202 bool HasCLDEMOTE = false;
203
204 /// Processor has MOVDIRI instruction (direct store integer).
205 bool HasMOVDIRI = false;
206
207 /// Processor has MOVDIR64B instruction (direct store 64 bytes).
208 bool HasMOVDIR64B = false;
209
210 /// Processor has ptwrite instruction.
211 bool HasPTWRITE = false;
212
213 /// Processor has Prefetch with intent to Write instruction
214 bool HasPREFETCHWT1 = false;
215
216 /// True if SHLD instructions are slow.
217 bool IsSHLDSlow = false;
218
219 /// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
220 // PMULUDQ.
221 bool IsPMULLDSlow = false;
222
223 /// True if the PMADDWD instruction is slow compared to PMULLD.
224 bool IsPMADDWDSlow = false;
225
226 /// True if unaligned memory accesses of 16-bytes are slow.
227 bool IsUAMem16Slow = false;
228
229 /// True if unaligned memory accesses of 32-bytes are slow.
230 bool IsUAMem32Slow = false;
231
232 /// True if SSE operations can have unaligned memory operands.
233 /// This may require setting a configuration bit in the processor.
234 bool HasSSEUnalignedMem = false;
235
236 /// True if this processor has the CMPXCHG16B instruction;
237 /// this is true for most x86-64 chips, but not the first AMD chips.
238 bool HasCmpxchg16b = false;
239
240 /// True if the LEA instruction should be used for adjusting
241 /// the stack pointer. This is an optimization for Intel Atom processors.
242 bool UseLeaForSP = false;
243
244 /// True if POPCNT instruction has a false dependency on the destination register.
245 bool HasPOPCNTFalseDeps = false;
246
247 /// True if LZCNT/TZCNT instructions have a false dependency on the destination register.
248 bool HasLZCNTFalseDeps = false;
249
250 /// True if its preferable to combine to a single shuffle using a variable
251 /// mask over multiple fixed shuffles.
252 bool HasFastVariableShuffle = false;
253
254 /// True if vzeroupper instructions should be inserted after code that uses
255 /// ymm or zmm registers.
256 bool InsertVZEROUPPER = false;
257
258 /// True if there is no performance penalty for writing NOPs with up to
259 /// 7 bytes.
260 bool HasFast7ByteNOP = false;
261
262 /// True if there is no performance penalty for writing NOPs with up to
263 /// 11 bytes.
264 bool HasFast11ByteNOP = false;
265
266 /// True if there is no performance penalty for writing NOPs with up to
267 /// 15 bytes.
268 bool HasFast15ByteNOP = false;
269
270 /// True if gather is reasonably fast. This is true for Skylake client and
271 /// all AVX-512 CPUs.
272 bool HasFastGather = false;
273
274 /// True if hardware SQRTSS instruction is at least as fast (latency) as
275 /// RSQRTSS followed by a Newton-Raphson iteration.
276 bool HasFastScalarFSQRT = false;
277
278 /// True if hardware SQRTPS/VSQRTPS instructions are at least as fast
279 /// (throughput) as RSQRTPS/VRSQRTPS followed by a Newton-Raphson iteration.
280 bool HasFastVectorFSQRT = false;
281
282 /// True if 8-bit divisions are significantly faster than
283 /// 32-bit divisions and should be used when possible.
284 bool HasSlowDivide32 = false;
285
286 /// True if 32-bit divides are significantly faster than
287 /// 64-bit divisions and should be used when possible.
288 bool HasSlowDivide64 = false;
289
290 /// True if LZCNT instruction is fast.
291 bool HasFastLZCNT = false;
292
293 /// True if SHLD based rotate is fast.
294 bool HasFastSHLDRotate = false;
295
296 /// True if the processor supports macrofusion.
297 bool HasMacroFusion = false;
298
299 /// True if the processor supports branch fusion.
300 bool HasBranchFusion = false;
301
302 /// True if the processor has enhanced REP MOVSB/STOSB.
303 bool HasERMSB = false;
304
305 /// True if the processor has fast short REP MOV.
306 bool HasFSRM = false;
307
308 /// True if the short functions should be padded to prevent
309 /// a stall when returning too early.
310 bool PadShortFunctions = false;
311
312 /// True if two memory operand instructions should use a temporary register
313 /// instead.
314 bool SlowTwoMemOps = false;
315
316 /// True if the LEA instruction inputs have to be ready at address generation
317 /// (AG) time.
318 bool LEAUsesAG = false;
319
320 /// True if the LEA instruction with certain arguments is slow
321 bool SlowLEA = false;
322
323 /// True if the LEA instruction has all three source operands: base, index,
324 /// and offset or if the LEA instruction uses base and index registers where
325 /// the base is EBP, RBP, or R13
326 bool Slow3OpsLEA = false;
327
328 /// True if INC and DEC instructions are slow when writing to flags
329 bool SlowIncDec = false;
330
331 /// Processor has AVX-512 PreFetch Instructions
332 bool HasPFI = false;
333
334 /// Processor has AVX-512 Exponential and Reciprocal Instructions
335 bool HasERI = false;
336
337 /// Processor has AVX-512 Conflict Detection Instructions
338 bool HasCDI = false;
339
340 /// Processor has AVX-512 population count Instructions
341 bool HasVPOPCNTDQ = false;
342
343 /// Processor has AVX-512 Doubleword and Quadword instructions
344 bool HasDQI = false;
345
346 /// Processor has AVX-512 Byte and Word instructions
347 bool HasBWI = false;
348
349 /// Processor has AVX-512 Vector Length eXtensions
350 bool HasVLX = false;
351
352 /// Processor has PKU extenstions
353 bool HasPKU = false;
354
355 /// Processor has AVX-512 Vector Neural Network Instructions
356 bool HasVNNI = false;
357
358 /// Processor has AVX Vector Neural Network Instructions
359 bool HasAVXVNNI = false;
360
361 /// Processor has AVX-512 bfloat16 floating-point extensions
362 bool HasBF16 = false;
363
364 /// Processor supports ENQCMD instructions
365 bool HasENQCMD = false;
366
367 /// Processor has AVX-512 Bit Algorithms instructions
368 bool HasBITALG = false;
369
370 /// Processor has AVX-512 vp2intersect instructions
371 bool HasVP2INTERSECT = false;
372
373 /// Processor supports CET SHSTK - Control-Flow Enforcement Technology
374 /// using Shadow Stack
375 bool HasSHSTK = false;
376
377 /// Processor supports Invalidate Process-Context Identifier
378 bool HasINVPCID = false;
379
380 /// Processor has Software Guard Extensions
381 bool HasSGX = false;
382
383 /// Processor supports Flush Cache Line instruction
384 bool HasCLFLUSHOPT = false;
385
386 /// Processor supports Cache Line Write Back instruction
387 bool HasCLWB = false;
388
389 /// Processor supports Write Back No Invalidate instruction
390 bool HasWBNOINVD = false;
391
392 /// Processor supports RDPID instruction
393 bool HasRDPID = false;
394
395 /// Processor supports WaitPKG instructions
396 bool HasWAITPKG = false;
397
398 /// Processor supports PCONFIG instruction
399 bool HasPCONFIG = false;
400
401 /// Processor supports key locker instructions
402 bool HasKL = false;
403
404 /// Processor supports key locker wide instructions
405 bool HasWIDEKL = false;
406
407 /// Processor supports HRESET instruction
408 bool HasHRESET = false;
409
410 /// Processor supports SERIALIZE instruction
411 bool HasSERIALIZE = false;
412
413 /// Processor supports TSXLDTRK instruction
414 bool HasTSXLDTRK = false;
415
416 /// Processor has AMX support
417 bool HasAMXTILE = false;
418 bool HasAMXBF16 = false;
419 bool HasAMXINT8 = false;
420
421 /// Processor supports User Level Interrupt instructions
422 bool HasUINTR = false;
423
424 /// Processor has a single uop BEXTR implementation.
425 bool HasFastBEXTR = false;
426
427 /// Try harder to combine to horizontal vector ops if they are fast.
428 bool HasFastHorizontalOps = false;
429
430 /// Prefer a left/right scalar logical shifts pair over a shift+and pair.
431 bool HasFastScalarShiftMasks = false;
432
433 /// Prefer a left/right vector logical shifts pair over a shift+and pair.
434 bool HasFastVectorShiftMasks = false;
435
436 /// Use a retpoline thunk rather than indirect calls to block speculative
437 /// execution.
438 bool UseRetpolineIndirectCalls = false;
439
440 /// Use a retpoline thunk or remove any indirect branch to block speculative
441 /// execution.
442 bool UseRetpolineIndirectBranches = false;
443
444 /// Deprecated flag, query `UseRetpolineIndirectCalls` and
445 /// `UseRetpolineIndirectBranches` instead.
446 bool DeprecatedUseRetpoline = false;
447
448 /// When using a retpoline thunk, call an externally provided thunk rather
449 /// than emitting one inside the compiler.
450 bool UseRetpolineExternalThunk = false;
451
452 /// Prevent generation of indirect call/branch instructions from memory,
453 /// and force all indirect call/branch instructions from a register to be
454 /// preceded by an LFENCE. Also decompose RET instructions into a
455 /// POP+LFENCE+JMP sequence.
456 bool UseLVIControlFlowIntegrity = false;
457
458 /// Enable Speculative Execution Side Effect Suppression
459 bool UseSpeculativeExecutionSideEffectSuppression = false;
460
461 /// Insert LFENCE instructions to prevent data speculatively injected into
462 /// loads from being used maliciously.
463 bool UseLVILoadHardening = false;
464
465 /// Use software floating point for code generation.
466 bool UseSoftFloat = false;
467
468 /// Use alias analysis during code generation.
469 bool UseAA = false;
470
471 /// The minimum alignment known to hold of the stack frame on
472 /// entry to the function and which must be maintained by every function.
473 Align stackAlignment = Align(4);
474
475 Align TileConfigAlignment = Align(4);
476
477 /// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
478 ///
479 // FIXME: this is a known good value for Yonah. How about others?
480 unsigned MaxInlineSizeThreshold = 128;
481
482 /// Indicates target prefers 128 bit instructions.
483 bool Prefer128Bit = false;
484
485 /// Indicates target prefers 256 bit instructions.
486 bool Prefer256Bit = false;
487
488 /// Indicates target prefers AVX512 mask registers.
489 bool PreferMaskRegisters = false;
490
491 /// Use Goldmont specific floating point div/sqrt costs.
492 bool UseGLMDivSqrtCosts = false;
493
494 /// What processor and OS we're targeting.
495 Triple TargetTriple;
496
497 /// GlobalISel related APIs.
498 std::unique_ptr<CallLowering> CallLoweringInfo;
499 std::unique_ptr<LegalizerInfo> Legalizer;
500 std::unique_ptr<RegisterBankInfo> RegBankInfo;
501 std::unique_ptr<InstructionSelector> InstSelector;
502
503private:
504 /// Override the stack alignment.
505 MaybeAlign StackAlignOverride;
506
507 /// Preferred vector width from function attribute.
508 unsigned PreferVectorWidthOverride;
509
510 /// Resolved preferred vector width from function attribute and subtarget
511 /// features.
512 unsigned PreferVectorWidth = UINT32_MAX;
513
514 /// Required vector width from function attribute.
515 unsigned RequiredVectorWidth;
516
517 /// True if compiling for 64-bit, false for 16-bit or 32-bit.
518 bool In64BitMode = false;
519
520 /// True if compiling for 32-bit, false for 16-bit or 64-bit.
521 bool In32BitMode = false;
522
523 /// True if compiling for 16-bit, false for 32-bit or 64-bit.
524 bool In16BitMode = false;
525
526 X86SelectionDAGInfo TSInfo;
527 // Ordering here is important. X86InstrInfo initializes X86RegisterInfo which
528 // X86TargetLowering needs.
529 X86InstrInfo InstrInfo;
530 X86TargetLowering TLInfo;
531 X86FrameLowering FrameLowering;
532
533public:
534 /// This constructor initializes the data members to match that
535 /// of the specified triple.
536 ///
537 X86Subtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS,
538 const X86TargetMachine &TM, MaybeAlign StackAlignOverride,
539 unsigned PreferVectorWidthOverride,
540 unsigned RequiredVectorWidth);
541
542 const X86TargetLowering *getTargetLowering() const override {
543 return &TLInfo;
544 }
545
546 const X86InstrInfo *getInstrInfo() const override { return &InstrInfo; }
547
548 const X86FrameLowering *getFrameLowering() const override {
549 return &FrameLowering;
550 }
551
552 const X86SelectionDAGInfo *getSelectionDAGInfo() const override {
553 return &TSInfo;
554 }
555
556 const X86RegisterInfo *getRegisterInfo() const override {
557 return &getInstrInfo()->getRegisterInfo();
558 }
559
560 unsigned getTileConfigSize() const { return 64; }
561 Align getTileConfigAlignment() const { return TileConfigAlignment; }
562
563 /// Returns the minimum alignment known to hold of the
564 /// stack frame on entry to the function and which must be maintained by every
565 /// function for this subtarget.
566 Align getStackAlignment() const { return stackAlignment; }
567
568 /// Returns the maximum memset / memcpy size
569 /// that still makes it profitable to inline the call.
570 unsigned getMaxInlineSizeThreshold() const { return MaxInlineSizeThreshold; }
571
572 /// ParseSubtargetFeatures - Parses features string setting specified
573 /// subtarget options. Definition of function is auto generated by tblgen.
574 void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
575
576 /// Methods used by Global ISel
577 const CallLowering *getCallLowering() const override;
578 InstructionSelector *getInstructionSelector() const override;
579 const LegalizerInfo *getLegalizerInfo() const override;
580 const RegisterBankInfo *getRegBankInfo() const override;
581
582private:
583 /// Initialize the full set of dependencies so we can use an initializer
584 /// list for X86Subtarget.
585 X86Subtarget &initializeSubtargetDependencies(StringRef CPU,
586 StringRef TuneCPU,
587 StringRef FS);
588 void initSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
589
590public:
591 /// Is this x86_64? (disregarding specific ABI / programming model)
592 bool is64Bit() const {
593 return In64BitMode;
594 }
595
596 bool is32Bit() const {
597 return In32BitMode;
598 }
599
600 bool is16Bit() const {
601 return In16BitMode;
602 }
603
604 /// Is this x86_64 with the ILP32 programming model (x32 ABI)?
605 bool isTarget64BitILP32() const {
606 return In64BitMode && (TargetTriple.getEnvironment() == Triple::GNUX32 ||
607 TargetTriple.isOSNaCl());
608 }
609
610 /// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
611 bool isTarget64BitLP64() const {
612 return In64BitMode && (TargetTriple.getEnvironment() != Triple::GNUX32 &&
613 !TargetTriple.isOSNaCl());
614 }
615
616 PICStyles::Style getPICStyle() const { return PICStyle; }
617 void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
618
619 bool hasX87() const { return HasX87; }
620 bool hasCmpxchg8b() const { return HasCmpxchg8b; }
621 bool hasNOPL() const { return HasNOPL; }
622 // SSE codegen depends on cmovs, and all SSE1+ processors support them.
623 // All 64-bit processors support cmov.
624 bool hasCMov() const { return HasCMov || X86SSELevel >= SSE1 || is64Bit(); }
625 bool hasSSE1() const { return X86SSELevel >= SSE1; }
626 bool hasSSE2() const { return X86SSELevel >= SSE2; }
12.1. Field 'X86SSELevel' is >= SSE2
13. Returning the value 1, which participates in a condition later
627 bool hasSSE3() const { return X86SSELevel >= SSE3; }
628 bool hasSSSE3() const { return X86SSELevel >= SSSE3; }
629 bool hasSSE41() const { return X86SSELevel >= SSE41; }
630 bool hasSSE42() const { return X86SSELevel >= SSE42; }
631 bool hasAVX() const { return X86SSELevel >= AVX; }
632 bool hasAVX2() const { return X86SSELevel >= AVX2; }
633 bool hasAVX512() const { return X86SSELevel >= AVX512F; }
634 bool hasInt256() const { return hasAVX2(); }
635 bool hasSSE4A() const { return HasSSE4A; }
636 bool hasMMX() const { return X863DNowLevel >= MMX; }
637 bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
638 bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
639 bool hasPOPCNT() const { return HasPOPCNT; }
640 bool hasAES() const { return HasAES; }
641 bool hasVAES() const { return HasVAES; }
642 bool hasFXSR() const { return HasFXSR; }
643 bool hasXSAVE() const { return HasXSAVE; }
644 bool hasXSAVEOPT() const { return HasXSAVEOPT; }
645 bool hasXSAVEC() const { return HasXSAVEC; }
646 bool hasXSAVES() const { return HasXSAVES; }
647 bool hasPCLMUL() const { return HasPCLMUL; }
648 bool hasVPCLMULQDQ() const { return HasVPCLMULQDQ; }
649 bool hasGFNI() const { return HasGFNI; }
650 // Prefer FMA4 to FMA - it's better for commutation/memory folding and
651 // has equal or better performance on all supported targets.
652 bool hasFMA() const { return HasFMA; }
653 bool hasFMA4() const { return HasFMA4; }
654 bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
655 bool hasXOP() const { return HasXOP; }
656 bool hasTBM() const { return HasTBM; }
657 bool hasLWP() const { return HasLWP; }
658 bool hasMOVBE() const { return HasMOVBE; }
659 bool hasRDRAND() const { return HasRDRAND; }
660 bool hasF16C() const { return HasF16C; }
661 bool hasFSGSBase() const { return HasFSGSBase; }
662 bool hasLZCNT() const { return HasLZCNT; }
663 bool hasBMI() const { return HasBMI; }
664 bool hasBMI2() const { return HasBMI2; }
665 bool hasVBMI() const { return HasVBMI; }
666 bool hasVBMI2() const { return HasVBMI2; }
667 bool hasIFMA() const { return HasIFMA; }
668 bool hasRTM() const { return HasRTM; }
669 bool hasADX() const { return HasADX; }
670 bool hasSHA() const { return HasSHA; }
671 bool hasPRFCHW() const { return HasPRFCHW; }
672 bool hasPREFETCHWT1() const { return HasPREFETCHWT1; }
673 bool hasPrefetchW() const {
674 // The PREFETCHW instruction was added with 3DNow but later CPUs gave it
675 // its own CPUID bit as part of deprecating 3DNow. Intel eventually added
676 // it and KNL has another that prefetches to L2 cache. We assume the
677 // L1 version exists if the L2 version does.
678 return has3DNow() || hasPRFCHW() || hasPREFETCHWT1();
679 }
680 bool hasSSEPrefetch() const {
681 // We implicitly enable these when we have a write prefix supporting cache
682 // level OR if we have prfchw, but don't already have a read prefetch from
683 // 3dnow.
684 return hasSSE1() || (hasPRFCHW() && !has3DNow()) || hasPREFETCHWT1();
685 }
686 bool hasRDSEED() const { return HasRDSEED; }
687 bool hasLAHFSAHF() const { return HasLAHFSAHF64 || !is64Bit(); }
688 bool hasMWAITX() const { return HasMWAITX; }
689 bool hasCLZERO() const { return HasCLZERO; }
690 bool hasCLDEMOTE() const { return HasCLDEMOTE; }
691 bool hasMOVDIRI() const { return HasMOVDIRI; }
692 bool hasMOVDIR64B() const { return HasMOVDIR64B; }
693 bool hasPTWRITE() const { return HasPTWRITE; }
694 bool isSHLDSlow() const { return IsSHLDSlow; }
695 bool isPMULLDSlow() const { return IsPMULLDSlow; }
696 bool isPMADDWDSlow() const { return IsPMADDWDSlow; }
697 bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
698 bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
699 bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
700 bool hasCmpxchg16b() const { return HasCmpxchg16b && is64Bit(); }
701 bool useLeaForSP() const { return UseLeaForSP; }
702 bool hasPOPCNTFalseDeps() const { return HasPOPCNTFalseDeps; }
703 bool hasLZCNTFalseDeps() const { return HasLZCNTFalseDeps; }
704 bool hasFastVariableShuffle() const {
705 return HasFastVariableShuffle;
706 }
707 bool insertVZEROUPPER() const { return InsertVZEROUPPER; }
708 bool hasFastGather() const { return HasFastGather; }
709 bool hasFastScalarFSQRT() const { return HasFastScalarFSQRT; }
710 bool hasFastVectorFSQRT() const { return HasFastVectorFSQRT; }
711 bool hasFastLZCNT() const { return HasFastLZCNT; }
712 bool hasFastSHLDRotate() const { return HasFastSHLDRotate; }
713 bool hasFastBEXTR() const { return HasFastBEXTR; }
714 bool hasFastHorizontalOps() const { return HasFastHorizontalOps; }
715 bool hasFastScalarShiftMasks() const { return HasFastScalarShiftMasks; }
716 bool hasFastVectorShiftMasks() const { return HasFastVectorShiftMasks; }
717 bool hasMacroFusion() const { return HasMacroFusion; }
718 bool hasBranchFusion() const { return HasBranchFusion; }
719 bool hasERMSB() const { return HasERMSB; }
720 bool hasFSRM() const { return HasFSRM; }
721 bool hasSlowDivide32() const { return HasSlowDivide32; }
722 bool hasSlowDivide64() const { return HasSlowDivide64; }
723 bool padShortFunctions() const { return PadShortFunctions; }
724 bool slowTwoMemOps() const { return SlowTwoMemOps; }
725 bool LEAusesAG() const { return LEAUsesAG; }
726 bool slowLEA() const { return SlowLEA; }
727 bool slow3OpsLEA() const { return Slow3OpsLEA; }
728 bool slowIncDec() const { return SlowIncDec; }
729 bool hasCDI() const { return HasCDI; }
730 bool hasVPOPCNTDQ() const { return HasVPOPCNTDQ; }
731 bool hasPFI() const { return HasPFI; }
732 bool hasERI() const { return HasERI; }
733 bool hasDQI() const { return HasDQI; }
734 bool hasBWI() const { return HasBWI; }
735 bool hasVLX() const { return HasVLX; }
736 bool hasPKU() const { return HasPKU; }
737 bool hasVNNI() const { return HasVNNI; }
738 bool hasBF16() const { return HasBF16; }
739 bool hasVP2INTERSECT() const { return HasVP2INTERSECT; }
740 bool hasBITALG() const { return HasBITALG; }
741 bool hasSHSTK() const { return HasSHSTK; }
742 bool hasCLFLUSHOPT() const { return HasCLFLUSHOPT; }
743 bool hasCLWB() const { return HasCLWB; }
744 bool hasWBNOINVD() const { return HasWBNOINVD; }
745 bool hasRDPID() const { return HasRDPID; }
746 bool hasWAITPKG() const { return HasWAITPKG; }
747 bool hasPCONFIG() const { return HasPCONFIG; }
748 bool hasSGX() const { return HasSGX; }
749 bool hasINVPCID() const { return HasINVPCID; }
750 bool hasENQCMD() const { return HasENQCMD; }
751 bool hasKL() const { return HasKL; }
752 bool hasWIDEKL() const { return HasWIDEKL; }
753 bool hasHRESET() const { return HasHRESET; }
754 bool hasSERIALIZE() const { return HasSERIALIZE; }
755 bool hasTSXLDTRK() const { return HasTSXLDTRK; }
756 bool hasUINTR() const { return HasUINTR; }
757 bool useRetpolineIndirectCalls() const { return UseRetpolineIndirectCalls; }
758 bool useRetpolineIndirectBranches() const {
759 return UseRetpolineIndirectBranches;
760 }
761 bool hasAVXVNNI() const { return HasAVXVNNI; }
762 bool hasAMXTILE() const { return HasAMXTILE; }
763 bool hasAMXBF16() const { return HasAMXBF16; }
764 bool hasAMXINT8() const { return HasAMXINT8; }
765 bool useRetpolineExternalThunk() const { return UseRetpolineExternalThunk; }
766
767 // These are generic getters that OR together all of the thunk types
768 // supported by the subtarget. Therefore useIndirectThunk*() will return true
769 // if any respective thunk feature is enabled.
770 bool useIndirectThunkCalls() const {
771 return useRetpolineIndirectCalls() || useLVIControlFlowIntegrity();
772 }
773 bool useIndirectThunkBranches() const {
774 return useRetpolineIndirectBranches() || useLVIControlFlowIntegrity();
775 }
776
777 bool preferMaskRegisters() const { return PreferMaskRegisters; }
778 bool useGLMDivSqrtCosts() const { return UseGLMDivSqrtCosts; }
779 bool useLVIControlFlowIntegrity() const { return UseLVIControlFlowIntegrity; }
780 bool useLVILoadHardening() const { return UseLVILoadHardening; }
781 bool useSpeculativeExecutionSideEffectSuppression() const {
782 return UseSpeculativeExecutionSideEffectSuppression;
783 }
784
785 unsigned getPreferVectorWidth() const { return PreferVectorWidth; }
786 unsigned getRequiredVectorWidth() const { return RequiredVectorWidth; }
787
788 // Helper functions to determine when we should allow widening to 512-bit
789 // during codegen.
790 // TODO: Currently we're always allowing widening on CPUs without VLX,
791 // because for many cases we don't have a better option.
792 bool canExtendTo512DQ() const {
793 return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
794 }
795 bool canExtendTo512BW() const {
796 return hasBWI() && canExtendTo512DQ();
797 }
798
799 // If there are no 512-bit vectors and we prefer not to use 512-bit registers,
800 // disable them in the legalizer.
801 bool useAVX512Regs() const {
802 return hasAVX512() && (canExtendTo512DQ() || RequiredVectorWidth > 256);
803 }
804
805 bool useBWIRegs() const {
806 return hasBWI() && useAVX512Regs();
807 }
808
809 bool isXRaySupported() const override { return is64Bit(); }
810
811 /// TODO: to be removed later and replaced with suitable properties
812 bool isAtom() const { return X86ProcFamily == IntelAtom; }
813 bool isSLM() const { return X86ProcFamily == IntelSLM; }
814 bool useSoftFloat() const { return UseSoftFloat; }
815 bool useAA() const override { return UseAA; }
816
817 /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
818 /// no-sse2). There isn't any reason to disable it if the target processor
819 /// supports it.
820 bool hasMFence() const { return hasSSE2() || is64Bit(); }
821
822 const Triple &getTargetTriple() const { return TargetTriple; }
823
824 bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
825 bool isTargetFreeBSD() const { return TargetTriple.isOSFreeBSD(); }
826 bool isTargetDragonFly() const { return TargetTriple.isOSDragonFly(); }
827 bool isTargetSolaris() const { return TargetTriple.isOSSolaris(); }
828 bool isTargetPS4() const { return TargetTriple.isPS4CPU(); }
829
830 bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
831 bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
832 bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
833
834 bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
835 bool isTargetKFreeBSD() const { return TargetTriple.isOSKFreeBSD(); }
836 bool isTargetGlibc() const { return TargetTriple.isOSGlibc(); }
837 bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
838 bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
839 bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
840 bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
841 bool isTargetMCU() const { return TargetTriple.isOSIAMCU(); }
842 bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }
843
844 bool isTargetWindowsMSVC() const {
845 return TargetTriple.isWindowsMSVCEnvironment();
846 }
847
848 bool isTargetWindowsCoreCLR() const {
849 return TargetTriple.isWindowsCoreCLREnvironment();
850 }
851
852 bool isTargetWindowsCygwin() const {
853 return TargetTriple.isWindowsCygwinEnvironment();
854 }
855
856 bool isTargetWindowsGNU() const {
857 return TargetTriple.isWindowsGNUEnvironment();
858 }
859
860 bool isTargetWindowsItanium() const {
861 return TargetTriple.isWindowsItaniumEnvironment();
862 }
863
864 bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
865
866 bool isOSWindows() const { return TargetTriple.isOSWindows(); }
867
868 bool isTargetWin64() const { return In64BitMode && isOSWindows(); }
869
870 bool isTargetWin32() const { return !In64BitMode && isOSWindows(); }
871
872 bool isPICStyleGOT() const { return PICStyle == PICStyles::Style::GOT; }
873 bool isPICStyleRIPRel() const { return PICStyle == PICStyles::Style::RIPRel; }
874
875 bool isPICStyleStubPIC() const {
876 return PICStyle == PICStyles::Style::StubPIC;
877 }
878
879 bool isPositionIndependent() const;
880
881 bool isCallingConvWin64(CallingConv::ID CC) const {
882 switch (CC) {
883 // On Win64, all these conventions just use the default convention.
884 case CallingConv::C:
885 case CallingConv::Fast:
886 case CallingConv::Tail:
887 case CallingConv::Swift:
888 case CallingConv::X86_FastCall:
889 case CallingConv::X86_StdCall:
890 case CallingConv::X86_ThisCall:
891 case CallingConv::X86_VectorCall:
892 case CallingConv::Intel_OCL_BI:
893 return isTargetWin64();
894 // This convention allows using the Win64 convention on other targets.
895 case CallingConv::Win64:
896 return true;
897 // This convention allows using the SysV convention on Windows targets.
898 case CallingConv::X86_64_SysV:
899 return false;
900 // Otherwise, who knows what this is.
901 default:
902 return false;
903 }
904 }
905
906 /// Classify a global variable reference for the current subtarget according
907 /// to how we should reference it in a non-pcrel context.
908 unsigned char classifyLocalReference(const GlobalValue *GV) const;
909
910 unsigned char classifyGlobalReference(const GlobalValue *GV,
911 const Module &M) const;
912 unsigned char classifyGlobalReference(const GlobalValue *GV) const;
913
914 /// Classify a global function reference for the current subtarget.
915 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
916 const Module &M) const;
917 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;
918
919 /// Classify a blockaddress reference for the current subtarget according to
920 /// how we should reference it in a non-pcrel context.
921 unsigned char classifyBlockAddressReference() const;
922
923 /// Return true if the subtarget allows calls to immediate address.
924 bool isLegalToCallImmediateAddr() const;
925
926 /// If we are using indirect thunks, we need to expand indirectbr to avoid it
927 /// lowering to an actual indirect jump.
928 bool enableIndirectBrExpand() const override {
929 return useIndirectThunkBranches();
930 }
931
932 /// Enable the MachineScheduler pass for all X86 subtargets.
933 bool enableMachineScheduler() const override { return true; }
934
935 bool enableEarlyIfConversion() const override;
936
937 void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
938 &Mutations) const override;
939
940 AntiDepBreakMode getAntiDepBreakMode() const override {
941 return TargetSubtargetInfo::ANTIDEP_CRITICAL;
942 }
943
944 bool enableAdvancedRASplitCost() const override { return true; }
945};
946
947} // end namespace llvm
948
949#endif // LLVM_LIB_TARGET_X86_X86SUBTARGET_H
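
To make the calling-convention query above concrete, here is a minimal, hypothetical sketch. X86Subtarget.h is internal to the X86 backend, so the include path and the helper name are assumptions, not part of the listing:

#include "X86Subtarget.h" // backend-internal header; path is an assumption
using namespace llvm;

// For the plain C convention the answer depends on the target (true only
// when compiling for Win64); CallingConv::Win64 and CallingConv::X86_64_SysV
// pin the answer regardless of target, per the switch above.
static bool argsUseWin64Convention(const X86Subtarget &ST,
                                   CallingConv::ID CC) {
  return ST.isCallingConvWin64(CC);
}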

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/IR/InstrTypes.h

1//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines various meta classes of instructions that exist in the VM
10// representation. Specific concrete subclasses of these may be found in the
11// i*.h files...
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRTYPES_H
16#define LLVM_IR_INSTRTYPES_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/OperandTraits.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/User.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/Casting.h"
38#include "llvm/Support/ErrorHandling.h"
39#include <algorithm>
40#include <cassert>
41#include <cstddef>
42#include <cstdint>
43#include <iterator>
44#include <string>
45#include <vector>
46
47namespace llvm {
48
49namespace Intrinsic {
50typedef unsigned ID;
51}
52
53//===----------------------------------------------------------------------===//
54// UnaryInstruction Class
55//===----------------------------------------------------------------------===//
56
57class UnaryInstruction : public Instruction {
58protected:
59 UnaryInstruction(Type *Ty, unsigned iType, Value *V,
60 Instruction *IB = nullptr)
61 : Instruction(Ty, iType, &Op<0>(), 1, IB) {
62 Op<0>() = V;
63 }
64 UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
65 : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
66 Op<0>() = V;
67 }
68
69public:
70 // allocate space for exactly one operand
71 void *operator new(size_t s) {
72 return User::operator new(s, 1);
73 }
74
75 /// Transparently provide more efficient getOperand methods.
76 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
77
78 // Methods for support type inquiry through isa, cast, and dyn_cast:
79 static bool classof(const Instruction *I) {
80 return I->isUnaryOp() ||
81 I->getOpcode() == Instruction::Alloca ||
82 I->getOpcode() == Instruction::Load ||
83 I->getOpcode() == Instruction::VAArg ||
84 I->getOpcode() == Instruction::ExtractValue ||
85 (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
86 }
87 static bool classof(const Value *V) {
88 return isa<Instruction>(V) && classof(cast<Instruction>(V));
89 }
90};
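
Since classof() is what drives isa<> and dyn_cast<>, here is a short illustrative sketch of the usual client pattern (the helper name is hypothetical):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Loads, allocas, casts, va_arg and extractvalue all satisfy
// UnaryInstruction::classof, so this peels the single operand off any of them.
static Value *peelSingleOperand(Value *V) {
  if (auto *UI = dyn_cast<UnaryInstruction>(V))
    return UI->getOperand(0);
  return V;
}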
91
92template <>
93struct OperandTraits<UnaryInstruction> :
94 public FixedNumOperandTraits<UnaryInstruction, 1> {
95};
96
97DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
Analyzer path events:
  33. '?' condition is true
  34. The object is a 'Value'
  35. Returning pointer, which participates in a condition later
  46. '?' condition is true
  47. The object is a 'Value'
  48. Returning pointer, which participates in a condition later
98
99//===----------------------------------------------------------------------===//
100// UnaryOperator Class
101//===----------------------------------------------------------------------===//
102
103class UnaryOperator : public UnaryInstruction {
104 void AssertOK();
105
106protected:
107 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
108 const Twine &Name, Instruction *InsertBefore);
109 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
110 const Twine &Name, BasicBlock *InsertAtEnd);
111
112 // Note: Instruction needs to be a friend here to call cloneImpl.
113 friend class Instruction;
114
115 UnaryOperator *cloneImpl() const;
116
117public:
118
119 /// Construct a unary instruction, given the opcode and an operand.
120 /// Optionally (if InstBefore is specified) insert the instruction
121 /// into a BasicBlock right before the specified instruction. The specified
122 /// Instruction is allowed to be a dereferenced end iterator.
123 ///
124 static UnaryOperator *Create(UnaryOps Op, Value *S,
125 const Twine &Name = Twine(),
126 Instruction *InsertBefore = nullptr);
127
128 /// Construct a unary instruction, given the opcode and an operand.
129 /// Also automatically insert this instruction to the end of the
130 /// BasicBlock specified.
131 ///
132 static UnaryOperator *Create(UnaryOps Op, Value *S,
133 const Twine &Name,
134 BasicBlock *InsertAtEnd);
135
136 /// These methods just forward to Create, and are useful when you
137 /// statically know what type of instruction you're going to create. These
138 /// helpers just save some typing.
139#define HANDLE_UNARY_INST(N, OPC, CLASS) \
140 static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
141 return Create(Instruction::OPC, V, Name);\
142 }
143#include "llvm/IR/Instruction.def"
144#define HANDLE_UNARY_INST(N, OPC, CLASS) \
145 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
146 BasicBlock *BB) {\
147 return Create(Instruction::OPC, V, Name, BB);\
148 }
149#include "llvm/IR/Instruction.def"
150#define HANDLE_UNARY_INST(N, OPC, CLASS) \
151 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
152 Instruction *I) {\
153 return Create(Instruction::OPC, V, Name, I);\
154 }
155#include "llvm/IR/Instruction.def"
156
157 static UnaryOperator *
158 CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO,
159 const Twine &Name = "",
160 Instruction *InsertBefore = nullptr) {
161 UnaryOperator *UO = Create(Opc, V, Name, InsertBefore);
162 UO->copyIRFlags(CopyO);
163 return UO;
164 }
165
166 static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
167 const Twine &Name = "",
168 Instruction *InsertBefore = nullptr) {
169 return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name,
170 InsertBefore);
171 }
172
173 UnaryOps getOpcode() const {
174 return static_cast<UnaryOps>(Instruction::getOpcode());
175 }
176
177 // Methods for support type inquiry through isa, cast, and dyn_cast:
178 static bool classof(const Instruction *I) {
179 return I->isUnaryOp();
180 }
181 static bool classof(const Value *V) {
182 return isa<Instruction>(V) && classof(cast<Instruction>(V));
183 }
184};
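
A hedged usage sketch for the creators above: CreateFNeg comes from the HANDLE_UNARY_INST expansion, and CreateFNegFMF additionally copies fast-math flags from an existing instruction (the helper name is illustrative):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Negate X, inheriting fast-math flags from FMFSource when one is given.
static Value *negateLike(Value *X, Instruction *FMFSource,
                         Instruction *InsertPt) {
  if (FMFSource)
    return UnaryOperator::CreateFNegFMF(X, FMFSource, "neg", InsertPt);
  return UnaryOperator::CreateFNeg(X, "neg", InsertPt);
}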
185
186//===----------------------------------------------------------------------===//
187// BinaryOperator Class
188//===----------------------------------------------------------------------===//
189
190class BinaryOperator : public Instruction {
191 void AssertOK();
192
193protected:
194 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
195 const Twine &Name, Instruction *InsertBefore);
196 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
197 const Twine &Name, BasicBlock *InsertAtEnd);
198
199 // Note: Instruction needs to be a friend here to call cloneImpl.
200 friend class Instruction;
201
202 BinaryOperator *cloneImpl() const;
203
204public:
205 // allocate space for exactly two operands
206 void *operator new(size_t s) {
207 return User::operator new(s, 2);
208 }
209
210 /// Transparently provide more efficient getOperand methods.
211 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
212
213 /// Construct a binary instruction, given the opcode and the two
214 /// operands. Optionally (if InstBefore is specified) insert the instruction
215 /// into a BasicBlock right before the specified instruction. The specified
216 /// Instruction is allowed to be a dereferenced end iterator.
217 ///
218 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
219 const Twine &Name = Twine(),
220 Instruction *InsertBefore = nullptr);
221
222 /// Construct a binary instruction, given the opcode and the two
223 /// operands. Also automatically insert this instruction to the end of the
224 /// BasicBlock specified.
225 ///
226 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
227 const Twine &Name, BasicBlock *InsertAtEnd);
228
229 /// These methods just forward to Create, and are useful when you
230 /// statically know what type of instruction you're going to create. These
231 /// helpers just save some typing.
232#define HANDLE_BINARY_INST(N, OPC, CLASS) \
233 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
234 const Twine &Name = "") {\
235 return Create(Instruction::OPC, V1, V2, Name);\
236 }
237#include "llvm/IR/Instruction.def"
238#define HANDLE_BINARY_INST(N, OPC, CLASS) \
239 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
240 const Twine &Name, BasicBlock *BB) {\
241 return Create(Instruction::OPC, V1, V2, Name, BB);\
242 }
243#include "llvm/IR/Instruction.def"
244#define HANDLE_BINARY_INST(N, OPC, CLASS) \
245 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
246 const Twine &Name, Instruction *I) {\
247 return Create(Instruction::OPC, V1, V2, Name, I);\
248 }
249#include "llvm/IR/Instruction.def"
250
251 static BinaryOperator *CreateWithCopiedFlags(BinaryOps Opc,
252 Value *V1, Value *V2,
253 Instruction *CopyO,
254 const Twine &Name = "") {
255 BinaryOperator *BO = Create(Opc, V1, V2, Name);
256 BO->copyIRFlags(CopyO);
257 return BO;
258 }
259
260 static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
261 Instruction *FMFSource,
262 const Twine &Name = "") {
263 return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
264 }
265 static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
266 Instruction *FMFSource,
267 const Twine &Name = "") {
268 return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
269 }
270 static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
271 Instruction *FMFSource,
272 const Twine &Name = "") {
273 return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
274 }
275 static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
276 Instruction *FMFSource,
277 const Twine &Name = "") {
278 return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
279 }
280 static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
281 Instruction *FMFSource,
282 const Twine &Name = "") {
283 return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
284 }
285
286 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
287 const Twine &Name = "") {
288 BinaryOperator *BO = Create(Opc, V1, V2, Name);
289 BO->setHasNoSignedWrap(true);
290 return BO;
291 }
292 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
293 const Twine &Name, BasicBlock *BB) {
294 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
295 BO->setHasNoSignedWrap(true);
296 return BO;
297 }
298 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
299 const Twine &Name, Instruction *I) {
300 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
301 BO->setHasNoSignedWrap(true);
302 return BO;
303 }
304
305 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
306 const Twine &Name = "") {
307 BinaryOperator *BO = Create(Opc, V1, V2, Name);
308 BO->setHasNoUnsignedWrap(true);
309 return BO;
310 }
311 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
312 const Twine &Name, BasicBlock *BB) {
313 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
314 BO->setHasNoUnsignedWrap(true);
315 return BO;
316 }
317 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
318 const Twine &Name, Instruction *I) {
319 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
320 BO->setHasNoUnsignedWrap(true);
321 return BO;
322 }
323
324 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
325 const Twine &Name = "") {
326 BinaryOperator *BO = Create(Opc, V1, V2, Name);
327 BO->setIsExact(true);
328 return BO;
329 }
330 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
331 const Twine &Name, BasicBlock *BB) {
332 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
333 BO->setIsExact(true);
334 return BO;
335 }
336 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
337 const Twine &Name, Instruction *I) {
338 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
339 BO->setIsExact(true);
340 return BO;
341 }
342
343#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
344 static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
345 const Twine &Name = "") { \
346 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
347 } \
348 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
349 Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
350 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
351 } \
352 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
353 Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
354 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
355 }
356
357 DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
358 DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
359 DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
360 DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
361 DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
362 DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
363 DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
364 DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
365
366 DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
367 DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
368 DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
369 DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
370
371#undef DEFINE_HELPERS
372
373 /// Helper functions to construct and inspect unary operations (NEG and NOT)
374 /// via binary operators SUB and XOR:
375 ///
376 /// Create the NEG and NOT instructions out of SUB and XOR instructions.
377 ///
378 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
379 Instruction *InsertBefore = nullptr);
380 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
381 BasicBlock *InsertAtEnd);
382 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
383 Instruction *InsertBefore = nullptr);
384 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
385 BasicBlock *InsertAtEnd);
386 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
387 Instruction *InsertBefore = nullptr);
388 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
389 BasicBlock *InsertAtEnd);
390 static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
391 Instruction *InsertBefore = nullptr);
392 static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
393 BasicBlock *InsertAtEnd);
394
395 BinaryOps getOpcode() const {
396 return static_cast<BinaryOps>(Instruction::getOpcode());
397 }
398
399 /// Exchange the two operands to this instruction.
400 /// This operation is safe to use on any binary instruction and
401 /// does not modify the semantics of the instruction. If the instruction
402 /// cannot be reversed (i.e., it's a Div), then return true.
403 ///
404 bool swapOperands();
405
406 // Methods for support type inquiry through isa, cast, and dyn_cast:
407 static bool classof(const Instruction *I) {
408 return I->isBinaryOp();
409 }
410 static bool classof(const Value *V) {
411 return isa<Instruction>(V) && classof(cast<Instruction>(V));
412 }
413};
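
A small sketch of the flag-setting creators declared above; names and insertion point are illustrative:

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Emit (A + B) with the no-signed-wrap flag, then an exact signed
// division of the sum by B, both inserted before InsertPt.
static Value *nswAddThenExactDiv(Value *A, Value *B, Instruction *InsertPt) {
  BinaryOperator *Sum = BinaryOperator::CreateNSWAdd(A, B, "sum", InsertPt);
  return BinaryOperator::CreateExactSDiv(Sum, B, "quot", InsertPt);
}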
414
415template <>
416struct OperandTraits<BinaryOperator> :
417 public FixedNumOperandTraits<BinaryOperator, 2> {
418};
419
420DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
421
422//===----------------------------------------------------------------------===//
423// CastInst Class
424//===----------------------------------------------------------------------===//
425
426/// This is the base class for all instructions that perform data
427/// casts. It is simply provided so that instruction category testing
428/// can be performed with code like:
429///
430/// if (isa<CastInst>(Instr)) { ... }
431/// Base class of casting instructions.
432class CastInst : public UnaryInstruction {
433protected:
434 /// Constructor with insert-before-instruction semantics for subclasses
435 CastInst(Type *Ty, unsigned iType, Value *S,
436 const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
437 : UnaryInstruction(Ty, iType, S, InsertBefore) {
438 setName(NameStr);
439 }
440 /// Constructor with insert-at-end-of-block semantics for subclasses
441 CastInst(Type *Ty, unsigned iType, Value *S,
442 const Twine &NameStr, BasicBlock *InsertAtEnd)
443 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
444 setName(NameStr);
445 }
446
447public:
448 /// Provides a way to construct any of the CastInst subclasses using an
449 /// opcode instead of the subclass's constructor. The opcode must be in the
450 /// CastOps category (Instruction::isCast(opcode) returns true). This
451 /// constructor has insert-before-instruction semantics to automatically
452 /// insert the new CastInst before InsertBefore (if it is non-null).
453 /// Construct any of the CastInst subclasses
454 static CastInst *Create(
455 Instruction::CastOps, ///< The opcode of the cast instruction
456 Value *S, ///< The value to be casted (operand 0)
457 Type *Ty, ///< The type to which cast should be made
458 const Twine &Name = "", ///< Name for the instruction
459 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
460 );
461 /// Provides a way to construct any of the CastInst subclasses using an
462 /// opcode instead of the subclass's constructor. The opcode must be in the
463 /// CastOps category. This constructor has insert-at-end-of-block semantics
464 /// to automatically insert the new CastInst at the end of InsertAtEnd (if
465/// it's non-null).
466 /// Construct any of the CastInst subclasses
467 static CastInst *Create(
468 Instruction::CastOps, ///< The opcode for the cast instruction
469 Value *S, ///< The value to be casted (operand 0)
470 Type *Ty, ///< The type to which operand is casted
471 const Twine &Name, ///< The name for the instruction
472 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
473 );
474
475 /// Create a ZExt or BitCast cast instruction
476 static CastInst *CreateZExtOrBitCast(
477 Value *S, ///< The value to be casted (operand 0)
478 Type *Ty, ///< The type to which cast should be made
479 const Twine &Name = "", ///< Name for the instruction
480 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
481 );
482
483 /// Create a ZExt or BitCast cast instruction
484 static CastInst *CreateZExtOrBitCast(
485 Value *S, ///< The value to be casted (operand 0)
486 Type *Ty, ///< The type to which operand is casted
487 const Twine &Name, ///< The name for the instruction
488 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
489 );
490
491 /// Create a SExt or BitCast cast instruction
492 static CastInst *CreateSExtOrBitCast(
493 Value *S, ///< The value to be casted (operand 0)
494 Type *Ty, ///< The type to which cast should be made
495 const Twine &Name = "", ///< Name for the instruction
496 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
497 );
498
499 /// Create a SExt or BitCast cast instruction
500 static CastInst *CreateSExtOrBitCast(
501 Value *S, ///< The value to be casted (operand 0)
502 Type *Ty, ///< The type to which operand is casted
503 const Twine &Name, ///< The name for the instruction
504 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
505 );
506
507 /// Create a BitCast, AddrSpaceCast, or a PtrToInt cast instruction.
508 static CastInst *CreatePointerCast(
509 Value *S, ///< The pointer value to be casted (operand 0)
510 Type *Ty, ///< The type to which operand is casted
511 const Twine &Name, ///< The name for the instruction
512 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
513 );
514
515 /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
516 static CastInst *CreatePointerCast(
517 Value *S, ///< The pointer value to be casted (operand 0)
518 Type *Ty, ///< The type to which cast should be made
519 const Twine &Name = "", ///< Name for the instruction
520 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
521 );
522
523 /// Create a BitCast or an AddrSpaceCast cast instruction.
524 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
525 Value *S, ///< The pointer value to be casted (operand 0)
526 Type *Ty, ///< The type to which operand is casted
527 const Twine &Name, ///< The name for the instruction
528 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
529 );
530
531 /// Create a BitCast or an AddrSpaceCast cast instruction.
532 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
533 Value *S, ///< The pointer value to be casted (operand 0)
534 Type *Ty, ///< The type to which cast should be made
535 const Twine &Name = "", ///< Name for the instruction
536 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
537 );
538
539 /// Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
540 ///
541 /// If the value is a pointer type and the destination an integer type,
542 /// creates a PtrToInt cast. If the value is an integer type and the
543 /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
544 /// a bitcast.
545 static CastInst *CreateBitOrPointerCast(
546 Value *S, ///< The pointer value to be casted (operand 0)
547 Type *Ty, ///< The type to which cast should be made
548 const Twine &Name = "", ///< Name for the instruction
549 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
550 );
551
552 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
553 static CastInst *CreateIntegerCast(
554 Value *S, ///< The integer value to be casted (operand 0)
555 Type *Ty, ///< The type to which cast should be made
556 bool isSigned, ///< Whether to regard S as signed or not
557 const Twine &Name = "", ///< Name for the instruction
558 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
559 );
560
561 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
562 static CastInst *CreateIntegerCast(
563 Value *S, ///< The integer value to be casted (operand 0)
564 Type *Ty, ///< The integer type to which operand is casted
565 bool isSigned, ///< Whether to regard S as signed or not
566 const Twine &Name, ///< The name for the instruction
567 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
568 );
569
570 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
571 static CastInst *CreateFPCast(
572 Value *S, ///< The floating point value to be casted
573 Type *Ty, ///< The floating point type to cast to
574 const Twine &Name = "", ///< Name for the instruction
575 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
576 );
577
578 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
579 static CastInst *CreateFPCast(
580 Value *S, ///< The floating point value to be casted
581 Type *Ty, ///< The floating point type to cast to
582 const Twine &Name, ///< The name for the instruction
583 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
584 );
585
586 /// Create a Trunc or BitCast cast instruction
587 static CastInst *CreateTruncOrBitCast(
588 Value *S, ///< The value to be casted (operand 0)
589 Type *Ty, ///< The type to which cast should be made
590 const Twine &Name = "", ///< Name for the instruction
591 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
592 );
593
594 /// Create a Trunc or BitCast cast instruction
595 static CastInst *CreateTruncOrBitCast(
596 Value *S, ///< The value to be casted (operand 0)
597 Type *Ty, ///< The type to which operand is casted
598 const Twine &Name, ///< The name for the instruction
599 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
600 );
601
602 /// Check whether a bitcast between these types is valid
603 static bool isBitCastable(
604 Type *SrcTy, ///< The Type from which the value should be cast.
605 Type *DestTy ///< The Type to which the value should be cast.
606 );
607
608 /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
609 /// types is valid and a no-op.
610 ///
611 /// This ensures that any pointer<->integer cast has enough bits in the
612 /// integer and any other cast is a bitcast.
613 static bool isBitOrNoopPointerCastable(
614 Type *SrcTy, ///< The Type from which the value should be cast.
615 Type *DestTy, ///< The Type to which the value should be cast.
616 const DataLayout &DL);
617
618 /// Returns the opcode necessary to cast Val into Ty using usual casting
619 /// rules.
620 /// Infer the opcode for cast operand and type
621 static Instruction::CastOps getCastOpcode(
622 const Value *Val, ///< The value to cast
623 bool SrcIsSigned, ///< Whether to treat the source as signed
624 Type *Ty, ///< The Type to which the value should be casted
625 bool DstIsSigned ///< Whether to treat the destination as signed
626 );
627
628 /// There are several places where we need to know if a cast instruction
629 /// only deals with integer source and destination types. To simplify that
630 /// logic, this method is provided.
631 /// @returns true iff the cast has only integral typed operand and dest type.
632 /// Determine if this is an integer-only cast.
633 bool isIntegerCast() const;
634
635 /// A lossless cast is one that does not alter the basic value. It implies
636 /// a no-op cast but is more stringent, preventing things like int->float,
637 /// long->double, or int->ptr.
638 /// @returns true iff the cast is lossless.
639 /// Determine if this is a lossless cast.
640 bool isLosslessCast() const;
641
642 /// A no-op cast is one that can be effected without changing any bits.
643 /// It implies that the source and destination types are the same size. The
644 /// DataLayout argument is to determine the pointer size when examining casts
645 /// involving Integer and Pointer types. They are no-op casts if the integer
646 /// is the same size as the pointer. However, pointer size varies with
647 /// platform. Note that a precondition of this method is that the cast is
648 /// legal - i.e. the instruction formed with these operands would verify.
649 static bool isNoopCast(
650 Instruction::CastOps Opcode, ///< Opcode of cast
651 Type *SrcTy, ///< SrcTy of cast
652 Type *DstTy, ///< DstTy of cast
653 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
654 );
655
656 /// Determine if this cast is a no-op cast.
657 ///
658 /// \param DL is the DataLayout to determine pointer size.
659 bool isNoopCast(const DataLayout &DL) const;
660
661 /// Determine how a pair of casts can be eliminated, if they can be at all.
662 /// This is a helper function for both CastInst and ConstantExpr.
663 /// @returns 0 if the CastInst pair can't be eliminated, otherwise
664/// returns the Instruction::CastOps value for a cast that can replace
665 /// the pair, casting SrcTy to DstTy.
666 /// Determine if a cast pair is eliminable
667 static unsigned isEliminableCastPair(
668 Instruction::CastOps firstOpcode, ///< Opcode of first cast
669 Instruction::CastOps secondOpcode, ///< Opcode of second cast
670 Type *SrcTy, ///< SrcTy of 1st cast
671 Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
672 Type *DstTy, ///< DstTy of 2nd cast
673 Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
674 Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
675 Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
676 );
677
678 /// Return the opcode of this CastInst
679 Instruction::CastOps getOpcode() const {
680 return Instruction::CastOps(Instruction::getOpcode());
681 }
682
683 /// Return the source type, as a convenience
684 Type* getSrcTy() const { return getOperand(0)->getType(); }
685 /// Return the destination type, as a convenience
686 Type* getDestTy() const { return getType(); }
687
688 /// This method can be used to determine if a cast from SrcTy to DstTy using
689 /// Opcode op is valid or not.
690 /// @returns true iff the proposed cast is valid.
691 /// Determine if a cast is valid without creating one.
692 static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy);
693 static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
694 return castIsValid(op, S->getType(), DstTy);
695 }
696
697 /// Methods for support type inquiry through isa, cast, and dyn_cast:
698 static bool classof(const Instruction *I) {
699 return I->isCast();
700 }
701 static bool classof(const Value *V) {
702 return isa<Instruction>(V) && classof(cast<Instruction>(V));
703 }
704};
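
Tying the CastInst helpers together, a hedged sketch that infers an opcode, validates it, and materializes the cast. getCastOpcode assumes some cast between the two types is possible, so castIsValid is checked before creating the instruction:

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Cast V to DestTy, treating both sides as signed; returns nullptr rather
// than creating an instruction that would fail the verifier.
static Value *castSigned(Value *V, Type *DestTy, Instruction *InsertPt) {
  Instruction::CastOps Op = CastInst::getCastOpcode(
      V, /*SrcIsSigned=*/true, DestTy, /*DstIsSigned=*/true);
  if (!CastInst::castIsValid(Op, V, DestTy))
    return nullptr;
  return CastInst::Create(Op, V, DestTy, "cast", InsertPt);
}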
705
706//===----------------------------------------------------------------------===//
707// CmpInst Class
708//===----------------------------------------------------------------------===//
709
710/// This class is the base class for the comparison instructions.
711/// Abstract base class of comparison instructions.
712class CmpInst : public Instruction {
713public:
714 /// This enumeration lists the possible predicates for CmpInst subclasses.
715 /// Values in the range 0-31 are reserved for FCmpInst, while values in the
716 /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
717 /// predicate values are not overlapping between the classes.
718 ///
719 /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
720 /// FCMP_* values. Changing the bit patterns requires a potential change to
721 /// those passes.
722 enum Predicate : unsigned {
723 // Opcode U L G E Intuitive operation
724 FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
725 FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
726 FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
727 FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
728 FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
729 FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
730 FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
731 FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
732 FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
733 FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
734 FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
735 FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
736 FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
737 FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
738 FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
739 FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
740 FIRST_FCMP_PREDICATE = FCMP_FALSE,
741 LAST_FCMP_PREDICATE = FCMP_TRUE,
742 BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
743 ICMP_EQ = 32, ///< equal
744 ICMP_NE = 33, ///< not equal
745 ICMP_UGT = 34, ///< unsigned greater than
746 ICMP_UGE = 35, ///< unsigned greater or equal
747 ICMP_ULT = 36, ///< unsigned less than
748 ICMP_ULE = 37, ///< unsigned less or equal
749 ICMP_SGT = 38, ///< signed greater than
750 ICMP_SGE = 39, ///< signed greater or equal
751 ICMP_SLT = 40, ///< signed less than
752 ICMP_SLE = 41, ///< signed less or equal
753 FIRST_ICMP_PREDICATE = ICMP_EQ,
754 LAST_ICMP_PREDICATE = ICMP_SLE,
755 BAD_ICMP_PREDICATE = ICMP_SLE + 1
756 };
757 using PredicateField =
758 Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>;
759
760protected:
761 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
762 Value *LHS, Value *RHS, const Twine &Name = "",
763 Instruction *InsertBefore = nullptr,
764 Instruction *FlagsSource = nullptr);
765
766 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
767 Value *LHS, Value *RHS, const Twine &Name,
768 BasicBlock *InsertAtEnd);
769
770public:
771 // allocate space for exactly two operands
772 void *operator new(size_t s) {
773 return User::operator new(s, 2);
774 }
775
776 /// Construct a compare instruction, given the opcode, the predicate and
777 /// the two operands. Optionally (if InstBefore is specified) insert the
778 /// instruction into a BasicBlock right before the specified instruction.
779 /// The specified Instruction is allowed to be a dereferenced end iterator.
780 /// Create a CmpInst
781 static CmpInst *Create(OtherOps Op,
782 Predicate predicate, Value *S1,
783 Value *S2, const Twine &Name = "",
784 Instruction *InsertBefore = nullptr);
785
786 /// Construct a compare instruction, given the opcode, the predicate and the
787 /// two operands. Also automatically insert this instruction to the end of
788 /// the BasicBlock specified.
789 /// Create a CmpInst
790 static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
791 Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
792
793 /// Get the opcode casted to the right type
794 OtherOps getOpcode() const {
795 return static_cast<OtherOps>(Instruction::getOpcode());
796 }
797
798 /// Return the predicate for this instruction.
799 Predicate getPredicate() const { return getSubclassData<PredicateField>(); }
800
801 /// Set the predicate for this instruction to the specified value.
802 void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }
803
804 static bool isFPPredicate(Predicate P) {
805 static_assert(FIRST_FCMP_PREDICATE == 0,
806 "FIRST_FCMP_PREDICATE is required to be 0");
807 return P <= LAST_FCMP_PREDICATE;
808 }
809
810 static bool isIntPredicate(Predicate P) {
811 return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
812 }
813
814 static StringRef getPredicateName(Predicate P);
815
816 bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
817 bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
818
819 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
820 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
821 /// @returns the inverse predicate for the instruction's current predicate.
822 /// Return the inverse of the instruction's predicate.
823 Predicate getInversePredicate() const {
824 return getInversePredicate(getPredicate());
825 }
826
827 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
828 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
829 /// @returns the inverse predicate for predicate provided in \p pred.
830 /// Return the inverse of a given predicate
831 static Predicate getInversePredicate(Predicate pred);
832
833 /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
834 /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
835 /// @returns the predicate that would be the result of exchanging the two
836 /// operands of the CmpInst instruction without changing the result
837 /// produced.
838 /// Return the predicate as if the operands were swapped
839 Predicate getSwappedPredicate() const {
840 return getSwappedPredicate(getPredicate());
841 }
842
843 /// This is a static version that you can use without an instruction
844 /// available.
845 /// Return the predicate as if the operands were swapped.
846 static Predicate getSwappedPredicate(Predicate pred);
847
848 /// This is a static version that you can use without an instruction
849 /// available.
850 /// @returns true if the comparison predicate is strict, false otherwise.
851 static bool isStrictPredicate(Predicate predicate);
852
853 /// @returns true if the comparison predicate is strict, false otherwise.
854 /// Determine if this instruction is using a strict comparison predicate.
855 bool isStrictPredicate() const { return isStrictPredicate(getPredicate()); }
856
857 /// This is a static version that you can use without an instruction
858 /// available.
859 /// @returns true if the comparison predicate is non-strict, false otherwise.
860 static bool isNonStrictPredicate(Predicate predicate);
861
862 /// @returns true if the comparison predicate is non-strict, false otherwise.
863 /// Determine if this instruction is using a non-strict comparison predicate.
864 bool isNonStrictPredicate() const {
865 return isNonStrictPredicate(getPredicate());
866 }
867
868 /// For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
869 /// Returns the strict version of non-strict comparisons.
870 Predicate getStrictPredicate() const {
871 return getStrictPredicate(getPredicate());
872 }
873
874 /// This is a static version that you can use without an instruction
875 /// available.
876 /// @returns the strict version of comparison provided in \p pred.
877 /// If \p pred is not a strict comparison predicate, returns \p pred.
878 /// Returns the strict version of non-strict comparisons.
879 static Predicate getStrictPredicate(Predicate pred);
880
881 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
882 /// Returns the non-strict version of strict comparisons.
883 Predicate getNonStrictPredicate() const {
884 return getNonStrictPredicate(getPredicate());
885 }
886
887 /// This is a static version that you can use without an instruction
888 /// available.
889 /// @returns the non-strict version of comparison provided in \p pred.
890 /// If \p pred is not a strict comparison predicate, returns \p pred.
891 /// Returns the non-strict version of strict comparisons.
892 static Predicate getNonStrictPredicate(Predicate pred);
893
894 /// This is a static version that you can use without an instruction
895 /// available.
896 /// Return the flipped strictness of predicate
897 static Predicate getFlippedStrictnessPredicate(Predicate pred);
898
899 /// For a predicate of kind "is X or equal to 0" returns the predicate "is X".
900 /// For a predicate of kind "is X" returns the predicate "is X or equal to 0".
901 /// Other kinds of predicates are not supported.
902 /// @returns the strict version of the predicate if it was non-strict, and
903 /// the non-strict version if it was strict.
904 /// Return the flipped strictness of predicate
905 Predicate getFlippedStrictnessPredicate() const {
906 return getFlippedStrictnessPredicate(getPredicate());
907 }
908
909 /// Provide more efficient getOperand methods.
910 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
911
912 /// This is just a convenience that dispatches to the subclasses.
913 /// Swap the operands and adjust predicate accordingly to retain
914 /// the same comparison.
915 void swapOperands();
916
917 /// This is just a convenience that dispatches to the subclasses.
918 /// Determine if this CmpInst is commutative.
919 bool isCommutative() const;
920
921 /// Determine if this is an equals/not equals predicate.
922 /// This is a static version that you can use without an instruction
923 /// available.
924 static bool isEquality(Predicate pred);
925
926 /// Determine if this is an equals/not equals predicate.
927 bool isEquality() const { return isEquality(getPredicate()); }
928
929 /// Return true if the predicate is relational (not EQ or NE).
930 static bool isRelational(Predicate P) { return !isEquality(P); }
931
932 /// Return true if the predicate is relational (not EQ or NE).
933 bool isRelational() const { return !isEquality(); }
934
935 /// @returns true if the comparison is signed, false otherwise.
936 /// Determine if this instruction is using a signed comparison.
937 bool isSigned() const {
938 return isSigned(getPredicate());
939 }
940
941 /// @returns true if the comparison is unsigned, false otherwise.
942 /// Determine if this instruction is using an unsigned comparison.
943 bool isUnsigned() const {
944 return isUnsigned(getPredicate());
945 }
946
947 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
948 /// @returns the signed version of the unsigned predicate pred.
949 /// return the signed version of a predicate
950 static Predicate getSignedPredicate(Predicate pred);
951
952 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
953 /// @returns the signed version of the predicate for this instruction (which
954 /// has to be an unsigned predicate).
955 /// return the signed version of a predicate
956 Predicate getSignedPredicate() {
957 return getSignedPredicate(getPredicate());
958 }
959
960 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
961 /// @returns the unsigned version of the signed predicate pred.
962 static Predicate getUnsignedPredicate(Predicate pred);
963
964 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
965 /// @returns the unsigned version of the predicate for this instruction (which
966 /// has to be a signed predicate).
967 /// return the unsigned version of a predicate
968 Predicate getUnsignedPredicate() {
969 return getUnsignedPredicate(getPredicate());
970 }
971
972 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
973 /// @returns the unsigned version of the signed predicate pred or
974 /// the signed version of the unsigned predicate pred.
975 static Predicate getFlippedSignednessPredicate(Predicate pred);
976
977 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
978 /// @returns the unsigned version of the signed predicate pred or
979 /// the signed version of the unsigned predicate pred.
980 Predicate getFlippedSignednessPredicate() {
981 return getFlippedSignednessPredicate(getPredicate());
982 }
983
984 /// This is just a convenience.
985 /// Determine if this is true when both operands are the same.
986 bool isTrueWhenEqual() const {
987 return isTrueWhenEqual(getPredicate());
988 }
989
990 /// This is just a convenience.
991 /// Determine if this is false when both operands are the same.
992 bool isFalseWhenEqual() const {
993 return isFalseWhenEqual(getPredicate());
994 }
995
996 /// @returns true if the predicate is unsigned, false otherwise.
997 /// Determine if the predicate is an unsigned operation.
998 static bool isUnsigned(Predicate predicate);
999
1000 /// @returns true if the predicate is signed, false otherwise.
1001 /// Determine if the predicate is a signed operation.
1002 static bool isSigned(Predicate predicate);
1003
1004 /// Determine if the predicate is an ordered operation.
1005 static bool isOrdered(Predicate predicate);
1006
1007 /// Determine if the predicate is an unordered operation.
1008 static bool isUnordered(Predicate predicate);
1009
1010 /// Determine if the predicate is true when comparing a value with itself.
1011 static bool isTrueWhenEqual(Predicate predicate);
1012
1013 /// Determine if the predicate is false when comparing a value with itself.
1014 static bool isFalseWhenEqual(Predicate predicate);
1015
1016 /// Determine if Pred1 implies Pred2 is true when two compares have matching
1017 /// operands.
1018 static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
1019
1020 /// Determine if Pred1 implies Pred2 is false when two compares have matching
1021 /// operands.
1022 static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
1023
1024 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1025 static bool classof(const Instruction *I) {
1026 return I->getOpcode() == Instruction::ICmp ||
1027 I->getOpcode() == Instruction::FCmp;
1028 }
1029 static bool classof(const Value *V) {
1030 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1031 }
1032
1033 /// Create a result type for fcmp/icmp
1034 static Type* makeCmpResultType(Type* opnd_type) {
1035 if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
1036 return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
1037 vt->getElementCount());
1038 }
1039 return Type::getInt1Ty(opnd_type->getContext());
1040 }
1041
1042private:
1043 // Shadow Value::setValueSubclassData with a private forwarding method so that
1044 // subclasses cannot accidentally use it.
1045 void setValueSubclassData(unsigned short D) {
1046 Value::setValueSubclassData(D);
1047 }
1048};
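
A brief, illustrative sketch of the predicate machinery: build a signed less-than compare and derive its inverse and swapped forms (the asserts simply document the expected results):

#include "llvm/IR/InstrTypes.h"
#include <cassert>
using namespace llvm;

// Build "A <s B" as an icmp and inspect the derived predicates.
static CmpInst *emitSignedLess(Value *A, Value *B, Instruction *InsertPt) {
  CmpInst *LT = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SLT,
                                A, B, "lt", InsertPt);
  assert(LT->getInversePredicate() == CmpInst::ICMP_SGE); // !(a < b)
  assert(LT->getSwappedPredicate() == CmpInst::ICMP_SGT); // b > a
  return LT;
}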
1049
1050// FIXME: these are redundant if CmpInst < BinaryOperator
1051template <>
1052struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
1053};
1054
1055DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
1056
1057/// A lightweight accessor for an operand bundle meant to be passed
1058/// around by value.
1059struct OperandBundleUse {
1060 ArrayRef<Use> Inputs;
1061
1062 OperandBundleUse() = default;
1063 explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
1064 : Inputs(Inputs), Tag(Tag) {}
1065
1066 /// Return true if the operand at index \p Idx in this operand bundle
1067 /// has the attribute A.
1068 bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
1069 if (isDeoptOperandBundle())
1070 if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
1071 return Inputs[Idx]->getType()->isPointerTy();
1072
1073 // Conservative answer: no operands have any attributes.
1074 return false;
1075 }
1076
1077 /// Return the tag of this operand bundle as a string.
1078 StringRef getTagName() const {
1079 return Tag->getKey();
1080 }
1081
1082 /// Return the tag of this operand bundle as an integer.
1083 ///
1084 /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
1085 /// and this function returns the unique integer that getOrInsertBundleTag
1086 /// associated with the tag of this operand bundle.
1087 uint32_t getTagID() const {
1088 return Tag->getValue();
1089 }
1090
1091 /// Return true if this is a "deopt" operand bundle.
1092 bool isDeoptOperandBundle() const {
1093 return getTagID() == LLVMContext::OB_deopt;
1094 }
1095
1096 /// Return true if this is a "funclet" operand bundle.
1097 bool isFuncletOperandBundle() const {
1098 return getTagID() == LLVMContext::OB_funclet;
1099 }
1100
1101 /// Return true if this is a "cfguardtarget" operand bundle.
1102 bool isCFGuardTargetOperandBundle() const {
1103 return getTagID() == LLVMContext::OB_cfguardtarget;
1104 }
1105
1106private:
1107 /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
1108 StringMapEntry<uint32_t> *Tag;
1109};
1110
1111/// A container for an operand bundle being viewed as a set of values
1112/// rather than a set of uses.
1113///
1114/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
1115/// so it is possible to create and pass around "self-contained" instances of
1116/// OperandBundleDef and ConstOperandBundleDef.
1117template <typename InputTy> class OperandBundleDefT {
1118 std::string Tag;
1119 std::vector<InputTy> Inputs;
1120
1121public:
1122 explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
1123 : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
1124 explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
1125 : Tag(std::move(Tag)), Inputs(Inputs) {}
1126
1127 explicit OperandBundleDefT(const OperandBundleUse &OBU) {
1128 Tag = std::string(OBU.getTagName());
1129 llvm::append_range(Inputs, OBU.Inputs);
1130 }
1131
1132 ArrayRef<InputTy> inputs() const { return Inputs; }
1133
1134 using input_iterator = typename std::vector<InputTy>::const_iterator;
1135
1136 size_t input_size() const { return Inputs.size(); }
1137 input_iterator input_begin() const { return Inputs.begin(); }
1138 input_iterator input_end() const { return Inputs.end(); }
1139
1140 StringRef getTag() const { return Tag; }
1141};
1142
1143using OperandBundleDef = OperandBundleDefT<Value *>;
1144using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
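
As a self-contained sketch (using CallBase::Create, declared further down in this listing), a common pattern is rebuilding a call with a fresh bundle set; note the clone carries exactly the bundles passed in, not the original ones:

#include "llvm/IR/InstrTypes.h"
#include <vector>
using namespace llvm;

// Clone CB immediately before itself, replacing its operand bundles
// with a single "deopt" bundle carrying State.
static CallBase *withDeoptState(CallBase *CB, std::vector<Value *> State) {
  OperandBundleDef Deopt("deopt", std::move(State));
  return CallBase::Create(CB, {Deopt}, /*InsertPt=*/CB);
}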
1145
1146//===----------------------------------------------------------------------===//
1147// CallBase Class
1148//===----------------------------------------------------------------------===//
1149
1150/// Base class for all callable instructions (InvokeInst and CallInst)
1151/// Holds everything related to calling a function.
1152///
1153/// All call-like instructions are required to use a common operand layout:
1154/// - Zero or more arguments to the call,
1155 /// - Zero or more operand bundles, each with zero or more operand
1156 ///   inputs,
1157 /// - Zero or more subclass-controlled operands,
1158/// - The called function.
1159///
1160/// This allows this base class to easily access the called function and the
1161/// start of the arguments without knowing how many other operands a particular
1162/// subclass requires. Note that accessing the end of the argument list isn't
1163/// as cheap as most other operations on the base class.
1164class CallBase : public Instruction {
1165protected:
1166 // The first two bits are reserved by CallInst for fast retrieval.
1167 using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>;
1168 using CallingConvField =
1169 Bitfield::Element<CallingConv::ID, CallInstReservedField::NextBit, 10,
1170 CallingConv::MaxID>;
1171 static_assert(
1172 Bitfield::areContiguous<CallInstReservedField, CallingConvField>(),
1173 "Bitfields must be contiguous");
1174
1175 /// The last operand is the called operand.
1176 static constexpr int CalledOperandOpEndIdx = -1;
1177
1178 AttributeList Attrs; ///< parameter attributes for callable
1179 FunctionType *FTy;
1180
1181 template <class... ArgsTy>
1182 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1183 : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1184
1185 using Instruction::Instruction;
1186
1187 bool hasDescriptor() const { return Value::HasDescriptor; }
1188
1189 unsigned getNumSubclassExtraOperands() const {
1190 switch (getOpcode()) {
1191 case Instruction::Call:
1192 return 0;
1193 case Instruction::Invoke:
1194 return 2;
1195 case Instruction::CallBr:
1196 return getNumSubclassExtraOperandsDynamic();
1197 }
1198 llvm_unreachable("Invalid opcode!");
1199 }
1200
1201 /// Get the number of extra operands for instructions that don't have a fixed
1202 /// number of extra operands.
1203 unsigned getNumSubclassExtraOperandsDynamic() const;
1204
1205public:
1206 using Instruction::getContext;
1207
1208 /// Create a clone of \p CB with a different set of operand bundles and
1209 /// insert it before \p InsertPt.
1210 ///
1211 /// The returned call instruction is identical to \p CB in every way except that
1212 /// the operand bundles for the new instruction are set to the operand bundles
1213 /// in \p Bundles.
1214 static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
1215 Instruction *InsertPt = nullptr);
1216
1217 static bool classof(const Instruction *I) {
1218 return I->getOpcode() == Instruction::Call ||
1219 I->getOpcode() == Instruction::Invoke ||
1220 I->getOpcode() == Instruction::CallBr;
1221 }
1222 static bool classof(const Value *V) {
1223 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1224 }
1225
1226 FunctionType *getFunctionType() const { return FTy; }
1227
1228 void mutateFunctionType(FunctionType *FTy) {
1229 Value::mutateType(FTy->getReturnType());
1230 this->FTy = FTy;
1231 }
1232
1233 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1234
1235 /// data_operands_begin/data_operands_end - Return iterators iterating over
1236 /// the call / invoke argument list and bundle operands. For invokes, this is
1237 /// the set of instruction operands except the invoke target and the two
1238 /// successor blocks; and for calls this is the set of instruction operands
1239 /// except the call target.
1240 User::op_iterator data_operands_begin() { return op_begin(); }
1241 User::const_op_iterator data_operands_begin() const {
1242 return const_cast<CallBase *>(this)->data_operands_begin();
1243 }
1244 User::op_iterator data_operands_end() {
1245 // Walk from the end of the operands over the called operand and any
1246 // subclass operands.
1247 return op_end() - getNumSubclassExtraOperands() - 1;
1248 }
1249 User::const_op_iterator data_operands_end() const {
1250 return const_cast<CallBase *>(this)->data_operands_end();
1251 }
1252 iterator_range<User::op_iterator> data_ops() {
1253 return make_range(data_operands_begin(), data_operands_end());
1254 }
1255 iterator_range<User::const_op_iterator> data_ops() const {
1256 return make_range(data_operands_begin(), data_operands_end());
1257 }
1258 bool data_operands_empty() const {
1259 return data_operands_end() == data_operands_begin();
1260 }
1261 unsigned data_operands_size() const {
1262 return std::distance(data_operands_begin(), data_operands_end());
1263 }
1264
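// A short sketch of walking the data operands (arguments plus bundle inputs,
// excluding the callee), assuming `CB` is a `CallBase &`.
//
// \code
//   for (Use &U : CB.data_ops()) {
//     unsigned OpNo = CB.getDataOperandNo(&U);
//     bool IsArg = CB.isArgOperand(&U);   // false for bundle operands
//     (void)OpNo; (void)IsArg;
//   }
// \endcode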
1265 bool isDataOperand(const Use *U) const {
1266 assert(this == U->getUser() &&
1267        "Only valid to query with a use of this instruction!");
1268 return data_operands_begin() <= U && U < data_operands_end();
1269 }
1270 bool isDataOperand(Value::const_user_iterator UI) const {
1271 return isDataOperand(&UI.getUse());
1272 }
1273
1274 /// Given a value use iterator, return the data operand number corresponding
1275 /// to it. The iterator must actually correspond to a data operand.
1276 unsigned getDataOperandNo(Value::const_user_iterator UI) const {
1277 return getDataOperandNo(&UI.getUse());
1278 }
1279
1280 /// Given a use for a data operand, get the data operand number that
1281 /// corresponds to it.
1282 unsigned getDataOperandNo(const Use *U) const {
1283 assert(isDataOperand(U) && "Data operand # out of range!");
1284 return U - data_operands_begin();
1285 }
1286
1287 /// Return the iterator pointing to the beginning of the argument list.
1288 User::op_iterator arg_begin() { return op_begin(); }
1289 User::const_op_iterator arg_begin() const {
1290 return const_cast<CallBase *>(this)->arg_begin();
1291 }
1292
1293 /// Return the iterator pointing to the end of the argument list.
1294 User::op_iterator arg_end() {
1295 // From the end of the data operands, walk backwards past the bundle
1296 // operands.
1297 return data_operands_end() - getNumTotalBundleOperands();
1298 }
1299 User::const_op_iterator arg_end() const {
1300 return const_cast<CallBase *>(this)->arg_end();
1301 }
1302
1303 /// Iteration adapter for range-for loops.
1304 iterator_range<User::op_iterator> args() {
1305 return make_range(arg_begin(), arg_end());
1306 }
1307 iterator_range<User::const_op_iterator> args() const {
1308 return make_range(arg_begin(), arg_end());
1309 }
1310 bool arg_empty() const { return arg_end() == arg_begin(); }
1311 unsigned arg_size() const { return arg_end() - arg_begin(); }
1312
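// The usual range-for idiom over the argument list, as a sketch (assumes
// `CB` is a `CallBase &`; a `Use &` converts implicitly to `Value *`):
//
// \code
//   unsigned NumPtrArgs = 0;
//   for (Value *Arg : CB.args())
//     if (Arg->getType()->isPointerTy())
//       ++NumPtrArgs;
// \endcode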
1313 // Legacy API names that duplicate the above and will be removed once users
1314 // are migrated.
1315 iterator_range<User::op_iterator> arg_operands() {
1316 return make_range(arg_begin(), arg_end());
1317 }
1318 iterator_range<User::const_op_iterator> arg_operands() const {
1319 return make_range(arg_begin(), arg_end());
1320 }
1321 unsigned getNumArgOperands() const { return arg_size(); }
1322
1323 Value *getArgOperand(unsigned i) const {
1324 assert(i < getNumArgOperands() && "Out of bounds!");
1325 return getOperand(i);
1326 }
1327
1328 void setArgOperand(unsigned i, Value *v) {
1329 assert(i < getNumArgOperands() && "Out of bounds!");
1330 setOperand(i, v);
1331 }
1332
1333 /// Wrappers for getting the \c Use of a call argument.
1334 const Use &getArgOperandUse(unsigned i) const {
1335 assert(i < getNumArgOperands() && "Out of bounds!");
1336 return User::getOperandUse(i);
1337 }
1338 Use &getArgOperandUse(unsigned i) {
1339 assert(i < getNumArgOperands() && "Out of bounds!");
1340 return User::getOperandUse(i);
1341 }
1342
1343 bool isArgOperand(const Use *U) const {
1344 assert(this == U->getUser() &&
1345        "Only valid to query with a use of this instruction!");
1346 return arg_begin() <= U && U < arg_end();
1347 }
1348 bool isArgOperand(Value::const_user_iterator UI) const {
1349 return isArgOperand(&UI.getUse());
1350 }
1351
1352 /// Given a use for an arg operand, get the arg operand number that
1353 /// corresponds to it.
1354 unsigned getArgOperandNo(const Use *U) const {
1355 assert(isArgOperand(U) && "Arg operand # out of range!");
1356 return U - arg_begin();
1357 }
1358
1359 /// Given a value use iterator, return the arg operand number corresponding
1360 /// to it. The iterator must actually correspond to an argument operand.
1361 unsigned getArgOperandNo(Value::const_user_iterator UI) const {
1362 return getArgOperandNo(&UI.getUse());
1363 }
1364
1365 /// Returns true if this CallSite passes the given Value* as an argument to
1366 /// the called function.
1367 bool hasArgument(const Value *V) const {
1368 return llvm::is_contained(args(), V);
1369 }
1370
1371 Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
1372
1373 const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
1374 Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
1375
1376 /// Returns the function called, or null if this is an
1377 /// indirect function invocation.
1378 Function *getCalledFunction() const {
1379 return dyn_cast_or_null<Function>(getCalledOperand());
1380 }
1381
1382 /// Return true if the callsite is an indirect call.
1383 bool isIndirectCall() const;
1384
1385 /// Determine whether the passed iterator points to the callee operand's Use.
1386 bool isCallee(Value::const_user_iterator UI) const {
1387 return isCallee(&UI.getUse());
1388 }
1389
1390 /// Determine whether this Use is the callee operand's Use.
1391 bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
1392
1393 /// Helper to get the caller (the parent function).
1394 Function *getCaller();
1395 const Function *getCaller() const {
1396 return const_cast<CallBase *>(this)->getCaller();
1397 }
1398
1399 /// Tests if this call site must be tail call optimized. Only a CallInst can
1400 /// be tail call optimized.
1401 bool isMustTailCall() const;
1402
1403 /// Tests if this call site is marked as a tail call.
1404 bool isTailCall() const;
1405
1406 /// Returns the intrinsic ID of the intrinsic called or
1407 /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
1408 /// this is an indirect call.
1409 Intrinsic::ID getIntrinsicID() const;
1410
1411 void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
1412
1413 /// Sets the function called, including updating the function type.
1414 void setCalledFunction(Function *Fn) {
1415 setCalledFunction(Fn->getFunctionType(), Fn);
1416 }
1417
1418 /// Sets the function called, including updating the function type.
1419 void setCalledFunction(FunctionCallee Fn) {
1420 setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
1421 }
1422
1423 /// Sets the function called, including updating to the specified function
1424 /// type.
1425 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1426 this->FTy = FTy;
1427 assert(FTy == cast<FunctionType>(
1428            cast<PointerType>(Fn->getType())->getElementType()));
1429 // This function doesn't mutate the return type, only the function
1430 // type. Seems broken, but I'm just gonna stick an assert in for now.
1431 assert(getType() == FTy->getReturnType());
1432 setCalledOperand(Fn);
1433 }
1434
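// Sketch of retargeting a direct call, guarding against the function-type
// mismatch the asserts above would reject. `NewFn` is a hypothetical
// `Function *`.
//
// \code
//   if (Function *OldFn = CB.getCalledFunction())
//     if (NewFn->getFunctionType() == OldFn->getFunctionType())
//       CB.setCalledFunction(NewFn);
// \endcode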
1435 CallingConv::ID getCallingConv() const {
1436 return getSubclassData<CallingConvField>();
1437 }
1438
1439 void setCallingConv(CallingConv::ID CC) {
1440 setSubclassData<CallingConvField>(CC);
1441 }
1442
1443 /// Check if this call is an inline asm statement.
1444 bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
1445
1446 /// \name Attribute API
1447 ///
1448 /// These methods access and modify attributes on this call (including
1449 /// looking through to the attributes on the called function when necessary).
1450 ///@{
1451
1452 /// Return the parameter attributes for this call.
1453 ///
1454 AttributeList getAttributes() const { return Attrs; }
1455
1456 /// Set the parameter attributes for this call.
1457 ///
1458 void setAttributes(AttributeList A) { Attrs = A; }
1459
1460 /// Determine whether this call has the given attribute. If it does not
1461 /// then determine if the called function has the attribute, but only if
1462 /// the attribute is allowed for the call.
1463 bool hasFnAttr(Attribute::AttrKind Kind) const {
1464 assert(Kind != Attribute::NoBuiltin &&
1465        "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
1466 return hasFnAttrImpl(Kind);
1467 }
1468
1469 /// Determine whether this call has the given attribute. If it does not
1470 /// then determine if the called function has the attribute, but only if
1471 /// the attribute is allowed for the call.
1472 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1473
1474 /// Adds the attribute to the list of attributes.
1475 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1476 AttributeList PAL = getAttributes();
1477 PAL = PAL.addAttribute(getContext(), i, Kind);
1478 setAttributes(PAL);
1479 }
1480
1481 /// Adds the attribute to the list of attributes.
1482 void addAttribute(unsigned i, Attribute Attr) {
1483 AttributeList PAL = getAttributes();
1484 PAL = PAL.addAttribute(getContext(), i, Attr);
1485 setAttributes(PAL);
1486 }
1487
1488 /// Adds the attribute to the indicated argument
1489 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1490 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1491 AttributeList PAL = getAttributes();
1492 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1493 setAttributes(PAL);
1494 }
1495
1496 /// Adds the attribute to the indicated argument
1497 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1498 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1499 AttributeList PAL = getAttributes();
1500 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1501 setAttributes(PAL);
1502 }
1503
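// Sketch of attaching attributes at the call site (assumes `CB` is a
// `CallBase &` and that argument 0 is a pointer):
//
// \code
//   CB.addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
//   CB.addParamAttr(0, Attribute::NonNull);
// \endcode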
1504 /// Removes the attribute from the list of attributes.
1505 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1506 AttributeList PAL = getAttributes();
1507 PAL = PAL.removeAttribute(getContext(), i, Kind);
1508 setAttributes(PAL);
1509 }
1510
1511 /// Removes the attribute from the list of attributes.
1512 void removeAttribute(unsigned i, StringRef Kind) {
1513 AttributeList PAL = getAttributes();
1514 PAL = PAL.removeAttribute(getContext(), i, Kind);
1515 setAttributes(PAL);
1516 }
1517
1518 void removeAttributes(unsigned i, const AttrBuilder &Attrs) {
1519 AttributeList PAL = getAttributes();
1520 PAL = PAL.removeAttributes(getContext(), i, Attrs);
1521 setAttributes(PAL);
1522 }
1523
1524 /// Removes the attribute from the given argument
1525 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1526 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1527 AttributeList PAL = getAttributes();
1528 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1529 setAttributes(PAL);
1530 }
1531
1532 /// Removes the attribute from the given argument
1533 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1534 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1535 AttributeList PAL = getAttributes();
1536 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1537 setAttributes(PAL);
1538 }
1539
1540 /// Adds the dereferenceable attribute to the list of attributes.
1541 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1542 AttributeList PAL = getAttributes();
1543 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1544 setAttributes(PAL);
1545 }
1546
1547 /// Adds the dereferenceable_or_null attribute to the list of
1548 /// attributes.
1549 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1550 AttributeList PAL = getAttributes();
1551 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1552 setAttributes(PAL);
1553 }
1554
1555 /// Determine whether the return value has the given attribute.
1556 bool hasRetAttr(Attribute::AttrKind Kind) const {
1557 return hasRetAttrImpl(Kind);
1558 }
1559 /// Determine whether the return value has the given attribute.
1560 bool hasRetAttr(StringRef Kind) const { return hasRetAttrImpl(Kind); }
1561
1562 /// Determine whether the argument or parameter has the given attribute.
1563 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1564
1565 /// Get the attribute of a given kind at a position.
1566 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1567 return getAttributes().getAttribute(i, Kind);
1568 }
1569
1570 /// Get the attribute of a given kind at a position.
1571 Attribute getAttribute(unsigned i, StringRef Kind) const {
1572 return getAttributes().getAttribute(i, Kind);
1573 }
1574
1575 /// Get the attribute of a given kind from a given arg
1576 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1577 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1578 return getAttributes().getParamAttr(ArgNo, Kind);
1579 }
1580
1581 /// Get the attribute of a given kind from a given arg
1582 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1583 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1584 return getAttributes().getParamAttr(ArgNo, Kind);
1585 }
1586
1587 /// Return true if the data operand at index \p i has the attribute
1588 /// \p Kind.
1589 ///
1590 /// Data operands include call arguments and values used in operand bundles,
1591 /// but do not include the callee operand. This routine dispatches to the
1592 /// underlying AttributeList or the OperandBundleUser as appropriate.
1593 ///
1594 /// The index \p i is interpreted as
1595 ///
1596 /// \p i == AttributeList::ReturnIndex -> the return value
1597 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1598 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1599 /// (\p i - 1) in the operand list.
1600 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1601 // Note that we have to add one because `i` isn't zero-indexed.
1602 assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&
1603        "Data operand index out of bounds!");
1604
1605 // The attribute A can either be directly specified, if the operand in
1606 // question is a call argument; or be indirectly implied by the kind of its
1607 // containing operand bundle, if the operand is a bundle operand.
1608
1609 if (i == AttributeList::ReturnIndex)
1610 return hasRetAttr(Kind);
1611
1612 // FIXME: Avoid these i - 1 calculations and update the API to use
1613 // zero-based indices.
1614 if (i < (getNumArgOperands() + 1))
1615 return paramHasAttr(i - 1, Kind);
1616
1617 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
1618        "Must be either a call argument or an operand bundle!");
1619 return bundleOperandHasAttr(i - 1, Kind);
1620 }
1621
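// A worked example of the 1-based mapping above, for a call with two
// arguments and one "deopt" bundle carrying a single input (assumes `CB` is
// a `CallBase &`):
//
// \code
//   // i == AttributeList::ReturnIndex (0) -> queries the return value
//   // i == 1 or 2                         -> arguments 0 and 1
//   // i == 3                              -> the lone bundle operand
//   bool NoCap = CB.dataOperandHasImpliedAttr(3, Attribute::NoCapture);
// \endcode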
1622 /// Determine whether this data operand is not captured.
1623 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1624 // better indicate that this may return a conservative answer.
1625 bool doesNotCapture(unsigned OpNo) const {
1626 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
1627 }
1628
1629 /// Determine whether this argument is passed by value.
1630 bool isByValArgument(unsigned ArgNo) const {
1631 return paramHasAttr(ArgNo, Attribute::ByVal);
1632 }
1633
1634 /// Determine whether this argument is passed in an alloca.
1635 bool isInAllocaArgument(unsigned ArgNo) const {
1636 return paramHasAttr(ArgNo, Attribute::InAlloca);
1637 }
1638
1639 /// Determine whether this argument is passed by value, in an alloca, or is
1640 /// preallocated.
1641 bool isPassPointeeByValueArgument(unsigned ArgNo) const {
1642 return paramHasAttr(ArgNo, Attribute::ByVal) ||
1643 paramHasAttr(ArgNo, Attribute::InAlloca) ||
1644 paramHasAttr(ArgNo, Attribute::Preallocated);
1645 }
1646
1647 /// Determine if there is an inalloca argument. Only the last argument can
1648 /// have the inalloca attribute.
1649 bool hasInAllocaArgument() const {
1650 return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
1651 }
1652
1653 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1654 // better indicate that this may return a conservative answer.
1655 bool doesNotAccessMemory(unsigned OpNo) const {
1656 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1657 }
1658
1659 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1660 // better indicate that this may return a conservative answer.
1661 bool onlyReadsMemory(unsigned OpNo) const {
1662 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
1663 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1664 }
1665
1666 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1667 // better indicate that this may return a conservative answer.
1668 bool doesNotReadMemory(unsigned OpNo) const {
1669 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
1670 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1671 }
1672
1673 LLVM_ATTRIBUTE_DEPRECATED(unsigned getRetAlignment() const,
1674                           "Use getRetAlign() instead") {
1675 if (const auto MA = Attrs.getRetAlignment())
1676 return MA->value();
1677 return 0;
1678 }
1679
1680 /// Extract the alignment of the return value.
1681 MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
1682
1683 /// Extract the alignment for a call or parameter (0=unknown).
1684 LLVM_ATTRIBUTE_DEPRECATED(unsigned getParamAlignment(unsigned ArgNo) const,
1685                           "Use getParamAlign() instead") {
1686 if (const auto MA = Attrs.getParamAlignment(ArgNo))
1687 return MA->value();
1688 return 0;
1689 }
1690
1691 /// Extract the alignment for a call or parameter (0=unknown).
1692 MaybeAlign getParamAlign(unsigned ArgNo) const {
1693 return Attrs.getParamAlignment(ArgNo);
1694 }
1695
1696 /// Extract the byval type for a call or parameter.
1697 Type *getParamByValType(unsigned ArgNo) const {
1698 Type *Ty = Attrs.getParamByValType(ArgNo);
1699 return Ty ? Ty : getArgOperand(ArgNo)->getType()->getPointerElementType();
1700 }
1701
1702 /// Extract the preallocated type for a call or parameter.
1703 Type *getParamPreallocatedType(unsigned ArgNo) const {
1704 Type *Ty = Attrs.getParamPreallocatedType(ArgNo);
1705 return Ty ? Ty : getArgOperand(ArgNo)->getType()->getPointerElementType();
1706 }
1707
1708 /// Extract the number of dereferenceable bytes for a call or
1709 /// parameter (0=unknown).
1710 uint64_t getDereferenceableBytes(unsigned i) const {
1711 return Attrs.getDereferenceableBytes(i);
1712 }
1713
1714 /// Extract the number of dereferenceable_or_null bytes for a call or
1715 /// parameter (0=unknown).
1716 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1717 return Attrs.getDereferenceableOrNullBytes(i);
1718 }
1719
1720 /// Return true if the return value is known to be not null.
1721 /// This may be because it has the nonnull attribute, or because at least
1722 /// one byte is dereferenceable and the pointer is in addrspace(0).
1723 bool isReturnNonNull() const;
1724
1725 /// Determine if the return value is marked with NoAlias attribute.
1726 bool returnDoesNotAlias() const {
1727 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1728 }
1729
1730 /// If one of the arguments has the 'returned' attribute, returns its
1731 /// operand value. Otherwise, returns nullptr.
1732 Value *getReturnedArgOperand() const;
1733
1734 /// Return true if the call should not be treated as a call to a
1735 /// builtin.
1736 bool isNoBuiltin() const {
1737 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1738 !hasFnAttrImpl(Attribute::Builtin);
1739 }
1740
1741 /// Determine if the call requires strict floating point semantics.
1742 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1743
1744 /// Return true if the call should not be inlined.
1745 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1746 void setIsNoInline() {
1747 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1748 }
1749 /// Determine if the call does not access memory.
1750 bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
1751 void setDoesNotAccessMemory() {
1752 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1753 }
1754
1755 /// Determine if the call does not access or only reads memory.
1756 bool onlyReadsMemory() const {
1757 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1758 }
1759
1760 /// Returns true if this function is guaranteed to return.
1761 bool willReturn() const { return hasFnAttr(Attribute::WillReturn); }
1762
1763 void setOnlyReadsMemory() {
1764 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1765 }
1766
1767 /// Determine if the call does not access or only writes memory.
1768 bool doesNotReadMemory() const {
1769 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1770 }
1771 void setDoesNotReadMemory() {
1772 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1773 }
1774
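// Sketch of how an analysis might coarsely classify a call site's memory
// behavior with the queries above (assumes `CB` is a `CallBase &`):
//
// \code
//   if (CB.doesNotAccessMemory())
//     ; // behaves like `readnone`: neither loads nor stores
//   else if (CB.onlyReadsMemory())
//     ; // may load, but is known not to store
//   else if (CB.doesNotReadMemory())
//     ; // may store, but is known not to load
// \endcode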
1775 /// Determine if the call can access memory only using pointers based
1776 /// on its arguments.
1777 bool onlyAccessesArgMemory() const {
1778 return hasFnAttr(Attribute::ArgMemOnly);
1779 }
1780 void setOnlyAccessesArgMemory() {
1781 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1782 }
1783
1784 /// Determine if the function may only access memory that is
1785 /// inaccessible from the IR.
1786 bool onlyAccessesInaccessibleMemory() const {
1787 return hasFnAttr(Attribute::InaccessibleMemOnly);
1788 }
1789 void setOnlyAccessesInaccessibleMemory() {
1790 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1791 }
1792
1793 /// Determine if the function may only access memory that is
1794 /// either inaccessible from the IR or pointed to by its arguments.
1795 bool onlyAccessesInaccessibleMemOrArgMem() const {
1796 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1797 }
1798 void setOnlyAccessesInaccessibleMemOrArgMem() {
1799 addAttribute(AttributeList::FunctionIndex,
1800 Attribute::InaccessibleMemOrArgMemOnly);
1801 }
1802 /// Determine if the call cannot return.
1803 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1804 void setDoesNotReturn() {
1805 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1806 }
1807
1808 /// Determine if the call should not perform indirect branch tracking.
1809 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1810
1811 /// Determine if the call cannot unwind.
1812 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1813 void setDoesNotThrow() {
1814 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1815 }
1816
1817 /// Determine if the call cannot be duplicated.
1818 bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
1819 void setCannotDuplicate() {
1820 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1821 }
1822
1823 /// Determine if the call cannot be tail merged.
1824 bool cannotMerge() const { return hasFnAttr(Attribute::NoMerge); }
1825 void setCannotMerge() {
1826 addAttribute(AttributeList::FunctionIndex, Attribute::NoMerge);
1827 }
1828
1829 /// Determine if the call is convergent.
1830 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1831 void setConvergent() {
1832 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1833 }
1834 void setNotConvergent() {
1835 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1836 }
1837
1838 /// Determine if the call returns a structure through its first
1839 /// pointer argument.
1840 bool hasStructRetAttr() const {
1841 if (getNumArgOperands() == 0)
1842 return false;
1843
1844 // Be friendly and also check the callee.
1845 return paramHasAttr(0, Attribute::StructRet);
1846 }
1847
1848 /// Determine if any call argument is an aggregate passed by value.
1849 bool hasByValArgument() const {
1850 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1851 }
1852
1853 ///@}
1854 // End of attribute API.
1855
1856 /// \name Operand Bundle API
1857 ///
1858 /// This group of methods provides the API to access and manipulate operand
1859 /// bundles on this call.
1860 /// @{
1861
1862 /// Return the number of operand bundles associated with this User.
1863 unsigned getNumOperandBundles() const {
1864 return std::distance(bundle_op_info_begin(), bundle_op_info_end());
1865 }
1866
1867 /// Return true if this User has any operand bundles.
1868 bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
1869
1870 /// Return the index of the first bundle operand in the Use array.
1871 unsigned getBundleOperandsStartIndex() const {
1872 assert(hasOperandBundles() && "Don't call otherwise!");
1873 return bundle_op_info_begin()->Begin;
1874 }
1875
1876 /// Return the index of the last bundle operand in the Use array.
1877 unsigned getBundleOperandsEndIndex() const {
1878 assert(hasOperandBundles() && "Don't call otherwise!");
1879 return bundle_op_info_end()[-1].End;
1880 }
1881
1882 /// Return true if the operand at index \p Idx is a bundle operand.
1883 bool isBundleOperand(unsigned Idx) const {
1884 return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
1885 Idx < getBundleOperandsEndIndex();
1886 }
1887
1888 /// Returns true if the use is a bundle operand.
1889 bool isBundleOperand(const Use *U) const {
1890 assert(this == U->getUser() &&
1891        "Only valid to query with a use of this instruction!");
1892 return hasOperandBundles() && isBundleOperand(U - op_begin());
1893 }
1894 bool isBundleOperand(Value::const_user_iterator UI) const {
1895 return isBundleOperand(&UI.getUse());
1896 }
1897
1898 /// Return the total number of operands (not operand bundles) used by
1899 /// every operand bundle in this OperandBundleUser.
1900 unsigned getNumTotalBundleOperands() const {
1901 if (!hasOperandBundles())
1902 return 0;
1903
1904 unsigned Begin = getBundleOperandsStartIndex();
1905 unsigned End = getBundleOperandsEndIndex();
1906
1907 assert(Begin <= End && "Should be!");
1908 return End - Begin;
1909 }
1910
1911 /// Return the operand bundle at a specific index.
1912 OperandBundleUse getOperandBundleAt(unsigned Index) const {
1913 assert(Index < getNumOperandBundles() && "Index out of bounds!");
1914 return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
1915 }
1916
1917 /// Return the number of operand bundles with the tag Name attached to
1918 /// this instruction.
1919 unsigned countOperandBundlesOfType(StringRef Name) const {
1920 unsigned Count = 0;
1921 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1922 if (getOperandBundleAt(i).getTagName() == Name)
1923 Count++;
1924
1925 return Count;
1926 }
1927
1928 /// Return the number of operand bundles with the tag ID attached to
1929 /// this instruction.
1930 unsigned countOperandBundlesOfType(uint32_t ID) const {
1931 unsigned Count = 0;
1932 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1933 if (getOperandBundleAt(i).getTagID() == ID)
1934 Count++;
1935
1936 return Count;
1937 }
1938
1939 /// Return an operand bundle by name, if present.
1940 ///
1941 /// It is an error to call this for operand bundle types that may have
1942 /// multiple instances on the same instruction.
1943 Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
1944 assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!");
1945
1946 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1947 OperandBundleUse U = getOperandBundleAt(i);
1948 if (U.getTagName() == Name)
1949 return U;
1950 }
1951
1952 return None;
1953 }
1954
1955 /// Return an operand bundle by tag ID, if present.
1956 ///
1957 /// It is an error to call this for operand bundle types that may have
1958 /// multiple instances on the same instruction.
1959 Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
1960 assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!");
1961
1962 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1963 OperandBundleUse U = getOperandBundleAt(i);
1964 if (U.getTagID() == ID)
1965 return U;
1966 }
1967
1968 return None;
1969 }
1970
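// Sketch of a tag-ID lookup (assumes `CB` is a `CallBase &`). A "funclet"
// bundle carries exactly one input, the enclosing funclet pad:
//
// \code
//   if (Optional<OperandBundleUse> Funclet =
//           CB.getOperandBundle(LLVMContext::OB_funclet))
//     Value *Pad = Funclet->Inputs[0];
// \endcode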
1971 /// Return the list of operand bundles attached to this instruction as
1972 /// a vector of OperandBundleDefs.
1973 ///
1974 /// This function copies the OperandBundleUse instances associated with this
1975 /// OperandBundleUser to a vector of OperandBundleDefs. Note:
1976 /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
1977 /// representations of operand bundles (see documentation above).
1978 void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const;
1979
1980 /// Return the operand bundle for the operand at index OpIdx.
1981 ///
1982 /// It is an error to call this with an OpIdx that does not correspond to a
1983 /// bundle operand.
1984 OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
1985 return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
1986 }
1987
1988 /// Return true if this operand bundle user has operand bundles that
1989 /// may read from the heap.
1990 bool hasReadingOperandBundles() const {
1991 // Implementation note: this is a conservative implementation of operand
1992 // bundle semantics, where *any* operand bundle forces a callsite to be at
1993 // least readonly.
1994 return hasOperandBundles();
1995 }
1996
1997 /// Return true if this operand bundle user has operand bundles that
1998 /// may write to the heap.
1999 bool hasClobberingOperandBundles() const {
2000 for (auto &BOI : bundle_op_infos()) {
2001 if (BOI.Tag->second == LLVMContext::OB_deopt ||
2002 BOI.Tag->second == LLVMContext::OB_funclet)
2003 continue;
2004
2005 // This instruction has an operand bundle that is not known to us.
2006 // Assume the worst.
2007 return true;
2008 }
2009
2010 return false;
2011 }
2012
2013 /// Return true if the bundle operand at index \p OpIdx has the
2014 /// attribute \p A.
2015 bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
2016 auto &BOI = getBundleOpInfoForOperand(OpIdx);
2017 auto OBU = operandBundleFromBundleOpInfo(BOI);
2018 return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
2019 }
2020
2021 /// Return true if \p Other has the same sequence of operand bundle
2022 /// tags with the same number of operands on each one of them as this
2023 /// OperandBundleUser.
2024 bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
2025 if (getNumOperandBundles() != Other.getNumOperandBundles())
2026 return false;
2027
2028 return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
2029 Other.bundle_op_info_begin());
2030 }
2031
2032 /// Return true if this operand bundle user contains operand bundles
2033 /// with tags other than those specified in \p IDs.
2034 bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
2035 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
2036 uint32_t ID = getOperandBundleAt(i).getTagID();
2037 if (!is_contained(IDs, ID))
2038 return true;
2039 }
2040 return false;
2041 }
2042
2043 /// Is the function attribute S disallowed by some operand bundle on
2044 /// this operand bundle user?
2045 bool isFnAttrDisallowedByOpBundle(StringRef S) const {
2046 // Operand bundles only possibly disallow readnone, readonly and argmemonly
2047 // attributes. All String attributes are fine.
2048 return false;
2049 }
2050
2051 /// Is the function attribute A disallowed by some operand bundle on
2052 /// this operand bundle user?
2053 bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
2054 switch (A) {
2055 default:
2056 return false;
2057
2058 case Attribute::InaccessibleMemOrArgMemOnly:
2059 return hasReadingOperandBundles();
2060
2061 case Attribute::InaccessibleMemOnly:
2062 return hasReadingOperandBundles();
2063
2064 case Attribute::ArgMemOnly:
2065 return hasReadingOperandBundles();
2066
2067 case Attribute::ReadNone:
2068 return hasReadingOperandBundles();
2069
2070 case Attribute::ReadOnly:
2071 return hasClobberingOperandBundles();
2072 }
2073
2074 llvm_unreachable("switch has a default case!");
2075 }
2076
2077 /// Used to keep track of an operand bundle. See the main comment on
2078 /// OperandBundleUser above.
2079 struct BundleOpInfo {
2080 /// The operand bundle tag, interned by
2081 /// LLVMContextImpl::getOrInsertBundleTag.
2082 StringMapEntry<uint32_t> *Tag;
2083
2084 /// The index in the Use& vector where operands for this operand
2085 /// bundle start.
2086 uint32_t Begin;
2087
2088 /// The index in the Use& vector where operands for this operand
2089 /// bundle ends.
2090 uint32_t End;
2091
2092 bool operator==(const BundleOpInfo &Other) const {
2093 return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
2094 }
2095 };
2096
2097 /// Simple helper function to map a BundleOpInfo to an
2098 /// OperandBundleUse.
2099 OperandBundleUse
2100 operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
2101 auto begin = op_begin();
2102 ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
2103 return OperandBundleUse(BOI.Tag, Inputs);
2104 }
2105
2106 using bundle_op_iterator = BundleOpInfo *;
2107 using const_bundle_op_iterator = const BundleOpInfo *;
2108
2109 /// Return the start of the list of BundleOpInfo instances associated
2110 /// with this OperandBundleUser.
2111 ///
2112 /// OperandBundleUser uses the descriptor area co-allocated with the host User
2113 /// to store some meta information about which operands are "normal" operands,
2114 /// and which ones belong to some operand bundle.
2115 ///
2116 /// The layout of an operand bundle user is
2117 ///
2118 /// +-----------uint32_t End-------------------------------------+
2119 /// | |
2120 /// | +--------uint32_t Begin--------------------+ |
2121 /// | | | |
2122 /// ^ ^ v v
2123 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2124 /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
2125 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2126 /// v v ^ ^
2127 /// | | | |
2128 /// | +--------uint32_t Begin------------+ |
2129 /// | |
2130 /// +-----------uint32_t End-----------------------------+
2131 ///
2132 ///
2133 /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
2134 /// list. These descriptions are installed and managed by this class, and
2135 /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
2136 ///
2137 /// DU is an additional descriptor installed by User's 'operator new' to keep
2138 /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
2139 /// access or modify DU in any way, it's an implementation detail private to
2140 /// User.
2141 ///
2142 /// The regular Use& vector for the User starts at U0. The operand bundle
2143 /// uses are part of the Use& vector, just like normal uses. In the diagram
2144 /// above, the operand bundle uses start at BOI0_U0. Each instance of
2145 /// BundleOpInfo has information about a contiguous set of uses constituting
2146 /// an operand bundle, and the total set of operand bundle uses themselves
2147 /// form a contiguous set of uses (i.e. there are no gaps between uses
2148 /// corresponding to individual operand bundles).
2149 ///
2150 /// This class does not know the location of the set of operand bundle uses
2151 /// within the use list -- that is decided by the User using this class via
2152 /// the BeginIdx argument in populateBundleOperandInfos.
2153 ///
2154 /// Currently operand bundle users with hung-off operands are not supported.
2155 bundle_op_iterator bundle_op_info_begin() {
2156 if (!hasDescriptor())
2157 return nullptr;
2158
2159 uint8_t *BytesBegin = getDescriptor().begin();
2160 return reinterpret_cast<bundle_op_iterator>(BytesBegin);
2161 }
2162
2163 /// Return the start of the list of BundleOpInfo instances associated
2164 /// with this OperandBundleUser.
2165 const_bundle_op_iterator bundle_op_info_begin() const {
2166 auto *NonConstThis = const_cast<CallBase *>(this);
2167 return NonConstThis->bundle_op_info_begin();
2168 }
2169
2170 /// Return the end of the list of BundleOpInfo instances associated
2171 /// with this OperandBundleUser.
2172 bundle_op_iterator bundle_op_info_end() {
2173 if (!hasDescriptor())
2174 return nullptr;
2175
2176 uint8_t *BytesEnd = getDescriptor().end();
2177 return reinterpret_cast<bundle_op_iterator>(BytesEnd);
2178 }
2179
2180 /// Return the end of the list of BundleOpInfo instances associated
2181 /// with this OperandBundleUser.
2182 const_bundle_op_iterator bundle_op_info_end() const {
2183 auto *NonConstThis = const_cast<CallBase *>(this);
2184 return NonConstThis->bundle_op_info_end();
2185 }
2186
2187 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2188 iterator_range<bundle_op_iterator> bundle_op_infos() {
2189 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2190 }
2191
2192 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2193 iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
2194 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2195 }
2196
2197 /// Populate the BundleOpInfo instances and the Use& vector from \p
2198 /// Bundles. Return the op_iterator pointing to the Use& one past the
2199 /// last bundle operand use.
2200 ///
2201 /// Each \p OperandBundleDef instance is tracked by a BundleOpInfo
2202 /// instance allocated in this User's descriptor.
2203 op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
2204 const unsigned BeginIndex);
2205
2206public:
2207 /// Return the BundleOpInfo for the operand at index OpIdx.
2208 ///
2209 /// It is an error to call this with an OpIdx that does not correspond to a
2210 /// bundle operand.
2211 BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx);
2212 const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
2213 return const_cast<CallBase *>(this)->getBundleOpInfoForOperand(OpIdx);
2214 }
2215
2216protected:
2217 /// Return the total number of values used in \p Bundles.
2218 static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
2219 unsigned Total = 0;
2220 for (auto &B : Bundles)
2221 Total += B.input_size();
2222 return Total;
2223 }
2224
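// Sketch of the use-count arithmetic a subclass performs when sizing a call
// with bundles: all arguments, then every bundle input, then the callee.
// `NumArgs` and `Bundles` are assumed to be in scope.
//
// \code
//   unsigned NumOperands = NumArgs + CountBundleInputs(Bundles) + 1;
// \endcode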
2225 /// @}
2226 // End of operand bundle API.
2227
2228private:
2229 bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
2230 bool hasFnAttrOnCalledFunction(StringRef Kind) const;
2231
2232 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
2233 if (Attrs.hasFnAttribute(Kind))
2234 return true;
2235
2236 // Operand bundles override attributes on the called function, but don't
2237 // override attributes directly present on the call instruction.
2238 if (isFnAttrDisallowedByOpBundle(Kind))
2239 return false;
2240
2241 return hasFnAttrOnCalledFunction(Kind);
2242 }
2243
2244 /// Determine whether the return value has the given attribute. Supports
2245 /// Attribute::AttrKind and StringRef as \p AttrKind types.
2246 template <typename AttrKind> bool hasRetAttrImpl(AttrKind Kind) const {
2247 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
2248 return true;
2249
2250 // Look at the callee, if available.
2251 if (const Function *F = getCalledFunction())
2252 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
2253 return false;
2254 }
2255};
2256
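// The lookup order hasFnAttrImpl encodes, as a sketch (assumes `CB` is a
// `CallBase &`): an attribute directly on the call site wins; otherwise an
// operand bundle may veto it; otherwise the callee's attributes are used.
//
// \code
//   bool ReadNone = CB.hasFnAttr(Attribute::ReadNone);
// \endcode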
2257template <>
2258struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
2259
2260 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)
2261
2262//===----------------------------------------------------------------------===//
2263// FuncletPadInst Class
2264//===----------------------------------------------------------------------===//
2265class FuncletPadInst : public Instruction {
2266private:
2267 FuncletPadInst(const FuncletPadInst &CPI);
2268
2269 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2270 ArrayRef<Value *> Args, unsigned Values,
2271 const Twine &NameStr, Instruction *InsertBefore);
2272 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2273 ArrayRef<Value *> Args, unsigned Values,
2274 const Twine &NameStr, BasicBlock *InsertAtEnd);
2275
2276 void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
2277
2278protected:
2279 // Note: Instruction needs to be a friend here to call cloneImpl.
2280 friend class Instruction;
2281 friend class CatchPadInst;
2282 friend class CleanupPadInst;
2283
2284 FuncletPadInst *cloneImpl() const;
2285
2286public:
2287 /// Provide fast operand accessors
2288 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2289
2290 /// getNumArgOperands - Return the number of funcletpad arguments.
2291 ///
2292 unsigned getNumArgOperands() const { return getNumOperands() - 1; }
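  // Illustrative sketch: the "- 1" excludes the trailing parent-pad
  // operand. For IR such as
  //
  //   %cp = catchpad within %cs [i8* %a, i8* %b]
  //
  // getNumOperands() is 3 (%a, %b, and %cs) while getNumArgOperands() is 2.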
2293
2294 /// Convenience accessors
2295
2296 /// Return the outer EH-pad this funclet is nested within.
2297 ///
2298 /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
2299 /// is a CatchPadInst.
2300 Value *getParentPad() const { return Op<-1>(); }
2301 void setParentPad(Value *ParentPad) {
2302    assert(ParentPad);
2303 Op<-1>() = ParentPad;
2304 }
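  // Illustrative sketch (hypothetical CatchPadInst *CPI): the parent pad
  // occupies the last operand slot, hence the Op<-1>() accesses above:
  //
  //   Value *Parent = CPI->getParentPad(); // the owning CatchSwitchInst
  //   CPI->setParentPad(Parent);           // asserts Parent is non-null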
2305
2306 /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
2307 ///
2308 Value *getArgOperand(unsigned i) const { return getOperand(i); }
2309 void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
2310
2311 /// arg_operands - iteration adapter for range-for loops.
2312 op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
2313
2314 /// arg_operands - iteration adapter for range-for loops.
2315 const_op_range arg_operands() const {
2316 return const_op_range(op_begin(), op_end() - 1);
2317 }
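  // Illustrative sketch (hypothetical FuncletPadInst *FPI): both overloads
  // stop one operand short of op_end(), skipping the parent pad, so this
  // visits the funclet arguments only:
  //
  //   for (Value *Arg : FPI->arg_operands())
  //     Arg->print(errs());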
2318
2319 // Methods for support type inquiry through isa, cast, and dyn_cast:
2320 static bool classof(const Instruction *I) { return I->isFuncletPad(); }
2321 static bool classof(const Value *V) {
2322 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2323 }
2324};
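// Illustrative sketch (hypothetical Instruction *I and helper visitPad):
// classof drives isa/dyn_cast, so a generic instruction pointer can be
// narrowed to either funclet pad kind:
//
//   if (auto *Pad = dyn_cast<FuncletPadInst>(I)) // CatchPad or CleanupPad
//     visitPad(Pad);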
2325
2326template <>
2327struct OperandTraits<FuncletPadInst>
2328 : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
2329
2330DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
2331
2332} // end namespace llvm
2333
2334#endif // LLVM_IR_INSTRTYPES_H