Bug Summary

File: llvm/lib/Target/X86/X86PartialReduction.cpp
Warning: line 289, column 26 — Division by zero

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86PartialReduction.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem 
/usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-26-161721-17566-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/lib/Target/X86/X86PartialReduction.cpp

/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/lib/Target/X86/X86PartialReduction.cpp

1//===-- X86PartialReduction.cpp -------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass looks for add instructions used by a horizontal reduction to see
10// if we might be able to use pmaddwd or psadbw. Some cases of this require
11// cross basic block knowledge and can't be done in SelectionDAG.
12//
13//===----------------------------------------------------------------------===//
14
15#include "X86.h"
16#include "llvm/Analysis/ValueTracking.h"
17#include "llvm/CodeGen/TargetPassConfig.h"
18#include "llvm/IR/Constants.h"
19#include "llvm/IR/Instructions.h"
20#include "llvm/IR/IntrinsicsX86.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/Operator.h"
23#include "llvm/Pass.h"
24#include "X86TargetMachine.h"
25
26using namespace llvm;
27
#define DEBUG_TYPE "x86-partial-reduction"
29
30namespace {
31
32class X86PartialReduction : public FunctionPass {
33 const DataLayout *DL;
34 const X86Subtarget *ST;
35
36public:
37 static char ID; // Pass identification, replacement for typeid.
38
39 X86PartialReduction() : FunctionPass(ID) { }
40
41 bool runOnFunction(Function &Fn) override;
42
43 void getAnalysisUsage(AnalysisUsage &AU) const override {
44 AU.setPreservesCFG();
45 }
46
47 StringRef getPassName() const override {
48 return "X86 Partial Reduction";
49 }
50
51private:
52 bool tryMAddReplacement(Instruction *Op);
53 bool trySADReplacement(Instruction *Op);
54};
55}
56
/// Factory used by the X86 target to add this pass to the pipeline.
FunctionPass *llvm::createX86PartialReductionPass() {
  return new X86PartialReduction();
}

// Pass identification token; its address (not value) identifies the pass.
char X86PartialReduction::ID = 0;
62
63INITIALIZE_PASS(X86PartialReduction, DEBUG_TYPE,static void *initializeX86PartialReductionPassOnce(PassRegistry
&Registry) { PassInfo *PI = new PassInfo( "X86 Partial Reduction"
, "x86-partial-reduction", &X86PartialReduction::ID, PassInfo
::NormalCtor_t(callDefaultCtor<X86PartialReduction>), false
, false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeX86PartialReductionPassFlag; void llvm
::initializeX86PartialReductionPass(PassRegistry &Registry
) { llvm::call_once(InitializeX86PartialReductionPassFlag, initializeX86PartialReductionPassOnce
, std::ref(Registry)); }
64 "X86 Partial Reduction", false, false)static void *initializeX86PartialReductionPassOnce(PassRegistry
&Registry) { PassInfo *PI = new PassInfo( "X86 Partial Reduction"
, "x86-partial-reduction", &X86PartialReduction::ID, PassInfo
::NormalCtor_t(callDefaultCtor<X86PartialReduction>), false
, false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeX86PartialReductionPassFlag; void llvm
::initializeX86PartialReductionPass(PassRegistry &Registry
) { llvm::call_once(InitializeX86PartialReductionPassFlag, initializeX86PartialReductionPassOnce
, std::ref(Registry)); }
65
// Try to rewrite a vXi32 multiply that feeds a horizontal add reduction into
// an even/odd shuffle + add form that SelectionDAG pattern-matches to pmaddwd
// (multiply i16 pairs, add adjacent products, halving the vector width).
// Returns true if the IR was changed.
bool X86PartialReduction::tryMAddReplacement(Instruction *Op) {
  // pmaddwd requires SSE2.
  if (!ST->hasSSE2())
    return false;

  // Need at least 8 elements.
  if (cast<FixedVectorType>(Op->getType())->getNumElements() < 8)
    return false;

  // Element type should be i32.
  if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
    return false;

  auto *Mul = dyn_cast<BinaryOperator>(Op);
  if (!Mul || Mul->getOpcode() != Instruction::Mul)
    return false;

  Value *LHS = Mul->getOperand(0);
  Value *RHS = Mul->getOperand(1);

  // LHS and RHS should be only used once, or if they are the same then only
  // used twice. Only check this when SSE4.1 is enabled and we have zext/sext
  // instructions, otherwise we use punpck to emulate zero extend in stages.
  // The truncation we need to do likely won't introduce new instructions in
  // that case.
  if (ST->hasSSE41()) {
    if (LHS == RHS) {
      if (!isa<Constant>(LHS) && !LHS->hasNUses(2))
        return false;
    } else {
      if (!isa<Constant>(LHS) && !LHS->hasOneUse())
        return false;
      if (!isa<Constant>(RHS) && !RHS->hasOneUse())
        return false;
    }
  }

  // Returns true if Op can be narrowed to 16 bits without changing the
  // product's value.
  auto CanShrinkOp = [&](Value *Op) {
    // A value is "freely truncatable" if it is a sext/zext from <= 16 bits
    // in the same block as the multiply (so SelectionDAG sees it), or a
    // constant.
    auto IsFreeTruncation = [&](Value *Op) {
      if (auto *Cast = dyn_cast<CastInst>(Op)) {
        if (Cast->getParent() == Mul->getParent() &&
            (Cast->getOpcode() == Instruction::SExt ||
             Cast->getOpcode() == Instruction::ZExt) &&
            Cast->getOperand(0)->getType()->getScalarSizeInBits() <= 16)
          return true;
      }

      return isa<Constant>(Op);
    };

    // If the operation can be freely truncated and has enough sign bits we
    // can shrink.
    if (IsFreeTruncation(Op) &&
        ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
      return true;

    // SelectionDAG has limited support for truncating through an add or sub if
    // the inputs are freely truncatable.
    if (auto *BO = dyn_cast<BinaryOperator>(Op)) {
      if (BO->getParent() == Mul->getParent() &&
          IsFreeTruncation(BO->getOperand(0)) &&
          IsFreeTruncation(BO->getOperand(1)) &&
          ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
        return true;
    }

    return false;
  };

  // Both Ops need to be shrinkable.
  // NOTE(review): as written, the condition only requires ONE operand to be
  // shrinkable (it fails only when neither is) — confirm whether the comment
  // or the condition reflects the intent.
  if (!CanShrinkOp(LHS) && !CanShrinkOp(RHS))
    return false;

  IRBuilder<> Builder(Mul);

  auto *MulTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = MulTy->getNumElements();

  // Extract even elements and odd elements and add them together. This will
  // be pattern matched by SelectionDAG to pmaddwd. This instruction will be
  // half the original width.
  SmallVector<int, 16> EvenMask(NumElts / 2);
  SmallVector<int, 16> OddMask(NumElts / 2);
  for (int i = 0, e = NumElts / 2; i != e; ++i) {
    EvenMask[i] = i * 2;
    OddMask[i] = i * 2 + 1;
  }
  // Creating a new mul so the replaceAllUsesWith below doesn't replace the
  // uses in the shuffles we're creating.
  Value *NewMul = Builder.CreateMul(Mul->getOperand(0), Mul->getOperand(1));
  Value *EvenElts = Builder.CreateShuffleVector(NewMul, NewMul, EvenMask);
  Value *OddElts = Builder.CreateShuffleVector(NewMul, NewMul, OddMask);
  Value *MAdd = Builder.CreateAdd(EvenElts, OddElts);

  // Concatenate zeroes to extend back to the original type.
  SmallVector<int, 32> ConcatMask(NumElts);
  std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
  Value *Zero = Constant::getNullValue(MAdd->getType());
  Value *Concat = Builder.CreateShuffleVector(MAdd, Zero, ConcatMask);

  Mul->replaceAllUsesWith(Concat);
  Mul->eraseFromParent();

  return true;
}
169
170bool X86PartialReduction::trySADReplacement(Instruction *Op) {
171 if (!ST->hasSSE2())
12
Calling 'X86Subtarget::hasSSE2'
14
Returning from 'X86Subtarget::hasSSE2'
15
Taking false branch
172 return false;
173
174 // TODO: There's nothing special about i32, any integer type above i16 should
175 // work just as well.
176 if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
16
The object is a 'VectorType'
17
Assuming the condition is false
18
Taking false branch
177 return false;
178
179 // Operand should be a select.
180 auto *SI = dyn_cast<SelectInst>(Op);
19
Assuming 'Op' is a 'SelectInst'
181 if (!SI
19.1
'SI' is non-null
19.1
'SI' is non-null
19.1
'SI' is non-null
)
20
Taking false branch
182 return false;
183
184 // Select needs to implement absolute value.
185 Value *LHS, *RHS;
186 auto SPR = matchSelectPattern(SI, LHS, RHS);
187 if (SPR.Flavor != SPF_ABS)
21
Assuming field 'Flavor' is equal to SPF_ABS
22
Taking false branch
188 return false;
189
190 // Need a subtract of two values.
191 auto *Sub = dyn_cast<BinaryOperator>(LHS);
23
Assuming 'LHS' is a 'BinaryOperator'
192 if (!Sub
23.1
'Sub' is non-null
23.1
'Sub' is non-null
23.1
'Sub' is non-null
|| Sub->getOpcode() != Instruction::Sub)
24
Assuming the condition is false
25
Taking false branch
193 return false;
194
195 // Look for zero extend from i8.
196 auto getZeroExtendedVal = [](Value *Op) -> Value * {
197 if (auto *ZExt = dyn_cast<ZExtInst>(Op))
27
Assuming 'ZExt' is non-null
28
Taking true branch
40
Assuming 'ZExt' is non-null
41
Taking true branch
198 if (cast<VectorType>(ZExt->getOperand(0)->getType())
29
The object is a 'VectorType'
30
Assuming the condition is true
31
Taking true branch
42
The object is a 'VectorType'
43
Assuming the condition is true
44
Taking true branch
199 ->getElementType()
200 ->isIntegerTy(8))
201 return ZExt->getOperand(0);
32
Calling 'UnaryInstruction::getOperand'
36
Returning from 'UnaryInstruction::getOperand'
37
Returning pointer, which participates in a condition later
45
Calling 'UnaryInstruction::getOperand'
49
Returning from 'UnaryInstruction::getOperand'
50
Returning pointer, which participates in a condition later
202
203 return nullptr;
204 };
205
206 // Both operands of the subtract should be extends from vXi8.
207 Value *Op0 = getZeroExtendedVal(Sub->getOperand(0));
26
Calling 'operator()'
38
Returning from 'operator()'
208 Value *Op1 = getZeroExtendedVal(Sub->getOperand(1));
39
Calling 'operator()'
51
Returning from 'operator()'
209 if (!Op0
51.1
'Op0' is non-null
51.1
'Op0' is non-null
51.1
'Op0' is non-null
|| !Op1
51.2
'Op1' is non-null
51.2
'Op1' is non-null
51.2
'Op1' is non-null
)
52
Taking false branch
210 return false;
211
212 IRBuilder<> Builder(SI);
213
214 auto *OpTy = cast<FixedVectorType>(Op->getType());
53
The object is a 'FixedVectorType'
215 unsigned NumElts = OpTy->getNumElements();
216
217 unsigned IntrinsicNumElts;
218 Intrinsic::ID IID;
219 if (ST->hasBWI() && NumElts >= 64) {
54
Assuming the condition is false
220 IID = Intrinsic::x86_avx512_psad_bw_512;
221 IntrinsicNumElts = 64;
222 } else if (ST->hasAVX2() && NumElts >= 32) {
223 IID = Intrinsic::x86_avx2_psad_bw;
224 IntrinsicNumElts = 32;
225 } else {
226 IID = Intrinsic::x86_sse2_psad_bw;
227 IntrinsicNumElts = 16;
228 }
229
230 Function *PSADBWFn = Intrinsic::getDeclaration(SI->getModule(), IID);
231
232 if (NumElts < 16) {
55
Assuming 'NumElts' is >= 16
56
Taking false branch
233 // Pad input with zeroes.
234 SmallVector<int, 32> ConcatMask(16);
235 for (unsigned i = 0; i != NumElts; ++i)
236 ConcatMask[i] = i;
237 for (unsigned i = NumElts; i != 16; ++i)
238 ConcatMask[i] = (i % NumElts) + NumElts;
239
240 Value *Zero = Constant::getNullValue(Op0->getType());
241 Op0 = Builder.CreateShuffleVector(Op0, Zero, ConcatMask);
242 Op1 = Builder.CreateShuffleVector(Op1, Zero, ConcatMask);
243 NumElts = 16;
244 }
245
246 // Intrinsics produce vXi64 and need to be casted to vXi32.
247 auto *I32Ty =
248 FixedVectorType::get(Builder.getInt32Ty(), IntrinsicNumElts / 4);
249
250 assert(NumElts % IntrinsicNumElts == 0 && "Unexpected number of elements!")((NumElts % IntrinsicNumElts == 0 && "Unexpected number of elements!"
) ? static_cast<void> (0) : __assert_fail ("NumElts % IntrinsicNumElts == 0 && \"Unexpected number of elements!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/lib/Target/X86/X86PartialReduction.cpp"
, 250, __PRETTY_FUNCTION__))
;
57
Assuming the condition is true
58
'?' condition is true
251 unsigned NumSplits = NumElts / IntrinsicNumElts;
252
253 // First collect the pieces we need.
254 SmallVector<Value *, 4> Ops(NumSplits);
255 for (unsigned i = 0; i != NumSplits; ++i) {
59
Assuming 'i' is not equal to 'NumSplits'
60
Loop condition is true. Entering loop body
61
Assuming 'i' is equal to 'NumSplits'
62
Loop condition is false. Execution continues on line 264
256 SmallVector<int, 64> ExtractMask(IntrinsicNumElts);
257 std::iota(ExtractMask.begin(), ExtractMask.end(), i * IntrinsicNumElts);
258 Value *ExtractOp0 = Builder.CreateShuffleVector(Op0, Op0, ExtractMask);
259 Value *ExtractOp1 = Builder.CreateShuffleVector(Op1, Op0, ExtractMask);
260 Ops[i] = Builder.CreateCall(PSADBWFn, {ExtractOp0, ExtractOp1});
261 Ops[i] = Builder.CreateBitCast(Ops[i], I32Ty);
262 }
263
264 assert(isPowerOf2_32(NumSplits) && "Expected power of 2 splits")((isPowerOf2_32(NumSplits) && "Expected power of 2 splits"
) ? static_cast<void> (0) : __assert_fail ("isPowerOf2_32(NumSplits) && \"Expected power of 2 splits\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/lib/Target/X86/X86PartialReduction.cpp"
, 264, __PRETTY_FUNCTION__))
;
63
'?' condition is true
265 unsigned Stages = Log2_32(NumSplits);
266 for (unsigned s = Stages; s > 0; --s) {
64
Assuming 's' is <= 0
65
Loop condition is false. Execution continues on line 278
267 unsigned NumConcatElts =
268 cast<FixedVectorType>(Ops[0]->getType())->getNumElements() * 2;
269 for (unsigned i = 0; i != 1U << (s - 1); ++i) {
270 SmallVector<int, 64> ConcatMask(NumConcatElts);
271 std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
272 Ops[i] = Builder.CreateShuffleVector(Ops[i*2], Ops[i*2+1], ConcatMask);
273 }
274 }
275
276 // At this point the final value should be in Ops[0]. Now we need to adjust
277 // it to the final original type.
278 NumElts = cast<FixedVectorType>(OpTy)->getNumElements();
66
'OpTy' is a 'FixedVectorType'
279 if (NumElts
66.1
'NumElts' is not equal to 2
66.1
'NumElts' is not equal to 2
66.1
'NumElts' is not equal to 2
== 2) {
67
Taking false branch
280 // Extract down to 2 elements.
281 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{0, 1});
282 } else if (NumElts
67.1
'NumElts' is >= 8
67.1
'NumElts' is >= 8
67.1
'NumElts' is >= 8
>= 8) {
68
Taking true branch
283 SmallVector<int, 32> ConcatMask(NumElts);
284 unsigned SubElts =
70
'SubElts' initialized here
285 cast<FixedVectorType>(Ops[0]->getType())->getNumElements();
69
The object is a 'FixedVectorType'
286 for (unsigned i = 0; i != SubElts; ++i)
71
Assuming 'i' is equal to 'SubElts'
72
Loop condition is false. Execution continues on line 288
287 ConcatMask[i] = i;
288 for (unsigned i = SubElts; i
72.1
'i' is not equal to 'NumElts'
72.1
'i' is not equal to 'NumElts'
72.1
'i' is not equal to 'NumElts'
!= NumElts; ++i)
73
Loop condition is true. Entering loop body
289 ConcatMask[i] = (i % SubElts) + SubElts;
74
Division by zero
290
291 Value *Zero = Constant::getNullValue(Ops[0]->getType());
292 Ops[0] = Builder.CreateShuffleVector(Ops[0], Zero, ConcatMask);
293 }
294
295 SI->replaceAllUsesWith(Ops[0]);
296 SI->eraseFromParent();
297
298 return true;
299}
300
// Walk backwards from the ExtractElementInst and determine if it is the end of
// a horizontal reduction. Return the input to the reduction if we find one.
//
// The expected shape is a log2(NumElems)-deep "pyramid" of adds, where each
// stage adds a vector to a shuffle of itself that moves the upper half of the
// still-live lanes into the lower half.
static Value *matchAddReduction(const ExtractElementInst &EE) {
  // Make sure we're extracting index 0.
  auto *Index = dyn_cast<ConstantInt>(EE.getIndexOperand());
  if (!Index || !Index->isNullValue())
    return nullptr;

  // The extracted vector must come from a single-use add.
  const auto *BO = dyn_cast<BinaryOperator>(EE.getVectorOperand());
  if (!BO || BO->getOpcode() != Instruction::Add || !BO->hasOneUse())
    return nullptr;

  unsigned NumElems = cast<FixedVectorType>(BO->getType())->getNumElements();
  // Ensure the reduction size is a power of 2.
  if (!isPowerOf2_32(NumElems))
    return nullptr;

  // Walk down the pyramid one stage per iteration, narrowing Op toward the
  // reduction input.
  const Value *Op = BO;
  unsigned Stages = Log2_32(NumElems);
  for (unsigned i = 0; i != Stages; ++i) {
    const auto *BO = dyn_cast<BinaryOperator>(Op);
    if (!BO || BO->getOpcode() != Instruction::Add)
      return nullptr;

    // If this isn't the first add, then it should only have 2 users, the
    // shuffle and another add which we checked in the previous iteration.
    if (i != 0 && !BO->hasNUses(2))
      return nullptr;

    Value *LHS = BO->getOperand(0);
    Value *RHS = BO->getOperand(1);

    // One operand is the shuffle; the other continues the pyramid.
    auto *Shuffle = dyn_cast<ShuffleVectorInst>(LHS);
    if (Shuffle) {
      Op = RHS;
    } else {
      Shuffle = dyn_cast<ShuffleVectorInst>(RHS);
      Op = LHS;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the bin op.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return nullptr;

    // Verify the shuffle has the expected (at this stage of the pyramid) mask:
    // lane j must read from lane MaskEnd + j, i.e. the upper half of the
    // 2*MaskEnd still-live lanes. Lanes past MaskEnd are not checked.
    unsigned MaskEnd = 1 << i;
    for (unsigned Index = 0; Index < MaskEnd; ++Index)
      if (Shuffle->getMaskValue(Index) != (int)(MaskEnd + Index))
        return nullptr;
  }

  // Op is now the value feeding the topmost stage: the reduction input.
  return const_cast<Value *>(Op);
}
355
356// See if this BO is reachable from this Phi by walking forward through single
357// use BinaryOperators with the same opcode. If we get back then we know we've
358// found a loop and it is safe to step through this Add to find more leaves.
359static bool isReachableFromPHI(PHINode *Phi, BinaryOperator *BO) {
360 // The PHI itself should only have one use.
361 if (!Phi->hasOneUse())
362 return false;
363
364 Instruction *U = cast<Instruction>(*Phi->user_begin());
365 if (U == BO)
366 return true;
367
368 while (U->hasOneUse() && U->getOpcode() == BO->getOpcode())
369 U = cast<Instruction>(*U->user_begin());
370
371 return U == BO;
372}
373
// Collect all the leaves of the tree of adds that feeds into the horizontal
// reduction. Root is the Value that is used by the horizontal reduction.
// We look through single use phis, single use adds, or adds that are used by
// a phi that forms a loop with the add.
static void collectLeaves(Value *Root, SmallVectorImpl<Instruction *> &Leaves) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *, 8> Worklist;
  Worklist.push_back(Root);

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    if (auto *PN = dyn_cast<PHINode>(V)) {
      // PHI node should have single use unless it is the root node, then it
      // has 2 uses.
      // NOTE(review): this is `break`, not `continue` — an over-used PHI
      // aborts the entire walk rather than just skipping this node. Looks
      // deliberate (the tree shape is no longer simple), but confirm.
      if (!PN->hasNUses(PN == Root ? 2 : 1))
        break;

      // Push incoming values to the worklist.
      for (Value *InV : PN->incoming_values())
        Worklist.push_back(InV);

      continue;
    }

    if (auto *BO = dyn_cast<BinaryOperator>(V)) {
      if (BO->getOpcode() == Instruction::Add) {
        // Simple case. Single use, just push its operands to the worklist.
        if (BO->hasNUses(BO == Root ? 2 : 1)) {
          for (Value *Op : BO->operands())
            Worklist.push_back(Op);
          continue;
        }

        // If there is additional use, make sure it is an unvisited phi that
        // gets us back to this node.
        if (BO->hasNUses(BO == Root ? 3 : 2)) {
          PHINode *PN = nullptr;
          // NOTE(review): this scans Root's users, not BO's — presumably the
          // loop PHI is expected among Root's users; verify against callers.
          for (auto *U : Root->users())
            if (auto *P = dyn_cast<PHINode>(U))
              if (!Visited.count(P))
                PN = P;

          // If we didn't find a 2-input PHI then this isn't a case we can
          // handle.
          if (!PN || PN->getNumIncomingValues() != 2)
            continue;

          // Walk forward from this phi to see if it reaches back to this add.
          if (!isReachableFromPHI(PN, BO))
            continue;

          // The phi forms a loop with this Add, push its operands.
          // Control then falls through to the leaf check below, where the
          // use-count test rejects this BO, so it is not also made a leaf.
          for (Value *Op : BO->operands())
            Worklist.push_back(Op);
        }
      }
    }

    // Not an add or phi, make it a leaf (subject to the same use-count rule
    // as above: one use, or two when it is the root).
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (!V->hasNUses(I == Root ? 2 : 1))
        continue;

      // Add this as a leaf.
      Leaves.push_back(I);
    }
  }
}
445
446bool X86PartialReduction::runOnFunction(Function &F) {
447 if (skipFunction(F))
1
Assuming the condition is false
2
Taking false branch
448 return false;
449
450 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
451 if (!TPC)
3
Assuming 'TPC' is non-null
4
Taking false branch
452 return false;
453
454 auto &TM = TPC->getTM<X86TargetMachine>();
455 ST = TM.getSubtargetImpl(F);
456
457 DL = &F.getParent()->getDataLayout();
458
459 bool MadeChange = false;
460 for (auto &BB : F) {
461 for (auto &I : BB) {
462 auto *EE = dyn_cast<ExtractElementInst>(&I);
5
Assuming the object is a 'ExtractElementInst'
463 if (!EE
5.1
'EE' is non-null
5.1
'EE' is non-null
5.1
'EE' is non-null
)
6
Taking false branch
464 continue;
465
466 // First find a reduction tree.
467 // FIXME: Do we need to handle other opcodes than Add?
468 Value *Root = matchAddReduction(*EE);
469 if (!Root
6.1
'Root' is non-null
6.1
'Root' is non-null
6.1
'Root' is non-null
)
7
Taking false branch
470 continue;
471
472 SmallVector<Instruction *, 8> Leaves;
473 collectLeaves(Root, Leaves);
474
475 for (Instruction *I : Leaves) {
8
Assuming '__begin3' is not equal to '__end3'
476 if (tryMAddReplacement(I)) {
9
Taking false branch
477 MadeChange = true;
478 continue;
479 }
480
481 // Don't do SAD matching on the root node. SelectionDAG already
482 // has support for that and currently generates better code.
483 if (I != Root && trySADReplacement(I))
10
Assuming 'I' is not equal to 'Root'
11
Calling 'X86PartialReduction::trySADReplacement'
484 MadeChange = true;
485 }
486 }
487 }
488
489 return MadeChange;
490}

/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/lib/Target/X86/X86Subtarget.h

1//===-- X86Subtarget.h - Define Subtarget for the X86 ----------*- C++ -*--===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the X86 specific subclass of TargetSubtargetInfo.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_X86_X86SUBTARGET_H
14#define LLVM_LIB_TARGET_X86_X86SUBTARGET_H
15
16#include "X86FrameLowering.h"
17#include "X86ISelLowering.h"
18#include "X86InstrInfo.h"
19#include "X86SelectionDAGInfo.h"
20#include "llvm/ADT/Triple.h"
21#include "llvm/CodeGen/TargetSubtargetInfo.h"
22#include "llvm/IR/CallingConv.h"
23#include <climits>
24#include <memory>
25
26#define GET_SUBTARGETINFO_HEADER
27#include "X86GenSubtargetInfo.inc"
28
29namespace llvm {
30
31class CallLowering;
32class GlobalValue;
33class InstructionSelector;
34class LegalizerInfo;
35class RegisterBankInfo;
36class StringRef;
37class TargetMachine;
38
/// The X86 backend supports a number of different styles of PIC.
///
namespace PICStyles {

enum class Style {
  StubPIC, // Used on i386-darwin in pic mode.
  GOT,     // Used on 32 bit elf when in pic mode.
  RIPRel,  // Used on X86-64 when in pic mode.
  None     // Set when not in pic mode.
};

} // end namespace PICStyles
51
52class X86Subtarget final : public X86GenSubtargetInfo {
53 // NOTE: Do not add anything new to this list. Coarse, CPU name based flags
54 // are not a good idea. We should be migrating away from these.
55 enum X86ProcFamilyEnum {
56 Others,
57 IntelAtom,
58 IntelSLM
59 };
60
61 enum X86SSEEnum {
62 NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
63 };
64
65 enum X863DNowEnum {
66 NoThreeDNow, MMX, ThreeDNow, ThreeDNowA
67 };
68
69 /// X86 processor family: Intel Atom, and others
70 X86ProcFamilyEnum X86ProcFamily = Others;
71
72 /// Which PIC style to use
73 PICStyles::Style PICStyle;
74
75 const TargetMachine &TM;
76
77 /// SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, or none supported.
78 X86SSEEnum X86SSELevel = NoSSE;
79
80 /// MMX, 3DNow, 3DNow Athlon, or none supported.
81 X863DNowEnum X863DNowLevel = NoThreeDNow;
82
83 /// True if the processor supports X87 instructions.
84 bool HasX87 = false;
85
86 /// True if the processor supports CMPXCHG8B.
87 bool HasCmpxchg8b = false;
88
89 /// True if this processor has NOPL instruction
90 /// (generally pentium pro+).
91 bool HasNOPL = false;
92
93 /// True if this processor has conditional move instructions
94 /// (generally pentium pro+).
95 bool HasCMov = false;
96
97 /// True if the processor supports X86-64 instructions.
98 bool HasX86_64 = false;
99
100 /// True if the processor supports POPCNT.
101 bool HasPOPCNT = false;
102
103 /// True if the processor supports SSE4A instructions.
104 bool HasSSE4A = false;
105
106 /// Target has AES instructions
107 bool HasAES = false;
108 bool HasVAES = false;
109
110 /// Target has FXSAVE/FXRESTOR instructions
111 bool HasFXSR = false;
112
113 /// Target has XSAVE instructions
114 bool HasXSAVE = false;
115
116 /// Target has XSAVEOPT instructions
117 bool HasXSAVEOPT = false;
118
119 /// Target has XSAVEC instructions
120 bool HasXSAVEC = false;
121
122 /// Target has XSAVES instructions
123 bool HasXSAVES = false;
124
125 /// Target has carry-less multiplication
126 bool HasPCLMUL = false;
127 bool HasVPCLMULQDQ = false;
128
129 /// Target has Galois Field Arithmetic instructions
130 bool HasGFNI = false;
131
132 /// Target has 3-operand fused multiply-add
133 bool HasFMA = false;
134
135 /// Target has 4-operand fused multiply-add
136 bool HasFMA4 = false;
137
138 /// Target has XOP instructions
139 bool HasXOP = false;
140
141 /// Target has TBM instructions.
142 bool HasTBM = false;
143
144 /// Target has LWP instructions
145 bool HasLWP = false;
146
147 /// True if the processor has the MOVBE instruction.
148 bool HasMOVBE = false;
149
150 /// True if the processor has the RDRAND instruction.
151 bool HasRDRAND = false;
152
153 /// Processor has 16-bit floating point conversion instructions.
154 bool HasF16C = false;
155
156 /// Processor has FS/GS base insturctions.
157 bool HasFSGSBase = false;
158
159 /// Processor has LZCNT instruction.
160 bool HasLZCNT = false;
161
162 /// Processor has BMI1 instructions.
163 bool HasBMI = false;
164
165 /// Processor has BMI2 instructions.
166 bool HasBMI2 = false;
167
168 /// Processor has VBMI instructions.
169 bool HasVBMI = false;
170
171 /// Processor has VBMI2 instructions.
172 bool HasVBMI2 = false;
173
174 /// Processor has Integer Fused Multiply Add
175 bool HasIFMA = false;
176
177 /// Processor has RTM instructions.
178 bool HasRTM = false;
179
180 /// Processor has ADX instructions.
181 bool HasADX = false;
182
183 /// Processor has SHA instructions.
184 bool HasSHA = false;
185
186 /// Processor has PRFCHW instructions.
187 bool HasPRFCHW = false;
188
189 /// Processor has RDSEED instructions.
190 bool HasRDSEED = false;
191
192 /// Processor has LAHF/SAHF instructions in 64-bit mode.
193 bool HasLAHFSAHF64 = false;
194
195 /// Processor has MONITORX/MWAITX instructions.
196 bool HasMWAITX = false;
197
198 /// Processor has Cache Line Zero instruction
199 bool HasCLZERO = false;
200
201 /// Processor has Cache Line Demote instruction
202 bool HasCLDEMOTE = false;
203
204 /// Processor has MOVDIRI instruction (direct store integer).
205 bool HasMOVDIRI = false;
206
207 /// Processor has MOVDIR64B instruction (direct store 64 bytes).
208 bool HasMOVDIR64B = false;
209
210 /// Processor has ptwrite instruction.
211 bool HasPTWRITE = false;
212
213 /// Processor has Prefetch with intent to Write instruction
214 bool HasPREFETCHWT1 = false;
215
216 /// True if SHLD instructions are slow.
217 bool IsSHLDSlow = false;
218
219 /// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
220 // PMULUDQ.
221 bool IsPMULLDSlow = false;
222
223 /// True if the PMADDWD instruction is slow compared to PMULLD.
224 bool IsPMADDWDSlow = false;
225
226 /// True if unaligned memory accesses of 16-bytes are slow.
227 bool IsUAMem16Slow = false;
228
229 /// True if unaligned memory accesses of 32-bytes are slow.
230 bool IsUAMem32Slow = false;
231
232 /// True if SSE operations can have unaligned memory operands.
233 /// This may require setting a configuration bit in the processor.
234 bool HasSSEUnalignedMem = false;
235
236 /// True if this processor has the CMPXCHG16B instruction;
237 /// this is true for most x86-64 chips, but not the first AMD chips.
238 bool HasCmpxchg16b = false;
239
240 /// True if the LEA instruction should be used for adjusting
241 /// the stack pointer. This is an optimization for Intel Atom processors.
242 bool UseLeaForSP = false;
243
244 /// True if POPCNT instruction has a false dependency on the destination register.
245 bool HasPOPCNTFalseDeps = false;
246
247 /// True if LZCNT/TZCNT instructions have a false dependency on the destination register.
248 bool HasLZCNTFalseDeps = false;
249
250 /// True if its preferable to combine to a single shuffle using a variable
251 /// mask over multiple fixed shuffles.
252 bool HasFastVariableShuffle = false;
253
254 /// True if vzeroupper instructions should be inserted after code that uses
255 /// ymm or zmm registers.
256 bool InsertVZEROUPPER = false;
257
258 /// True if there is no performance penalty for writing NOPs with up to
259 /// 7 bytes.
260 bool HasFast7ByteNOP = false;
261
262 /// True if there is no performance penalty for writing NOPs with up to
263 /// 11 bytes.
264 bool HasFast11ByteNOP = false;
265
266 /// True if there is no performance penalty for writing NOPs with up to
267 /// 15 bytes.
268 bool HasFast15ByteNOP = false;
269
270 /// True if gather is reasonably fast. This is true for Skylake client and
271 /// all AVX-512 CPUs.
272 bool HasFastGather = false;
273
274 /// True if hardware SQRTSS instruction is at least as fast (latency) as
275 /// RSQRTSS followed by a Newton-Raphson iteration.
276 bool HasFastScalarFSQRT = false;
277
278 /// True if hardware SQRTPS/VSQRTPS instructions are at least as fast
279 /// (throughput) as RSQRTPS/VRSQRTPS followed by a Newton-Raphson iteration.
280 bool HasFastVectorFSQRT = false;
281
282 /// True if 8-bit divisions are significantly faster than
283 /// 32-bit divisions and should be used when possible.
284 bool HasSlowDivide32 = false;
285
286 /// True if 32-bit divides are significantly faster than
287 /// 64-bit divisions and should be used when possible.
288 bool HasSlowDivide64 = false;
289
290 /// True if LZCNT instruction is fast.
291 bool HasFastLZCNT = false;
292
293 /// True if SHLD based rotate is fast.
294 bool HasFastSHLDRotate = false;
295
296 /// True if the processor supports macrofusion.
297 bool HasMacroFusion = false;
298
299 /// True if the processor supports branch fusion.
300 bool HasBranchFusion = false;
301
302 /// True if the processor has enhanced REP MOVSB/STOSB.
303 bool HasERMSB = false;
304
305 /// True if the processor has fast short REP MOV.
306 bool HasFSRM = false;
307
308 /// True if the short functions should be padded to prevent
309 /// a stall when returning too early.
310 bool PadShortFunctions = false;
311
312 /// True if two memory operand instructions should use a temporary register
313 /// instead.
314 bool SlowTwoMemOps = false;
315
316 /// True if the LEA instruction inputs have to be ready at address generation
317 /// (AG) time.
318 bool LEAUsesAG = false;
319
320 /// True if the LEA instruction with certain arguments is slow
321 bool SlowLEA = false;
322
323 /// True if the LEA instruction has all three source operands: base, index,
324 /// and offset or if the LEA instruction uses base and index registers where
325 /// the base is EBP, RBP,or R13
326 bool Slow3OpsLEA = false;
327
328 /// True if INC and DEC instructions are slow when writing to flags
329 bool SlowIncDec = false;
330
331 /// Processor has AVX-512 PreFetch Instructions
332 bool HasPFI = false;
333
334 /// Processor has AVX-512 Exponential and Reciprocal Instructions
335 bool HasERI = false;
336
337 /// Processor has AVX-512 Conflict Detection Instructions
338 bool HasCDI = false;
339
340 /// Processor has AVX-512 population count Instructions
341 bool HasVPOPCNTDQ = false;
342
343 /// Processor has AVX-512 Doubleword and Quadword instructions
344 bool HasDQI = false;
345
346 /// Processor has AVX-512 Byte and Word instructions
347 bool HasBWI = false;
348
349 /// Processor has AVX-512 Vector Length eXtenstions
350 bool HasVLX = false;
351
352 /// Processor has PKU extenstions
353 bool HasPKU = false;
354
355 /// Processor has AVX-512 Vector Neural Network Instructions
356 bool HasVNNI = false;
357
358 /// Processor has AVX-512 bfloat16 floating-point extensions
359 bool HasBF16 = false;
360
361 /// Processor supports ENQCMD instructions
362 bool HasENQCMD = false;
363
364 /// Processor has AVX-512 Bit Algorithms instructions
365 bool HasBITALG = false;
366
367 /// Processor has AVX-512 vp2intersect instructions
368 bool HasVP2INTERSECT = false;
369
370 /// Processor supports CET SHSTK - Control-Flow Enforcement Technology
371 /// using Shadow Stack
372 bool HasSHSTK = false;
373
374 /// Processor supports Invalidate Process-Context Identifier
375 bool HasINVPCID = false;
376
377 /// Processor has Software Guard Extensions
378 bool HasSGX = false;
379
380 /// Processor supports Flush Cache Line instruction
381 bool HasCLFLUSHOPT = false;
382
383 /// Processor supports Cache Line Write Back instruction
384 bool HasCLWB = false;
385
386 /// Processor supports Write Back No Invalidate instruction
387 bool HasWBNOINVD = false;
388
389 /// Processor support RDPID instruction
390 bool HasRDPID = false;
391
392 /// Processor supports WaitPKG instructions
393 bool HasWAITPKG = false;
394
395 /// Processor supports PCONFIG instruction
396 bool HasPCONFIG = false;
397
398 /// Processor supports SERIALIZE instruction
399 bool HasSERIALIZE = false;
400
401 /// Processor supports TSXLDTRK instruction
402 bool HasTSXLDTRK = false;
403
404 /// Processor has AMX support
405 bool HasAMXTILE = false;
406 bool HasAMXBF16 = false;
407 bool HasAMXINT8 = false;
408
409 /// Processor has a single uop BEXTR implementation.
410 bool HasFastBEXTR = false;
411
412 /// Try harder to combine to horizontal vector ops if they are fast.
413 bool HasFastHorizontalOps = false;
414
415 /// Prefer a left/right scalar logical shifts pair over a shift+and pair.
416 bool HasFastScalarShiftMasks = false;
417
418 /// Prefer a left/right vector logical shifts pair over a shift+and pair.
419 bool HasFastVectorShiftMasks = false;
420
421 /// Use a retpoline thunk rather than indirect calls to block speculative
422 /// execution.
423 bool UseRetpolineIndirectCalls = false;
424
425 /// Use a retpoline thunk or remove any indirect branch to block speculative
426 /// execution.
427 bool UseRetpolineIndirectBranches = false;
428
429 /// Deprecated flag, query `UseRetpolineIndirectCalls` and
430 /// `UseRetpolineIndirectBranches` instead.
431 bool DeprecatedUseRetpoline = false;
432
433 /// When using a retpoline thunk, call an externally provided thunk rather
434 /// than emitting one inside the compiler.
435 bool UseRetpolineExternalThunk = false;
436
437 /// Prevent generation of indirect call/branch instructions from memory,
438 /// and force all indirect call/branch instructions from a register to be
439 /// preceded by an LFENCE. Also decompose RET instructions into a
440 /// POP+LFENCE+JMP sequence.
441 bool UseLVIControlFlowIntegrity = false;
442
443 /// Enable Speculative Execution Side Effect Suppression
444 bool UseSpeculativeExecutionSideEffectSuppression = false;
445
446 /// Insert LFENCE instructions to prevent data speculatively injected into
447 /// loads from being used maliciously.
448 bool UseLVILoadHardening = false;
449
450 /// Use software floating point for code generation.
451 bool UseSoftFloat = false;
452
453 /// Use alias analysis during code generation.
454 bool UseAA = false;
455
456 /// The minimum alignment known to hold of the stack frame on
457 /// entry to the function and which must be maintained by every function.
458 Align stackAlignment = Align(4);
459
460 /// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
461 ///
462 // FIXME: this is a known good value for Yonah. How about others?
463 unsigned MaxInlineSizeThreshold = 128;
464
465 /// Indicates target prefers 128 bit instructions.
466 bool Prefer128Bit = false;
467
468 /// Indicates target prefers 256 bit instructions.
469 bool Prefer256Bit = false;
470
471 /// Indicates target prefers AVX512 mask registers.
472 bool PreferMaskRegisters = false;
473
474 /// Use Goldmont specific floating point div/sqrt costs.
475 bool UseGLMDivSqrtCosts = false;
476
477 /// What processor and OS we're targeting.
478 Triple TargetTriple;
479
480 /// GlobalISel related APIs.
481 std::unique_ptr<CallLowering> CallLoweringInfo;
482 std::unique_ptr<LegalizerInfo> Legalizer;
483 std::unique_ptr<RegisterBankInfo> RegBankInfo;
484 std::unique_ptr<InstructionSelector> InstSelector;
485
486private:
487 /// Override the stack alignment.
488 MaybeAlign StackAlignOverride;
489
490 /// Preferred vector width from function attribute.
491 unsigned PreferVectorWidthOverride;
492
493 /// Resolved preferred vector width from function attribute and subtarget
494 /// features.
495 unsigned PreferVectorWidth = UINT32_MAX(4294967295U);
496
497 /// Required vector width from function attribute.
498 unsigned RequiredVectorWidth;
499
500 /// True if compiling for 64-bit, false for 16-bit or 32-bit.
501 bool In64BitMode = false;
502
503 /// True if compiling for 32-bit, false for 16-bit or 64-bit.
504 bool In32BitMode = false;
505
506 /// True if compiling for 16-bit, false for 32-bit or 64-bit.
507 bool In16BitMode = false;
508
509 X86SelectionDAGInfo TSInfo;
510 // Ordering here is important. X86InstrInfo initializes X86RegisterInfo which
511 // X86TargetLowering needs.
512 X86InstrInfo InstrInfo;
513 X86TargetLowering TLInfo;
514 X86FrameLowering FrameLowering;
515
516public:
517 /// This constructor initializes the data members to match that
518 /// of the specified triple.
519 ///
520 X86Subtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS,
521 const X86TargetMachine &TM, MaybeAlign StackAlignOverride,
522 unsigned PreferVectorWidthOverride,
523 unsigned RequiredVectorWidth);
524
525 const X86TargetLowering *getTargetLowering() const override {
526 return &TLInfo;
527 }
528
529 const X86InstrInfo *getInstrInfo() const override { return &InstrInfo; }
530
531 const X86FrameLowering *getFrameLowering() const override {
532 return &FrameLowering;
533 }
534
535 const X86SelectionDAGInfo *getSelectionDAGInfo() const override {
536 return &TSInfo;
537 }
538
539 const X86RegisterInfo *getRegisterInfo() const override {
540 return &getInstrInfo()->getRegisterInfo();
541 }
542
543 /// Returns the minimum alignment known to hold of the
544 /// stack frame on entry to the function and which must be maintained by every
545 /// function for this subtarget.
546 Align getStackAlignment() const { return stackAlignment; }
547
548 /// Returns the maximum memset / memcpy size
549 /// that still makes it profitable to inline the call.
550 unsigned getMaxInlineSizeThreshold() const { return MaxInlineSizeThreshold; }
551
552 /// ParseSubtargetFeatures - Parses features string setting specified
553 /// subtarget options. Definition of function is auto generated by tblgen.
554 void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
555
556 /// Methods used by Global ISel
557 const CallLowering *getCallLowering() const override;
558 InstructionSelector *getInstructionSelector() const override;
559 const LegalizerInfo *getLegalizerInfo() const override;
560 const RegisterBankInfo *getRegBankInfo() const override;
561
562private:
563 /// Initialize the full set of dependencies so we can use an initializer
564 /// list for X86Subtarget.
565 X86Subtarget &initializeSubtargetDependencies(StringRef CPU,
566 StringRef TuneCPU,
567 StringRef FS);
568 void initSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
569
570public:
571 /// Is this x86_64? (disregarding specific ABI / programming model)
572 bool is64Bit() const {
573 return In64BitMode;
574 }
575
576 bool is32Bit() const {
577 return In32BitMode;
578 }
579
580 bool is16Bit() const {
581 return In16BitMode;
582 }
583
584 /// Is this x86_64 with the ILP32 programming model (x32 ABI)?
585 bool isTarget64BitILP32() const {
586 return In64BitMode && (TargetTriple.getEnvironment() == Triple::GNUX32 ||
587 TargetTriple.isOSNaCl());
588 }
589
590 /// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
591 bool isTarget64BitLP64() const {
592 return In64BitMode && (TargetTriple.getEnvironment() != Triple::GNUX32 &&
593 !TargetTriple.isOSNaCl());
594 }
595
596 PICStyles::Style getPICStyle() const { return PICStyle; }
597 void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
598
599 bool hasX87() const { return HasX87; }
600 bool hasCmpxchg8b() const { return HasCmpxchg8b; }
601 bool hasNOPL() const { return HasNOPL; }
602 // SSE codegen depends on cmovs, and all SSE1+ processors support them.
603 // All 64-bit processors support cmov.
604 bool hasCMov() const { return HasCMov || X86SSELevel >= SSE1 || is64Bit(); }
605 bool hasSSE1() const { return X86SSELevel >= SSE1; }
606 bool hasSSE2() const { return X86SSELevel
12.1
Field 'X86SSELevel' is >= SSE2
12.1
Field 'X86SSELevel' is >= SSE2
12.1
Field 'X86SSELevel' is >= SSE2
>= SSE2
; }
13
Returning the value 1, which participates in a condition later
607 bool hasSSE3() const { return X86SSELevel >= SSE3; }
608 bool hasSSSE3() const { return X86SSELevel >= SSSE3; }
609 bool hasSSE41() const { return X86SSELevel >= SSE41; }
610 bool hasSSE42() const { return X86SSELevel >= SSE42; }
611 bool hasAVX() const { return X86SSELevel >= AVX; }
612 bool hasAVX2() const { return X86SSELevel >= AVX2; }
613 bool hasAVX512() const { return X86SSELevel >= AVX512F; }
614 bool hasInt256() const { return hasAVX2(); }
615 bool hasSSE4A() const { return HasSSE4A; }
616 bool hasMMX() const { return X863DNowLevel >= MMX; }
617 bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
618 bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
619 bool hasPOPCNT() const { return HasPOPCNT; }
620 bool hasAES() const { return HasAES; }
621 bool hasVAES() const { return HasVAES; }
622 bool hasFXSR() const { return HasFXSR; }
623 bool hasXSAVE() const { return HasXSAVE; }
624 bool hasXSAVEOPT() const { return HasXSAVEOPT; }
625 bool hasXSAVEC() const { return HasXSAVEC; }
626 bool hasXSAVES() const { return HasXSAVES; }
627 bool hasPCLMUL() const { return HasPCLMUL; }
628 bool hasVPCLMULQDQ() const { return HasVPCLMULQDQ; }
629 bool hasGFNI() const { return HasGFNI; }
630 // Prefer FMA4 to FMA - its better for commutation/memory folding and
631 // has equal or better performance on all supported targets.
632 bool hasFMA() const { return HasFMA; }
633 bool hasFMA4() const { return HasFMA4; }
634 bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
635 bool hasXOP() const { return HasXOP; }
636 bool hasTBM() const { return HasTBM; }
637 bool hasLWP() const { return HasLWP; }
638 bool hasMOVBE() const { return HasMOVBE; }
639 bool hasRDRAND() const { return HasRDRAND; }
640 bool hasF16C() const { return HasF16C; }
641 bool hasFSGSBase() const { return HasFSGSBase; }
642 bool hasLZCNT() const { return HasLZCNT; }
643 bool hasBMI() const { return HasBMI; }
644 bool hasBMI2() const { return HasBMI2; }
645 bool hasVBMI() const { return HasVBMI; }
646 bool hasVBMI2() const { return HasVBMI2; }
647 bool hasIFMA() const { return HasIFMA; }
648 bool hasRTM() const { return HasRTM; }
649 bool hasADX() const { return HasADX; }
650 bool hasSHA() const { return HasSHA; }
651 bool hasPRFCHW() const { return HasPRFCHW; }
652 bool hasPREFETCHWT1() const { return HasPREFETCHWT1; }
653 bool hasPrefetchW() const {
654 // The PREFETCHW instruction was added with 3DNow but later CPUs gave it
655 // its own CPUID bit as part of deprecating 3DNow. Intel eventually added
656 // it and KNL has another that prefetches to L2 cache. We assume the
657 // L1 version exists if the L2 version does.
658 return has3DNow() || hasPRFCHW() || hasPREFETCHWT1();
659 }
660 bool hasSSEPrefetch() const {
661 // We implicitly enable these when we have a write prefix supporting cache
662 // level OR if we have prfchw, but don't already have a read prefetch from
663 // 3dnow.
664 return hasSSE1() || (hasPRFCHW() && !has3DNow()) || hasPREFETCHWT1();
665 }
666 bool hasRDSEED() const { return HasRDSEED; }
667 bool hasLAHFSAHF() const { return HasLAHFSAHF64 || !is64Bit(); }
668 bool hasMWAITX() const { return HasMWAITX; }
669 bool hasCLZERO() const { return HasCLZERO; }
670 bool hasCLDEMOTE() const { return HasCLDEMOTE; }
671 bool hasMOVDIRI() const { return HasMOVDIRI; }
672 bool hasMOVDIR64B() const { return HasMOVDIR64B; }
673 bool hasPTWRITE() const { return HasPTWRITE; }
674 bool isSHLDSlow() const { return IsSHLDSlow; }
675 bool isPMULLDSlow() const { return IsPMULLDSlow; }
676 bool isPMADDWDSlow() const { return IsPMADDWDSlow; }
677 bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
678 bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
679 bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
680 bool hasCmpxchg16b() const { return HasCmpxchg16b && is64Bit(); }
681 bool useLeaForSP() const { return UseLeaForSP; }
682 bool hasPOPCNTFalseDeps() const { return HasPOPCNTFalseDeps; }
683 bool hasLZCNTFalseDeps() const { return HasLZCNTFalseDeps; }
684 bool hasFastVariableShuffle() const {
685 return HasFastVariableShuffle;
686 }
687 bool insertVZEROUPPER() const { return InsertVZEROUPPER; }
688 bool hasFastGather() const { return HasFastGather; }
689 bool hasFastScalarFSQRT() const { return HasFastScalarFSQRT; }
690 bool hasFastVectorFSQRT() const { return HasFastVectorFSQRT; }
691 bool hasFastLZCNT() const { return HasFastLZCNT; }
692 bool hasFastSHLDRotate() const { return HasFastSHLDRotate; }
693 bool hasFastBEXTR() const { return HasFastBEXTR; }
694 bool hasFastHorizontalOps() const { return HasFastHorizontalOps; }
695 bool hasFastScalarShiftMasks() const { return HasFastScalarShiftMasks; }
696 bool hasFastVectorShiftMasks() const { return HasFastVectorShiftMasks; }
697 bool hasMacroFusion() const { return HasMacroFusion; }
698 bool hasBranchFusion() const { return HasBranchFusion; }
699 bool hasERMSB() const { return HasERMSB; }
700 bool hasFSRM() const { return HasFSRM; }
701 bool hasSlowDivide32() const { return HasSlowDivide32; }
702 bool hasSlowDivide64() const { return HasSlowDivide64; }
703 bool padShortFunctions() const { return PadShortFunctions; }
704 bool slowTwoMemOps() const { return SlowTwoMemOps; }
705 bool LEAusesAG() const { return LEAUsesAG; }
706 bool slowLEA() const { return SlowLEA; }
707 bool slow3OpsLEA() const { return Slow3OpsLEA; }
708 bool slowIncDec() const { return SlowIncDec; }
709 bool hasCDI() const { return HasCDI; }
710 bool hasVPOPCNTDQ() const { return HasVPOPCNTDQ; }
711 bool hasPFI() const { return HasPFI; }
712 bool hasERI() const { return HasERI; }
713 bool hasDQI() const { return HasDQI; }
714 bool hasBWI() const { return HasBWI; }
715 bool hasVLX() const { return HasVLX; }
716 bool hasPKU() const { return HasPKU; }
717 bool hasVNNI() const { return HasVNNI; }
718 bool hasBF16() const { return HasBF16; }
719 bool hasVP2INTERSECT() const { return HasVP2INTERSECT; }
720 bool hasBITALG() const { return HasBITALG; }
721 bool hasSHSTK() const { return HasSHSTK; }
722 bool hasCLFLUSHOPT() const { return HasCLFLUSHOPT; }
723 bool hasCLWB() const { return HasCLWB; }
724 bool hasWBNOINVD() const { return HasWBNOINVD; }
725 bool hasRDPID() const { return HasRDPID; }
726 bool hasWAITPKG() const { return HasWAITPKG; }
727 bool hasPCONFIG() const { return HasPCONFIG; }
728 bool hasSGX() const { return HasSGX; }
729 bool hasINVPCID() const { return HasINVPCID; }
730 bool hasENQCMD() const { return HasENQCMD; }
731 bool hasSERIALIZE() const { return HasSERIALIZE; }
732 bool hasTSXLDTRK() const { return HasTSXLDTRK; }
733 bool useRetpolineIndirectCalls() const { return UseRetpolineIndirectCalls; }
734 bool useRetpolineIndirectBranches() const {
735 return UseRetpolineIndirectBranches;
736 }
737 bool hasAMXTILE() const { return HasAMXTILE; }
738 bool hasAMXBF16() const { return HasAMXBF16; }
739 bool hasAMXINT8() const { return HasAMXINT8; }
740 bool useRetpolineExternalThunk() const { return UseRetpolineExternalThunk; }
741
742 // These are generic getters that OR together all of the thunk types
743 // supported by the subtarget. Therefore useIndirectThunk*() will return true
744 // if any respective thunk feature is enabled.
745 bool useIndirectThunkCalls() const {
746 return useRetpolineIndirectCalls() || useLVIControlFlowIntegrity();
747 }
748 bool useIndirectThunkBranches() const {
749 return useRetpolineIndirectBranches() || useLVIControlFlowIntegrity();
750 }
751
752 bool preferMaskRegisters() const { return PreferMaskRegisters; }
753 bool useGLMDivSqrtCosts() const { return UseGLMDivSqrtCosts; }
754 bool useLVIControlFlowIntegrity() const { return UseLVIControlFlowIntegrity; }
755 bool useLVILoadHardening() const { return UseLVILoadHardening; }
756 bool useSpeculativeExecutionSideEffectSuppression() const {
757 return UseSpeculativeExecutionSideEffectSuppression;
758 }
759
760 unsigned getPreferVectorWidth() const { return PreferVectorWidth; }
761 unsigned getRequiredVectorWidth() const { return RequiredVectorWidth; }
762
763 // Helper functions to determine when we should allow widening to 512-bit
764 // during codegen.
765 // TODO: Currently we're always allowing widening on CPUs without VLX,
766 // because for many cases we don't have a better option.
767 bool canExtendTo512DQ() const {
768 return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
769 }
770 bool canExtendTo512BW() const {
771 return hasBWI() && canExtendTo512DQ();
772 }
773
774 // If there are no 512-bit vectors and we prefer not to use 512-bit registers,
775 // disable them in the legalizer.
776 bool useAVX512Regs() const {
777 return hasAVX512() && (canExtendTo512DQ() || RequiredVectorWidth > 256);
778 }
779
780 bool useBWIRegs() const {
781 return hasBWI() && useAVX512Regs();
782 }
783
784 bool isXRaySupported() const override { return is64Bit(); }
785
786 /// TODO: to be removed later and replaced with suitable properties
787 bool isAtom() const { return X86ProcFamily == IntelAtom; }
788 bool isSLM() const { return X86ProcFamily == IntelSLM; }
789 bool useSoftFloat() const { return UseSoftFloat; }
790 bool useAA() const override { return UseAA; }
791
792 /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
793 /// no-sse2). There isn't any reason to disable it if the target processor
794 /// supports it.
795 bool hasMFence() const { return hasSSE2() || is64Bit(); }
796
797 const Triple &getTargetTriple() const { return TargetTriple; }
798
799 bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
800 bool isTargetFreeBSD() const { return TargetTriple.isOSFreeBSD(); }
801 bool isTargetDragonFly() const { return TargetTriple.isOSDragonFly(); }
802 bool isTargetSolaris() const { return TargetTriple.isOSSolaris(); }
803 bool isTargetPS4() const { return TargetTriple.isPS4CPU(); }
804
805 bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
806 bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
807 bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
808
809 bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
810 bool isTargetKFreeBSD() const { return TargetTriple.isOSKFreeBSD(); }
811 bool isTargetGlibc() const { return TargetTriple.isOSGlibc(); }
812 bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
813 bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
814 bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
815 bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
816 bool isTargetMCU() const { return TargetTriple.isOSIAMCU(); }
817 bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }
818
819 bool isTargetWindowsMSVC() const {
820 return TargetTriple.isWindowsMSVCEnvironment();
821 }
822
823 bool isTargetWindowsCoreCLR() const {
824 return TargetTriple.isWindowsCoreCLREnvironment();
825 }
826
827 bool isTargetWindowsCygwin() const {
828 return TargetTriple.isWindowsCygwinEnvironment();
829 }
830
831 bool isTargetWindowsGNU() const {
832 return TargetTriple.isWindowsGNUEnvironment();
833 }
834
835 bool isTargetWindowsItanium() const {
836 return TargetTriple.isWindowsItaniumEnvironment();
837 }
838
839 bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
840
841 bool isOSWindows() const { return TargetTriple.isOSWindows(); }
842
843 bool isTargetWin64() const { return In64BitMode && isOSWindows(); }
844
845 bool isTargetWin32() const { return !In64BitMode && isOSWindows(); }
846
847 bool isPICStyleGOT() const { return PICStyle == PICStyles::Style::GOT; }
848 bool isPICStyleRIPRel() const { return PICStyle == PICStyles::Style::RIPRel; }
849
850 bool isPICStyleStubPIC() const {
851 return PICStyle == PICStyles::Style::StubPIC;
852 }
853
854 bool isPositionIndependent() const;
855
856 bool isCallingConvWin64(CallingConv::ID CC) const {
857 switch (CC) {
858 // On Win64, all these conventions just use the default convention.
859 case CallingConv::C:
860 case CallingConv::Fast:
861 case CallingConv::Tail:
862 case CallingConv::Swift:
863 case CallingConv::X86_FastCall:
864 case CallingConv::X86_StdCall:
865 case CallingConv::X86_ThisCall:
866 case CallingConv::X86_VectorCall:
867 case CallingConv::Intel_OCL_BI:
868 return isTargetWin64();
869 // This convention allows using the Win64 convention on other targets.
870 case CallingConv::Win64:
871 return true;
872 // This convention allows using the SysV convention on Windows targets.
873 case CallingConv::X86_64_SysV:
874 return false;
875 // Otherwise, who knows what this is.
876 default:
877 return false;
878 }
879 }
880
881 /// Classify a global variable reference for the current subtarget according
882 /// to how we should reference it in a non-pcrel context.
883 unsigned char classifyLocalReference(const GlobalValue *GV) const;
884
885 unsigned char classifyGlobalReference(const GlobalValue *GV,
886 const Module &M) const;
887 unsigned char classifyGlobalReference(const GlobalValue *GV) const;
888
889 /// Classify a global function reference for the current subtarget.
890 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
891 const Module &M) const;
892 unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;
893
894 /// Classify a blockaddress reference for the current subtarget according to
895 /// how we should reference it in a non-pcrel context.
896 unsigned char classifyBlockAddressReference() const;
897
898 /// Return true if the subtarget allows calls to immediate address.
899 bool isLegalToCallImmediateAddr() const;
900
901 /// If we are using indirect thunks, we need to expand indirectbr to avoid it
902 /// lowering to an actual indirect jump.
903 bool enableIndirectBrExpand() const override {
904 return useIndirectThunkBranches();
905 }
906
907 /// Enable the MachineScheduler pass for all X86 subtargets.
908 bool enableMachineScheduler() const override { return true; }
909
910 bool enableEarlyIfConversion() const override;
911
912 void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
913 &Mutations) const override;
914
915 AntiDepBreakMode getAntiDepBreakMode() const override {
916 return TargetSubtargetInfo::ANTIDEP_CRITICAL;
917 }
918
919 bool enableAdvancedRASplitCost() const override { return true; }
920};
921
922} // end namespace llvm
923
924#endif // LLVM_LIB_TARGET_X86_X86SUBTARGET_H

/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h

1//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines various meta classes of instructions that exist in the VM
10// representation. Specific concrete subclasses of these may be found in the
11// i*.h files...
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRTYPES_H
16#define LLVM_IR_INSTRTYPES_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/OperandTraits.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/User.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/Casting.h"
38#include "llvm/Support/ErrorHandling.h"
39#include <algorithm>
40#include <cassert>
41#include <cstddef>
42#include <cstdint>
43#include <iterator>
44#include <string>
45#include <vector>
46
47namespace llvm {
48
49namespace Intrinsic {
50typedef unsigned ID;
51}
52
53//===----------------------------------------------------------------------===//
54// UnaryInstruction Class
55//===----------------------------------------------------------------------===//
56
57class UnaryInstruction : public Instruction {
58protected:
59 UnaryInstruction(Type *Ty, unsigned iType, Value *V,
60 Instruction *IB = nullptr)
61 : Instruction(Ty, iType, &Op<0>(), 1, IB) {
62 Op<0>() = V;
63 }
64 UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
65 : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
66 Op<0>() = V;
67 }
68
69public:
70 // allocate space for exactly one operand
71 void *operator new(size_t s) {
72 return User::operator new(s, 1);
73 }
74
75 /// Transparently provide more efficient getOperand methods.
76 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
77
78 // Methods for support type inquiry through isa, cast, and dyn_cast:
79 static bool classof(const Instruction *I) {
80 return I->isUnaryOp() ||
81 I->getOpcode() == Instruction::Alloca ||
82 I->getOpcode() == Instruction::Load ||
83 I->getOpcode() == Instruction::VAArg ||
84 I->getOpcode() == Instruction::ExtractValue ||
85 (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
86 }
87 static bool classof(const Value *V) {
88 return isa<Instruction>(V) && classof(cast<Instruction>(V));
89 }
90};
91
92template <>
93struct OperandTraits<UnaryInstruction> :
94 public FixedNumOperandTraits<UnaryInstruction, 1> {
95};
96
97DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)UnaryInstruction::op_iterator UnaryInstruction::op_begin() { return
OperandTraits<UnaryInstruction>::op_begin(this); } UnaryInstruction
::const_op_iterator UnaryInstruction::op_begin() const { return
OperandTraits<UnaryInstruction>::op_begin(const_cast<
UnaryInstruction*>(this)); } UnaryInstruction::op_iterator
UnaryInstruction::op_end() { return OperandTraits<UnaryInstruction
>::op_end(this); } UnaryInstruction::const_op_iterator UnaryInstruction
::op_end() const { return OperandTraits<UnaryInstruction>
::op_end(const_cast<UnaryInstruction*>(this)); } Value *
UnaryInstruction::getOperand(unsigned i_nocapture) const { ((
i_nocapture < OperandTraits<UnaryInstruction>::operands
(this) && "getOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<UnaryInstruction>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 97, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<UnaryInstruction>::op_begin(const_cast<
UnaryInstruction*>(this))[i_nocapture].get()); } void UnaryInstruction
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<UnaryInstruction>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<UnaryInstruction>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 97, __PRETTY_FUNCTION__)); OperandTraits<UnaryInstruction
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
UnaryInstruction::getNumOperands() const { return OperandTraits
<UnaryInstruction>::operands(this); } template <int Idx_nocapture
> Use &UnaryInstruction::Op() { return this->OpFrom
<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &UnaryInstruction::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
33
'?' condition is true
34
The object is a 'Value'
35
Returning pointer, which participates in a condition later
46
'?' condition is true
47
The object is a 'Value'
48
Returning pointer, which participates in a condition later
98
99//===----------------------------------------------------------------------===//
100// UnaryOperator Class
101//===----------------------------------------------------------------------===//
102
/// UnaryOperator: a single-operand arithmetic instruction (e.g. FNeg),
/// layered on UnaryInstruction. Construction goes through the static
/// Create* factory functions; the constructors themselves are protected.
103class UnaryOperator : public UnaryInstruction {
104 void AssertOK();
105
106protected:
107 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
108 const Twine &Name, Instruction *InsertBefore);
109 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
110 const Twine &Name, BasicBlock *InsertAtEnd);
111
112 // Note: Instruction needs to be a friend here to call cloneImpl.
113 friend class Instruction;
114
115 UnaryOperator *cloneImpl() const;
116
117public:
118
119 /// Construct a unary instruction, given the opcode and an operand.
120 /// Optionally (if InstBefore is specified) insert the instruction
121 /// into a BasicBlock right before the specified instruction. The specified
122 /// Instruction is allowed to be a dereferenced end iterator.
123 ///
124 static UnaryOperator *Create(UnaryOps Op, Value *S,
125 const Twine &Name = Twine(),
126 Instruction *InsertBefore = nullptr);
127
128 /// Construct a unary instruction, given the opcode and an operand.
129 /// Also automatically insert this instruction to the end of the
130 /// BasicBlock specified.
131 ///
132 static UnaryOperator *Create(UnaryOps Op, Value *S,
133 const Twine &Name,
134 BasicBlock *InsertAtEnd);
135
136 /// These methods just forward to Create, and are useful when you
137 /// statically know what type of instruction you're going to create. These
138 /// helpers just save some typing.
// Each #include of Instruction.def below re-expands HANDLE_UNARY_INST once
// per unary opcode, stamping out a Create<OPC> overload (e.g. CreateFNeg)
// for one of the three insertion variants: no insertion point,
// insert-at-block-end, and insert-before-instruction.
139#define HANDLE_UNARY_INST(N, OPC, CLASS) \
140 static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
141 return Create(Instruction::OPC, V, Name);\
142 }
143#include "llvm/IR/Instruction.def"
144#define HANDLE_UNARY_INST(N, OPC, CLASS) \
145 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
146 BasicBlock *BB) {\
147 return Create(Instruction::OPC, V, Name, BB);\
148 }
149#include "llvm/IR/Instruction.def"
150#define HANDLE_UNARY_INST(N, OPC, CLASS) \
151 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
152 Instruction *I) {\
153 return Create(Instruction::OPC, V, Name, I);\
154 }
155#include "llvm/IR/Instruction.def"
156
// Build a unary operator and copy the IR-level flags from an existing
// instruction CopyO onto the new one (via Instruction::copyIRFlags).
157 static UnaryOperator *
158 CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO,
159 const Twine &Name = "",
160 Instruction *InsertBefore = nullptr) {
161 UnaryOperator *UO = Create(Opc, V, Name, InsertBefore);
162 UO->copyIRFlags(CopyO);
163 return UO;
164 }
165
// Convenience wrapper: build an FNeg whose IR flags are copied from
// FMFSource.
166 static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
167 const Twine &Name = "",
168 Instruction *InsertBefore = nullptr) {
169 return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name,
170 InsertBefore);
171 }
172
// Narrow the generic Instruction opcode to the UnaryOps enum.
173 UnaryOps getOpcode() const {
174 return static_cast<UnaryOps>(Instruction::getOpcode());
175 }
176
177 // Methods for support type inquiry through isa, cast, and dyn_cast:
178 static bool classof(const Instruction *I) {
179 return I->isUnaryOp();
180 }
181 static bool classof(const Value *V) {
182 return isa<Instruction>(V) && classof(cast<Instruction>(V));
183 }
184};
185
186//===----------------------------------------------------------------------===//
187// BinaryOperator Class
188//===----------------------------------------------------------------------===//
189
/// BinaryOperator: an instruction with exactly two operands whose opcode is
/// one of Instruction::BinaryOps. The two-operand invariant is enforced by
/// the class-specific operator new below, which always allocates space for
/// exactly two Use slots.
190class BinaryOperator : public Instruction {
191 void AssertOK();
192
193protected:
194 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
195 const Twine &Name, Instruction *InsertBefore);
196 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
197 const Twine &Name, BasicBlock *InsertAtEnd);
198
199 // Note: Instruction needs to be a friend here to call cloneImpl.
200 friend class Instruction;
201
202 BinaryOperator *cloneImpl() const;
203
204public:
205 // allocate space for exactly two operands
206 void *operator new(size_t s) {
207 return User::operator new(s, 2);
208 }
209
210 /// Transparently provide more efficient getOperand methods.
211 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
212
213 /// Construct a binary instruction, given the opcode and the two
214 /// operands. Optionally (if InstBefore is specified) insert the instruction
215 /// into a BasicBlock right before the specified instruction. The specified
216 /// Instruction is allowed to be a dereferenced end iterator.
217 ///
218 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
219 const Twine &Name = Twine(),
220 Instruction *InsertBefore = nullptr);
221
222 /// Construct a binary instruction, given the opcode and the two
223 /// operands. Also automatically insert this instruction to the end of the
224 /// BasicBlock specified.
225 ///
226 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
227 const Twine &Name, BasicBlock *InsertAtEnd);
228
229 /// These methods just forward to Create, and are useful when you
230 /// statically know what type of instruction you're going to create. These
231 /// helpers just save some typing.
// Each #include of Instruction.def below re-expands HANDLE_BINARY_INST once
// per binary opcode, generating a Create<OPC> overload (e.g. CreateAdd) for
// each of the three insertion variants.
232#define HANDLE_BINARY_INST(N, OPC, CLASS) \
233 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
234 const Twine &Name = "") {\
235 return Create(Instruction::OPC, V1, V2, Name);\
236 }
237#include "llvm/IR/Instruction.def"
238#define HANDLE_BINARY_INST(N, OPC, CLASS) \
239 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
240 const Twine &Name, BasicBlock *BB) {\
241 return Create(Instruction::OPC, V1, V2, Name, BB);\
242 }
243#include "llvm/IR/Instruction.def"
244#define HANDLE_BINARY_INST(N, OPC, CLASS) \
245 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
246 const Twine &Name, Instruction *I) {\
247 return Create(Instruction::OPC, V1, V2, Name, I);\
248 }
249#include "llvm/IR/Instruction.def"
250
// Build a binary operator and copy the IR-level flags from an existing
// instruction CopyO onto the new one (via Instruction::copyIRFlags).
251 static BinaryOperator *CreateWithCopiedFlags(BinaryOps Opc,
252 Value *V1, Value *V2,
253 Instruction *CopyO,
254 const Twine &Name = "") {
255 BinaryOperator *BO = Create(Opc, V1, V2, Name);
256 BO->copyIRFlags(CopyO);
257 return BO;
258 }
259
// CreateF*FMF: build a floating-point binary operator whose IR flags are
// copied from FMFSource.
260 static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
261 Instruction *FMFSource,
262 const Twine &Name = "") {
263 return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
264 }
265 static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
266 Instruction *FMFSource,
267 const Twine &Name = "") {
268 return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
269 }
270 static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
271 Instruction *FMFSource,
272 const Twine &Name = "") {
273 return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
274 }
275 static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
276 Instruction *FMFSource,
277 const Twine &Name = "") {
278 return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
279 }
280 static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
281 Instruction *FMFSource,
282 const Twine &Name = "") {
283 return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
284 }
285
// CreateNSW*: like Create, but additionally set the no-signed-wrap flag on
// the result.
286 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
287 const Twine &Name = "") {
288 BinaryOperator *BO = Create(Opc, V1, V2, Name);
289 BO->setHasNoSignedWrap(true);
290 return BO;
291 }
292 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
293 const Twine &Name, BasicBlock *BB) {
294 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
295 BO->setHasNoSignedWrap(true);
296 return BO;
297 }
298 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
299 const Twine &Name, Instruction *I) {
300 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
301 BO->setHasNoSignedWrap(true);
302 return BO;
303 }
304
// CreateNUW*: like Create, but additionally set the no-unsigned-wrap flag.
305 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
306 const Twine &Name = "") {
307 BinaryOperator *BO = Create(Opc, V1, V2, Name);
308 BO->setHasNoUnsignedWrap(true);
309 return BO;
310 }
311 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
312 const Twine &Name, BasicBlock *BB) {
313 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
314 BO->setHasNoUnsignedWrap(true);
315 return BO;
316 }
317 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
318 const Twine &Name, Instruction *I) {
319 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
320 BO->setHasNoUnsignedWrap(true);
321 return BO;
322 }
323
// CreateExact*: like Create, but additionally set the exact flag.
324 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
325 const Twine &Name = "") {
326 BinaryOperator *BO = Create(Opc, V1, V2, Name);
327 BO->setIsExact(true);
328 return BO;
329 }
330 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
331 const Twine &Name, BasicBlock *BB) {
332 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
333 BO->setIsExact(true);
334 return BO;
335 }
336 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
337 const Twine &Name, Instruction *I) {
338 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
339 BO->setIsExact(true);
340 return BO;
341 }
342
// DEFINE_HELPERS stamps out the flag-setting creators by name (e.g.
// CreateNSWAdd, CreateExactSDiv), one overload for each of the three
// insertion variants.
343#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
344 static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
345 const Twine &Name = "") { \
346 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
347 } \
348 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
349 Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
350 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
351 } \
352 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
353 Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
354 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
355 }
356
357 DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
358 DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
359 DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
360 DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
361 DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
362 DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
363 DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
364 DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
365
366 DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
367 DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
368 DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
369 DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
370
371#undef DEFINE_HELPERS
372
373 /// Helper functions to construct and inspect unary operations (NEG and NOT)
374 /// via binary operators SUB and XOR:
375 ///
376 /// Create the NEG and NOT instructions out of SUB and XOR instructions.
377 ///
378 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
379 Instruction *InsertBefore = nullptr);
380 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
381 BasicBlock *InsertAtEnd);
382 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
383 Instruction *InsertBefore = nullptr);
384 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
385 BasicBlock *InsertAtEnd);
386 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
387 Instruction *InsertBefore = nullptr);
388 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
389 BasicBlock *InsertAtEnd);
390 static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
391 Instruction *InsertBefore = nullptr);
392 static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
393 BasicBlock *InsertAtEnd);
394
// Narrow the generic Instruction opcode to the BinaryOps enum.
395 BinaryOps getOpcode() const {
396 return static_cast<BinaryOps>(Instruction::getOpcode());
397 }
398
399 /// Exchange the two operands to this instruction.
400 /// This instruction is safe to use on any binary instruction and
401 /// does not modify the semantics of the instruction. If the instruction
402 /// cannot be reversed (ie, it's a Div), then return true.
403 ///
404 bool swapOperands();
405
406 // Methods for support type inquiry through isa, cast, and dyn_cast:
407 static bool classof(const Instruction *I) {
408 return I->isBinaryOp();
409 }
410 static bool classof(const Value *V) {
411 return isa<Instruction>(V) && classof(cast<Instruction>(V));
412 }
413};
414
415template <>
416struct OperandTraits<BinaryOperator> :
417 public FixedNumOperandTraits<BinaryOperator, 2> {
418};
419
// Out-of-line operand accessor definitions for BinaryOperator, generated by
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS and shown here in macro-expanded form
// by the analyzer. op_begin/op_end delegate to OperandTraits<BinaryOperator>;
// getOperand/setOperand assert the index is in range before touching the
// Use array.
420DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)BinaryOperator::op_iterator BinaryOperator::op_begin() { return
OperandTraits<BinaryOperator>::op_begin(this); } BinaryOperator
::const_op_iterator BinaryOperator::op_begin() const { return
OperandTraits<BinaryOperator>::op_begin(const_cast<
BinaryOperator*>(this)); } BinaryOperator::op_iterator BinaryOperator
::op_end() { return OperandTraits<BinaryOperator>::op_end
(this); } BinaryOperator::const_op_iterator BinaryOperator::op_end
() const { return OperandTraits<BinaryOperator>::op_end
(const_cast<BinaryOperator*>(this)); } Value *BinaryOperator
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<BinaryOperator>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<BinaryOperator>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 420, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<BinaryOperator>::op_begin(const_cast<
BinaryOperator*>(this))[i_nocapture].get()); } void BinaryOperator
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<BinaryOperator>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<BinaryOperator>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 420, __PRETTY_FUNCTION__)); OperandTraits<BinaryOperator
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
BinaryOperator::getNumOperands() const { return OperandTraits
<BinaryOperator>::operands(this); } template <int Idx_nocapture
> Use &BinaryOperator::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &BinaryOperator::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
421
422//===----------------------------------------------------------------------===//
423// CastInst Class
424//===----------------------------------------------------------------------===//
425
426/// This is the base class for all instructions that perform data
427/// casts. It is simply provided so that instruction category testing
428/// can be performed with code like:
429///
430/// if (isa<CastInst>(Instr)) { ... }
431/// Base class of casting instructions.
432class CastInst : public UnaryInstruction {
// The constructors are protected: concrete cast subclasses construct through
// them; external clients use the public static Create* factories below.
433protected:
434 /// Constructor with insert-before-instruction semantics for subclasses
435 CastInst(Type *Ty, unsigned iType, Value *S,
436 const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
437 : UnaryInstruction(Ty, iType, S, InsertBefore) {
438 setName(NameStr);
439 }
440 /// Constructor with insert-at-end-of-block semantics for subclasses
441 CastInst(Type *Ty, unsigned iType, Value *S,
442 const Twine &NameStr, BasicBlock *InsertAtEnd)
443 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
444 setName(NameStr);
445 }
446
447public:
448 /// Provides a way to construct any of the CastInst subclasses using an
449 /// opcode instead of the subclass's constructor. The opcode must be in the
450 /// CastOps category (Instruction::isCast(opcode) returns true). This
451 /// constructor has insert-before-instruction semantics to automatically
452 /// insert the new CastInst before InsertBefore (if it is non-null).
453 /// Construct any of the CastInst subclasses
454 static CastInst *Create(
455 Instruction::CastOps, ///< The opcode of the cast instruction
456 Value *S, ///< The value to be casted (operand 0)
457 Type *Ty, ///< The type to which cast should be made
458 const Twine &Name = "", ///< Name for the instruction
459 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
460 );
461 /// Provides a way to construct any of the CastInst subclasses using an
462 /// opcode instead of the subclass's constructor. The opcode must be in the
463 /// CastOps category. This constructor has insert-at-end-of-block semantics
464 /// to automatically insert the new CastInst at the end of InsertAtEnd (if
465 /// its non-null).
466 /// Construct any of the CastInst subclasses
467 static CastInst *Create(
468 Instruction::CastOps, ///< The opcode for the cast instruction
469 Value *S, ///< The value to be casted (operand 0)
470 Type *Ty, ///< The type to which operand is casted
471 const Twine &Name, ///< The name for the instruction
472 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
473 );
474
// The Create*OrBitCast / CreatePointerCast / CreateIntegerCast / CreateFPCast
// families below pick the correct cast opcode from the source and destination
// types, each offered in insert-before and insert-at-end variants.
475 /// Create a ZExt or BitCast cast instruction
476 static CastInst *CreateZExtOrBitCast(
477 Value *S, ///< The value to be casted (operand 0)
478 Type *Ty, ///< The type to which cast should be made
479 const Twine &Name = "", ///< Name for the instruction
480 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
481 );
482
483 /// Create a ZExt or BitCast cast instruction
484 static CastInst *CreateZExtOrBitCast(
485 Value *S, ///< The value to be casted (operand 0)
486 Type *Ty, ///< The type to which operand is casted
487 const Twine &Name, ///< The name for the instruction
488 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
489 );
490
491 /// Create a SExt or BitCast cast instruction
492 static CastInst *CreateSExtOrBitCast(
493 Value *S, ///< The value to be casted (operand 0)
494 Type *Ty, ///< The type to which cast should be made
495 const Twine &Name = "", ///< Name for the instruction
496 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
497 );
498
499 /// Create a SExt or BitCast cast instruction
500 static CastInst *CreateSExtOrBitCast(
501 Value *S, ///< The value to be casted (operand 0)
502 Type *Ty, ///< The type to which operand is casted
503 const Twine &Name, ///< The name for the instruction
504 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
505 );
506
507 /// Create a BitCast AddrSpaceCast, or a PtrToInt cast instruction.
508 static CastInst *CreatePointerCast(
509 Value *S, ///< The pointer value to be casted (operand 0)
510 Type *Ty, ///< The type to which operand is casted
511 const Twine &Name, ///< The name for the instruction
512 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
513 );
514
515 /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
516 static CastInst *CreatePointerCast(
517 Value *S, ///< The pointer value to be casted (operand 0)
518 Type *Ty, ///< The type to which cast should be made
519 const Twine &Name = "", ///< Name for the instruction
520 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
521 );
522
523 /// Create a BitCast or an AddrSpaceCast cast instruction.
524 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
525 Value *S, ///< The pointer value to be casted (operand 0)
526 Type *Ty, ///< The type to which operand is casted
527 const Twine &Name, ///< The name for the instruction
528 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
529 );
530
531 /// Create a BitCast or an AddrSpaceCast cast instruction.
532 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
533 Value *S, ///< The pointer value to be casted (operand 0)
534 Type *Ty, ///< The type to which cast should be made
535 const Twine &Name = "", ///< Name for the instruction
536 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
537 );
538
539 /// Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
540 ///
541 /// If the value is a pointer type and the destination an integer type,
542 /// creates a PtrToInt cast. If the value is an integer type and the
543 /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
544 /// a bitcast.
545 static CastInst *CreateBitOrPointerCast(
546 Value *S, ///< The pointer value to be casted (operand 0)
547 Type *Ty, ///< The type to which cast should be made
548 const Twine &Name = "", ///< Name for the instruction
549 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
550 );
551
552 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
553 static CastInst *CreateIntegerCast(
554 Value *S, ///< The pointer value to be casted (operand 0)
555 Type *Ty, ///< The type to which cast should be made
556 bool isSigned, ///< Whether to regard S as signed or not
557 const Twine &Name = "", ///< Name for the instruction
558 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
559 );
560
561 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
562 static CastInst *CreateIntegerCast(
563 Value *S, ///< The integer value to be casted (operand 0)
564 Type *Ty, ///< The integer type to which operand is casted
565 bool isSigned, ///< Whether to regard S as signed or not
566 const Twine &Name, ///< The name for the instruction
567 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
568 );
569
570 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
571 static CastInst *CreateFPCast(
572 Value *S, ///< The floating point value to be casted
573 Type *Ty, ///< The floating point type to cast to
574 const Twine &Name = "", ///< Name for the instruction
575 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
576 );
577
578 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
579 static CastInst *CreateFPCast(
580 Value *S, ///< The floating point value to be casted
581 Type *Ty, ///< The floating point type to cast to
582 const Twine &Name, ///< The name for the instruction
583 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
584 );
585
586 /// Create a Trunc or BitCast cast instruction
587 static CastInst *CreateTruncOrBitCast(
588 Value *S, ///< The value to be casted (operand 0)
589 Type *Ty, ///< The type to which cast should be made
590 const Twine &Name = "", ///< Name for the instruction
591 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
592 );
593
594 /// Create a Trunc or BitCast cast instruction
595 static CastInst *CreateTruncOrBitCast(
596 Value *S, ///< The value to be casted (operand 0)
597 Type *Ty, ///< The type to which operand is casted
598 const Twine &Name, ///< The name for the instruction
599 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
600 );
601
602 /// Check whether it is valid to call getCastOpcode for these types.
603 static bool isCastable(
604 Type *SrcTy, ///< The Type from which the value should be cast.
605 Type *DestTy ///< The Type to which the value should be cast.
606 );
607
608 /// Check whether a bitcast between these types is valid
609 static bool isBitCastable(
610 Type *SrcTy, ///< The Type from which the value should be cast.
611 Type *DestTy ///< The Type to which the value should be cast.
612 );
613
614 /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
615 /// types is valid and a no-op.
616 ///
617 /// This ensures that any pointer<->integer cast has enough bits in the
618 /// integer and any other cast is a bitcast.
619 static bool isBitOrNoopPointerCastable(
620 Type *SrcTy, ///< The Type from which the value should be cast.
621 Type *DestTy, ///< The Type to which the value should be cast.
622 const DataLayout &DL);
623
624 /// Returns the opcode necessary to cast Val into Ty using usual casting
625 /// rules.
626 /// Infer the opcode for cast operand and type
627 static Instruction::CastOps getCastOpcode(
628 const Value *Val, ///< The value to cast
629 bool SrcIsSigned, ///< Whether to treat the source as signed
630 Type *Ty, ///< The Type to which the value should be casted
631 bool DstIsSigned ///< Whether to treate the dest. as signed
632 );
633
634 /// There are several places where we need to know if a cast instruction
635 /// only deals with integer source and destination types. To simplify that
636 /// logic, this method is provided.
637 /// @returns true iff the cast has only integral typed operand and dest type.
638 /// Determine if this is an integer-only cast.
639 bool isIntegerCast() const;
640
641 /// A lossless cast is one that does not alter the basic value. It implies
642 /// a no-op cast but is more stringent, preventing things like int->float,
643 /// long->double, or int->ptr.
644 /// @returns true iff the cast is lossless.
645 /// Determine if this is a lossless cast.
646 bool isLosslessCast() const;
647
648 /// A no-op cast is one that can be effected without changing any bits.
649 /// It implies that the source and destination types are the same size. The
650 /// DataLayout argument is to determine the pointer size when examining casts
651 /// involving Integer and Pointer types. They are no-op casts if the integer
652 /// is the same size as the pointer. However, pointer size varies with
653 /// platform.
654 /// Determine if the described cast is a no-op cast.
655 static bool isNoopCast(
656 Instruction::CastOps Opcode, ///< Opcode of cast
657 Type *SrcTy, ///< SrcTy of cast
658 Type *DstTy, ///< DstTy of cast
659 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
660 );
661
662 /// Determine if this cast is a no-op cast.
663 ///
664 /// \param DL is the DataLayout to determine pointer size.
665 bool isNoopCast(const DataLayout &DL) const;
666
667 /// Determine how a pair of casts can be eliminated, if they can be at all.
668 /// This is a helper function for both CastInst and ConstantExpr.
669 /// @returns 0 if the CastInst pair can't be eliminated, otherwise
670 /// returns Instruction::CastOps value for a cast that can replace
671 /// the pair, casting SrcTy to DstTy.
672 /// Determine if a cast pair is eliminable
673 static unsigned isEliminableCastPair(
674 Instruction::CastOps firstOpcode, ///< Opcode of first cast
675 Instruction::CastOps secondOpcode, ///< Opcode of second cast
676 Type *SrcTy, ///< SrcTy of 1st cast
677 Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
678 Type *DstTy, ///< DstTy of 2nd cast
679 Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
680 Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
681 Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
682 );
683
684 /// Return the opcode of this CastInst
// Narrows the generic Instruction opcode to the CastOps enum.
685 Instruction::CastOps getOpcode() const {
686 return Instruction::CastOps(Instruction::getOpcode());
687 }
688
689 /// Return the source type, as a convenience
690 Type* getSrcTy() const { return getOperand(0)->getType(); }
691 /// Return the destination type, as a convenience
692 Type* getDestTy() const { return getType(); }
693
694 /// This method can be used to determine if a cast from S to DstTy using
695 /// Opcode op is valid or not.
696 /// @returns true iff the proposed cast is valid.
697 /// Determine if a cast is valid without creating one.
698 static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy);
699
700 /// Methods for support type inquiry through isa, cast, and dyn_cast:
701 static bool classof(const Instruction *I) {
702 return I->isCast();
703 }
704 static bool classof(const Value *V) {
705 return isa<Instruction>(V) && classof(cast<Instruction>(V));
706 }
707};
708
709//===----------------------------------------------------------------------===//
710// CmpInst Class
711//===----------------------------------------------------------------------===//
712
713/// This class is the base class for the comparison instructions.
714/// Abstract base class of comparison instructions.
715class CmpInst : public Instruction {
716public:
717 /// This enumeration lists the possible predicates for CmpInst subclasses.
718 /// Values in the range 0-31 are reserved for FCmpInst, while values in the
719 /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
720 /// predicate values are not overlapping between the classes.
721 ///
722 /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
723 /// FCMP_* values. Changing the bit patterns requires a potential change to
724 /// those passes.
725 enum Predicate : unsigned {
726 // Opcode U L G E Intuitive operation
727 FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
728 FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
729 FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
730 FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
731 FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
732 FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
733 FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
734 FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
735 FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
736 FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
737 FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
738 FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
739 FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
740 FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
741 FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
742 FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
743 FIRST_FCMP_PREDICATE = FCMP_FALSE,
744 LAST_FCMP_PREDICATE = FCMP_TRUE,
745 BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
746 ICMP_EQ = 32, ///< equal
747 ICMP_NE = 33, ///< not equal
748 ICMP_UGT = 34, ///< unsigned greater than
749 ICMP_UGE = 35, ///< unsigned greater or equal
750 ICMP_ULT = 36, ///< unsigned less than
751 ICMP_ULE = 37, ///< unsigned less or equal
752 ICMP_SGT = 38, ///< signed greater than
753 ICMP_SGE = 39, ///< signed greater or equal
754 ICMP_SLT = 40, ///< signed less than
755 ICMP_SLE = 41, ///< signed less or equal
756 FIRST_ICMP_PREDICATE = ICMP_EQ,
757 LAST_ICMP_PREDICATE = ICMP_SLE,
758 BAD_ICMP_PREDICATE = ICMP_SLE + 1
759 };
760 using PredicateField =
761 Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>;
762
763protected:
764 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
765 Value *LHS, Value *RHS, const Twine &Name = "",
766 Instruction *InsertBefore = nullptr,
767 Instruction *FlagsSource = nullptr);
768
769 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
770 Value *LHS, Value *RHS, const Twine &Name,
771 BasicBlock *InsertAtEnd);
772
773public:
774 // allocate space for exactly two operands
775 void *operator new(size_t s) {
776 return User::operator new(s, 2);
777 }
778
779 /// Construct a compare instruction, given the opcode, the predicate and
780 /// the two operands. Optionally (if InstBefore is specified) insert the
781 /// instruction into a BasicBlock right before the specified instruction.
782 /// The specified Instruction is allowed to be a dereferenced end iterator.
783 /// Create a CmpInst
784 static CmpInst *Create(OtherOps Op,
785 Predicate predicate, Value *S1,
786 Value *S2, const Twine &Name = "",
787 Instruction *InsertBefore = nullptr);
788
789 /// Construct a compare instruction, given the opcode, the predicate and the
790 /// two operands. Also automatically insert this instruction to the end of
791 /// the BasicBlock specified.
792 /// Create a CmpInst
793 static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
794 Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
795
796 /// Get the opcode casted to the right type
797 OtherOps getOpcode() const {
798 return static_cast<OtherOps>(Instruction::getOpcode());
799 }
800
801 /// Return the predicate for this instruction.
802 Predicate getPredicate() const { return getSubclassData<PredicateField>(); }
803
804 /// Set the predicate for this instruction to the specified value.
805 void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }
806
807 static bool isFPPredicate(Predicate P) {
808 assert(FIRST_FCMP_PREDICATE == 0 &&((FIRST_FCMP_PREDICATE == 0 && "FIRST_FCMP_PREDICATE is required to be 0"
) ? static_cast<void> (0) : __assert_fail ("FIRST_FCMP_PREDICATE == 0 && \"FIRST_FCMP_PREDICATE is required to be 0\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 809, __PRETTY_FUNCTION__))
809 "FIRST_FCMP_PREDICATE is required to be 0")((FIRST_FCMP_PREDICATE == 0 && "FIRST_FCMP_PREDICATE is required to be 0"
) ? static_cast<void> (0) : __assert_fail ("FIRST_FCMP_PREDICATE == 0 && \"FIRST_FCMP_PREDICATE is required to be 0\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 809, __PRETTY_FUNCTION__))
;
810 return P <= LAST_FCMP_PREDICATE;
811 }
812
813 static bool isIntPredicate(Predicate P) {
814 return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
815 }
816
817 static StringRef getPredicateName(Predicate P);
818
819 bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
820 bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
821
822 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
823 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
824 /// @returns the inverse predicate for the instruction's current predicate.
825 /// Return the inverse of the instruction's predicate.
826 Predicate getInversePredicate() const {
827 return getInversePredicate(getPredicate());
828 }
829
830 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
831 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
832 /// @returns the inverse predicate for predicate provided in \p pred.
833 /// Return the inverse of a given predicate
834 static Predicate getInversePredicate(Predicate pred);
835
836 /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
837 /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
838 /// @returns the predicate that would be the result of exchanging the two
839 /// operands of the CmpInst instruction without changing the result
840 /// produced.
841 /// Return the predicate as if the operands were swapped
842 Predicate getSwappedPredicate() const {
843 return getSwappedPredicate(getPredicate());
844 }
845
846 /// This is a static version that you can use without an instruction
847 /// available.
848 /// Return the predicate as if the operands were swapped.
849 static Predicate getSwappedPredicate(Predicate pred);
850
851 /// For predicate of kind "is X or equal to 0" returns the predicate "is X".
852 /// For predicate of kind "is X" returns the predicate "is X or equal to 0".
853 /// does not support other kind of predicates.
854 /// @returns the predicate that does not contains is equal to zero if
855 /// it had and vice versa.
856 /// Return the flipped strictness of predicate
857 Predicate getFlippedStrictnessPredicate() const {
858 return getFlippedStrictnessPredicate(getPredicate());
859 }
860
861 /// This is a static version that you can use without an instruction
862 /// available.
863 /// Return the flipped strictness of predicate
864 static Predicate getFlippedStrictnessPredicate(Predicate pred);
865
866 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
867 /// Returns the non-strict version of strict comparisons.
868 Predicate getNonStrictPredicate() const {
869 return getNonStrictPredicate(getPredicate());
870 }
871
872 /// This is a static version that you can use without an instruction
873 /// available.
874 /// @returns the non-strict version of comparison provided in \p pred.
875 /// If \p pred is not a strict comparison predicate, returns \p pred.
876 /// Returns the non-strict version of strict comparisons.
877 static Predicate getNonStrictPredicate(Predicate pred);
878
879 /// Provide more efficient getOperand methods.
880 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
881
882 /// This is just a convenience that dispatches to the subclasses.
883 /// Swap the operands and adjust predicate accordingly to retain
884 /// the same comparison.
885 void swapOperands();
886
887 /// This is just a convenience that dispatches to the subclasses.
888 /// Determine if this CmpInst is commutative.
889 bool isCommutative() const;
890
891 /// This is just a convenience that dispatches to the subclasses.
892 /// Determine if this is an equals/not equals predicate.
893 bool isEquality() const;
894
895 /// @returns true if the comparison is signed, false otherwise.
896 /// Determine if this instruction is using a signed comparison.
897 bool isSigned() const {
898 return isSigned(getPredicate());
899 }
900
901 /// @returns true if the comparison is unsigned, false otherwise.
902 /// Determine if this instruction is using an unsigned comparison.
903 bool isUnsigned() const {
904 return isUnsigned(getPredicate());
905 }
906
907 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
908 /// @returns the signed version of the unsigned predicate pred.
909 /// return the signed version of a predicate
910 static Predicate getSignedPredicate(Predicate pred);
911
912 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
913 /// @returns the signed version of the predicate for this instruction (which
914 /// has to be an unsigned predicate).
915 /// return the signed version of a predicate
916 Predicate getSignedPredicate() {
917 return getSignedPredicate(getPredicate());
918 }
919
920 /// This is just a convenience.
921 /// Determine if this is true when both operands are the same.
922 bool isTrueWhenEqual() const {
923 return isTrueWhenEqual(getPredicate());
924 }
925
926 /// This is just a convenience.
927 /// Determine if this is false when both operands are the same.
928 bool isFalseWhenEqual() const {
929 return isFalseWhenEqual(getPredicate());
930 }
931
932 /// @returns true if the predicate is unsigned, false otherwise.
933 /// Determine if the predicate is an unsigned operation.
934 static bool isUnsigned(Predicate predicate);
935
936 /// @returns true if the predicate is signed, false otherwise.
937 /// Determine if the predicate is an signed operation.
938 static bool isSigned(Predicate predicate);
939
940 /// Determine if the predicate is an ordered operation.
941 static bool isOrdered(Predicate predicate);
942
943 /// Determine if the predicate is an unordered operation.
944 static bool isUnordered(Predicate predicate);
945
946 /// Determine if the predicate is true when comparing a value with itself.
947 static bool isTrueWhenEqual(Predicate predicate);
948
949 /// Determine if the predicate is false when comparing a value with itself.
950 static bool isFalseWhenEqual(Predicate predicate);
951
952 /// Determine if Pred1 implies Pred2 is true when two compares have matching
953 /// operands.
954 static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
955
956 /// Determine if Pred1 implies Pred2 is false when two compares have matching
957 /// operands.
958 static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
959
960 /// Methods for support type inquiry through isa, cast, and dyn_cast:
961 static bool classof(const Instruction *I) {
962 return I->getOpcode() == Instruction::ICmp ||
963 I->getOpcode() == Instruction::FCmp;
964 }
965 static bool classof(const Value *V) {
966 return isa<Instruction>(V) && classof(cast<Instruction>(V));
967 }
968
969 /// Create a result type for fcmp/icmp
970 static Type* makeCmpResultType(Type* opnd_type) {
971 if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
972 return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
973 vt->getElementCount());
974 }
975 return Type::getInt1Ty(opnd_type->getContext());
976 }
977
978private:
979 // Shadow Value::setValueSubclassData with a private forwarding method so that
980 // subclasses cannot accidentally use it.
981 void setValueSubclassData(unsigned short D) {
982 Value::setValueSubclassData(D);
983 }
984};
985
986// FIXME: these are redundant if CmpInst < BinaryOperator
987template <>
988struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
989};
990
991DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)CmpInst::op_iterator CmpInst::op_begin() { return OperandTraits
<CmpInst>::op_begin(this); } CmpInst::const_op_iterator
CmpInst::op_begin() const { return OperandTraits<CmpInst>
::op_begin(const_cast<CmpInst*>(this)); } CmpInst::op_iterator
CmpInst::op_end() { return OperandTraits<CmpInst>::op_end
(this); } CmpInst::const_op_iterator CmpInst::op_end() const {
return OperandTraits<CmpInst>::op_end(const_cast<CmpInst
*>(this)); } Value *CmpInst::getOperand(unsigned i_nocapture
) const { ((i_nocapture < OperandTraits<CmpInst>::operands
(this) && "getOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CmpInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 991, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<CmpInst>::op_begin(const_cast<CmpInst
*>(this))[i_nocapture].get()); } void CmpInst::setOperand(
unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<CmpInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CmpInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 991, __PRETTY_FUNCTION__)); OperandTraits<CmpInst>::op_begin
(this)[i_nocapture] = Val_nocapture; } unsigned CmpInst::getNumOperands
() const { return OperandTraits<CmpInst>::operands(this
); } template <int Idx_nocapture> Use &CmpInst::Op(
) { return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &CmpInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
992
993/// A lightweight accessor for an operand bundle meant to be passed
994/// around by value.
995struct OperandBundleUse {
996 ArrayRef<Use> Inputs;
997
998 OperandBundleUse() = default;
999 explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
1000 : Inputs(Inputs), Tag(Tag) {}
1001
1002 /// Return true if the operand at index \p Idx in this operand bundle
1003 /// has the attribute A.
1004 bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
1005 if (isDeoptOperandBundle())
1006 if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
1007 return Inputs[Idx]->getType()->isPointerTy();
1008
1009 // Conservative answer: no operands have any attributes.
1010 return false;
1011 }
1012
1013 /// Return the tag of this operand bundle as a string.
1014 StringRef getTagName() const {
1015 return Tag->getKey();
1016 }
1017
1018 /// Return the tag of this operand bundle as an integer.
1019 ///
1020 /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
1021 /// and this function returns the unique integer getOrInsertBundleTag
1022 /// associated the tag of this operand bundle to.
1023 uint32_t getTagID() const {
1024 return Tag->getValue();
1025 }
1026
1027 /// Return true if this is a "deopt" operand bundle.
1028 bool isDeoptOperandBundle() const {
1029 return getTagID() == LLVMContext::OB_deopt;
1030 }
1031
1032 /// Return true if this is a "funclet" operand bundle.
1033 bool isFuncletOperandBundle() const {
1034 return getTagID() == LLVMContext::OB_funclet;
1035 }
1036
1037 /// Return true if this is a "cfguardtarget" operand bundle.
1038 bool isCFGuardTargetOperandBundle() const {
1039 return getTagID() == LLVMContext::OB_cfguardtarget;
1040 }
1041
1042private:
1043 /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
1044 StringMapEntry<uint32_t> *Tag;
1045};
1046
1047/// A container for an operand bundle being viewed as a set of values
1048/// rather than a set of uses.
1049///
1050/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
1051/// so it is possible to create and pass around "self-contained" instances of
1052/// OperandBundleDef and ConstOperandBundleDef.
1053template <typename InputTy> class OperandBundleDefT {
1054 std::string Tag;
1055 std::vector<InputTy> Inputs;
1056
1057public:
1058 explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
1059 : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
1060 explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
1061 : Tag(std::move(Tag)), Inputs(Inputs) {}
1062
1063 explicit OperandBundleDefT(const OperandBundleUse &OBU) {
1064 Tag = std::string(OBU.getTagName());
1065 Inputs.insert(Inputs.end(), OBU.Inputs.begin(), OBU.Inputs.end());
1066 }
1067
1068 ArrayRef<InputTy> inputs() const { return Inputs; }
1069
1070 using input_iterator = typename std::vector<InputTy>::const_iterator;
1071
1072 size_t input_size() const { return Inputs.size(); }
1073 input_iterator input_begin() const { return Inputs.begin(); }
1074 input_iterator input_end() const { return Inputs.end(); }
1075
1076 StringRef getTag() const { return Tag; }
1077};
1078
1079using OperandBundleDef = OperandBundleDefT<Value *>;
1080using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
1081
1082//===----------------------------------------------------------------------===//
1083// CallBase Class
1084//===----------------------------------------------------------------------===//
1085
1086/// Base class for all callable instructions (InvokeInst and CallInst)
1087/// Holds everything related to calling a function.
1088///
1089/// All call-like instructions are required to use a common operand layout:
1090/// - Zero or more arguments to the call,
1091/// - Zero or more operand bundles with zero or more operand inputs each
1092/// bundle,
1093/// - Zero or more subclass controlled operands
1094/// - The called function.
1095///
1096/// This allows this base class to easily access the called function and the
1097/// start of the arguments without knowing how many other operands a particular
1098/// subclass requires. Note that accessing the end of the argument list isn't
1099/// as cheap as most other operations on the base class.
1100class CallBase : public Instruction {
1101protected:
1102 // The first two bits are reserved by CallInst for fast retrieval,
1103 using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>;
1104 using CallingConvField =
1105 Bitfield::Element<CallingConv::ID, CallInstReservedField::NextBit, 10,
1106 CallingConv::MaxID>;
1107 static_assert(
1108 Bitfield::areContiguous<CallInstReservedField, CallingConvField>(),
1109 "Bitfields must be contiguous");
1110
1111 /// The last operand is the called operand.
1112 static constexpr int CalledOperandOpEndIdx = -1;
1113
1114 AttributeList Attrs; ///< parameter attributes for callable
1115 FunctionType *FTy;
1116
1117 template <class... ArgsTy>
1118 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1119 : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1120
1121 using Instruction::Instruction;
1122
1123 bool hasDescriptor() const { return Value::HasDescriptor; }
1124
1125 unsigned getNumSubclassExtraOperands() const {
1126 switch (getOpcode()) {
1127 case Instruction::Call:
1128 return 0;
1129 case Instruction::Invoke:
1130 return 2;
1131 case Instruction::CallBr:
1132 return getNumSubclassExtraOperandsDynamic();
1133 }
1134 llvm_unreachable("Invalid opcode!")::llvm::llvm_unreachable_internal("Invalid opcode!", "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1134)
;
1135 }
1136
1137 /// Get the number of extra operands for instructions that don't have a fixed
1138 /// number of extra operands.
1139 unsigned getNumSubclassExtraOperandsDynamic() const;
1140
1141public:
1142 using Instruction::getContext;
1143
1144 /// Create a clone of \p CB with a different set of operand bundles and
1145 /// insert it before \p InsertPt.
1146 ///
1147 /// The returned call instruction is identical \p CB in every way except that
1148 /// the operand bundles for the new instruction are set to the operand bundles
1149 /// in \p Bundles.
1150 static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
1151 Instruction *InsertPt = nullptr);
1152
1153 static bool classof(const Instruction *I) {
1154 return I->getOpcode() == Instruction::Call ||
1155 I->getOpcode() == Instruction::Invoke ||
1156 I->getOpcode() == Instruction::CallBr;
1157 }
1158 static bool classof(const Value *V) {
1159 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1160 }
1161
1162 FunctionType *getFunctionType() const { return FTy; }
1163
1164 void mutateFunctionType(FunctionType *FTy) {
1165 Value::mutateType(FTy->getReturnType());
1166 this->FTy = FTy;
1167 }
1168
1169 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1170
1171 /// data_operands_begin/data_operands_end - Return iterators iterating over
1172 /// the call / invoke argument list and bundle operands. For invokes, this is
1173 /// the set of instruction operands except the invoke target and the two
1174 /// successor blocks; and for calls this is the set of instruction operands
1175 /// except the call target.
1176 User::op_iterator data_operands_begin() { return op_begin(); }
1177 User::const_op_iterator data_operands_begin() const {
1178 return const_cast<CallBase *>(this)->data_operands_begin();
1179 }
1180 User::op_iterator data_operands_end() {
1181 // Walk from the end of the operands over the called operand and any
1182 // subclass operands.
1183 return op_end() - getNumSubclassExtraOperands() - 1;
1184 }
1185 User::const_op_iterator data_operands_end() const {
1186 return const_cast<CallBase *>(this)->data_operands_end();
1187 }
1188 iterator_range<User::op_iterator> data_ops() {
1189 return make_range(data_operands_begin(), data_operands_end());
1190 }
1191 iterator_range<User::const_op_iterator> data_ops() const {
1192 return make_range(data_operands_begin(), data_operands_end());
1193 }
1194 bool data_operands_empty() const {
1195 return data_operands_end() == data_operands_begin();
1196 }
1197 unsigned data_operands_size() const {
1198 return std::distance(data_operands_begin(), data_operands_end());
1199 }
1200
1201 bool isDataOperand(const Use *U) const {
1202 assert(this == U->getUser() &&((this == U->getUser() && "Only valid to query with a use of this instruction!"
) ? static_cast<void> (0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1203, __PRETTY_FUNCTION__))
1203 "Only valid to query with a use of this instruction!")((this == U->getUser() && "Only valid to query with a use of this instruction!"
) ? static_cast<void> (0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1203, __PRETTY_FUNCTION__))
;
1204 return data_operands_begin() <= U && U < data_operands_end();
1205 }
1206 bool isDataOperand(Value::const_user_iterator UI) const {
1207 return isDataOperand(&UI.getUse());
1208 }
1209
1210 /// Given a value use iterator, return the data operand corresponding to it.
1211 /// Iterator must actually correspond to a data operand.
1212 unsigned getDataOperandNo(Value::const_user_iterator UI) const {
1213 return getDataOperandNo(&UI.getUse());
1214 }
1215
1216 /// Given a use for a data operand, get the data operand number that
1217 /// corresponds to it.
1218 unsigned getDataOperandNo(const Use *U) const {
1219 assert(isDataOperand(U) && "Data operand # out of range!")((isDataOperand(U) && "Data operand # out of range!")
? static_cast<void> (0) : __assert_fail ("isDataOperand(U) && \"Data operand # out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1219, __PRETTY_FUNCTION__))
;
1220 return U - data_operands_begin();
1221 }
1222
1223 /// Return the iterator pointing to the beginning of the argument list.
1224 User::op_iterator arg_begin() { return op_begin(); }
1225 User::const_op_iterator arg_begin() const {
1226 return const_cast<CallBase *>(this)->arg_begin();
1227 }
1228
1229 /// Return the iterator pointing to the end of the argument list.
1230 User::op_iterator arg_end() {
1231 // From the end of the data operands, walk backwards past the bundle
1232 // operands.
1233 return data_operands_end() - getNumTotalBundleOperands();
1234 }
1235 User::const_op_iterator arg_end() const {
1236 return const_cast<CallBase *>(this)->arg_end();
1237 }
1238
1239 /// Iteration adapter for range-for loops.
1240 iterator_range<User::op_iterator> args() {
1241 return make_range(arg_begin(), arg_end());
1242 }
1243 iterator_range<User::const_op_iterator> args() const {
1244 return make_range(arg_begin(), arg_end());
1245 }
1246 bool arg_empty() const { return arg_end() == arg_begin(); }
1247 unsigned arg_size() const { return arg_end() - arg_begin(); }
1248
1249 // Legacy API names that duplicate the above and will be removed once users
1250 // are migrated.
1251 iterator_range<User::op_iterator> arg_operands() {
1252 return make_range(arg_begin(), arg_end());
1253 }
1254 iterator_range<User::const_op_iterator> arg_operands() const {
1255 return make_range(arg_begin(), arg_end());
1256 }
1257 unsigned getNumArgOperands() const { return arg_size(); }
1258
1259 Value *getArgOperand(unsigned i) const {
1260 assert(i < getNumArgOperands() && "Out of bounds!")((i < getNumArgOperands() && "Out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1260, __PRETTY_FUNCTION__))
;
1261 return getOperand(i);
1262 }
1263
1264 void setArgOperand(unsigned i, Value *v) {
1265 assert(i < getNumArgOperands() && "Out of bounds!")((i < getNumArgOperands() && "Out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1265, __PRETTY_FUNCTION__))
;
1266 setOperand(i, v);
1267 }
1268
1269 /// Wrappers for getting the \c Use of a call argument.
1270 const Use &getArgOperandUse(unsigned i) const {
1271 assert(i < getNumArgOperands() && "Out of bounds!")((i < getNumArgOperands() && "Out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1271, __PRETTY_FUNCTION__))
;
1272 return User::getOperandUse(i);
1273 }
1274 Use &getArgOperandUse(unsigned i) {
1275 assert(i < getNumArgOperands() && "Out of bounds!")((i < getNumArgOperands() && "Out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < getNumArgOperands() && \"Out of bounds!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1275, __PRETTY_FUNCTION__))
;
1276 return User::getOperandUse(i);
1277 }
1278
1279 bool isArgOperand(const Use *U) const {
1280 assert(this == U->getUser() &&((this == U->getUser() && "Only valid to query with a use of this instruction!"
) ? static_cast<void> (0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1281, __PRETTY_FUNCTION__))
1281 "Only valid to query with a use of this instruction!")((this == U->getUser() && "Only valid to query with a use of this instruction!"
) ? static_cast<void> (0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1281, __PRETTY_FUNCTION__))
;
1282 return arg_begin() <= U && U < arg_end();
1283 }
1284 bool isArgOperand(Value::const_user_iterator UI) const {
1285 return isArgOperand(&UI.getUse());
1286 }
1287
1288 /// Given a use for a arg operand, get the arg operand number that
1289 /// corresponds to it.
1290 unsigned getArgOperandNo(const Use *U) const {
1291 assert(isArgOperand(U) && "Arg operand # out of range!")((isArgOperand(U) && "Arg operand # out of range!") ?
static_cast<void> (0) : __assert_fail ("isArgOperand(U) && \"Arg operand # out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1291, __PRETTY_FUNCTION__))
;
1292 return U - arg_begin();
1293 }
1294
1295 /// Given a value use iterator, return the arg operand number corresponding to
1296 /// it. Iterator must actually correspond to a data operand.
1297 unsigned getArgOperandNo(Value::const_user_iterator UI) const {
1298 return getArgOperandNo(&UI.getUse());
1299 }
1300
1301 /// Returns true if this CallSite passes the given Value* as an argument to
1302 /// the called function.
1303 bool hasArgument(const Value *V) const {
1304 return llvm::any_of(args(), [V](const Value *Arg) { return Arg == V; });
1305 }
1306
1307 Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
1308
1309 const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
1310 Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
1311
1312 /// Returns the function called, or null if this is an
1313 /// indirect function invocation.
1314 Function *getCalledFunction() const {
1315 return dyn_cast_or_null<Function>(getCalledOperand());
1316 }
1317
1318 /// Return true if the callsite is an indirect call.
1319 bool isIndirectCall() const;
1320
1321 /// Determine whether the passed iterator points to the callee operand's Use.
1322 bool isCallee(Value::const_user_iterator UI) const {
1323 return isCallee(&UI.getUse());
1324 }
1325
1326 /// Determine whether this Use is the callee operand's Use.
1327 bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
1328
1329 /// Helper to get the caller (the parent function).
1330 Function *getCaller();
1331 const Function *getCaller() const {
1332 return const_cast<CallBase *>(this)->getCaller();
1333 }
1334
1335 /// Tests if this call site must be tail call optimized. Only a CallInst can
1336 /// be tail call optimized.
1337 bool isMustTailCall() const;
1338
1339 /// Tests if this call site is marked as a tail call.
1340 bool isTailCall() const;
1341
1342 /// Returns the intrinsic ID of the intrinsic called or
1343 /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
1344 /// this is an indirect call.
1345 Intrinsic::ID getIntrinsicID() const;
1346
1347 void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
1348
1349 /// Sets the function called, including updating the function type.
1350 void setCalledFunction(Function *Fn) {
1351 setCalledFunction(Fn->getFunctionType(), Fn);
1352 }
1353
1354 /// Sets the function called, including updating the function type.
1355 void setCalledFunction(FunctionCallee Fn) {
1356 setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
1357 }
1358
1359 /// Sets the function called, including updating to the specified function
1360 /// type.
1361 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1362 this->FTy = FTy;
1363 assert(FTy == cast<FunctionType>(((FTy == cast<FunctionType>( cast<PointerType>(Fn
->getType())->getElementType())) ? static_cast<void>
(0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1364, __PRETTY_FUNCTION__))
1364 cast<PointerType>(Fn->getType())->getElementType()))((FTy == cast<FunctionType>( cast<PointerType>(Fn
->getType())->getElementType())) ? static_cast<void>
(0) : __assert_fail ("FTy == cast<FunctionType>( cast<PointerType>(Fn->getType())->getElementType())"
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1364, __PRETTY_FUNCTION__))
;
1365 // This function doesn't mutate the return type, only the function
1366 // type. Seems broken, but I'm just gonna stick an assert in for now.
1367 assert(getType() == FTy->getReturnType())((getType() == FTy->getReturnType()) ? static_cast<void
> (0) : __assert_fail ("getType() == FTy->getReturnType()"
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1367, __PRETTY_FUNCTION__))
;
1368 setCalledOperand(Fn);
1369 }
1370
1371 CallingConv::ID getCallingConv() const {
1372 return getSubclassData<CallingConvField>();
1373 }
1374
1375 void setCallingConv(CallingConv::ID CC) {
1376 setSubclassData<CallingConvField>(CC);
1377 }
1378
1379 /// Check if this call is an inline asm statement.
1380 bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
1381
1382 /// \name Attribute API
1383 ///
1384 /// These methods access and modify attributes on this call (including
1385 /// looking through to the attributes on the called function when necessary).
1386 ///@{
1387
1388 /// Return the parameter attributes for this call.
1389 ///
1390 AttributeList getAttributes() const { return Attrs; }
1391
1392 /// Set the parameter attributes for this call.
1393 ///
1394 void setAttributes(AttributeList A) { Attrs = A; }
1395
1396 /// Determine whether this call has the given attribute. If it does not
1397 /// then determine if the called function has the attribute, but only if
1398 /// the attribute is allowed for the call.
1399 bool hasFnAttr(Attribute::AttrKind Kind) const {
1400 assert(Kind != Attribute::NoBuiltin &&((Kind != Attribute::NoBuiltin && "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? static_cast<void> (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1401, __PRETTY_FUNCTION__))
1401 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin")((Kind != Attribute::NoBuiltin && "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin"
) ? static_cast<void> (0) : __assert_fail ("Kind != Attribute::NoBuiltin && \"Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1401, __PRETTY_FUNCTION__))
;
1402 return hasFnAttrImpl(Kind);
1403 }
1404
1405 /// Determine whether this call has the given attribute. If it does not
1406 /// then determine if the called function has the attribute, but only if
1407 /// the attribute is allowed for the call.
1408 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1409
1410 /// adds the attribute to the list of attributes.
1411 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1412 AttributeList PAL = getAttributes();
1413 PAL = PAL.addAttribute(getContext(), i, Kind);
1414 setAttributes(PAL);
1415 }
1416
1417 /// adds the attribute to the list of attributes.
1418 void addAttribute(unsigned i, Attribute Attr) {
1419 AttributeList PAL = getAttributes();
1420 PAL = PAL.addAttribute(getContext(), i, Attr);
1421 setAttributes(PAL);
1422 }
1423
1424 /// Adds the attribute to the indicated argument
1425 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1426 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1426, __PRETTY_FUNCTION__))
;
1427 AttributeList PAL = getAttributes();
1428 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1429 setAttributes(PAL);
1430 }
1431
1432 /// Adds the attribute to the indicated argument
1433 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1434 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1434, __PRETTY_FUNCTION__))
;
1435 AttributeList PAL = getAttributes();
1436 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1437 setAttributes(PAL);
1438 }
1439
1440 /// removes the attribute from the list of attributes.
1441 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1442 AttributeList PAL = getAttributes();
1443 PAL = PAL.removeAttribute(getContext(), i, Kind);
1444 setAttributes(PAL);
1445 }
1446
1447 /// removes the attribute from the list of attributes.
1448 void removeAttribute(unsigned i, StringRef Kind) {
1449 AttributeList PAL = getAttributes();
1450 PAL = PAL.removeAttribute(getContext(), i, Kind);
1451 setAttributes(PAL);
1452 }
1453
1454 void removeAttributes(unsigned i, const AttrBuilder &Attrs) {
1455 AttributeList PAL = getAttributes();
1456 PAL = PAL.removeAttributes(getContext(), i, Attrs);
1457 setAttributes(PAL);
1458 }
1459
1460 /// Removes the attribute from the given argument
1461 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1462 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1462, __PRETTY_FUNCTION__))
;
1463 AttributeList PAL = getAttributes();
1464 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1465 setAttributes(PAL);
1466 }
1467
1468 /// Removes the attribute from the given argument
1469 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1470 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1470, __PRETTY_FUNCTION__))
;
1471 AttributeList PAL = getAttributes();
1472 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1473 setAttributes(PAL);
1474 }
1475
1476 /// adds the dereferenceable attribute to the list of attributes.
1477 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1478 AttributeList PAL = getAttributes();
1479 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1480 setAttributes(PAL);
1481 }
1482
1483 /// adds the dereferenceable_or_null attribute to the list of
1484 /// attributes.
1485 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1486 AttributeList PAL = getAttributes();
1487 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1488 setAttributes(PAL);
1489 }
1490
1491 /// Determine whether the return value has the given attribute.
1492 bool hasRetAttr(Attribute::AttrKind Kind) const;
1493
1494 /// Determine whether the argument or parameter has the given attribute.
1495 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1496
1497 /// Get the attribute of a given kind at a position.
1498 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1499 return getAttributes().getAttribute(i, Kind);
1500 }
1501
1502 /// Get the attribute of a given kind at a position.
1503 Attribute getAttribute(unsigned i, StringRef Kind) const {
1504 return getAttributes().getAttribute(i, Kind);
1505 }
1506
1507 /// Get the attribute of a given kind from a given arg
1508 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1509 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1509, __PRETTY_FUNCTION__))
;
1510 return getAttributes().getParamAttr(ArgNo, Kind);
1511 }
1512
1513 /// Get the attribute of a given kind from a given arg
1514 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1515 assert(ArgNo < getNumArgOperands() && "Out of bounds")((ArgNo < getNumArgOperands() && "Out of bounds") ?
static_cast<void> (0) : __assert_fail ("ArgNo < getNumArgOperands() && \"Out of bounds\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1515, __PRETTY_FUNCTION__))
;
1516 return getAttributes().getParamAttr(ArgNo, Kind);
1517 }
1518
1519 /// Return true if the data operand at index \p i has the attribute \p
1520 /// A.
1521 ///
1522 /// Data operands include call arguments and values used in operand bundles,
1523 /// but does not include the callee operand. This routine dispatches to the
1524 /// underlying AttributeList or the OperandBundleUser as appropriate.
1525 ///
1526 /// The index \p i is interpreted as
1527 ///
1528 /// \p i == Attribute::ReturnIndex -> the return value
1529 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1530 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1531 /// (\p i - 1) in the operand list.
1532 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1533 // Note that we have to add one because `i` isn't zero-indexed.
1534 assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&((i < (getNumArgOperands() + getNumTotalBundleOperands() +
1) && "Data operand index out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1535, __PRETTY_FUNCTION__))
1535 "Data operand index out of bounds!")((i < (getNumArgOperands() + getNumTotalBundleOperands() +
1) && "Data operand index out of bounds!") ? static_cast
<void> (0) : __assert_fail ("i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) && \"Data operand index out of bounds!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1535, __PRETTY_FUNCTION__))
;
1536
1537 // The attribute A can either be directly specified, if the operand in
1538 // question is a call argument; or be indirectly implied by the kind of its
1539 // containing operand bundle, if the operand is a bundle operand.
1540
1541 if (i == AttributeList::ReturnIndex)
1542 return hasRetAttr(Kind);
1543
1544 // FIXME: Avoid these i - 1 calculations and update the API to use
1545 // zero-based indices.
1546 if (i < (getNumArgOperands() + 1))
1547 return paramHasAttr(i - 1, Kind);
1548
1549 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&((hasOperandBundles() && i >= (getBundleOperandsStartIndex
() + 1) && "Must be either a call argument or an operand bundle!"
) ? static_cast<void> (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1550, __PRETTY_FUNCTION__))
1550 "Must be either a call argument or an operand bundle!")((hasOperandBundles() && i >= (getBundleOperandsStartIndex
() + 1) && "Must be either a call argument or an operand bundle!"
) ? static_cast<void> (0) : __assert_fail ("hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) && \"Must be either a call argument or an operand bundle!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1550, __PRETTY_FUNCTION__))
;
1551 return bundleOperandHasAttr(i - 1, Kind);
1552 }
1553
1554 /// Determine whether this data operand is not captured.
1555 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1556 // better indicate that this may return a conservative answer.
1557 bool doesNotCapture(unsigned OpNo) const {
1558 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
1559 }
1560
1561 /// Determine whether this argument is passed by value.
1562 bool isByValArgument(unsigned ArgNo) const {
1563 return paramHasAttr(ArgNo, Attribute::ByVal);
1564 }
1565
1566 /// Determine whether this argument is passed in an alloca.
1567 bool isInAllocaArgument(unsigned ArgNo) const {
1568 return paramHasAttr(ArgNo, Attribute::InAlloca);
1569 }
1570
1571 /// Determine whether this argument is passed by value, in an alloca, or is
1572 /// preallocated.
1573 bool isPassPointeeByValueArgument(unsigned ArgNo) const {
1574 return paramHasAttr(ArgNo, Attribute::ByVal) ||
1575 paramHasAttr(ArgNo, Attribute::InAlloca) ||
1576 paramHasAttr(ArgNo, Attribute::Preallocated);
1577 }
1578
1579 /// Determine if there are is an inalloca argument. Only the last argument can
1580 /// have the inalloca attribute.
1581 bool hasInAllocaArgument() const {
1582 return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
1583 }
1584
1585 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1586 // better indicate that this may return a conservative answer.
1587 bool doesNotAccessMemory(unsigned OpNo) const {
1588 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1589 }
1590
1591 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1592 // better indicate that this may return a conservative answer.
1593 bool onlyReadsMemory(unsigned OpNo) const {
1594 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
1595 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1596 }
1597
1598 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1599 // better indicate that this may return a conservative answer.
1600 bool doesNotReadMemory(unsigned OpNo) const {
1601 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
1602 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1603 }
1604
1605 LLVM_ATTRIBUTE_DEPRECATED(unsigned getRetAlignment() const,unsigned getRetAlignment() const __attribute__((deprecated("Use getRetAlign() instead"
)))
1606 "Use getRetAlign() instead")unsigned getRetAlignment() const __attribute__((deprecated("Use getRetAlign() instead"
)))
{
1607 if (const auto MA = Attrs.getRetAlignment())
1608 return MA->value();
1609 return 0;
1610 }
1611
1612 /// Extract the alignment of the return value.
1613 MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
1614
1615 /// Extract the alignment for a call or parameter (0=unknown).
1616 LLVM_ATTRIBUTE_DEPRECATED(unsigned getParamAlignment(unsigned ArgNo) const,unsigned getParamAlignment(unsigned ArgNo) const __attribute__
((deprecated("Use getParamAlign() instead")))
1617 "Use getParamAlign() instead")unsigned getParamAlignment(unsigned ArgNo) const __attribute__
((deprecated("Use getParamAlign() instead")))
{
1618 if (const auto MA = Attrs.getParamAlignment(ArgNo))
1619 return MA->value();
1620 return 0;
1621 }
1622
1623 /// Extract the alignment for a call or parameter (0=unknown).
1624 MaybeAlign getParamAlign(unsigned ArgNo) const {
1625 return Attrs.getParamAlignment(ArgNo);
1626 }
1627
1628 /// Extract the byval type for a call or parameter.
1629 Type *getParamByValType(unsigned ArgNo) const {
1630 Type *Ty = Attrs.getParamByValType(ArgNo);
1631 return Ty ? Ty : getArgOperand(ArgNo)->getType()->getPointerElementType();
1632 }
1633
1634 /// Extract the preallocated type for a call or parameter.
1635 Type *getParamPreallocatedType(unsigned ArgNo) const {
1636 Type *Ty = Attrs.getParamPreallocatedType(ArgNo);
1637 return Ty ? Ty : getArgOperand(ArgNo)->getType()->getPointerElementType();
1638 }
1639
1640 /// Extract the number of dereferenceable bytes for a call or
1641 /// parameter (0=unknown).
1642 uint64_t getDereferenceableBytes(unsigned i) const {
1643 return Attrs.getDereferenceableBytes(i);
1644 }
1645
1646 /// Extract the number of dereferenceable_or_null bytes for a call or
1647 /// parameter (0=unknown).
1648 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1649 return Attrs.getDereferenceableOrNullBytes(i);
1650 }
1651
1652 /// Return true if the return value is known to be not null.
1653 /// This may be because it has the nonnull attribute, or because at least
1654 /// one byte is dereferenceable and the pointer is in addrspace(0).
1655 bool isReturnNonNull() const;
1656
1657 /// Determine if the return value is marked with NoAlias attribute.
1658 bool returnDoesNotAlias() const {
1659 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1660 }
1661
1662 /// If one of the arguments has the 'returned' attribute, returns its
1663 /// operand value. Otherwise, return nullptr.
1664 Value *getReturnedArgOperand() const;
1665
1666 /// Return true if the call should not be treated as a call to a
1667 /// builtin.
1668 bool isNoBuiltin() const {
1669 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1670 !hasFnAttrImpl(Attribute::Builtin);
1671 }
1672
1673 /// Determine if the call requires strict floating point semantics.
1674 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1675
1676 /// Return true if the call should not be inlined.
1677 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1678 void setIsNoInline() {
1679 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1680 }
1681 /// Determine if the call does not access memory.
1682 bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
1683 void setDoesNotAccessMemory() {
1684 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1685 }
1686
1687 /// Determine if the call does not access or only reads memory.
1688 bool onlyReadsMemory() const {
1689 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1690 }
1691 void setOnlyReadsMemory() {
1692 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1693 }
1694
1695 /// Determine if the call does not access or only writes memory.
1696 bool doesNotReadMemory() const {
1697 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1698 }
1699 void setDoesNotReadMemory() {
1700 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1701 }
1702
1703 /// Determine if the call can access memmory only using pointers based
1704 /// on its arguments.
1705 bool onlyAccessesArgMemory() const {
1706 return hasFnAttr(Attribute::ArgMemOnly);
1707 }
1708 void setOnlyAccessesArgMemory() {
1709 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1710 }
1711
1712 /// Determine if the function may only access memory that is
1713 /// inaccessible from the IR.
1714 bool onlyAccessesInaccessibleMemory() const {
1715 return hasFnAttr(Attribute::InaccessibleMemOnly);
1716 }
1717 void setOnlyAccessesInaccessibleMemory() {
1718 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1719 }
1720
1721 /// Determine if the function may only access memory that is
1722 /// either inaccessible from the IR or pointed to by its arguments.
1723 bool onlyAccessesInaccessibleMemOrArgMem() const {
1724 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1725 }
1726 void setOnlyAccessesInaccessibleMemOrArgMem() {
1727 addAttribute(AttributeList::FunctionIndex,
1728 Attribute::InaccessibleMemOrArgMemOnly);
1729 }
1730 /// Determine if the call cannot return.
1731 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1732 void setDoesNotReturn() {
1733 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1734 }
1735
1736 /// Determine if the call should not perform indirect branch tracking.
1737 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1738
1739 /// Determine if the call cannot unwind.
1740 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1741 void setDoesNotThrow() {
1742 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1743 }
1744
1745 /// Determine if the invoke cannot be duplicated.
1746 bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
1747 void setCannotDuplicate() {
1748 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1749 }
1750
1751 /// Determine if the call cannot be tail merged.
1752 bool cannotMerge() const { return hasFnAttr(Attribute::NoMerge); }
1753 void setCannotMerge() {
1754 addAttribute(AttributeList::FunctionIndex, Attribute::NoMerge);
1755 }
1756
1757 /// Determine if the invoke is convergent
1758 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1759 void setConvergent() {
1760 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1761 }
1762 void setNotConvergent() {
1763 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1764 }
1765
1766 /// Determine if the call returns a structure through first
1767 /// pointer argument.
1768 bool hasStructRetAttr() const {
1769 if (getNumArgOperands() == 0)
1770 return false;
1771
1772 // Be friendly and also check the callee.
1773 return paramHasAttr(0, Attribute::StructRet);
1774 }
1775
1776 /// Determine if any call argument is an aggregate passed by value.
1777 bool hasByValArgument() const {
1778 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1779 }
1780
1781 ///@{
1782 // End of attribute API.
1783
1784 /// \name Operand Bundle API
1785 ///
1786 /// This group of methods provides the API to access and manipulate operand
1787 /// bundles on this call.
1788 /// @{
1789
1790 /// Return the number of operand bundles associated with this User.
1791 unsigned getNumOperandBundles() const {
1792 return std::distance(bundle_op_info_begin(), bundle_op_info_end());
1793 }
1794
1795 /// Return true if this User has any operand bundles.
1796 bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
1797
1798 /// Return the index of the first bundle operand in the Use array.
1799 unsigned getBundleOperandsStartIndex() const {
1800 assert(hasOperandBundles() && "Don't call otherwise!")((hasOperandBundles() && "Don't call otherwise!") ? static_cast
<void> (0) : __assert_fail ("hasOperandBundles() && \"Don't call otherwise!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1800, __PRETTY_FUNCTION__))
;
1801 return bundle_op_info_begin()->Begin;
1802 }
1803
1804 /// Return the index of the last bundle operand in the Use array.
1805 unsigned getBundleOperandsEndIndex() const {
1806 assert(hasOperandBundles() && "Don't call otherwise!")((hasOperandBundles() && "Don't call otherwise!") ? static_cast
<void> (0) : __assert_fail ("hasOperandBundles() && \"Don't call otherwise!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1806, __PRETTY_FUNCTION__))
;
1807 return bundle_op_info_end()[-1].End;
1808 }
1809
1810 /// Return true if the operand at index \p Idx is a bundle operand.
1811 bool isBundleOperand(unsigned Idx) const {
1812 return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
1813 Idx < getBundleOperandsEndIndex();
1814 }
1815
1816 /// Returns true if the use is a bundle operand.
1817 bool isBundleOperand(const Use *U) const {
1818 assert(this == U->getUser() &&((this == U->getUser() && "Only valid to query with a use of this instruction!"
) ? static_cast<void> (0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1819, __PRETTY_FUNCTION__))
1819 "Only valid to query with a use of this instruction!")((this == U->getUser() && "Only valid to query with a use of this instruction!"
) ? static_cast<void> (0) : __assert_fail ("this == U->getUser() && \"Only valid to query with a use of this instruction!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1819, __PRETTY_FUNCTION__))
;
1820 return hasOperandBundles() && isBundleOperand(U - op_begin());
1821 }
1822 bool isBundleOperand(Value::const_user_iterator UI) const {
1823 return isBundleOperand(&UI.getUse());
1824 }
1825
1826 /// Return the total number operands (not operand bundles) used by
1827 /// every operand bundle in this OperandBundleUser.
1828 unsigned getNumTotalBundleOperands() const {
1829 if (!hasOperandBundles())
1830 return 0;
1831
1832 unsigned Begin = getBundleOperandsStartIndex();
1833 unsigned End = getBundleOperandsEndIndex();
1834
1835 assert(Begin <= End && "Should be!")((Begin <= End && "Should be!") ? static_cast<void
> (0) : __assert_fail ("Begin <= End && \"Should be!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1835, __PRETTY_FUNCTION__))
;
1836 return End - Begin;
1837 }
1838
1839 /// Return the operand bundle at a specific index.
1840 OperandBundleUse getOperandBundleAt(unsigned Index) const {
1841 assert(Index < getNumOperandBundles() && "Index out of bounds!")((Index < getNumOperandBundles() && "Index out of bounds!"
) ? static_cast<void> (0) : __assert_fail ("Index < getNumOperandBundles() && \"Index out of bounds!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1841, __PRETTY_FUNCTION__))
;
1842 return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
1843 }
1844
1845 /// Return the number of operand bundles with the tag Name attached to
1846 /// this instruction.
1847 unsigned countOperandBundlesOfType(StringRef Name) const {
1848 unsigned Count = 0;
1849 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1850 if (getOperandBundleAt(i).getTagName() == Name)
1851 Count++;
1852
1853 return Count;
1854 }
1855
1856 /// Return the number of operand bundles with the tag ID attached to
1857 /// this instruction.
1858 unsigned countOperandBundlesOfType(uint32_t ID) const {
1859 unsigned Count = 0;
1860 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1861 if (getOperandBundleAt(i).getTagID() == ID)
1862 Count++;
1863
1864 return Count;
1865 }
1866
1867 /// Return an operand bundle by name, if present.
1868 ///
1869 /// It is an error to call this for operand bundle types that may have
1870 /// multiple instances of them on the same instruction.
1871 Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
1872 assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!")((countOperandBundlesOfType(Name) < 2 && "Precondition violated!"
) ? static_cast<void> (0) : __assert_fail ("countOperandBundlesOfType(Name) < 2 && \"Precondition violated!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1872, __PRETTY_FUNCTION__))
;
1873
1874 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1875 OperandBundleUse U = getOperandBundleAt(i);
1876 if (U.getTagName() == Name)
1877 return U;
1878 }
1879
1880 return None;
1881 }
1882
1883 /// Return an operand bundle by tag ID, if present.
1884 ///
1885 /// It is an error to call this for operand bundle types that may have
1886 /// multiple instances of them on the same instruction.
1887 Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
1888 assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!")((countOperandBundlesOfType(ID) < 2 && "Precondition violated!"
) ? static_cast<void> (0) : __assert_fail ("countOperandBundlesOfType(ID) < 2 && \"Precondition violated!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 1888, __PRETTY_FUNCTION__))
;
1889
1890 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1891 OperandBundleUse U = getOperandBundleAt(i);
1892 if (U.getTagID() == ID)
1893 return U;
1894 }
1895
1896 return None;
1897 }
1898
1899 /// Return the list of operand bundles attached to this instruction as
1900 /// a vector of OperandBundleDefs.
1901 ///
1902 /// This function copies the OperandBundeUse instances associated with this
1903 /// OperandBundleUser to a vector of OperandBundleDefs. Note:
1904 /// OperandBundeUses and OperandBundleDefs are non-trivially *different*
1905 /// representations of operand bundles (see documentation above).
1906 void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const;
1907
1908 /// Return the operand bundle for the operand at index OpIdx.
1909 ///
1910 /// It is an error to call this with an OpIdx that does not correspond to an
1911 /// bundle operand.
1912 OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
1913 return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
1914 }
1915
1916 /// Return true if this operand bundle user has operand bundles that
1917 /// may read from the heap.
1918 bool hasReadingOperandBundles() const {
1919 // Implementation note: this is a conservative implementation of operand
1920 // bundle semantics, where *any* operand bundle forces a callsite to be at
1921 // least readonly.
1922 return hasOperandBundles();
1923 }
1924
1925 /// Return true if this operand bundle user has operand bundles that
1926 /// may write to the heap.
1927 bool hasClobberingOperandBundles() const {
1928 for (auto &BOI : bundle_op_infos()) {
1929 if (BOI.Tag->second == LLVMContext::OB_deopt ||
1930 BOI.Tag->second == LLVMContext::OB_funclet)
1931 continue;
1932
1933 // This instruction has an operand bundle that is not known to us.
1934 // Assume the worst.
1935 return true;
1936 }
1937
1938 return false;
1939 }
1940
1941 /// Return true if the bundle operand at index \p OpIdx has the
1942 /// attribute \p A.
1943 bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
1944 auto &BOI = getBundleOpInfoForOperand(OpIdx);
1945 auto OBU = operandBundleFromBundleOpInfo(BOI);
1946 return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
1947 }
1948
1949 /// Return true if \p Other has the same sequence of operand bundle
1950 /// tags with the same number of operands on each one of them as this
1951 /// OperandBundleUser.
1952 bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
1953 if (getNumOperandBundles() != Other.getNumOperandBundles())
1954 return false;
1955
1956 return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
1957 Other.bundle_op_info_begin());
1958 }
1959
1960 /// Return true if this operand bundle user contains operand bundles
1961 /// with tags other than those specified in \p IDs.
1962 bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
1963 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1964 uint32_t ID = getOperandBundleAt(i).getTagID();
1965 if (!is_contained(IDs, ID))
1966 return true;
1967 }
1968 return false;
1969 }
1970
1971 /// Is the function attribute S disallowed by some operand bundle on
1972 /// this operand bundle user?
1973 bool isFnAttrDisallowedByOpBundle(StringRef S) const {
1974 // Operand bundles only possibly disallow readnone, readonly and argmemonly
1975 // attributes. All String attributes are fine.
1976 return false;
1977 }
1978
1979 /// Is the function attribute A disallowed by some operand bundle on
1980 /// this operand bundle user?
1981 bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
1982 switch (A) {
1983 default:
1984 return false;
1985
1986 case Attribute::InaccessibleMemOrArgMemOnly:
1987 return hasReadingOperandBundles();
1988
1989 case Attribute::InaccessibleMemOnly:
1990 return hasReadingOperandBundles();
1991
1992 case Attribute::ArgMemOnly:
1993 return hasReadingOperandBundles();
1994
1995 case Attribute::ReadNone:
1996 return hasReadingOperandBundles();
1997
1998 case Attribute::ReadOnly:
1999 return hasClobberingOperandBundles();
2000 }
2001
2002 llvm_unreachable("switch has a default case!")::llvm::llvm_unreachable_internal("switch has a default case!"
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 2002)
;
2003 }
2004
2005 /// Used to keep track of an operand bundle. See the main comment on
2006 /// OperandBundleUser above.
2007 struct BundleOpInfo {
2008 /// The operand bundle tag, interned by
2009 /// LLVMContextImpl::getOrInsertBundleTag.
2010 StringMapEntry<uint32_t> *Tag;
2011
2012 /// The index in the Use& vector where operands for this operand
2013 /// bundle starts.
2014 uint32_t Begin;
2015
2016 /// The index in the Use& vector where operands for this operand
2017 /// bundle ends.
2018 uint32_t End;
2019
2020 bool operator==(const BundleOpInfo &Other) const {
2021 return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
2022 }
2023 };
2024
2025 /// Simple helper function to map a BundleOpInfo to an
2026 /// OperandBundleUse.
2027 OperandBundleUse
2028 operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
2029 auto begin = op_begin();
2030 ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
2031 return OperandBundleUse(BOI.Tag, Inputs);
2032 }
2033
2034 using bundle_op_iterator = BundleOpInfo *;
2035 using const_bundle_op_iterator = const BundleOpInfo *;
2036
2037 /// Return the start of the list of BundleOpInfo instances associated
2038 /// with this OperandBundleUser.
2039 ///
2040 /// OperandBundleUser uses the descriptor area co-allocated with the host User
2041 /// to store some meta information about which operands are "normal" operands,
2042 /// and which ones belong to some operand bundle.
2043 ///
2044 /// The layout of an operand bundle user is
2045 ///
2046 /// +-----------uint32_t End-------------------------------------+
2047 /// | |
2048 /// | +--------uint32_t Begin--------------------+ |
2049 /// | | | |
2050 /// ^ ^ v v
2051 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2052 /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
2053 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2054 /// v v ^ ^
2055 /// | | | |
2056 /// | +--------uint32_t Begin------------+ |
2057 /// | |
2058 /// +-----------uint32_t End-----------------------------+
2059 ///
2060 ///
2061 /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
2062 /// list. These descriptions are installed and managed by this class, and
2063 /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
2064 ///
2065 /// DU is an additional descriptor installed by User's 'operator new' to keep
2066 /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
2067 /// access or modify DU in any way, it's an implementation detail private to
2068 /// User.
2069 ///
2070 /// The regular Use& vector for the User starts at U0. The operand bundle
2071 /// uses are part of the Use& vector, just like normal uses. In the diagram
2072 /// above, the operand bundle uses start at BOI0_U0. Each instance of
2073 /// BundleOpInfo has information about a contiguous set of uses constituting
2074 /// an operand bundle, and the total set of operand bundle uses themselves
2075 /// form a contiguous set of uses (i.e. there are no gaps between uses
2076 /// corresponding to individual operand bundles).
2077 ///
2078 /// This class does not know the location of the set of operand bundle uses
2079 /// within the use list -- that is decided by the User using this class via
2080 /// the BeginIdx argument in populateBundleOperandInfos.
2081 ///
2082 /// Currently operand bundle users with hung-off operands are not supported.
2083 bundle_op_iterator bundle_op_info_begin() {
2084 if (!hasDescriptor())
2085 return nullptr;
2086
2087 uint8_t *BytesBegin = getDescriptor().begin();
2088 return reinterpret_cast<bundle_op_iterator>(BytesBegin);
2089 }
2090
2091 /// Return the start of the list of BundleOpInfo instances associated
2092 /// with this OperandBundleUser.
2093 const_bundle_op_iterator bundle_op_info_begin() const {
2094 auto *NonConstThis = const_cast<CallBase *>(this);
2095 return NonConstThis->bundle_op_info_begin();
2096 }
2097
2098 /// Return the end of the list of BundleOpInfo instances associated
2099 /// with this OperandBundleUser.
2100 bundle_op_iterator bundle_op_info_end() {
2101 if (!hasDescriptor())
2102 return nullptr;
2103
2104 uint8_t *BytesEnd = getDescriptor().end();
2105 return reinterpret_cast<bundle_op_iterator>(BytesEnd);
2106 }
2107
2108 /// Return the end of the list of BundleOpInfo instances associated
2109 /// with this OperandBundleUser.
2110 const_bundle_op_iterator bundle_op_info_end() const {
2111 auto *NonConstThis = const_cast<CallBase *>(this);
2112 return NonConstThis->bundle_op_info_end();
2113 }
2114
2115 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2116 iterator_range<bundle_op_iterator> bundle_op_infos() {
2117 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2118 }
2119
2120 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2121 iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
2122 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2123 }
2124
2125 /// Populate the BundleOpInfo instances and the Use& vector from \p
2126 /// Bundles. Return the op_iterator pointing to the Use& one past the
2127 /// last bundle operand use.
2128 ///
2129 /// Each \p OperandBundleDef instance is tracked by a BundleOpInfo
2130 /// instance allocated in this User's descriptor.
2131 op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
2132 const unsigned BeginIndex);
2133
2134public:
2135 /// Return the BundleOpInfo for the operand at index OpIdx.
2136 ///
2137 /// It is an error to call this with an OpIdx that does not correspond to a
2138 /// bundle operand.
2139 BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx);
2140 const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
2141 return const_cast<CallBase *>(this)->getBundleOpInfoForOperand(OpIdx);
2142 }
2143
2144protected:
2145 /// Return the total number of values used in \p Bundles.
2146 static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
2147 unsigned Total = 0;
2148 for (auto &B : Bundles)
2149 Total += B.input_size();
2150 return Total;
2151 }
2152
2153 /// @}
2154 // End of operand bundle API.
2155
2156private:
2157 bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
2158 bool hasFnAttrOnCalledFunction(StringRef Kind) const;
2159
2160 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
2161 if (Attrs.hasFnAttribute(Kind))
2162 return true;
2163
2164 // Operand bundles override attributes on the called function, but don't
2165 // override attributes directly present on the call instruction.
2166 if (isFnAttrDisallowedByOpBundle(Kind))
2167 return false;
2168
2169 return hasFnAttrOnCalledFunction(Kind);
2170 }
2171};
2172
// CallBase has a variable number of operands with a minimum arity of 1.
2173template <>
2174struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
2175
2176DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)CallBase::op_iterator CallBase::op_begin() { return OperandTraits
<CallBase>::op_begin(this); } CallBase::const_op_iterator
CallBase::op_begin() const { return OperandTraits<CallBase
>::op_begin(const_cast<CallBase*>(this)); } CallBase
::op_iterator CallBase::op_end() { return OperandTraits<CallBase
>::op_end(this); } CallBase::const_op_iterator CallBase::op_end
() const { return OperandTraits<CallBase>::op_end(const_cast
<CallBase*>(this)); } Value *CallBase::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<CallBase
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 2176, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<CallBase>::op_begin(const_cast<CallBase
*>(this))[i_nocapture].get()); } void CallBase::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<CallBase>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<CallBase>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 2176, __PRETTY_FUNCTION__)); OperandTraits<CallBase>::
op_begin(this)[i_nocapture] = Val_nocapture; } unsigned CallBase
::getNumOperands() const { return OperandTraits<CallBase>
::operands(this); } template <int Idx_nocapture> Use &
CallBase::Op() { return this->OpFrom<Idx_nocapture>(
this); } template <int Idx_nocapture> const Use &CallBase
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2177
2178//===----------------------------------------------------------------------===//
2179// FuncletPadInst Class
2180//===----------------------------------------------------------------------===//
2181class FuncletPadInst : public Instruction {
2182private:
2183 FuncletPadInst(const FuncletPadInst &CPI);
2184
2185 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2186 ArrayRef<Value *> Args, unsigned Values,
2187 const Twine &NameStr, Instruction *InsertBefore);
2188 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2189 ArrayRef<Value *> Args, unsigned Values,
2190 const Twine &NameStr, BasicBlock *InsertAtEnd);
2191
2192 void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
2193
2194protected:
2195 // Note: Instruction needs to be a friend here to call cloneImpl.
2196 friend class Instruction;
2197 friend class CatchPadInst;
2198 friend class CleanupPadInst;
2199
2200 FuncletPadInst *cloneImpl() const;
2201
2202public:
2203 /// Provide fast operand accessors
2204 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2205
2206 /// getNumArgOperands - Return the number of funcletpad arguments.
2207 ///
2208 unsigned getNumArgOperands() const { return getNumOperands() - 1; }
2209
2210 /// Convenience accessors
2211
2212 /// Return the outer EH-pad this funclet is nested within.
2213 ///
2214 /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
2215 /// is a CatchPadInst.
2216 Value *getParentPad() const { return Op<-1>(); }
2217 void setParentPad(Value *ParentPad) {
2218 assert(ParentPad)((ParentPad) ? static_cast<void> (0) : __assert_fail ("ParentPad"
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 2218, __PRETTY_FUNCTION__))
;
2219 Op<-1>() = ParentPad;
2220 }
2221
2222 /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
2223 ///
2224 Value *getArgOperand(unsigned i) const { return getOperand(i); }
2225 void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
2226
2227 /// arg_operands - iteration adapter for range-for loops.
2228 op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
2229
2230 /// arg_operands - iteration adapter for range-for loops.
2231 const_op_range arg_operands() const {
2232 return const_op_range(op_begin(), op_end() - 1);
2233 }
2234
2235 // Methods for support type inquiry through isa, cast, and dyn_cast:
2236 static bool classof(const Instruction *I) { return I->isFuncletPad(); }
2237 static bool classof(const Value *V) {
2238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2239 }
2240};
2241
// FuncletPadInst: variable operand count — the funcletpad arguments plus the
// trailing parent-pad operand, hence a minimum arity of 1.
2242template <>
2243struct OperandTraits<FuncletPadInst>
2244 : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
2245
2246DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)FuncletPadInst::op_iterator FuncletPadInst::op_begin() { return
OperandTraits<FuncletPadInst>::op_begin(this); } FuncletPadInst
::const_op_iterator FuncletPadInst::op_begin() const { return
OperandTraits<FuncletPadInst>::op_begin(const_cast<
FuncletPadInst*>(this)); } FuncletPadInst::op_iterator FuncletPadInst
::op_end() { return OperandTraits<FuncletPadInst>::op_end
(this); } FuncletPadInst::const_op_iterator FuncletPadInst::op_end
() const { return OperandTraits<FuncletPadInst>::op_end
(const_cast<FuncletPadInst*>(this)); } Value *FuncletPadInst
::getOperand(unsigned i_nocapture) const { ((i_nocapture <
OperandTraits<FuncletPadInst>::operands(this) &&
"getOperand() out of range!") ? static_cast<void> (0) :
__assert_fail ("i_nocapture < OperandTraits<FuncletPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 2246, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<FuncletPadInst>::op_begin(const_cast<
FuncletPadInst*>(this))[i_nocapture].get()); } void FuncletPadInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { ((
i_nocapture < OperandTraits<FuncletPadInst>::operands
(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<FuncletPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200926111128+c6c5629f2fb/llvm/include/llvm/IR/InstrTypes.h"
, 2246, __PRETTY_FUNCTION__)); OperandTraits<FuncletPadInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
FuncletPadInst::getNumOperands() const { return OperandTraits
<FuncletPadInst>::operands(this); } template <int Idx_nocapture
> Use &FuncletPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &FuncletPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2247
2248} // end namespace llvm
2249
2250#endif // LLVM_IR_INSTRTYPES_H