Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/IR/Instructions.h
Warning: line 2626, column 17
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ValueTracking.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Analysis -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Analysis -I include -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-04-20-140412-16051-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Analysis/ValueTracking.cpp

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Analysis/ValueTracking.cpp

1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains routines that help analyze properties that chains of
10// computations have.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/ValueTracking.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Analysis/AssumeBundleQueries.h"
28#include "llvm/Analysis/AssumptionCache.h"
29#include "llvm/Analysis/EHPersonalities.h"
30#include "llvm/Analysis/GuardUtils.h"
31#include "llvm/Analysis/InstructionSimplify.h"
32#include "llvm/Analysis/Loads.h"
33#include "llvm/Analysis/LoopInfo.h"
34#include "llvm/Analysis/OptimizationRemarkEmitter.h"
35#include "llvm/Analysis/TargetLibraryInfo.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
38#include "llvm/IR/BasicBlock.h"
39#include "llvm/IR/Constant.h"
40#include "llvm/IR/ConstantRange.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DerivedTypes.h"
43#include "llvm/IR/DiagnosticInfo.h"
44#include "llvm/IR/Dominators.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/GetElementPtrTypeIterator.h"
47#include "llvm/IR/GlobalAlias.h"
48#include "llvm/IR/GlobalValue.h"
49#include "llvm/IR/GlobalVariable.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instruction.h"
52#include "llvm/IR/Instructions.h"
53#include "llvm/IR/IntrinsicInst.h"
54#include "llvm/IR/Intrinsics.h"
55#include "llvm/IR/IntrinsicsAArch64.h"
56#include "llvm/IR/IntrinsicsRISCV.h"
57#include "llvm/IR/IntrinsicsX86.h"
58#include "llvm/IR/LLVMContext.h"
59#include "llvm/IR/Metadata.h"
60#include "llvm/IR/Module.h"
61#include "llvm/IR/Operator.h"
62#include "llvm/IR/PatternMatch.h"
63#include "llvm/IR/Type.h"
64#include "llvm/IR/User.h"
65#include "llvm/IR/Value.h"
66#include "llvm/Support/Casting.h"
67#include "llvm/Support/CommandLine.h"
68#include "llvm/Support/Compiler.h"
69#include "llvm/Support/ErrorHandling.h"
70#include "llvm/Support/KnownBits.h"
71#include "llvm/Support/MathExtras.h"
72#include <algorithm>
73#include <cassert>
74#include <cstdint>
75#include <utility>
76
77using namespace llvm;
78using namespace llvm::PatternMatch;
79
80// Controls the number of uses of the value searched for possible
81// dominating comparisons.
82static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
83 cl::Hidden, cl::init(20));
84
85// According to the LangRef, branching on a poison condition is absolutely
86// immediate full UB. However, historically we haven't implemented that
87// consistently as we have an important transformation (non-trivial unswitch)
88// which introduces instances of branch on poison/undef to otherwise well
89// defined programs. This flag exists to let us test optimization benefit
90// of exploiting the specified behavior (in combination with enabling the
91// unswitch fix.)
92static cl::opt<bool> BranchOnPoisonAsUB("branch-on-poison-as-ub",
93 cl::Hidden, cl::init(false));
94
95
96/// Returns the bitwidth of the given scalar or pointer type. For vector types,
97/// returns the element type's bitwidth.
98static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
99 if (unsigned BitWidth = Ty->getScalarSizeInBits())
100 return BitWidth;
101
102 return DL.getPointerTypeSizeInBits(Ty);
103}
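// Illustrative note: getScalarSizeInBits() returns 32 for i32 or <4 x i32>,
// but 0 for pointer types, so pointers fall through to the DataLayout query
// (typically 64 bits for a default address-space pointer on x86_64).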
104
105namespace {
106
107// Simplifying using an assume can only be done in a particular control-flow
108// context (the context instruction provides that context). If an assume and
109// the context instruction are not in the same block then the DT helps in
110// figuring out if we can use it.
111struct Query {
112 const DataLayout &DL;
113 AssumptionCache *AC;
114 const Instruction *CxtI;
115 const DominatorTree *DT;
116
117 // Unlike the other analyses, this may be a nullptr because not all clients
118 // provide it currently.
119 OptimizationRemarkEmitter *ORE;
120
121 /// If true, it is safe to use metadata during simplification.
122 InstrInfoQuery IIQ;
123
124 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
125 const DominatorTree *DT, bool UseInstrInfo,
126 OptimizationRemarkEmitter *ORE = nullptr)
127 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
128};
129
130} // end anonymous namespace
131
132// Given the provided Value and, potentially, a context instruction, return
133// the preferred context instruction (if any).
134static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
135 // If we've been provided with a context instruction, then use that (provided
136 // it has been inserted).
137 if (CxtI && CxtI->getParent())
138 return CxtI;
139
140 // If the value is really an already-inserted instruction, then use that.
141 CxtI = dyn_cast<Instruction>(V);
142 if (CxtI && CxtI->getParent())
143 return CxtI;
144
145 return nullptr;
146}
147
148static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
149 // If we've been provided with a context instruction, then use that (provided
150 // it has been inserted).
151 if (CxtI && CxtI->getParent())
152 return CxtI;
153
154 // If the value is really an already-inserted instruction, then use that.
155 CxtI = dyn_cast<Instruction>(V1);
156 if (CxtI && CxtI->getParent())
157 return CxtI;
158
159 CxtI = dyn_cast<Instruction>(V2);
160 if (CxtI && CxtI->getParent())
161 return CxtI;
162
163 return nullptr;
164}
165
166static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
167 const APInt &DemandedElts,
168 APInt &DemandedLHS, APInt &DemandedRHS) {
169 // The length of scalable vectors is unknown at compile time, thus we
170 // cannot check their values
171 if (isa<ScalableVectorType>(Shuf->getType()))
172 return false;
173
174 int NumElts =
175 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
176 int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
177 DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
178 if (DemandedElts.isZero())
179 return true;
180 // Simple case of a shuffle with zeroinitializer.
181 if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
182 DemandedLHS.setBit(0);
183 return true;
184 }
185 for (int i = 0; i != NumMaskElts; ++i) {
186 if (!DemandedElts[i])
187 continue;
188 int M = Shuf->getMaskValue(i);
189 assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
190
191 // For undef elements, we don't know anything about the common state of
192 // the shuffle result.
193 if (M == -1)
194 return false;
195 if (M < NumElts)
196 DemandedLHS.setBit(M % NumElts);
197 else
198 DemandedRHS.setBit(M % NumElts);
199 }
200
201 return true;
202}
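// Illustrative example (hypothetical IR): for
//   %s = shufflevector <4 x i32> %a, <4 x i32> %b,
//                      <4 x i32> <i32 0, i32 5, i32 2, i32 7>
// with DemandedElts = 0b0011 (result elements 0 and 1), mask element 0 reads
// lane 0 of the first operand and mask element 5 reads lane 5 - 4 = 1 of the
// second operand, so DemandedLHS becomes 0b0001 and DemandedRHS becomes 0b0010.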
203
204static void computeKnownBits(const Value *V, const APInt &DemandedElts,
205 KnownBits &Known, unsigned Depth, const Query &Q);
206
207static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
208 const Query &Q) {
209 // FIXME: We currently have no way to represent the DemandedElts of a scalable
210 // vector
211 if (isa<ScalableVectorType>(V->getType())) {
212 Known.resetAll();
213 return;
214 }
215
216 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
217 APInt DemandedElts =
218 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
219 computeKnownBits(V, DemandedElts, Known, Depth, Q);
220}
221
222void llvm::computeKnownBits(const Value *V, KnownBits &Known,
223 const DataLayout &DL, unsigned Depth,
224 AssumptionCache *AC, const Instruction *CxtI,
225 const DominatorTree *DT,
226 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
227 ::computeKnownBits(V, Known, Depth,
228 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
229}
230
231void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
232 KnownBits &Known, const DataLayout &DL,
233 unsigned Depth, AssumptionCache *AC,
234 const Instruction *CxtI, const DominatorTree *DT,
235 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
236 ::computeKnownBits(V, DemandedElts, Known, Depth,
237 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
238}
239
240static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
241 unsigned Depth, const Query &Q);
242
243static KnownBits computeKnownBits(const Value *V, unsigned Depth,
244 const Query &Q);
245
246KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
247 unsigned Depth, AssumptionCache *AC,
248 const Instruction *CxtI,
249 const DominatorTree *DT,
250 OptimizationRemarkEmitter *ORE,
251 bool UseInstrInfo) {
252 return ::computeKnownBits(
253 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
254}
255
256KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
257 const DataLayout &DL, unsigned Depth,
258 AssumptionCache *AC, const Instruction *CxtI,
259 const DominatorTree *DT,
260 OptimizationRemarkEmitter *ORE,
261 bool UseInstrInfo) {
262 return ::computeKnownBits(
263 V, DemandedElts, Depth,
264 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
265}
266
267bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
268 const DataLayout &DL, AssumptionCache *AC,
269 const Instruction *CxtI, const DominatorTree *DT,
270 bool UseInstrInfo) {
271 assert(LHS->getType() == RHS->getType() &&
272 "LHS and RHS should have the same type");
273 assert(LHS->getType()->isIntOrIntVectorTy() &&
274 "LHS and RHS should be integers");
275 // Look for an inverted mask: (X & ~M) op (Y & M).
276 {
277 Value *M;
278 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
279 match(RHS, m_c_And(m_Specific(M), m_Value())))
280 return true;
281 if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
282 match(LHS, m_c_And(m_Specific(M), m_Value())))
283 return true;
284 }
285 // Look for: (A & B) op ~(A | B)
286 {
287 Value *A, *B;
288 if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
289 match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
290 return true;
291 if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
292 match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
293 return true;
294 }
295 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
296 KnownBits LHSKnown(IT->getBitWidth());
297 KnownBits RHSKnown(IT->getBitWidth());
298 computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
299 computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
300 return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
301}
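// Illustrative example (hypothetical values): if LHS = (X & ~M) and
// RHS = (Y & M), the inverted-mask pattern above proves the two values can
// never have a 1 in the same bit position, so callers may rewrite
// "add LHS, RHS" as "or LHS, RHS". The KnownBits fallback reaches the same
// conclusion when, say, LHS is known to be 0b1010xx00 and RHS 0b000000xx.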
302
303bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
304 return !I->user_empty() && all_of(I->users(), [](const User *U) {
305 ICmpInst::Predicate P;
306 return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
307 });
308}
309
310static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
311 const Query &Q);
312
313bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
314 bool OrZero, unsigned Depth,
315 AssumptionCache *AC, const Instruction *CxtI,
316 const DominatorTree *DT, bool UseInstrInfo) {
317 return ::isKnownToBeAPowerOfTwo(
318 V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
319}
320
321static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
322 unsigned Depth, const Query &Q);
323
324static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
325
326bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
327 AssumptionCache *AC, const Instruction *CxtI,
328 const DominatorTree *DT, bool UseInstrInfo) {
329 return ::isKnownNonZero(V, Depth,
330 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
331}
332
333bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
334 unsigned Depth, AssumptionCache *AC,
335 const Instruction *CxtI, const DominatorTree *DT,
336 bool UseInstrInfo) {
337 KnownBits Known =
338 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
339 return Known.isNonNegative();
340}
341
342bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
343 AssumptionCache *AC, const Instruction *CxtI,
344 const DominatorTree *DT, bool UseInstrInfo) {
345 if (auto *CI = dyn_cast<ConstantInt>(V))
346 return CI->getValue().isStrictlyPositive();
347
348 // TODO: We're doing two recursive queries here. We should factor this such
349 // that only a single query is needed.
350 return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
351 isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
352}
353
354bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
355 AssumptionCache *AC, const Instruction *CxtI,
356 const DominatorTree *DT, bool UseInstrInfo) {
357 KnownBits Known =
358 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
359 return Known.isNegative();
360}
361
362static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
363 const Query &Q);
364
365bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
366 const DataLayout &DL, AssumptionCache *AC,
367 const Instruction *CxtI, const DominatorTree *DT,
368 bool UseInstrInfo) {
369 return ::isKnownNonEqual(V1, V2, 0,
370 Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
371 UseInstrInfo, /*ORE=*/nullptr));
372}
373
374static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
375 const Query &Q);
376
377bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
378 const DataLayout &DL, unsigned Depth,
379 AssumptionCache *AC, const Instruction *CxtI,
380 const DominatorTree *DT, bool UseInstrInfo) {
381 return ::MaskedValueIsZero(
382 V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
383}
384
385static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
386 unsigned Depth, const Query &Q);
387
388static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
389 const Query &Q) {
390 // FIXME: We currently have no way to represent the DemandedElts of a scalable
391 // vector
392 if (isa<ScalableVectorType>(V->getType()))
393 return 1;
394
395 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
396 APInt DemandedElts =
397 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
398 return ComputeNumSignBits(V, DemandedElts, Depth, Q);
399}
400
401unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
402 unsigned Depth, AssumptionCache *AC,
403 const Instruction *CxtI,
404 const DominatorTree *DT, bool UseInstrInfo) {
405 return ::ComputeNumSignBits(
406 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
407}
408
409unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
410 unsigned Depth, AssumptionCache *AC,
411 const Instruction *CxtI,
412 const DominatorTree *DT) {
413 unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
414 return V->getType()->getScalarSizeInBits() - SignBits + 1;
415}
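// Illustrative example (hypothetical value): the i32 constant -5 is
// 0xFFFFFFFB, which carries 29 copies of the sign bit, so ComputeNumSignBits
// returns 29 and the maximum number of significant bits is 32 - 29 + 1 = 4;
// the 4-bit pattern 0b1011 sign-extends back to -5.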
416
417static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
418 bool NSW, const APInt &DemandedElts,
419 KnownBits &KnownOut, KnownBits &Known2,
420 unsigned Depth, const Query &Q) {
421 computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
422
423 // If one operand is unknown and we have no nowrap information,
424 // the result will be unknown independently of the second operand.
425 if (KnownOut.isUnknown() && !NSW)
426 return;
427
428 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
429 KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
430}
431
432static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
433 const APInt &DemandedElts, KnownBits &Known,
434 KnownBits &Known2, unsigned Depth,
435 const Query &Q) {
436 computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
437 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
438
439 bool isKnownNegative = false;
440 bool isKnownNonNegative = false;
441 // If the multiplication is known not to overflow, compute the sign bit.
442 if (NSW) {
443 if (Op0 == Op1) {
444 // The product of a number with itself is non-negative.
445 isKnownNonNegative = true;
446 } else {
447 bool isKnownNonNegativeOp1 = Known.isNonNegative();
448 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
449 bool isKnownNegativeOp1 = Known.isNegative();
450 bool isKnownNegativeOp0 = Known2.isNegative();
451 // The product of two numbers with the same sign is non-negative.
452 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
453 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
454 // The product of a negative number and a non-negative number is either
455 // negative or zero.
456 if (!isKnownNonNegative)
457 isKnownNegative =
458 (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
459 Known2.isNonZero()) ||
460 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
461 }
462 }
463
464 bool SelfMultiply = Op0 == Op1;
465 // TODO: SelfMultiply can be poison, but not undef.
466 if (SelfMultiply)
467 SelfMultiply &=
468 isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
469 Known = KnownBits::mul(Known, Known2, SelfMultiply);
470
471 // Only make use of no-wrap flags if we failed to compute the sign bit
472 // directly. This matters if the multiplication always overflows, in
473 // which case we prefer to follow the result of the direct computation,
474 // though as the program is invoking undefined behaviour we can choose
475 // whatever we like here.
476 if (isKnownNonNegative && !Known.isNegative())
477 Known.makeNonNegative();
478 else if (isKnownNegative && !Known.isNonNegative())
479 Known.makeNegative();
480}
481
482void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
483 KnownBits &Known) {
484 unsigned BitWidth = Known.getBitWidth();
485 unsigned NumRanges = Ranges.getNumOperands() / 2;
486 assert(NumRanges >= 1);
487
488 Known.Zero.setAllBits();
489 Known.One.setAllBits();
490
491 for (unsigned i = 0; i < NumRanges; ++i) {
492 ConstantInt *Lower =
493 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
494 ConstantInt *Upper =
495 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
496 ConstantRange Range(Lower->getValue(), Upper->getValue());
497
498 // The first CommonPrefixBits of all values in Range are equal.
499 unsigned CommonPrefixBits =
500 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
501 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
502 APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
503 Known.One &= UnsignedMax & Mask;
504 Known.Zero &= ~UnsignedMax & Mask;
505 }
506}
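// Illustrative example (hypothetical metadata): an i8 load annotated with
// !range !{i8 8, i8 12} may only produce 8..11 (0b00001000 .. 0b00001011).
// UnsignedMax ^ UnsignedMin = 0b00000011 has six leading zeros, so the top
// six bits form a common prefix: Known ends up with One = 0b00001000 and
// Zero = 0b11110100, leaving only the low two bits unknown.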
507
508static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
509 SmallVector<const Value *, 16> WorkSet(1, I);
510 SmallPtrSet<const Value *, 32> Visited;
511 SmallPtrSet<const Value *, 16> EphValues;
512
513 // The instruction defining an assumption's condition itself is always
514 // considered ephemeral to that assumption (even if it has other
515 // non-ephemeral users). See r246696's test case for an example.
516 if (is_contained(I->operands(), E))
517 return true;
518
519 while (!WorkSet.empty()) {
520 const Value *V = WorkSet.pop_back_val();
521 if (!Visited.insert(V).second)
522 continue;
523
524 // If all uses of this value are ephemeral, then so is this value.
525 if (llvm::all_of(V->users(), [&](const User *U) {
526 return EphValues.count(U);
527 })) {
528 if (V == E)
529 return true;
530
531 if (V == I || (isa<Instruction>(V) &&
532 !cast<Instruction>(V)->mayHaveSideEffects() &&
533 !cast<Instruction>(V)->isTerminator())) {
534 EphValues.insert(V);
535 if (const User *U = dyn_cast<User>(V))
536 append_range(WorkSet, U->operands());
537 }
538 }
539 }
540
541 return false;
542}
543
544// Is this an intrinsic that cannot be speculated but also cannot trap?
545bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
546 if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
547 return CI->isAssumeLikeIntrinsic();
548
549 return false;
550}
551
552bool llvm::isValidAssumeForContext(const Instruction *Inv,
553 const Instruction *CxtI,
554 const DominatorTree *DT) {
555 // There are two restrictions on the use of an assume:
556 // 1. The assume must dominate the context (or the control flow must
557 // reach the assume whenever it reaches the context).
558 // 2. The context must not be in the assume's set of ephemeral values
559 // (otherwise we will use the assume to prove that the condition
560 // feeding the assume is trivially true, thus causing the removal of
561 // the assume).
562
563 if (Inv->getParent() == CxtI->getParent()) {
564 // If Inv and CtxI are in the same block, check if the assume (Inv) is first
565 // in the BB.
566 if (Inv->comesBefore(CxtI))
567 return true;
568
569 // Don't let an assume affect itself - this would cause the problems
570 // `isEphemeralValueOf` is trying to prevent, and it would also make
571 // the loop below go out of bounds.
572 if (Inv == CxtI)
573 return false;
574
575 // The context comes first, but they're both in the same block.
576 // Make sure there is nothing in between that might interrupt
577 // the control flow, not even CxtI itself.
578 // We limit the scan distance between the assume and its context instruction
579 // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
580 // it can be adjusted if needed (could be turned into a cl::opt).
581 auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
582 if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
583 return false;
584
585 return !isEphemeralValueOf(Inv, CxtI);
586 }
587
588 // Inv and CxtI are in different blocks.
589 if (DT) {
590 if (DT->dominates(Inv, CxtI))
591 return true;
592 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
593 // We don't have a DT, but this trivially dominates.
594 return true;
595 }
596
597 return false;
598}
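// Illustrative example: an assume that appears earlier in the same basic
// block as the context instruction is always valid for it; when the context
// comes first, the code above additionally scans the (at most 15) intervening
// instructions to ensure execution is guaranteed to reach the assume and that
// the context is not one of the assume's ephemeral values.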
599
600static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
601 // v u> y implies v != 0.
602 if (Pred == ICmpInst::ICMP_UGT)
603 return true;
604
605 // Special-case v != 0 to also handle v != null.
606 if (Pred == ICmpInst::ICMP_NE)
607 return match(RHS, m_Zero());
608
609 // All other predicates - rely on generic ConstantRange handling.
610 const APInt *C;
611 if (!match(RHS, m_APInt(C)))
612 return false;
613
614 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
615 return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
616}
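// Illustrative examples (hypothetical predicates/constants): "v u> C" always
// excludes zero no matter what C is; "v != 0" is matched explicitly; and for
// "v s< 0", makeExactICmpRegion(SLT, 0) yields the signed range [INT_MIN, 0),
// which does not contain zero, so that comparison also proves v is non-zero.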
617
618static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
619 // Use of assumptions is context-sensitive. If we don't have a context, we
620 // cannot use them!
621 if (!Q.AC || !Q.CxtI)
622 return false;
623
624 if (Q.CxtI && V->getType()->isPointerTy()) {
625 SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
626 if (!NullPointerIsDefined(Q.CxtI->getFunction(),
627 V->getType()->getPointerAddressSpace()))
628 AttrKinds.push_back(Attribute::Dereferenceable);
629
630 if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
631 return true;
632 }
633
634 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
635 if (!AssumeVH)
636 continue;
637 CallInst *I = cast<CallInst>(AssumeVH);
638 assert(I->getFunction() == Q.CxtI->getFunction() &&
639 "Got assumption for the wrong function!");
640
641 // Warning: This loop can end up being somewhat performance sensitive.
642 // We're running this loop once for each value queried, resulting in a
643 // runtime of ~O(#assumes * #values).
644
645 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
646 "must be an assume intrinsic");
647
648 Value *RHS;
649 CmpInst::Predicate Pred;
650 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
651 if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
652 return false;
653
654 if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
655 return true;
656 }
657
658 return false;
659}
660
661static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
662 unsigned Depth, const Query &Q) {
663 // Use of assumptions is context-sensitive. If we don't have a context, we
664 // cannot use them!
665 if (!Q.AC || !Q.CxtI)
666 return;
667
668 unsigned BitWidth = Known.getBitWidth();
669
670 // Refine Known set if the pointer alignment is set by assume bundles.
671 if (V->getType()->isPointerTy()) {
672 if (RetainedKnowledge RK = getKnowledgeValidInContext(
673 V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
674 if (isPowerOf2_64(RK.ArgValue))
675 Known.Zero.setLowBits(Log2_64(RK.ArgValue));
676 }
677 }
678
679 // Note that the patterns below need to be kept in sync with the code
680 // in AssumptionCache::updateAffectedValues.
681
682 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
683 if (!AssumeVH)
684 continue;
685 CallInst *I = cast<CallInst>(AssumeVH);
686 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
687 "Got assumption for the wrong function!");
688
689 // Warning: This loop can end up being somewhat performance sensitive.
690 // We're running this loop once for each value queried, resulting in a
691 // runtime of ~O(#assumes * #values).
692
693 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
694 "must be an assume intrinsic");
695
696 Value *Arg = I->getArgOperand(0);
697
698 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
699 assert(BitWidth == 1 && "assume operand is not i1?");
700 Known.setAllOnes();
701 return;
702 }
703 if (match(Arg, m_Not(m_Specific(V))) &&
704 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
705 assert(BitWidth == 1 && "assume operand is not i1?");
706 Known.setAllZero();
707 return;
708 }
709
710 // The remaining tests are all recursive, so bail out if we hit the limit.
711 if (Depth == MaxAnalysisRecursionDepth)
712 continue;
713
714 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
715 if (!Cmp)
716 continue;
717
718 // We are attempting to compute known bits for the operands of an assume.
719 // Do not try to use other assumptions for those recursive calls because
720 // that can lead to mutual recursion and a compile-time explosion.
721 // An example of the mutual recursion: computeKnownBits can call
722 // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
723 // and so on.
724 Query QueryNoAC = Q;
725 QueryNoAC.AC = nullptr;
726
727 // Note that ptrtoint may change the bitwidth.
728 Value *A, *B;
729 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
730
731 CmpInst::Predicate Pred;
732 uint64_t C;
733 switch (Cmp->getPredicate()) {
734 default:
735 break;
736 case ICmpInst::ICMP_EQ:
737 // assume(v = a)
738 if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
739 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
740 KnownBits RHSKnown =
741 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
742 Known.Zero |= RHSKnown.Zero;
743 Known.One |= RHSKnown.One;
744 // assume(v & b = a)
745 } else if (match(Cmp,
746 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
747 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
748 KnownBits RHSKnown =
749 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
750 KnownBits MaskKnown =
751 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
752
753 // For those bits in the mask that are known to be one, we can propagate
754 // known bits from the RHS to V.
755 Known.Zero |= RHSKnown.Zero & MaskKnown.One;
756 Known.One |= RHSKnown.One & MaskKnown.One;
757 // assume(~(v & b) = a)
758 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
759 m_Value(A))) &&
760 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
761 KnownBits RHSKnown =
762 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
763 KnownBits MaskKnown =
764 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
765
766 // For those bits in the mask that are known to be one, we can propagate
767 // inverted known bits from the RHS to V.
768 Known.Zero |= RHSKnown.One & MaskKnown.One;
769 Known.One |= RHSKnown.Zero & MaskKnown.One;
770 // assume(v | b = a)
771 } else if (match(Cmp,
772 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
773 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
774 KnownBits RHSKnown =
775 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
776 KnownBits BKnown =
777 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
778
779 // For those bits in B that are known to be zero, we can propagate known
780 // bits from the RHS to V.
781 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
782 Known.One |= RHSKnown.One & BKnown.Zero;
783 // assume(~(v | b) = a)
784 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
785 m_Value(A))) &&
786 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
787 KnownBits RHSKnown =
788 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
789 KnownBits BKnown =
790 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
791
792 // For those bits in B that are known to be zero, we can propagate
793 // inverted known bits from the RHS to V.
794 Known.Zero |= RHSKnown.One & BKnown.Zero;
795 Known.One |= RHSKnown.Zero & BKnown.Zero;
796 // assume(v ^ b = a)
797 } else if (match(Cmp,
798 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
799 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
800 KnownBits RHSKnown =
801 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
802 KnownBits BKnown =
803 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
804
805 // For those bits in B that are known to be zero, we can propagate known
806 // bits from the RHS to V. For those bits in B that are known to be one,
807 // we can propagate inverted known bits from the RHS to V.
808 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
809 Known.One |= RHSKnown.One & BKnown.Zero;
810 Known.Zero |= RHSKnown.One & BKnown.One;
811 Known.One |= RHSKnown.Zero & BKnown.One;
812 // assume(~(v ^ b) = a)
813 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
814 m_Value(A))) &&
815 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
816 KnownBits RHSKnown =
817 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
818 KnownBits BKnown =
819 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
820
821 // For those bits in B that are known to be zero, we can propagate
822 // inverted known bits from the RHS to V. For those bits in B that are
823 // known to be one, we can propagate known bits from the RHS to V.
824 Known.Zero |= RHSKnown.One & BKnown.Zero;
825 Known.One |= RHSKnown.Zero & BKnown.Zero;
826 Known.Zero |= RHSKnown.Zero & BKnown.One;
827 Known.One |= RHSKnown.One & BKnown.One;
828 // assume(v << c = a)
829 } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
830 m_Value(A))) &&
831 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
832 KnownBits RHSKnown =
833 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
834
835 // For those bits in RHS that are known, we can propagate them to known
836 // bits in V shifted to the right by C.
837 RHSKnown.Zero.lshrInPlace(C);
838 Known.Zero |= RHSKnown.Zero;
839 RHSKnown.One.lshrInPlace(C);
840 Known.One |= RHSKnown.One;
841 // assume(~(v << c) = a)
842 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
843 m_Value(A))) &&
844 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
845 KnownBits RHSKnown =
846 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
847 // For those bits in RHS that are known, we can propagate them inverted
848 // to known bits in V shifted to the right by C.
849 RHSKnown.One.lshrInPlace(C);
850 Known.Zero |= RHSKnown.One;
851 RHSKnown.Zero.lshrInPlace(C);
852 Known.One |= RHSKnown.Zero;
853 // assume(v >> c = a)
854 } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
855 m_Value(A))) &&
856 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
857 KnownBits RHSKnown =
858 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
859 // For those bits in RHS that are known, we can propagate them to known
860 // bits in V shifted to the right by C.
861 Known.Zero |= RHSKnown.Zero << C;
862 Known.One |= RHSKnown.One << C;
863 // assume(~(v >> c) = a)
864 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
865 m_Value(A))) &&
866 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
867 KnownBits RHSKnown =
868 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
869 // For those bits in RHS that are known, we can propagate them inverted
870 // to known bits in V shifted to the right by C.
871 Known.Zero |= RHSKnown.One << C;
872 Known.One |= RHSKnown.Zero << C;
873 }
874 break;
875 case ICmpInst::ICMP_SGE:
876 // assume(v >=_s c) where c is non-negative
877 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
878 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
879 KnownBits RHSKnown =
880 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
881
882 if (RHSKnown.isNonNegative()) {
883 // We know that the sign bit is zero.
884 Known.makeNonNegative();
885 }
886 }
887 break;
888 case ICmpInst::ICMP_SGT:
889 // assume(v >_s c) where c is at least -1.
890 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
891 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
892 KnownBits RHSKnown =
893 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
894
895 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
896 // We know that the sign bit is zero.
897 Known.makeNonNegative();
898 }
899 }
900 break;
901 case ICmpInst::ICMP_SLE:
902 // assume(v <=_s c) where c is negative
903 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
904 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
905 KnownBits RHSKnown =
906 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
907
908 if (RHSKnown.isNegative()) {
909 // We know that the sign bit is one.
910 Known.makeNegative();
911 }
912 }
913 break;
914 case ICmpInst::ICMP_SLT:
915 // assume(v <_s c) where c is non-positive
916 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
917 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
918 KnownBits RHSKnown =
919 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
920
921 if (RHSKnown.isZero() || RHSKnown.isNegative()) {
922 // We know that the sign bit is one.
923 Known.makeNegative();
924 }
925 }
926 break;
927 case ICmpInst::ICMP_ULE:
928 // assume(v <=_u c)
929 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
930 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
931 KnownBits RHSKnown =
932 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
933
934 // Whatever high bits in c are zero are known to be zero.
935 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
936 }
937 break;
938 case ICmpInst::ICMP_ULT:
939 // assume(v <_u c)
940 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
941 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
942 KnownBits RHSKnown =
943 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
944
945 // If the RHS is known zero, then this assumption must be wrong (nothing
946 // is unsigned less than zero). Signal a conflict and get out of here.
947 if (RHSKnown.isZero()) {
948 Known.Zero.setAllBits();
949 Known.One.setAllBits();
950 break;
951 }
952
953 // Whatever high bits in c are zero are known to be zero (if c is a power
954 // of 2, then one more).
955 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
956 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
957 else
958 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
959 }
960 break;
961 }
962 }
963
964 // If assumptions conflict with each other or previous known bits, then we
965 // have a logical fallacy. It's possible that the assumption is not reachable,
966 // so this isn't a real bug. On the other hand, the program may have undefined
967 // behavior, or we might have a bug in the compiler. We can't assert/crash, so
968 // clear out the known bits, try to warn the user, and hope for the best.
969 if (Known.Zero.intersects(Known.One)) {
970 Known.resetAll();
971
972 if (Q.ORE)
973 Q.ORE->emit([&]() {
974 auto *CxtI = const_cast<Instruction *>(Q.CxtI);
975 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
976 CxtI)
977 << "Detected conflicting code assumptions. Program may "
978 "have undefined behavior, or compiler may have "
979 "internal error.";
980 });
981 }
982}
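// Illustrative example (hypothetical IR): given
//   %cmp = icmp ult i32 %v, 16
//   call void @llvm.assume(i1 %cmp)
// at a point where the assume is valid for the context, the ICMP_ULT case
// above sees an RHS with 27 known leading zero bits and, because 16 is a
// power of two, sets the high 28 bits of Known.Zero for %v, i.e. %v is
// proven to fit in its low 4 bits.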
983
984/// Compute known bits from a shift operator, including those with a
985/// non-constant shift amount. Known is the output of this function. Known2 is a
986/// pre-allocated temporary with the same bit width as Known and on return
987/// contains the known bits of the shift value source. KF is an
988/// operator-specific function that, given the known bits and a shift amount,
989/// computes the implied known bits of the shift operator's result
990/// for that shift amount. The results from calling KF are conservatively
991/// combined for all permitted shift amounts.
992static void computeKnownBitsFromShiftOperator(
993 const Operator *I, const APInt &DemandedElts, KnownBits &Known,
994 KnownBits &Known2, unsigned Depth, const Query &Q,
995 function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
996 unsigned BitWidth = Known.getBitWidth();
997 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
998 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
999
1000 // Note: We cannot use Known.Zero.getLimitedValue() here, because if
1001 // BitWidth > 64 and any upper bits are known, we'll end up returning the
1002 // limit value (which implies all bits are known).
1003 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
1004 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
1005 bool ShiftAmtIsConstant = Known.isConstant();
1006 bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
1007
1008 if (ShiftAmtIsConstant) {
1009 Known = KF(Known2, Known);
1010
1011 // If the known bits conflict, this must be an overflowing left shift, so
1012 // the shift result is poison. We can return anything we want. Choose 0 for
1013 // the best folding opportunity.
1014 if (Known.hasConflict())
1015 Known.setAllZero();
1016
1017 return;
1018 }
1019
1020 // If the shift amount could be greater than or equal to the bit-width of the
1021 // LHS, the value could be poison, but bail out because the check below is
1022 // expensive.
1023 // TODO: Should we just carry on?
1024 if (MaxShiftAmtIsOutOfRange) {
1025 Known.resetAll();
1026 return;
1027 }
1028
1029 // It would be more-clearly correct to use the two temporaries for this
1030 // calculation. Reusing the APInts here to prevent unnecessary allocations.
1031 Known.resetAll();
1032
1033 // If we know the shifter operand is nonzero, we can sometimes infer more
1034 // known bits. However this is expensive to compute, so be lazy about it and
1035 // only compute it when absolutely necessary.
1036 Optional<bool> ShifterOperandIsNonZero;
1037
1038 // Early exit if we can't constrain any well-defined shift amount.
1039 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1040 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1041 ShifterOperandIsNonZero =
1042 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1043 if (!*ShifterOperandIsNonZero)
1044 return;
1045 }
1046
1047 Known.Zero.setAllBits();
1048 Known.One.setAllBits();
1049 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1050 // Combine the shifted known input bits only for those shift amounts
1051 // compatible with its known constraints.
1052 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1053 continue;
1054 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1055 continue;
1056 // If we know the shifter is nonzero, we may be able to infer more known
1057 // bits. This check is sunk down as far as possible to avoid the expensive
1058 // call to isKnownNonZero if the cheaper checks above fail.
1059 if (ShiftAmt == 0) {
1060 if (!ShifterOperandIsNonZero.hasValue())
1061 ShifterOperandIsNonZero =
1062 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1063 if (*ShifterOperandIsNonZero)
1064 continue;
1065 }
1066
1067 Known = KnownBits::commonBits(
1068 Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1069 }
1070
1071 // If the known bits conflict, the result is poison. Return a 0 and hope the
1072 // caller can further optimize that.
1073 if (Known.hasConflict())
1074 Known.setAllZero();
1075}
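// Illustrative example (hypothetical known bits): if the shift amount of an
// i8 shift is known to be even and less than 4 (Known.Zero = 0b11111101,
// Known.One = 0 for the amount), the loop above only evaluates KF for
// ShiftAmt = 0 and ShiftAmt = 2 and intersects the two results with
// KnownBits::commonBits, so only bits that agree for both amounts survive.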
1076
1077static void computeKnownBitsFromOperator(const Operator *I,
1078 const APInt &DemandedElts,
1079 KnownBits &Known, unsigned Depth,
1080 const Query &Q) {
1081 unsigned BitWidth = Known.getBitWidth();
1082
1083 KnownBits Known2(BitWidth);
1084 switch (I->getOpcode()) {
1085 default: break;
1086 case Instruction::Load:
1087 if (MDNode *MD =
1088 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1089 computeKnownBitsFromRangeMetadata(*MD, Known);
1090 break;
1091 case Instruction::And: {
1092 // If either the LHS or the RHS are Zero, the result is zero.
1093 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1094 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1095
1096 Known &= Known2;
1097
1098 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1099 // here we handle the more general case of adding any odd number by
1100 // matching the form add(x, add(x, y)) where y is odd.
1101 // TODO: This could be generalized to clearing any bit set in y where the
1102 // following bit is known to be unset in y.
1103 Value *X = nullptr, *Y = nullptr;
1104 if (!Known.Zero[0] && !Known.One[0] &&
1105 match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1106 Known2.resetAll();
1107 computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1108 if (Known2.countMinTrailingOnes() > 0)
1109 Known.Zero.setBit(0);
1110 }
1111 break;
1112 }
1113 case Instruction::Or:
1114 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1115 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1116
1117 Known |= Known2;
1118 break;
1119 case Instruction::Xor:
1120 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1121 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1122
1123 Known ^= Known2;
1124 break;
1125 case Instruction::Mul: {
1126 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1127 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1128 Known, Known2, Depth, Q);
1129 break;
1130 }
1131 case Instruction::UDiv: {
1132 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1133 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1134 Known = KnownBits::udiv(Known, Known2);
1135 break;
1136 }
1137 case Instruction::Select: {
1138 const Value *LHS = nullptr, *RHS = nullptr;
1139 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1140 if (SelectPatternResult::isMinOrMax(SPF)) {
1141 computeKnownBits(RHS, Known, Depth + 1, Q);
1142 computeKnownBits(LHS, Known2, Depth + 1, Q);
1143 switch (SPF) {
1144 default:
1145 llvm_unreachable("Unhandled select pattern flavor!");
1146 case SPF_SMAX:
1147 Known = KnownBits::smax(Known, Known2);
1148 break;
1149 case SPF_SMIN:
1150 Known = KnownBits::smin(Known, Known2);
1151 break;
1152 case SPF_UMAX:
1153 Known = KnownBits::umax(Known, Known2);
1154 break;
1155 case SPF_UMIN:
1156 Known = KnownBits::umin(Known, Known2);
1157 break;
1158 }
1159 break;
1160 }
1161
1162 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1163 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1164
1165 // Only known if known in both the LHS and RHS.
1166 Known = KnownBits::commonBits(Known, Known2);
1167
1168 if (SPF == SPF_ABS) {
1169 // RHS from matchSelectPattern returns the negation part of abs pattern.
1170 // If the negate has an NSW flag we can assume the sign bit of the result
1171 // will be 0 because that makes abs(INT_MIN) undefined.
1172 if (match(RHS, m_Neg(m_Specific(LHS))) &&
1173 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
1174 Known.Zero.setSignBit();
1175 }
1176
1177 break;
1178 }
1179 case Instruction::FPTrunc:
1180 case Instruction::FPExt:
1181 case Instruction::FPToUI:
1182 case Instruction::FPToSI:
1183 case Instruction::SIToFP:
1184 case Instruction::UIToFP:
1185 break; // Can't work with floating point.
1186 case Instruction::PtrToInt:
1187 case Instruction::IntToPtr:
1188 // Fall through and handle them the same as zext/trunc.
1189 LLVM_FALLTHROUGH;
1190 case Instruction::ZExt:
1191 case Instruction::Trunc: {
1192 Type *SrcTy = I->getOperand(0)->getType();
1193
1194 unsigned SrcBitWidth;
1195 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1196 // which fall through here.
1197 Type *ScalarTy = SrcTy->getScalarType();
1198 SrcBitWidth = ScalarTy->isPointerTy() ?
1199 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1200 Q.DL.getTypeSizeInBits(ScalarTy);
1201
1202 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1203 Known = Known.anyextOrTrunc(SrcBitWidth);
1204 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1205 Known = Known.zextOrTrunc(BitWidth);
1206 break;
1207 }
1208 case Instruction::BitCast: {
1209 Type *SrcTy = I->getOperand(0)->getType();
1210 if (SrcTy->isIntOrPtrTy() &&
1211 // TODO: For now, not handling conversions like:
1212 // (bitcast i64 %x to <2 x i32>)
1213 !I->getType()->isVectorTy()) {
1214 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1215 break;
1216 }
1217
1218 // Handle cast from vector integer type to scalar or vector integer.
1219 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1220 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1221 !I->getType()->isIntOrIntVectorTy())
1222 break;
1223
1224 // Look through a cast from narrow vector elements to wider type.
1225 // Examples: v4i32 -> v2i64, v3i8 -> i24
1226 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1227 if (BitWidth % SubBitWidth == 0) {
1228 // Known bits are automatically intersected across demanded elements of a
1229 // vector. So for example, if a bit is computed as known zero, it must be
1230 // zero across all demanded elements of the vector.
1231 //
1232 // For this bitcast, each demanded element of the output is sub-divided
1233 // across a set of smaller vector elements in the source vector. To get
1234 // the known bits for an entire element of the output, compute the known
1235 // bits for each sub-element sequentially. This is done by shifting the
1236 // one-set-bit demanded elements parameter across the sub-elements for
1237 // consecutive calls to computeKnownBits. We are using the demanded
1238 // elements parameter as a mask operator.
1239 //
1240 // The known bits of each sub-element are then inserted into place
1241 // (dependent on endian) to form the full result of known bits.
1242 unsigned NumElts = DemandedElts.getBitWidth();
1243 unsigned SubScale = BitWidth / SubBitWidth;
1244 APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
1245 for (unsigned i = 0; i != NumElts; ++i) {
1246 if (DemandedElts[i])
1247 SubDemandedElts.setBit(i * SubScale);
1248 }
1249
1250 KnownBits KnownSrc(SubBitWidth);
1251 for (unsigned i = 0; i != SubScale; ++i) {
1252 computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
1253 Depth + 1, Q);
1254 unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
1255 Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
1256 }
1257 }
1258 break;
1259 }
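
The demanded-elements walk in the BitCast case above can be pictured with a concrete cast. Below is a minimal standalone sketch in plain C++ (ordinary integers, not the LLVM APInt/KnownBits API); the <4 x i32> to <2 x i64> shapes and the little-endian layout are assumptions chosen purely for illustration.

#include <cstdint>
#include <cstdio>

// Hypothetical illustration: bitcast <4 x i32> %src to <2 x i64>, little endian.
// Each demanded i64 output element is rebuilt from SubScale = 2 source lanes;
// a one-bit-per-element mask is shifted across the sub-elements per iteration.
int main() {
  const unsigned NumElts = 2;        // i64 output elements
  const unsigned SubScale = 64 / 32; // i32 sub-elements per output element
  uint8_t DemandedElts = 0b01;       // only output element 0 is demanded

  uint8_t SubDemandedElts = 0;       // one set bit per demanded output element
  for (unsigned i = 0; i != NumElts; ++i)
    if (DemandedElts & (1u << i))
      SubDemandedElts |= (uint8_t)(1u << (i * SubScale));

  // Iteration i demands source lane i of each demanded output element and
  // inserts its known bits at bit offset i * 32 (little endian).
  for (unsigned i = 0; i != SubScale; ++i)
    printf("iteration %u: source-lane mask 0x%02x, insert at bit %u\n", i,
           (unsigned)(SubDemandedElts << i), i * 32);
  return 0;
}
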
1260 case Instruction::SExt: {
1261 // Compute the bits in the result that are not present in the input.
1262 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1263
1264 Known = Known.trunc(SrcBitWidth);
1265 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1266 // If the sign bit of the input is known set or clear, then we know the
1267 // top bits of the result.
1268 Known = Known.sext(BitWidth);
1269 break;
1270 }
1271 case Instruction::Shl: {
1272 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1273 auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1274 KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1275 // If this shift has "nsw" keyword, then the result is either a poison
1276 // value or has the same sign bit as the first operand.
1277 if (NSW) {
1278 if (KnownVal.Zero.isSignBitSet())
1279 Result.Zero.setSignBit();
1280 if (KnownVal.One.isSignBitSet())
1281 Result.One.setSignBit();
1282 }
1283 return Result;
1284 };
1285 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1286 KF);
1287 // Trailing zeros of a left-shifted constant never decrease.
1288 const APInt *C;
1289 if (match(I->getOperand(0), m_APInt(C)))
1290 Known.Zero.setLowBits(C->countTrailingZeros());
1291 break;
1292 }
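
Two facts are combined in the Shl case above: an nsw left shift either produces poison or keeps the sign of the shifted operand, and a left-shifted constant never loses trailing zeros. A small plain-C++ check of both follows; Val and Amt are arbitrary example values, and the helper is a sketch rather than an LLVM API.

#include <cassert>
#include <cstdint>

static unsigned countTrailingZeros32(uint32_t X) {
  if (X == 0) return 32;
  unsigned N = 0;
  while (!(X & 1)) { X >>= 1; ++N; }
  return N;
}

int main() {
  int32_t Val = -24;  // sign bit set, three trailing zeros
  unsigned Amt = 2;
  // Confirm the shift does not wrap in the signed sense (the "nsw" premise).
  int64_t Wide = (int64_t)Val * (int64_t(1) << Amt);
  assert((int32_t)Wide == Wide);
  int32_t Res = (int32_t)((uint32_t)Val << Amt);  // well-defined unsigned shift
  assert((Res < 0) == (Val < 0));                 // sign bit preserved
  assert(countTrailingZeros32((uint32_t)Res) >=
         countTrailingZeros32((uint32_t)Val));    // trailing zeros preserved
  return 0;
}
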
1293 case Instruction::LShr: {
1294 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1295 return KnownBits::lshr(KnownVal, KnownAmt);
1296 };
1297 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1298 KF);
1299 // Leading zeros of a right-shifted constant never decrease.
1300 const APInt *C;
1301 if (match(I->getOperand(0), m_APInt(C)))
1302 Known.Zero.setHighBits(C->countLeadingZeros());
1303 break;
1304 }
1305 case Instruction::AShr: {
1306 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1307 return KnownBits::ashr(KnownVal, KnownAmt);
1308 };
1309 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1310 KF);
1311 break;
1312 }
1313 case Instruction::Sub: {
1314 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1315 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1316 DemandedElts, Known, Known2, Depth, Q);
1317 break;
1318 }
1319 case Instruction::Add: {
1320 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1321 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1322 DemandedElts, Known, Known2, Depth, Q);
1323 break;
1324 }
1325 case Instruction::SRem:
1326 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1327 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1328 Known = KnownBits::srem(Known, Known2);
1329 break;
1330
1331 case Instruction::URem:
1332 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1333 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1334 Known = KnownBits::urem(Known, Known2);
1335 break;
1336 case Instruction::Alloca:
1337 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1338 break;
1339 case Instruction::GetElementPtr: {
1340 // Analyze all of the subscripts of this getelementptr instruction
1341 // to determine if we can prove known low zero bits.
1342 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1343 // Accumulate the constant indices in a separate variable
1344 // to minimize the number of calls to computeForAddSub.
1345 APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1346
1347 gep_type_iterator GTI = gep_type_begin(I);
1348 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1349 // TrailZ can only become smaller, short-circuit if we hit zero.
1350 if (Known.isUnknown())
1351 break;
1352
1353 Value *Index = I->getOperand(i);
1354
1355 // Handle case when index is zero.
1356 Constant *CIndex = dyn_cast<Constant>(Index);
1357 if (CIndex && CIndex->isZeroValue())
1358 continue;
1359
1360 if (StructType *STy = GTI.getStructTypeOrNull()) {
1361 // Handle struct member offset arithmetic.
1362
1363 assert(CIndex &&
1364 "Access to structure field must be known at compile time");
1365
1366 if (CIndex->getType()->isVectorTy())
1367 Index = CIndex->getSplatValue();
1368
1369 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1370 const StructLayout *SL = Q.DL.getStructLayout(STy);
1371 uint64_t Offset = SL->getElementOffset(Idx);
1372 AccConstIndices += Offset;
1373 continue;
1374 }
1375
1376 // Handle array index arithmetic.
1377 Type *IndexedTy = GTI.getIndexedType();
1378 if (!IndexedTy->isSized()) {
1379 Known.resetAll();
1380 break;
1381 }
1382
1383 unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1384 KnownBits IndexBits(IndexBitWidth);
1385 computeKnownBits(Index, IndexBits, Depth + 1, Q);
1386 TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1387 uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1388 KnownBits ScalingFactor(IndexBitWidth);
1389 // Multiply by current sizeof type.
1390 // &A[i] == A + i * sizeof(*A[i]).
1391 if (IndexTypeSize.isScalable()) {
1392 // For scalable types the only thing we know about sizeof is
1393 // that this is a multiple of the minimum size.
1394 ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1395 } else if (IndexBits.isConstant()) {
1396 APInt IndexConst = IndexBits.getConstant();
1397 APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1398 IndexConst *= ScalingFactor;
1399 AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1400 continue;
1401 } else {
1402 ScalingFactor =
1403 KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1404 }
1405 IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
1406
1407 // If the offsets have a different width from the pointer, according
1408 // to the language reference we need to sign-extend or truncate them
1409 // to the width of the pointer.
1410 IndexBits = IndexBits.sextOrTrunc(BitWidth);
1411
1412 // Note that inbounds does *not* guarantee nsw for the addition, as only
1413 // the offset is signed, while the base address is unsigned.
1414 Known = KnownBits::computeForAddSub(
1415 /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1416 }
1417 if (!Known.isUnknown() && !AccConstIndices.isZero()) {
1418 KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1419 Known = KnownBits::computeForAddSub(
1420 /*Add=*/true, /*NSW=*/false, Known, Index);
1421 }
1422 break;
1423 }
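
The accumulation in the GetElementPtr case above reduces to the address formula &A[i] == A + i * sizeof(*A) plus constant struct-field offsets. Here is a standalone plain-C++ sketch of the low-zero-bit reasoning; the element size, base alignment, and field offset are example assumptions, not values taken from the analysis.

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t ElemSize = 4;   // sizeof(i32): every scaled index is 4-aligned
  for (uint64_t Index = 0; Index < 1000; ++Index)
    assert(((Index * ElemSize) & 0x3) == 0);   // two low zero bits, any index

  uint64_t Base = 0x1000;              // assumed 16-byte-aligned base pointer
  uint64_t ConstFieldOffset = 8;       // assumed struct member offset
  uint64_t Addr = Base + 12 * ElemSize + ConstFieldOffset;
  assert((Addr & 0x3) == 0);           // the alignment fact survives the adds
  return 0;
}
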
1424 case Instruction::PHI: {
1425 const PHINode *P = cast<PHINode>(I);
1426 BinaryOperator *BO = nullptr;
1427 Value *R = nullptr, *L = nullptr;
1428 if (matchSimpleRecurrence(P, BO, R, L)) {
1429 // Handle the case of a simple two-predecessor recurrence PHI.
1430 // There's a lot more that could theoretically be done here, but
1431 // this is sufficient to catch some interesting cases.
1432 unsigned Opcode = BO->getOpcode();
1433
1434 // If this is a shift recurrence, we know the bits being shifted in.
1435 // We can combine that with information about the start value of the
1436 // recurrence to conclude facts about the result.
1437 if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1438 Opcode == Instruction::Shl) &&
1439 BO->getOperand(0) == I) {
1440
1441 // We have matched a recurrence of the form:
1442 // %iv = [R, %entry], [%iv.next, %backedge]
1443 // %iv.next = shift_op %iv, L
1444
1445 // Recurse with the phi context to avoid concern about whether facts
1446 // inferred hold at original context instruction. TODO: It may be
1447 // correct to use the original context. If warranted, explore and
1448 // add sufficient tests to cover.
1449 Query RecQ = Q;
1450 RecQ.CxtI = P;
1451 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1452 switch (Opcode) {
1453 case Instruction::Shl:
1454 // A shl recurrence will only increase the trailing zeros
1455 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1456 break;
1457 case Instruction::LShr:
1458 // A lshr recurrence will preserve the leading zeros of the
1459 // start value
1460 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1461 break;
1462 case Instruction::AShr:
1463 // An ashr recurrence will extend the initial sign bit
1464 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1465 Known.One.setHighBits(Known2.countMinLeadingOnes());
1466 break;
1467 };
1468 }
1469
1470 // Check for operations that have the property that if
1471 // both their operands have low zero bits, the result
1472 // will have low zero bits.
1473 if (Opcode == Instruction::Add ||
1474 Opcode == Instruction::Sub ||
1475 Opcode == Instruction::And ||
1476 Opcode == Instruction::Or ||
1477 Opcode == Instruction::Mul) {
1478 // Change the context instruction to the "edge" that flows into the
1479 // phi. This is important because that is where the value is actually
1480 // "evaluated" even though it is used later somewhere else. (see also
1481 // D69571).
1482 Query RecQ = Q;
1483
1484 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1485 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1486 Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1487
1488 // Ok, we have a PHI of the form L op= R. Check for low
1489 // zero bits.
1490 RecQ.CxtI = RInst;
1491 computeKnownBits(R, Known2, Depth + 1, RecQ);
1492
1493 // We need to take the minimum number of known bits
1494 KnownBits Known3(BitWidth);
1495 RecQ.CxtI = LInst;
1496 computeKnownBits(L, Known3, Depth + 1, RecQ);
1497
1498 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1499 Known3.countMinTrailingZeros()));
1500
1501 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1502 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1503 // If initial value of recurrence is nonnegative, and we are adding
1504 // a nonnegative number with nsw, the result can only be nonnegative
1505 // or poison value regardless of the number of times we execute the
1506 // add in phi recurrence. If initial value is negative and we are
1507 // adding a negative number with nsw, the result can only be
1508 // negative or poison value. Similar arguments apply to sub and mul.
1509 //
1510 // (add non-negative, non-negative) --> non-negative
1511 // (add negative, negative) --> negative
1512 if (Opcode == Instruction::Add) {
1513 if (Known2.isNonNegative() && Known3.isNonNegative())
1514 Known.makeNonNegative();
1515 else if (Known2.isNegative() && Known3.isNegative())
1516 Known.makeNegative();
1517 }
1518
1519 // (sub nsw non-negative, negative) --> non-negative
1520 // (sub nsw negative, non-negative) --> negative
1521 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1522 if (Known2.isNonNegative() && Known3.isNegative())
1523 Known.makeNonNegative();
1524 else if (Known2.isNegative() && Known3.isNonNegative())
1525 Known.makeNegative();
1526 }
1527
1528 // (mul nsw non-negative, non-negative) --> non-negative
1529 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1530 Known3.isNonNegative())
1531 Known.makeNonNegative();
1532 }
1533
1534 break;
1535 }
1536 }
1537
1538 // Unreachable blocks may have zero-operand PHI nodes.
1539 if (P->getNumIncomingValues() == 0)
1540 break;
1541
1542 // Otherwise merge the known bits of the incoming values, keeping only the
1543 // bits known in all of them, and take conservative care to avoid excessive recursion.
1544 if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1545 // Skip if every incoming value references the PHI itself.
1546 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1547 break;
1548
1549 Known.Zero.setAllBits();
1550 Known.One.setAllBits();
1551 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1552 Value *IncValue = P->getIncomingValue(u);
1553 // Skip direct self references.
1554 if (IncValue == P) continue;
1555
1556 // Change the context instruction to the "edge" that flows into the
1557 // phi. This is important because that is where the value is actually
1558 // "evaluated" even though it is used later somewhere else. (see also
1559 // D69571).
1560 Query RecQ = Q;
1561 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1562
1563 Known2 = KnownBits(BitWidth);
1564 // Recurse, but cap the recursion to one level, because we don't
1565 // want to waste time spinning around in loops.
1566 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1567 Known = KnownBits::commonBits(Known, Known2);
1568 // If all bits have been ruled out, there's no need to check
1569 // more operands.
1570 if (Known.isUnknown())
1571 break;
1572 }
1573 }
1574 break;
1575 }
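
For the shift-recurrence handling in the PHI case above, the invariant is easiest to see on a concrete loop: an lshr recurrence can only add leading zeros, so the start value's leading-zero count bounds every iteration. A plain-C++ illustration follows; the start value and shift amount are arbitrary examples.

#include <cassert>
#include <cstdint>

static unsigned countLeadingZeros32(uint32_t X) {
  unsigned N = 0;
  for (uint32_t Mask = 0x80000000u; Mask && !(X & Mask); Mask >>= 1)
    ++N;
  return N;
}

int main() {
  // %iv = phi [Start, entry], [%iv.next, loop]; %iv.next = lshr %iv, 3
  uint32_t Start = 0x00FF1234;                  // eight leading zeros
  unsigned MinLeadingZeros = countLeadingZeros32(Start);
  uint32_t IV = Start;
  for (int Trip = 0; Trip < 40; ++Trip) {
    assert(countLeadingZeros32(IV) >= MinLeadingZeros);
    IV >>= 3;
  }
  return 0;
}
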
1576 case Instruction::Call:
1577 case Instruction::Invoke:
1578 // If range metadata is attached to this call, set known bits from that,
1579 // and then intersect with known bits based on other properties of the
1580 // function.
1581 if (MDNode *MD =
1582 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1583 computeKnownBitsFromRangeMetadata(*MD, Known);
1584 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1585 computeKnownBits(RV, Known2, Depth + 1, Q);
1586 Known.Zero |= Known2.Zero;
1587 Known.One |= Known2.One;
1588 }
1589 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1590 switch (II->getIntrinsicID()) {
1591 default: break;
1592 case Intrinsic::abs: {
1593 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1594 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1595 Known = Known2.abs(IntMinIsPoison);
1596 break;
1597 }
1598 case Intrinsic::bitreverse:
1599 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1600 Known.Zero |= Known2.Zero.reverseBits();
1601 Known.One |= Known2.One.reverseBits();
1602 break;
1603 case Intrinsic::bswap:
1604 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1605 Known.Zero |= Known2.Zero.byteSwap();
1606 Known.One |= Known2.One.byteSwap();
1607 break;
1608 case Intrinsic::ctlz: {
1609 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1610 // If we have a known 1, its position is our upper bound.
1611 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1612 // If this call is poison for 0 input, the result will be less than 2^n.
1613 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1614 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1615 unsigned LowBits = Log2_32(PossibleLZ)+1;
1616 Known.Zero.setBitsFrom(LowBits);
1617 break;
1618 }
1619 case Intrinsic::cttz: {
1620 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1621 // If we have a known 1, its position is our upper bound.
1622 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1623 // If this call is poison for 0 input, the result will be less than 2^n.
1624 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1625 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1626 unsigned LowBits = Log2_32(PossibleTZ)+1;
1627 Known.Zero.setBitsFrom(LowBits);
1628 break;
1629 }
1630 case Intrinsic::ctpop: {
1631 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1632 // We can bound the space the count needs. Also, bits known to be zero
1633 // can't contribute to the population.
1634 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1635 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1636 Known.Zero.setBitsFrom(LowBits);
1637 // TODO: we could bound KnownOne using the lower bound on the number
1638 // of bits which might be set provided by popcnt KnownOne2.
1639 break;
1640 }
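
The bound used for ctlz, cttz, and ctpop above is pure arithmetic: if the result can be at most M, it fits in Log2_32(M) + 1 bits, so every higher bit is known zero. A plain-C++ check with an example maximum of 7 set bits (the helper mirrors Log2_32 but is not the LLVM function):

#include <cassert>
#include <cstdint>

static unsigned floorLog2(uint32_t X) {  // mirrors Log2_32 for X > 0
  unsigned N = 0;
  while (X >>= 1) ++N;
  return N;
}

int main() {
  unsigned MaxPop = 7;                       // at most 7 bits can be set
  unsigned LowBits = floorLog2(MaxPop) + 1;  // 3 bits hold any value 0..7
  for (unsigned Pop = 0; Pop <= MaxPop; ++Pop)
    assert(Pop < (1u << LowBits));           // bits from LowBits upward are zero
  return 0;
}
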
1641 case Intrinsic::fshr:
1642 case Intrinsic::fshl: {
1643 const APInt *SA;
1644 if (!match(I->getOperand(2), m_APInt(SA)))
1645 break;
1646
1647 // Normalize to funnel shift left.
1648 uint64_t ShiftAmt = SA->urem(BitWidth);
1649 if (II->getIntrinsicID() == Intrinsic::fshr)
1650 ShiftAmt = BitWidth - ShiftAmt;
1651
1652 KnownBits Known3(BitWidth);
1653 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1654 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1655
1656 Known.Zero =
1657 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1658 Known.One =
1659 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1660 break;
1661 }
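
The normalization above relies on fshr by s over an i32 being the same operation as fshl by 32 - s. Below is a self-contained plain-C++ model of the two funnel shifts; the helper names fshl32 and fshr32 are made up for this sketch, and the loop verifies the identity for every non-zero amount.

#include <cassert>
#include <cstdint>

static uint32_t fshl32(uint32_t A, uint32_t B, unsigned S) {
  S %= 32;
  return S == 0 ? A : (A << S) | (B >> (32 - S));
}
static uint32_t fshr32(uint32_t A, uint32_t B, unsigned S) {
  S %= 32;
  return S == 0 ? B : (A << (32 - S)) | (B >> S);
}

int main() {
  uint32_t A = 0xDEADBEEF, B = 0x12345678;  // arbitrary example operands
  for (unsigned S = 1; S < 32; ++S)
    assert(fshr32(A, B, S) == fshl32(A, B, 32 - S));
  return 0;
}
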
1662 case Intrinsic::uadd_sat:
1663 case Intrinsic::usub_sat: {
1664 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1665 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1666 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1667
1668 // Add: Leading ones of either operand are preserved.
1669 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1670 // as leading zeros in the result.
1671 unsigned LeadingKnown;
1672 if (IsAdd)
1673 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1674 Known2.countMinLeadingOnes());
1675 else
1676 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1677 Known2.countMinLeadingOnes());
1678
1679 Known = KnownBits::computeForAddSub(
1680 IsAdd, /* NSW */ false, Known, Known2);
1681
1682 // We select between the operation result and all-ones/zero
1683 // respectively, so we can preserve known ones/zeros.
1684 if (IsAdd) {
1685 Known.One.setHighBits(LeadingKnown);
1686 Known.Zero.clearAllBits();
1687 } else {
1688 Known.Zero.setHighBits(LeadingKnown);
1689 Known.One.clearAllBits();
1690 }
1691 break;
1692 }
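
The leading-bit claim above can be checked exhaustively at a small width: uadd.sat never produces a value smaller than either operand, so leading one bits of either operand survive into the result. A plain-C++ model over i8 follows; uadd_sat8 and countLeadingOnes8 are sketch helpers, not LLVM APIs.

#include <cassert>
#include <cstdint>

static uint8_t uadd_sat8(uint8_t A, uint8_t B) {
  unsigned S = (unsigned)A + B;
  return S > 0xFF ? 0xFF : (uint8_t)S;   // saturate to all-ones on overflow
}
static unsigned countLeadingOnes8(uint8_t X) {
  unsigned N = 0;
  for (uint8_t Mask = 0x80; Mask && (X & Mask); Mask >>= 1)
    ++N;
  return N;
}

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B) {
      unsigned LA = countLeadingOnes8((uint8_t)A);
      unsigned LB = countLeadingOnes8((uint8_t)B);
      unsigned Lead = LA > LB ? LA : LB;
      assert(countLeadingOnes8(uadd_sat8((uint8_t)A, (uint8_t)B)) >= Lead);
    }
  return 0;
}
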
1693 case Intrinsic::umin:
1694 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1695 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1696 Known = KnownBits::umin(Known, Known2);
1697 break;
1698 case Intrinsic::umax:
1699 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1700 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1701 Known = KnownBits::umax(Known, Known2);
1702 break;
1703 case Intrinsic::smin:
1704 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1705 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1706 Known = KnownBits::smin(Known, Known2);
1707 break;
1708 case Intrinsic::smax:
1709 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1710 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1711 Known = KnownBits::smax(Known, Known2);
1712 break;
1713 case Intrinsic::x86_sse42_crc32_64_64:
1714 Known.Zero.setBitsFrom(32);
1715 break;
1716 case Intrinsic::riscv_vsetvli:
1717 case Intrinsic::riscv_vsetvlimax:
1718 // Assume that VL output is positive and would fit in an int32_t.
1719 // TODO: VLEN might be capped at 16 bits in a future V spec update.
1720 if (BitWidth >= 32)
1721 Known.Zero.setBitsFrom(31);
1722 break;
1723 case Intrinsic::vscale: {
1724 if (!II->getParent() || !II->getFunction() ||
1725 !II->getFunction()->hasFnAttribute(Attribute::VScaleRange))
1726 break;
1727
1728 auto Attr = II->getFunction()->getFnAttribute(Attribute::VScaleRange);
1729 Optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
1730
1731 if (!VScaleMax)
1732 break;
1733
1734 unsigned VScaleMin = Attr.getVScaleRangeMin();
1735
1736 // If vscale min = max then we know the exact value at compile time
1737 // and hence we know the exact bits.
1738 if (VScaleMin == VScaleMax) {
1739 Known.One = VScaleMin;
1740 Known.Zero = VScaleMin;
1741 Known.Zero.flipAllBits();
1742 break;
1743 }
1744
1745 unsigned FirstZeroHighBit =
1746 32 - countLeadingZeros(VScaleMax.getValue());
1747 if (FirstZeroHighBit < BitWidth)
1748 Known.Zero.setBitsFrom(FirstZeroHighBit);
1749
1750 break;
1751 }
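
For the vscale handling above: when the attribute pins min == max the value is a compile-time constant, and otherwise the max alone bounds how many low bits can vary. A plain-C++ sketch with example ranges vscale_range(4,4) and vscale_range(1,12); the helper is a stand-in for countLeadingZeros, not the LLVM function.

#include <cassert>
#include <cstdint>

static unsigned countLeadingZeros32(uint32_t X) {
  unsigned N = 0;
  for (uint32_t Mask = 0x80000000u; Mask && !(X & Mask); Mask >>= 1)
    ++N;
  return N;
}

int main() {
  // vscale_range(4, 4): the call is exactly 4, so One = 4 and Zero = ~4.
  uint32_t KnownOne = 4, KnownZero = ~4u;
  assert((KnownOne & KnownZero) == 0);

  // vscale_range(1, 12): any admissible value fits below bit 4.
  uint32_t VScaleMax = 12;
  unsigned FirstZeroHighBit = 32 - countLeadingZeros32(VScaleMax);
  for (uint32_t V = 1; V <= VScaleMax; ++V)
    assert((V >> FirstZeroHighBit) == 0);   // high bits are known zero
  return 0;
}
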
1752 }
1753 }
1754 break;
1755 case Instruction::ShuffleVector: {
1756 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1757 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1758 if (!Shuf) {
1759 Known.resetAll();
1760 return;
1761 }
1762 // For undef elements, we don't know anything about the common state of
1763 // the shuffle result.
1764 APInt DemandedLHS, DemandedRHS;
1765 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1766 Known.resetAll();
1767 return;
1768 }
1769 Known.One.setAllBits();
1770 Known.Zero.setAllBits();
1771 if (!!DemandedLHS) {
1772 const Value *LHS = Shuf->getOperand(0);
1773 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1774 // If we don't know any bits, early out.
1775 if (Known.isUnknown())
1776 break;
1777 }
1778 if (!!DemandedRHS) {
1779 const Value *RHS = Shuf->getOperand(1);
1780 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1781 Known = KnownBits::commonBits(Known, Known2);
1782 }
1783 break;
1784 }
1785 case Instruction::InsertElement: {
1786 const Value *Vec = I->getOperand(0);
1787 const Value *Elt = I->getOperand(1);
1788 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1789 // Early out if the index is non-constant or out-of-range.
1790 unsigned NumElts = DemandedElts.getBitWidth();
1791 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1792 Known.resetAll();
1793 return;
1794 }
1795 Known.One.setAllBits();
1796 Known.Zero.setAllBits();
1797 unsigned EltIdx = CIdx->getZExtValue();
1798 // Do we demand the inserted element?
1799 if (DemandedElts[EltIdx]) {
1800 computeKnownBits(Elt, Known, Depth + 1, Q);
1801 // If we don't know any bits, early out.
1802 if (Known.isUnknown())
1803 break;
1804 }
1805 // We don't need the base vector element that has been inserted.
1806 APInt DemandedVecElts = DemandedElts;
1807 DemandedVecElts.clearBit(EltIdx);
1808 if (!!DemandedVecElts) {
1809 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1810 Known = KnownBits::commonBits(Known, Known2);
1811 }
1812 break;
1813 }
1814 case Instruction::ExtractElement: {
1815 // Look through extract element. If the index is non-constant or
1816 // out-of-range demand all elements, otherwise just the extracted element.
1817 const Value *Vec = I->getOperand(0);
1818 const Value *Idx = I->getOperand(1);
1819 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1820 if (isa<ScalableVectorType>(Vec->getType())) {
1821 // FIXME: there's probably *something* we can do with scalable vectors
1822 Known.resetAll();
1823 break;
1824 }
1825 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1826 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1827 if (CIdx && CIdx->getValue().ult(NumElts))
1828 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1829 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1830 break;
1831 }
1832 case Instruction::ExtractValue:
1833 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1834 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1835 if (EVI->getNumIndices() != 1) break;
1836 if (EVI->getIndices()[0] == 0) {
1837 switch (II->getIntrinsicID()) {
1838 default: break;
1839 case Intrinsic::uadd_with_overflow:
1840 case Intrinsic::sadd_with_overflow:
1841 computeKnownBitsAddSub(true, II->getArgOperand(0),
1842 II->getArgOperand(1), false, DemandedElts,
1843 Known, Known2, Depth, Q);
1844 break;
1845 case Intrinsic::usub_with_overflow:
1846 case Intrinsic::ssub_with_overflow:
1847 computeKnownBitsAddSub(false, II->getArgOperand(0),
1848 II->getArgOperand(1), false, DemandedElts,
1849 Known, Known2, Depth, Q);
1850 break;
1851 case Intrinsic::umul_with_overflow:
1852 case Intrinsic::smul_with_overflow:
1853 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1854 DemandedElts, Known, Known2, Depth, Q);
1855 break;
1856 }
1857 }
1858 }
1859 break;
1860 case Instruction::Freeze:
1861 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1862 Depth + 1))
1863 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1864 break;
1865 }
1866}
1867
1868/// Determine which bits of V are known to be either zero or one and return
1869/// them.
1870KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1871 unsigned Depth, const Query &Q) {
1872 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1873 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1874 return Known;
1875}
1876
1877/// Determine which bits of V are known to be either zero or one and return
1878/// them.
1879KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1880 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1881 computeKnownBits(V, Known, Depth, Q);
1882 return Known;
1883}
1884
1885/// Determine which bits of V are known to be either zero or one and return
1886/// them in the Known bit set.
1887///
1888/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1889/// we cannot optimize based on the assumption that it is zero without changing
1890/// it to be an explicit zero. If we don't change it to zero, other code could
1891 /// be optimized based on the contradictory assumption that it is non-zero.
1892/// Because instcombine aggressively folds operations with undef args anyway,
1893/// this won't lose us code quality.
1894///
1895/// This function is defined on values with integer type, values with pointer
1896/// type, and vectors of integers. In the case
1897 /// where V is a vector, the known zero and known one values are the
1898/// same width as the vector element, and the bit is set only if it is true
1899/// for all of the demanded elements in the vector specified by DemandedElts.
1900void computeKnownBits(const Value *V, const APInt &DemandedElts,
1901 KnownBits &Known, unsigned Depth, const Query &Q) {
1902 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1903 // No demanded elts or V is a scalable vector, better to assume we don't
1904 // know anything.
1905 Known.resetAll();
1906 return;
1907 }
1908
1909 assert(V && "No Value?");
1910 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1911
1912#ifndef NDEBUG
1913 Type *Ty = V->getType();
1914 unsigned BitWidth = Known.getBitWidth();
1915
1916 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1917 "Not integer or pointer type!");
1918
1919 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1920 assert(
1921 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1922 "DemandedElt width should equal the fixed vector number of elements");
1923 } else {
1924 assert(DemandedElts == APInt(1, 1) &&
1925 "DemandedElt width should be 1 for scalars");
1926 }
1927
1928 Type *ScalarTy = Ty->getScalarType();
1929 if (ScalarTy->isPointerTy()) {
1930 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1931 "V and Known should have same BitWidth");
1932 } else {
1933 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1934 "V and Known should have same BitWidth");
1935 }
1936#endif
1937
1938 const APInt *C;
1939 if (match(V, m_APInt(C))) {
1940 // We know all of the bits for a scalar constant or a splat vector constant!
1941 Known = KnownBits::makeConstant(*C);
1942 return;
1943 }
1944 // Null and aggregate-zero are all-zeros.
1945 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1946 Known.setAllZero();
1947 return;
1948 }
1949 // Handle a constant vector by taking the intersection of the known bits of
1950 // each element.
1951 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1952 // We know that CDV must be a vector of integers. Take the intersection of
1953 // each element.
1954 Known.Zero.setAllBits(); Known.One.setAllBits();
1955 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1956 if (!DemandedElts[i])
1957 continue;
1958 APInt Elt = CDV->getElementAsAPInt(i);
1959 Known.Zero &= ~Elt;
1960 Known.One &= Elt;
1961 }
1962 return;
1963 }
1964
1965 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1966 // We know that CV must be a vector of integers. Take the intersection of
1967 // each element.
1968 Known.Zero.setAllBits(); Known.One.setAllBits();
1969 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1970 if (!DemandedElts[i])
1971 continue;
1972 Constant *Element = CV->getAggregateElement(i);
1973 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1974 if (!ElementCI) {
1975 Known.resetAll();
1976 return;
1977 }
1978 const APInt &Elt = ElementCI->getValue();
1979 Known.Zero &= ~Elt;
1980 Known.One &= Elt;
1981 }
1982 return;
1983 }
1984
1985 // Start out not knowing anything.
1986 Known.resetAll();
1987
1988 // We can't imply anything about undefs.
1989 if (isa<UndefValue>(V))
1990 return;
1991
1992 // There's no point in looking through other users of ConstantData for
1993 // assumptions. Confirm that we've handled them all.
1994 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1995
1996 // All recursive calls that increase depth must come after this.
1997 if (Depth == MaxAnalysisRecursionDepth)
1998 return;
1999
2000 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2001 // the bits of its aliasee.
2002 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2003 if (!GA->isInterposable())
2004 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2005 return;
2006 }
2007
2008 if (const Operator *I = dyn_cast<Operator>(V))
2009 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2010
2011 // Aligned pointers have trailing zeros - refine Known.Zero set
2012 if (isa<PointerType>(V->getType())) {
2013 Align Alignment = V->getPointerAlignment(Q.DL);
2014 Known.Zero.setLowBits(Log2(Alignment));
2015 }
2016
2017 // computeKnownBitsFromAssume strictly refines Known.
2018 // Therefore, we run them after computeKnownBitsFromOperator.
2019
2020 // Check whether a nearby assume intrinsic can determine some known bits.
2021 computeKnownBitsFromAssume(V, Known, Depth, Q);
2022
2023 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2024}
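
The constant-vector paths in computeKnownBits above intersect per-element knowledge: a bit is known zero (one) only if it is zero (one) in every demanded element. A plain-integer sketch over the example vector <i8 12, i8 4> (not the APInt/KnownBits API):

#include <cassert>
#include <cstdint>

int main() {
  uint8_t Elts[] = {12, 4};                // 0b00001100 and 0b00000100
  uint8_t KnownZero = 0xFF, KnownOne = 0xFF;
  for (uint8_t E : Elts) {
    KnownZero &= (uint8_t)~E;              // zero only where zero in all elements
    KnownOne &= E;                         // one only where one in all elements
  }
  assert(KnownOne == 0x04);                // bit 2 is set in every element
  assert(KnownZero == (uint8_t)~0x0C);     // only bit 3 stays unknown
  return 0;
}
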
2025
2026/// Return true if the given value is known to have exactly one
2027/// bit set when defined. For vectors return true if every element is known to
2028/// be a power of two when defined. Supports values with integer or pointer
2029/// types and vectors of integers.
2030bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2031 const Query &Q) {
2032 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2033
2034 // Attempt to match against constants.
2035 if (OrZero && match(V, m_Power2OrZero()))
2036 return true;
2037 if (match(V, m_Power2()))
2038 return true;
2039
2040 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2041 // it is shifted off the end then the result is undefined.
2042 if (match(V, m_Shl(m_One(), m_Value())))
2043 return true;
2044
2045 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2046 // the bottom. If it is shifted off the bottom then the result is undefined.
2047 if (match(V, m_LShr(m_SignMask(), m_Value())))
2048 return true;
2049
2050 // The remaining tests are all recursive, so bail out if we hit the limit.
2051 if (Depth++ == MaxAnalysisRecursionDepth)
2052 return false;
2053
2054 Value *X = nullptr, *Y = nullptr;
2055 // A shift left or a logical shift right of a power of two is a power of two
2056 // or zero.
2057 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2058 match(V, m_LShr(m_Value(X), m_Value()))))
2059 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2060
2061 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2062 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2063
2064 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2065 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2066 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2067
2068 // Peek through min/max.
2069 if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
2070 return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
2071 isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
2072 }
2073
2074 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2075 // A power of two and'd with anything is a power of two or zero.
2076 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2077 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2078 return true;
2079 // X & (-X) is always a power of two or zero.
2080 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2081 return true;
2082 return false;
2083 }
2084
2085 // Adding a power-of-two or zero to the same power-of-two or zero yields
2086 // either the original power-of-two, a larger power-of-two or zero.
2087 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2088 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2089 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2090 Q.IIQ.hasNoSignedWrap(VOBO)) {
2091 if (match(X, m_And(m_Specific(Y), m_Value())) ||
2092 match(X, m_And(m_Value(), m_Specific(Y))))
2093 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2094 return true;
2095 if (match(Y, m_And(m_Specific(X), m_Value())) ||
2096 match(Y, m_And(m_Value(), m_Specific(X))))
2097 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2098 return true;
2099
2100 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2101 KnownBits LHSBits(BitWidth);
2102 computeKnownBits(X, LHSBits, Depth, Q);
2103
2104 KnownBits RHSBits(BitWidth);
2105 computeKnownBits(Y, RHSBits, Depth, Q);
2106 // If i8 V is a power of two or zero:
2107 // ZeroBits: 1 1 1 0 1 1 1 1
2108 // ~ZeroBits: 0 0 0 1 0 0 0 0
2109 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2110 // If OrZero isn't set, we cannot give back a zero result.
2111 // Make sure either the LHS or RHS has a bit set.
2112 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2113 return true;
2114 }
2115 }
2116
2117 // An exact divide or right shift can only shift off zero bits, so the result
2118 // is a power of two only if the first operand is a power of two and not
2119 // copying a sign bit (sdiv int_min, 2).
2120 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2121 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2122 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2123 Depth, Q);
2124 }
2125
2126 return false;
2127}
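
One of the patterns matched above, X & -X, isolates the lowest set bit and is therefore always a power of two or zero. A plain-C++ exhaustive check over 16-bit values, illustrative only:

#include <cassert>
#include <cstdint>

static bool isPowerOfTwoOrZero(uint32_t X) { return (X & (X - 1)) == 0; }

int main() {
  for (uint32_t X = 0; X < (1u << 16); ++X)
    assert(isPowerOfTwoOrZero(X & (0u - X)));   // lowest-set-bit isolation
  return 0;
}
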
2128
2129/// Test whether a GEP's result is known to be non-null.
2130///
2131/// Uses properties inherent in a GEP to try to determine whether it is known
2132/// to be non-null.
2133///
2134/// Currently this routine does not support vector GEPs.
2135static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2136 const Query &Q) {
2137 const Function *F = nullptr;
2138 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2139 F = I->getFunction();
2140
2141 if (!GEP->isInBounds() ||
2142 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2143 return false;
2144
2145 // FIXME: Support vector-GEPs.
2146 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2147
2148 // If the base pointer is non-null, we cannot walk to a null address with an
2149 // inbounds GEP in address space zero.
2150 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2151 return true;
2152
2153 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2154 // If so, then the GEP cannot produce a null pointer, as doing so would
2155 // inherently violate the inbounds contract within address space zero.
2156 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2157 GTI != GTE; ++GTI) {
2158 // Struct types are easy -- they must always be indexed by a constant.
2159 if (StructType *STy = GTI.getStructTypeOrNull()) {
2160 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2161 unsigned ElementIdx = OpC->getZExtValue();
2162 const StructLayout *SL = Q.DL.getStructLayout(STy);
2163 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2164 if (ElementOffset > 0)
2165 return true;
2166 continue;
2167 }
2168
2169 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2170 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2171 continue;
2172
2173 // Fast path the constant operand case both for efficiency and so we don't
2174 // increment Depth when just zipping down an all-constant GEP.
2175 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2176 if (!OpC->isZero())
2177 return true;
2178 continue;
2179 }
2180
2181 // We post-increment Depth here because while isKnownNonZero increments it
2182 // as well, when we pop back up that increment won't persist. We don't want
2183 // to recurse 10k times just because we have 10k GEP operands. We don't
2184 // bail completely out because we want to handle constant GEPs regardless
2185 // of depth.
2186 if (Depth++ >= MaxAnalysisRecursionDepth)
2187 continue;
2188
2189 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2190 return true;
2191 }
2192
2193 return false;
2194}
2195
2196static bool isKnownNonNullFromDominatingCondition(const Value *V,
2197 const Instruction *CtxI,
2198 const DominatorTree *DT) {
2199 if (isa<Constant>(V))
2200 return false;
2201
2202 if (!CtxI || !DT)
2203 return false;
2204
2205 unsigned NumUsesExplored = 0;
2206 for (auto *U : V->users()) {
2207 // Avoid massive lists
2208 if (NumUsesExplored >= DomConditionsMaxUses)
2209 break;
2210 NumUsesExplored++;
2211
2212 // If the value is used as an argument to a call or invoke, then argument
2213 // attributes may provide an answer about null-ness.
2214 if (const auto *CB = dyn_cast<CallBase>(U))
2215 if (auto *CalledFunc = CB->getCalledFunction())
2216 for (const Argument &Arg : CalledFunc->args())
2217 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2218 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2219 DT->dominates(CB, CtxI))
2220 return true;
2221
2222 // If the value is used as a load/store, then the pointer must be non null.
2223 if (V == getLoadStorePointerOperand(U)) {
2224 const Instruction *I = cast<Instruction>(U);
2225 if (!NullPointerIsDefined(I->getFunction(),
2226 V->getType()->getPointerAddressSpace()) &&
2227 DT->dominates(I, CtxI))
2228 return true;
2229 }
2230
2231 // Consider only compare instructions uniquely controlling a branch
2232 Value *RHS;
2233 CmpInst::Predicate Pred;
2234 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2235 continue;
2236
2237 bool NonNullIfTrue;
2238 if (cmpExcludesZero(Pred, RHS))
2239 NonNullIfTrue = true;
2240 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2241 NonNullIfTrue = false;
2242 else
2243 continue;
2244
2245 SmallVector<const User *, 4> WorkList;
2246 SmallPtrSet<const User *, 4> Visited;
2247 for (auto *CmpU : U->users()) {
2248 assert(WorkList.empty() && "Should be!");
2249 if (Visited.insert(CmpU).second)
2250 WorkList.push_back(CmpU);
2251
2252 while (!WorkList.empty()) {
2253 auto *Curr = WorkList.pop_back_val();
2254
2255 // If a user is an AND, add all its users to the work list. We only
2256 // propagate "pred != null" condition through AND because it is only
2257 // correct to assume that all conditions of AND are met in true branch.
2258 // TODO: Support similar logic of OR and EQ predicate?
2259 if (NonNullIfTrue)
2260 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2261 for (auto *CurrU : Curr->users())
2262 if (Visited.insert(CurrU).second)
2263 WorkList.push_back(CurrU);
2264 continue;
2265 }
2266
2267 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2268 assert(BI->isConditional() && "uses a comparison!");
2269
2270 BasicBlock *NonNullSuccessor =
2271 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2272 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2273 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2274 return true;
2275 } else if (NonNullIfTrue && isGuard(Curr) &&
2276 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2277 return true;
2278 }
2279 }
2280 }
2281 }
2282
2283 return false;
2284}
2285
2286/// Does the 'Range' metadata (which must be a valid MD_range operand list)
2287 /// ensure that the value it's attached to is never equal to Value? 'RangeType'
2288 /// is the type of the value described by the range.
2289static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2290 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2291 assert(NumRanges >= 1);
2292 for (unsigned i = 0; i < NumRanges; ++i) {
2293 ConstantInt *Lower =
2294 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2295 ConstantInt *Upper =
2296 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2297 ConstantRange Range(Lower->getValue(), Upper->getValue());
2298 if (Range.contains(Value))
2299 return false;
2300 }
2301 return true;
2302}
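
The check above walks the !range operand list as half-open [Lo, Hi) pairs and reports the value excluded only when no pair contains it. A standalone plain-C++ model follows; it handles non-wrapping ranges only, unlike the real ConstantRange, and the example metadata values are made up.

#include <cstdint>
#include <utility>
#include <vector>

static bool rangesExclude(const std::vector<std::pair<uint64_t, uint64_t>> &Rs,
                          uint64_t V) {
  for (const auto &R : Rs)
    if (V >= R.first && V < R.second)
      return false;                  // some range contains V
  return true;                       // no range contains V
}

int main() {
  // e.g. !range !{i32 1, i32 100, i32 200, i32 300}
  std::vector<std::pair<uint64_t, uint64_t>> Ranges = {{1, 100}, {200, 300}};
  bool ExcludesZero = rangesExclude(Ranges, 0);       // true: 0 is impossible
  bool ExcludesFortyTwo = rangesExclude(Ranges, 42);  // false: 42 is allowed
  return ExcludesZero && !ExcludesFortyTwo ? 0 : 1;
}
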
2303
2304/// Try to detect a recurrence that monotonically increases/decreases from a
2305/// non-zero starting value. These are common as induction variables.
2306static bool isNonZeroRecurrence(const PHINode *PN) {
2307 BinaryOperator *BO = nullptr;
2308 Value *Start = nullptr, *Step = nullptr;
2309 const APInt *StartC, *StepC;
2310 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2311 !match(Start, m_APInt(StartC)) || StartC->isZero())
2312 return false;
2313
2314 switch (BO->getOpcode()) {
2315 case Instruction::Add:
2316 // Starting from non-zero and stepping away from zero can never wrap back
2317 // to zero.
2318 return BO->hasNoUnsignedWrap() ||
2319 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2320 StartC->isNegative() == StepC->isNegative());
2321 case Instruction::Mul:
2322 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2323 match(Step, m_APInt(StepC)) && !StepC->isZero();
2324 case Instruction::Shl:
2325 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2326 case Instruction::AShr:
2327 case Instruction::LShr:
2328 return BO->isExact();
2329 default:
2330 return false;
2331 }
2332}
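
The add case above is the classic induction variable: a non-zero start stepped with nuw can never wrap back to zero. A plain-C++ toy loop that carries the nuw premise as an explicit assertion; the start and step values are examples only.

#include <cassert>
#include <cstdint>

int main() {
  // %iv = phi [1, entry], [%iv.next, loop]; %iv.next = add nuw %iv, 2
  uint32_t IV = 1;
  for (int Trip = 0; Trip < 1000; ++Trip) {
    assert(IV != 0);
    uint64_t Next = (uint64_t)IV + 2;
    assert(Next <= UINT32_MAX);        // the "nuw" premise for this toy loop
    IV = (uint32_t)Next;
  }
  return 0;
}
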
2333
2334/// Return true if the given value is known to be non-zero when defined. For
2335/// vectors, return true if every demanded element is known to be non-zero when
2336/// defined. For pointers, if the context instruction and dominator tree are
2337/// specified, perform context-sensitive analysis and return true if the
2338/// pointer couldn't possibly be null at the specified instruction.
2339/// Supports values with integer or pointer type and vectors of integers.
2340bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2341 const Query &Q) {
2342 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2343 // vector
2344 if (isa<ScalableVectorType>(V->getType()))
2345 return false;
2346
2347 if (auto *C = dyn_cast<Constant>(V)) {
2348 if (C->isNullValue())
2349 return false;
2350 if (isa<ConstantInt>(C))
2351 // Must be non-zero due to null test above.
2352 return true;
2353
2354 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2355 // See the comment for IntToPtr/PtrToInt instructions below.
2356 if (CE->getOpcode() == Instruction::IntToPtr ||
2357 CE->getOpcode() == Instruction::PtrToInt)
2358 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2359 .getFixedSize() <=
2360 Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2361 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2362 }
2363
2364 // For constant vectors, check that all elements are undefined or known
2365 // non-zero to determine that the whole vector is known non-zero.
2366 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2367 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2368 if (!DemandedElts[i])
2369 continue;
2370 Constant *Elt = C->getAggregateElement(i);
2371 if (!Elt || Elt->isNullValue())
2372 return false;
2373 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2374 return false;
2375 }
2376 return true;
2377 }
2378
2379 // A global variable in address space 0 is non null unless extern weak
2380 // or an absolute symbol reference. Other address spaces may have null as a
2381 // valid address for a global, so we can't assume anything.
2382 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2383 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2384 GV->getType()->getAddressSpace() == 0)
2385 return true;
2386 } else
2387 return false;
2388 }
2389
2390 if (auto *I = dyn_cast<Instruction>(V)) {
2391 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2392 // If the possible ranges don't contain zero, then the value is
2393 // definitely non-zero.
2394 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2395 const APInt ZeroValue(Ty->getBitWidth(), 0);
2396 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2397 return true;
2398 }
2399 }
2400 }
2401
2402 if (isKnownNonZeroFromAssume(V, Q))
2403 return true;
2404
2405 // Some of the tests below are recursive, so bail out if we hit the limit.
2406 if (Depth++ >= MaxAnalysisRecursionDepth)
2407 return false;
2408
2409 // Check for pointer simplifications.
2410
2411 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2412 // Alloca never returns null, malloc might.
2413 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2414 return true;
2415
2416 // A byval or inalloca argument may not be null in a non-default address
2417 // space. A nonnull argument is assumed to never be null.
2418 if (const Argument *A = dyn_cast<Argument>(V)) {
2419 if (((A->hasPassPointeeByValueCopyAttr() &&
2420 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2421 A->hasNonNullAttr()))
2422 return true;
2423 }
2424
2425 // A Load tagged with nonnull metadata is never null.
2426 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2427 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2428 return true;
2429
2430 if (const auto *Call = dyn_cast<CallBase>(V)) {
2431 if (Call->isReturnNonNull())
2432 return true;
2433 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2434 return isKnownNonZero(RP, Depth, Q);
2435 }
2436 }
2437
2438 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2439 return true;
2440
2441 // Check for recursive pointer simplifications.
2442 if (V->getType()->isPointerTy()) {
2443 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2444 // do not alter the value, or at least not the nullness property of the
2445 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2446 //
2447 // Note that we have to take special care to avoid looking through
2448 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2449 // as casts that can alter the value, e.g., AddrSpaceCasts.
2450 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2451 return isGEPKnownNonNull(GEP, Depth, Q);
2452
2453 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2454 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2455
2456 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2457 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2458 Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2459 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2460 }
2461
2462 // Similar to int2ptr above, we can look through ptr2int here if the cast
2463 // is a no-op or an extend and not a truncate.
2464 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2465 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2466 Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2467 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2468
2469 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2470
2471 // X | Y != 0 if X != 0 or Y != 0.
2472 Value *X = nullptr, *Y = nullptr;
2473 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2474 return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2475 isKnownNonZero(Y, DemandedElts, Depth, Q);
2476
2477 // ext X != 0 if X != 0.
2478 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2479 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2480
2481 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2482 // if the lowest bit is shifted off the end.
2483 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2484 // shl nuw can't remove any non-zero bits.
2485 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2486 if (Q.IIQ.hasNoUnsignedWrap(BO))
2487 return isKnownNonZero(X, Depth, Q);
2488
2489 KnownBits Known(BitWidth);
2490 computeKnownBits(X, DemandedElts, Known, Depth, Q);
2491 if (Known.One[0])
2492 return true;
2493 }
2494 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2495 // defined if the sign bit is shifted off the end.
2496 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2497 // shr exact can only shift out zero bits.
2498 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2499 if (BO->isExact())
2500 return isKnownNonZero(X, Depth, Q);
2501
2502 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2503 if (Known.isNegative())
2504 return true;
2505
2506 // If the shifter operand is a constant, and all of the bits shifted
2507 // out are known to be zero, and X is known non-zero then at least one
2508 // non-zero bit must remain.
2509 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2510 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2511 // Is there a known one in the portion not shifted out?
2512 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2513 return true;
2514 // Are all the bits to be shifted out known zero?
2515 if (Known.countMinTrailingZeros() >= ShiftVal)
2516 return isKnownNonZero(X, DemandedElts, Depth, Q);
2517 }
2518 }
2519 // div exact can only produce a zero if the dividend is zero.
2520 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2521 return isKnownNonZero(X, DemandedElts, Depth, Q);
2522 }
2523 // X + Y.
2524 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2525 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2526 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2527
2528 // If X and Y are both non-negative (as signed values) then their sum is not
2529 // zero unless both X and Y are zero.
2530 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2531 if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2532 isKnownNonZero(Y, DemandedElts, Depth, Q))
2533 return true;
2534
2535 // If X and Y are both negative (as signed values) then their sum is not
2536 // zero unless both X and Y equal INT_MIN.
2537 if (XKnown.isNegative() && YKnown.isNegative()) {
2538 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2539 // The sign bit of X is set. If some other bit is set then X is not equal
2540 // to INT_MIN.
2541 if (XKnown.One.intersects(Mask))
2542 return true;
2543 // The sign bit of Y is set. If some other bit is set then Y is not equal
2544 // to INT_MIN.
2545 if (YKnown.One.intersects(Mask))
2546 return true;
2547 }
2548
2549 // The sum of a non-negative number and a power of two is not zero.
2550 if (XKnown.isNonNegative() &&
2551 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2552 return true;
2553 if (YKnown.isNonNegative() &&
2554 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2555 return true;
2556 }
2557 // X * Y.
2558 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2559 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2560 // If X and Y are non-zero then so is X * Y as long as the multiplication
2561 // does not overflow.
2562 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2563 isKnownNonZero(X, DemandedElts, Depth, Q) &&
2564 isKnownNonZero(Y, DemandedElts, Depth, Q))
2565 return true;
2566 }
2567 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2568 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2569 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2570 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2571 return true;
2572 }
2573 // PHI
2574 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2575 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2576 return true;
2577
2578 // Check if all incoming values are non-zero using recursion.
2579 Query RecQ = Q;
2580 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2581 return llvm::all_of(PN->operands(), [&](const Use &U) {
2582 if (U.get() == PN)
2583 return true;
2584 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2585 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2586 });
2587 }
2588 // ExtractElement
2589 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2590 const Value *Vec = EEI->getVectorOperand();
2591 const Value *Idx = EEI->getIndexOperand();
2592 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2593 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2594 unsigned NumElts = VecTy->getNumElements();
2595 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2596 if (CIdx && CIdx->getValue().ult(NumElts))
2597 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2598 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2599 }
2600 }
2601 // Freeze
2602 else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2603 auto *Op = FI->getOperand(0);
2604 if (isKnownNonZero(Op, Depth, Q) &&
2605 isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2606 return true;
2607 }
2608
2609 KnownBits Known(BitWidth);
2610 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2611 return Known.One != 0;
2612}
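// Illustrative example (hypothetical IR): for %v = shl nuw i32 %x, %n, the
// shl case above reduces the query to isKnownNonZero(%x), since a
// no-unsigned-wrap shift cannot discard any set bits; without nuw the case
// can still succeed when the low bit of %x is known to be one.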
2613
2614bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2615 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2616 // vector
2617 if (isa<ScalableVectorType>(V->getType()))
2618 return false;
2619
2620 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2621 APInt DemandedElts =
2622 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
2623 return isKnownNonZero(V, DemandedElts, Depth, Q);
2624}
2625
2626/// If the pair of operators are the same invertible function, return
2627/// the operands of the function corresponding to each input. Otherwise,
2628/// return None. An invertible function is one that is 1-to-1 and maps
2629/// every input value to exactly one output value. This is equivalent to
2630/// saying that Op1 and Op2 are equal exactly when the specified pair of
2631/// operands are equal (except that Op1 and Op2 may be poison more often).
2632static Optional<std::pair<Value*, Value*>>
2633getInvertibleOperands(const Operator *Op1,
2634 const Operator *Op2) {
2635 if (Op1->getOpcode() != Op2->getOpcode())
2636 return None;
2637
2638 auto getOperands = [&](unsigned OpNum) -> auto {
2639 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2640 };
2641
2642 switch (Op1->getOpcode()) {
2643 default:
2644 break;
2645 case Instruction::Add:
2646 case Instruction::Sub:
2647 if (Op1->getOperand(0) == Op2->getOperand(0))
2648 return getOperands(1);
2649 if (Op1->getOperand(1) == Op2->getOperand(1))
2650 return getOperands(0);
2651 break;
2652 case Instruction::Mul: {
2653 // invertible if A * B == (A * B) mod 2^N where A and B are integers
2654 // and N is the bitwidth. The nsw case is non-obvious, but proven by
2655 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2656 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2657 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2658 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2659 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2660 break;
2661
2662 // Assume operand order has been canonicalized
2663 if (Op1->getOperand(1) == Op2->getOperand(1) &&
2664 isa<ConstantInt>(Op1->getOperand(1)) &&
2665 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2666 return getOperands(0);
2667 break;
2668 }
2669 case Instruction::Shl: {
2670 // Same as multiplies, with the difference that we don't need to check
2671 // for a non-zero multiply. Shifts always multiply by non-zero.
2672 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2673 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2674 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2675 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2676 break;
2677
2678 if (Op1->getOperand(1) == Op2->getOperand(1))
2679 return getOperands(0);
2680 break;
2681 }
2682 case Instruction::AShr:
2683 case Instruction::LShr: {
2684 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
2685 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
2686 if (!PEO1->isExact() || !PEO2->isExact())
2687 break;
2688
2689 if (Op1->getOperand(1) == Op2->getOperand(1))
2690 return getOperands(0);
2691 break;
2692 }
2693 case Instruction::SExt:
2694 case Instruction::ZExt:
2695 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
2696 return getOperands(0);
2697 break;
2698 case Instruction::PHI: {
2699 const PHINode *PN1 = cast<PHINode>(Op1);
2700 const PHINode *PN2 = cast<PHINode>(Op2);
2701
2702 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
2703 // are a single invertible function of the start values? Note that repeated
2704 // application of an invertible function is also invertible
2705 BinaryOperator *BO1 = nullptr;
2706 Value *Start1 = nullptr, *Step1 = nullptr;
2707 BinaryOperator *BO2 = nullptr;
2708 Value *Start2 = nullptr, *Step2 = nullptr;
2709 if (PN1->getParent() != PN2->getParent() ||
2710 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
2711 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
2712 break;
2713
2714 auto Values = getInvertibleOperands(cast<Operator>(BO1),
2715 cast<Operator>(BO2));
2716 if (!Values)
2717 break;
2718
2719 // We have to be careful of mutually defined recurrences here. Ex:
2720 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
2721 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
2722 // The invertibility of these is complicated, and not worth reasoning
2723 // about (yet?).
2724 if (Values->first != PN1 || Values->second != PN2)
2725 break;
2726
2727 return std::make_pair(Start1, Start2);
2728 }
2729 }
2730 return None;
2731}
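// Illustrative example (hypothetical IR): for
//   %a = add i32 %x, %c
//   %b = add i32 %y, %c
// the Add case returns the pair (%x, %y); with a common second operand the
// two adds are equal exactly when %x == %y, so proving %x != %y suffices to
// prove %a != %b in isKnownNonEqual below.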
2732
2733/// Return true if V2 == V1 + X, where X is known non-zero.
2734static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2735 const Query &Q) {
2736 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2737 if (!BO || BO->getOpcode() != Instruction::Add)
2738 return false;
2739 Value *Op = nullptr;
2740 if (V2 == BO->getOperand(0))
2741 Op = BO->getOperand(1);
2742 else if (V2 == BO->getOperand(1))
2743 Op = BO->getOperand(0);
2744 else
2745 return false;
2746 return isKnownNonZero(Op, Depth + 1, Q);
2747}
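// Illustrative example (hypothetical IR): with %v2 = add i32 %v1, %delta and
// %delta known non-zero, isAddOfNonZero(%v1, %v2, ...) returns true and hence
// %v1 != %v2. Wrapping is harmless here: adding a non-zero value modulo 2^N
// never maps a value back to itself.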
2748
2749/// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2750/// the multiplication is nuw or nsw.
2751static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2752 const Query &Q) {
2753 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2754 const APInt *C;
2755 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2756 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2757 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
2758 }
2759 return false;
2760}
2761
2762/// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2763/// the shift is nuw or nsw.
2764static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2765 const Query &Q) {
2766 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2767 const APInt *C;
2768 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2769 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2770 !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
2771 }
2772 return false;
2773}
2774
2775static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2776 unsigned Depth, const Query &Q) {
2777 // Check that the two PHIs are in the same block.
2778 if (PN1->getParent() != PN2->getParent())
2779 return false;
2780
2781 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2782 bool UsedFullRecursion = false;
2783 for (const BasicBlock *IncomBB : PN1->blocks()) {
2784 if (!VisitedBBs.insert(IncomBB).second)
2785 continue; // Don't reprocess blocks that we have dealt with already.
2786 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2787 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2788 const APInt *C1, *C2;
2789 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2790 continue;
2791
2792 // Only one pair of phi operands is allowed for full recursion.
2793 if (UsedFullRecursion)
2794 return false;
2795
2796 Query RecQ = Q;
2797 RecQ.CxtI = IncomBB->getTerminator();
2798 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2799 return false;
2800 UsedFullRecursion = true;
2801 }
2802 return true;
2803}
2804
2805/// Return true if it is known that V1 != V2.
2806static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2807 const Query &Q) {
2808 if (V1 == V2)
2809 return false;
2810 if (V1->getType() != V2->getType())
2811 // We can't look through casts yet.
2812 return false;
2813
2814 if (Depth >= MaxAnalysisRecursionDepth)
2815 return false;
2816
2817 // See if we can recurse through (exactly one of) our operands. This
2818 // requires our operation be 1-to-1 and map every input value to exactly
2819 // one output value. Such an operation is invertible.
2820 auto *O1 = dyn_cast<Operator>(V1);
2821 auto *O2 = dyn_cast<Operator>(V2);
2822 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2823 if (auto Values = getInvertibleOperands(O1, O2))
2824 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2825
2826 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2827 const PHINode *PN2 = cast<PHINode>(V2);
2828 // FIXME: This is missing a generalization to handle the case where one is
2829 // a PHI and another one isn't.
2830 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2831 return true;
2832 };
2833 }
2834
2835 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2836 return true;
2837
2838 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2839 return true;
2840
2841 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2842 return true;
2843
2844 if (V1->getType()->isIntOrIntVectorTy()) {
2845 // Are any known bits in V1 contradictory to known bits in V2? If V1
2846 // has a known zero where V2 has a known one, they must not be equal.
2847 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2848 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2849
2850 if (Known1.Zero.intersects(Known2.One) ||
2851 Known2.Zero.intersects(Known1.One))
2852 return true;
2853 }
2854 return false;
2855}
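// Illustrative example of the known-bits fallback (hypothetical IR): if
//   %v1 = or i32 %x, 1    ; low bit known one
//   %v2 = shl i32 %y, 1   ; low bit known zero
// then Known2.Zero intersects Known1.One in bit 0, so %v1 != %v2 even though
// nothing else is known about %x or %y.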
2856
2857/// Return true if 'V & Mask' is known to be zero. We use this predicate to
2858/// simplify operations downstream. Mask is known to be zero for bits that V
2859/// cannot have.
2860///
2861/// This function is defined on values with integer type, values with pointer
2862/// type, and vectors of integers. In the case
2863/// where V is a vector, the mask, known zero, and known one values are the
2864/// same width as the vector element, and the bit is set only if it is true
2865/// for all of the elements in the vector.
2866bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2867 const Query &Q) {
2868 KnownBits Known(Mask.getBitWidth());
2869 computeKnownBits(V, Known, Depth, Q);
2870 return Mask.isSubsetOf(Known.Zero);
2871}
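// Illustrative example (hypothetical IR): for %v = shl i32 %x, 2 and
// Mask = 3, both mask bits lie in Known.Zero (a left shift by 2 clears the
// two low bits), so MaskedValueIsZero returns true.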
2872
2873// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2874// Returns the input and lower/upper bounds.
2875static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2876 const APInt *&CLow, const APInt *&CHigh) {
2877 assert(isa<Operator>(Select) &&
2878 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2879 "Input should be a Select!");
2880
2881 const Value *LHS = nullptr, *RHS = nullptr;
2882 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2883 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2884 return false;
2885
2886 if (!match(RHS, m_APInt(CLow)))
2887 return false;
2888
2889 const Value *LHS2 = nullptr, *RHS2 = nullptr;
2890 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2891 if (getInverseMinMaxFlavor(SPF) != SPF2)
2892 return false;
2893
2894 if (!match(RHS2, m_APInt(CHigh)))
2895 return false;
2896
2897 if (SPF == SPF_SMIN)
2898 std::swap(CLow, CHigh);
2899
2900 In = LHS2;
2901 return CLow->sle(*CHigh);
2902}
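// Illustrative example (hypothetical): a clamp written as
//   smax(smin(%x, 100), -50)
// matches with In = %x, CLow = -50 and CHigh = 100; CLow <= CHigh holds, so
// callers may bound the sign bits of the result by those of the two
// constants.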
2903
2904static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
2905 const APInt *&CLow,
2906 const APInt *&CHigh) {
2907 assert((II->getIntrinsicID() == Intrinsic::smin ||
2908 II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
2909
2910 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
2911 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
2912 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
2913 !match(II->getArgOperand(1), m_APInt(CLow)) ||
2914 !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
2915 return false;
2916
2917 if (II->getIntrinsicID() == Intrinsic::smin)
2918 std::swap(CLow, CHigh);
2919 return CLow->sle(*CHigh);
2920}
2921
2922/// For vector constants, loop over the elements and find the constant with the
2923/// minimum number of sign bits. Return 0 if the value is not a vector constant
2924/// or if any element was not analyzed; otherwise, return the count for the
2925/// element with the minimum number of sign bits.
2926static unsigned computeNumSignBitsVectorConstant(const Value *V,
2927 const APInt &DemandedElts,
2928 unsigned TyBits) {
2929 const auto *CV = dyn_cast<Constant>(V);
2930 if (!CV || !isa<FixedVectorType>(CV->getType()))
2931 return 0;
2932
2933 unsigned MinSignBits = TyBits;
2934 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2935 for (unsigned i = 0; i != NumElts; ++i) {
2936 if (!DemandedElts[i])
2937 continue;
2938 // If we find a non-ConstantInt, bail out.
2939 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2940 if (!Elt)
2941 return 0;
2942
2943 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2944 }
2945
2946 return MinSignBits;
2947}
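// Illustrative example (hypothetical): for the constant <2 x i8> <i8 -1, i8 3>,
// element 0 has 8 sign bits (all ones) and element 1 has 6 (00000011), so the
// function returns 6, the minimum over the demanded elements.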
2948
2949static unsigned ComputeNumSignBitsImpl(const Value *V,
2950 const APInt &DemandedElts,
2951 unsigned Depth, const Query &Q);
2952
2953static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2954 unsigned Depth, const Query &Q) {
2955 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2956 assert(Result > 0 && "At least one sign bit needs to be present!");
2957 return Result;
2958}
2959
2960/// Return the number of times the sign bit of the register is replicated into
2961/// the other bits. We know that at least 1 bit is always equal to the sign bit
2962/// (itself), but other cases can give us information. For example, immediately
2963/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2964/// other, so we return 3. For vectors, return the number of sign bits for the
2965/// vector element with the minimum number of known sign bits of the demanded
2966/// elements in the vector specified by DemandedElts.
2967static unsigned ComputeNumSignBitsImpl(const Value *V,
2968 const APInt &DemandedElts,
2969 unsigned Depth, const Query &Q) {
2970 Type *Ty = V->getType();
2971
2972 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2973 // vector
2974 if (isa<ScalableVectorType>(Ty))
2975 return 1;
2976
2977#ifndef NDEBUG
2978 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2979
2980 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2981 assert(
2982 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2983 "DemandedElt width should equal the fixed vector number of elements");
2984 } else {
2985 assert(DemandedElts == APInt(1, 1) &&
2986 "DemandedElt width should be 1 for scalars");
2987 }
2988#endif
2989
2990 // We return the minimum number of sign bits that are guaranteed to be present
2991 // in V, so for undef we have to conservatively return 1. We don't have the
2992 // same behavior for poison though -- that's a FIXME today.
2993
2994 Type *ScalarTy = Ty->getScalarType();
2995 unsigned TyBits = ScalarTy->isPointerTy() ?
2996 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2997 Q.DL.getTypeSizeInBits(ScalarTy);
2998
2999 unsigned Tmp, Tmp2;
3000 unsigned FirstAnswer = 1;
3001
3002 // Note that ConstantInt is handled by the general computeKnownBits case
3003 // below.
3004
3005 if (Depth == MaxAnalysisRecursionDepth)
3006 return 1;
3007
3008 if (auto *U = dyn_cast<Operator>(V)) {
3009 switch (Operator::getOpcode(V)) {
3010 default: break;
3011 case Instruction::SExt:
3012 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3013 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
3014
3015 case Instruction::SDiv: {
3016 const APInt *Denominator;
3017 // sdiv X, C -> adds log(C) sign bits.
3018 if (match(U->getOperand(1), m_APInt(Denominator))) {
3019
3020 // Ignore non-positive denominator.
3021 if (!Denominator->isStrictlyPositive())
3022 break;
3023
3024 // Calculate the incoming numerator bits.
3025 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3026
3027 // Add floor(log(C)) bits to the numerator bits.
3028 return std::min(TyBits, NumBits + Denominator->logBase2());
3029 }
3030 break;
3031 }
3032
3033 case Instruction::SRem: {
3034 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3035
3036 const APInt *Denominator;
3037 // srem X, C -> we know that the result is within [-C+1,C) when C is a
3038 // positive constant. This lets us put a lower bound on the number of sign
3039 // bits.
3040 if (match(U->getOperand(1), m_APInt(Denominator))) {
3041
3042 // Ignore non-positive denominator.
3043 if (Denominator->isStrictlyPositive()) {
3044 // Calculate the leading sign bit constraints by examining the
3045 // denominator. Given that the denominator is positive, there are two
3046 // cases:
3047 //
3048 // 1. The numerator is positive. The result range is [0,C) and
3049 // [0,C) u< (1 << ceilLogBase2(C)).
3050 //
3051 // 2. The numerator is negative. Then the result range is (-C,0] and
3052 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3053 //
3054 // Thus a lower bound on the number of sign bits is `TyBits -
3055 // ceilLogBase2(C)`.
3056
3057 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3058 Tmp = std::max(Tmp, ResBits);
3059 }
3060 }
3061 return Tmp;
3062 }
3063
3064 case Instruction::AShr: {
3065 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3066 // ashr X, C -> adds C sign bits. Vectors too.
3067 const APInt *ShAmt;
3068 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3069 if (ShAmt->uge(TyBits))
3070 break; // Bad shift.
3071 unsigned ShAmtLimited = ShAmt->getZExtValue();
3072 Tmp += ShAmtLimited;
3073 if (Tmp > TyBits) Tmp = TyBits;
3074 }
3075 return Tmp;
3076 }
3077 case Instruction::Shl: {
3078 const APInt *ShAmt;
3079 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3080 // shl destroys sign bits.
3081 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3082 if (ShAmt->uge(TyBits) || // Bad shift.
3083 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3084 Tmp2 = ShAmt->getZExtValue();
3085 return Tmp - Tmp2;
3086 }
3087 break;
3088 }
3089 case Instruction::And:
3090 case Instruction::Or:
3091 case Instruction::Xor: // NOT is handled here.
3092 // Logical binary ops preserve the number of sign bits at the worst.
3093 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3094 if (Tmp != 1) {
3095 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3096 FirstAnswer = std::min(Tmp, Tmp2);
3097 // We computed what we know about the sign bits as our first
3098 // answer. Now proceed to the generic code that uses
3099 // computeKnownBits, and pick whichever answer is better.
3100 }
3101 break;
3102
3103 case Instruction::Select: {
3104 // If we have a clamp pattern, we know that the number of sign bits will
3105 // be the minimum of the clamp min/max range.
3106 const Value *X;
3107 const APInt *CLow, *CHigh;
3108 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3109 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3110
3111 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3112 if (Tmp == 1) break;
3113 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3114 return std::min(Tmp, Tmp2);
3115 }
3116
3117 case Instruction::Add:
3118 // Add can have at most one carry bit. Thus we know that the output
3119 // is, at worst, one more bit than the inputs.
3120 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3121 if (Tmp == 1) break;
3122
3123 // Special case decrementing a value (ADD X, -1):
3124 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3125 if (CRHS->isAllOnesValue()) {
3126 KnownBits Known(TyBits);
3127 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3128
3129 // If the input is known to be 0 or 1, the output is 0/-1, which is
3130 // all sign bits set.
3131 if ((Known.Zero | 1).isAllOnes())
3132 return TyBits;
3133
3134 // If we are subtracting one from a positive number, there is no carry
3135 // out of the result.
3136 if (Known.isNonNegative())
3137 return Tmp;
3138 }
3139
3140 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3141 if (Tmp2 == 1) break;
3142 return std::min(Tmp, Tmp2) - 1;
3143
3144 case Instruction::Sub:
3145 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3146 if (Tmp2 == 1) break;
3147
3148 // Handle NEG.
3149 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3150 if (CLHS->isNullValue()) {
3151 KnownBits Known(TyBits);
3152 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3153 // If the input is known to be 0 or 1, the output is 0/-1, which is
3154 // all sign bits set.
3155 if ((Known.Zero | 1).isAllOnes())
3156 return TyBits;
3157
3158 // If the input is known to be positive (the sign bit is known clear),
3159 // the output of the NEG has the same number of sign bits as the
3160 // input.
3161 if (Known.isNonNegative())
3162 return Tmp2;
3163
3164 // Otherwise, we treat this like a SUB.
3165 }
3166
3167 // Sub can have at most one carry bit. Thus we know that the output
3168 // is, at worst, one more bit than the inputs.
3169 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3170 if (Tmp == 1) break;
3171 return std::min(Tmp, Tmp2) - 1;
3172
3173 case Instruction::Mul: {
3174 // The output of the Mul can be at most twice the valid bits in the
3175 // inputs.
3176 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3177 if (SignBitsOp0 == 1) break;
3178 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3179 if (SignBitsOp1 == 1) break;
3180 unsigned OutValidBits =
3181 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3182 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3183 }
3184
3185 case Instruction::PHI: {
3186 const PHINode *PN = cast<PHINode>(U);
3187 unsigned NumIncomingValues = PN->getNumIncomingValues();
3188 // Don't analyze large in-degree PHIs.
3189 if (NumIncomingValues > 4) break;
3190 // Unreachable blocks may have zero-operand PHI nodes.
3191 if (NumIncomingValues == 0) break;
3192
3193 // Take the minimum of all incoming values. This can't infinitely loop
3194 // because of our depth threshold.
3195 Query RecQ = Q;
3196 Tmp = TyBits;
3197 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3198 if (Tmp == 1) return Tmp;
3199 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3200 Tmp = std::min(
3201 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3202 }
3203 return Tmp;
3204 }
3205
3206 case Instruction::Trunc:
3207 // FIXME: it's tricky to do anything useful for this, but it is an
3208 // important case for targets like X86.
3209 break;
3210
3211 case Instruction::ExtractElement:
3212 // Look through extract element. At the moment we keep this simple and
3213 // skip tracking the specific element. But at least we might find
3214 // information valid for all elements of the vector (for example if the
3215 // vector is sign extended, shifted, etc.).
3216 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3217
3218 case Instruction::ShuffleVector: {
3219 // Collect the minimum number of sign bits that are shared by every vector
3220 // element referenced by the shuffle.
3221 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3222 if (!Shuf) {
3223 // FIXME: Add support for shufflevector constant expressions.
3224 return 1;
3225 }
3226 APInt DemandedLHS, DemandedRHS;
3227 // For undef elements, we don't know anything about the common state of
3228 // the shuffle result.
3229 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3230 return 1;
3231 Tmp = std::numeric_limits<unsigned>::max();
3232 if (!!DemandedLHS) {
3233 const Value *LHS = Shuf->getOperand(0);
3234 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3235 }
3236 // If we don't know anything, early out and try computeKnownBits
3237 // fall-back.
3238 if (Tmp == 1)
3239 break;
3240 if (!!DemandedRHS) {
3241 const Value *RHS = Shuf->getOperand(1);
3242 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3243 Tmp = std::min(Tmp, Tmp2);
3244 }
3245 // If we don't know anything, early out and try computeKnownBits
3246 // fall-back.
3247 if (Tmp == 1)
3248 break;
3249 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3250 return Tmp;
3251 }
3252 case Instruction::Call: {
3253 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3254 switch (II->getIntrinsicID()) {
3255 default: break;
3256 case Intrinsic::abs:
3257 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3258 if (Tmp == 1) break;
3259
3260 // Absolute value reduces number of sign bits by at most 1.
3261 return Tmp - 1;
3262 case Intrinsic::smin:
3263 case Intrinsic::smax: {
3264 const APInt *CLow, *CHigh;
3265 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
3266 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3267 }
3268 }
3269 }
3270 }
3271 }
3272 }
3273
3274 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3275 // use this information.
3276
3277 // If we can examine all elements of a vector constant successfully, we're
3278 // done (we can't do any better than that). If not, keep trying.
3279 if (unsigned VecSignBits =
3280 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3281 return VecSignBits;
3282
3283 KnownBits Known(TyBits);
3284 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3285
3286 // If we know that the sign bit is either zero or one, determine the number of
3287 // identical bits in the top of the input value.
3288 return std::max(FirstAnswer, Known.countMinSignBits());
3289}
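// Illustrative example (hypothetical IR): for %s = ashr i32 %x, 28, the AShr
// case starts from at least one sign bit in %x and adds the shift amount, so
// %s is known to have at least 29 identical top bits.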
3290
3291Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3292 const TargetLibraryInfo *TLI) {
3293 const Function *F = CB.getCalledFunction();
3294 if (!F)
3295 return Intrinsic::not_intrinsic;
3296
3297 if (F->isIntrinsic())
3298 return F->getIntrinsicID();
3299
3300 // We are going to infer semantics of a library function based on mapping it
3301 // to an LLVM intrinsic. Check that the library function is available from
3302 // this callbase and in this environment.
3303 LibFunc Func;
3304 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3305 !CB.onlyReadsMemory())
3306 return Intrinsic::not_intrinsic;
3307
3308 switch (Func) {
3309 default:
3310 break;
3311 case LibFunc_sin:
3312 case LibFunc_sinf:
3313 case LibFunc_sinl:
3314 return Intrinsic::sin;
3315 case LibFunc_cos:
3316 case LibFunc_cosf:
3317 case LibFunc_cosl:
3318 return Intrinsic::cos;
3319 case LibFunc_exp:
3320 case LibFunc_expf:
3321 case LibFunc_expl:
3322 return Intrinsic::exp;
3323 case LibFunc_exp2:
3324 case LibFunc_exp2f:
3325 case LibFunc_exp2l:
3326 return Intrinsic::exp2;
3327 case LibFunc_log:
3328 case LibFunc_logf:
3329 case LibFunc_logl:
3330 return Intrinsic::log;
3331 case LibFunc_log10:
3332 case LibFunc_log10f:
3333 case LibFunc_log10l:
3334 return Intrinsic::log10;
3335 case LibFunc_log2:
3336 case LibFunc_log2f:
3337 case LibFunc_log2l:
3338 return Intrinsic::log2;
3339 case LibFunc_fabs:
3340 case LibFunc_fabsf:
3341 case LibFunc_fabsl:
3342 return Intrinsic::fabs;
3343 case LibFunc_fmin:
3344 case LibFunc_fminf:
3345 case LibFunc_fminl:
3346 return Intrinsic::minnum;
3347 case LibFunc_fmax:
3348 case LibFunc_fmaxf:
3349 case LibFunc_fmaxl:
3350 return Intrinsic::maxnum;
3351 case LibFunc_copysign:
3352 case LibFunc_copysignf:
3353 case LibFunc_copysignl:
3354 return Intrinsic::copysign;
3355 case LibFunc_floor:
3356 case LibFunc_floorf:
3357 case LibFunc_floorl:
3358 return Intrinsic::floor;
3359 case LibFunc_ceil:
3360 case LibFunc_ceilf:
3361 case LibFunc_ceill:
3362 return Intrinsic::ceil;
3363 case LibFunc_trunc:
3364 case LibFunc_truncf:
3365 case LibFunc_truncl:
3366 return Intrinsic::trunc;
3367 case LibFunc_rint:
3368 case LibFunc_rintf:
3369 case LibFunc_rintl:
3370 return Intrinsic::rint;
3371 case LibFunc_nearbyint:
3372 case LibFunc_nearbyintf:
3373 case LibFunc_nearbyintl:
3374 return Intrinsic::nearbyint;
3375 case LibFunc_round:
3376 case LibFunc_roundf:
3377 case LibFunc_roundl:
3378 return Intrinsic::round;
3379 case LibFunc_roundeven:
3380 case LibFunc_roundevenf:
3381 case LibFunc_roundevenl:
3382 return Intrinsic::roundeven;
3383 case LibFunc_pow:
3384 case LibFunc_powf:
3385 case LibFunc_powl:
3386 return Intrinsic::pow;
3387 case LibFunc_sqrt:
3388 case LibFunc_sqrtf:
3389 case LibFunc_sqrtl:
3390 return Intrinsic::sqrt;
3391 }
3392
3393 return Intrinsic::not_intrinsic;
3394}
3395
3396/// Return true if we can prove that the specified FP value is never equal to
3397/// -0.0.
3398/// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3399/// that a value is not -0.0. It only guarantees that -0.0 may be treated
3400/// the same as +0.0 in floating-point ops.
3401bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3402 unsigned Depth) {
3403 if (auto *CFP = dyn_cast<ConstantFP>(V))
3404 return !CFP->getValueAPF().isNegZero();
3405
3406 if (Depth == MaxAnalysisRecursionDepth)
3407 return false;
3408
3409 auto *Op = dyn_cast<Operator>(V);
3410 if (!Op)
3411 return false;
3412
3413 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3414 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3415 return true;
3416
3417 // sitofp and uitofp turn into +0.0 for zero.
3418 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3419 return true;
3420
3421 if (auto *Call = dyn_cast<CallInst>(Op)) {
3422 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3423 switch (IID) {
3424 default:
3425 break;
3426 // sqrt(-0.0) = -0.0, no other negative results are possible.
3427 case Intrinsic::sqrt:
3428 case Intrinsic::canonicalize:
3429 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3430 case Intrinsic::experimental_constrained_sqrt: {
3431 // NOTE: This rounding mode restriction may be too strict.
3432 const auto *CI = cast<ConstrainedFPIntrinsic>(Call);
3433 if (CI->getRoundingMode() == RoundingMode::NearestTiesToEven)
3434 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3435 else
3436 return false;
3437 }
3438 // fabs(x) != -0.0
3439 case Intrinsic::fabs:
3440 return true;
3441 // sitofp and uitofp turn into +0.0 for zero.
3442 case Intrinsic::experimental_constrained_sitofp:
3443 case Intrinsic::experimental_constrained_uitofp:
3444 return true;
3445 }
3446 }
3447
3448 return false;
3449}
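// Illustrative example (hypothetical IR): %r = fadd float %x, 0.000000e+00 can
// never be -0.0, because (fadd x, +0.0) yields +0.0 even when x is -0.0;
// likewise uitofp/sitofp of zero produce +0.0, so both cases above return true
// without recursing.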
3450
3451/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3452/// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3453/// bit despite comparing equal.
3454static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3455 const TargetLibraryInfo *TLI,
3456 bool SignBitOnly,
3457 unsigned Depth) {
3458 // TODO: This function does not do the right thing when SignBitOnly is true
3459 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3460 // which flips the sign bits of NaNs. See
3461 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3462
3463 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3464 return !CFP->getValueAPF().isNegative() ||
3465 (!SignBitOnly && CFP->getValueAPF().isZero());
3466 }
3467
3468 // Handle vector of constants.
3469 if (auto *CV = dyn_cast<Constant>(V)) {
3470 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3471 unsigned NumElts = CVFVTy->getNumElements();
3472 for (unsigned i = 0; i != NumElts; ++i) {
3473 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3474 if (!CFP)
3475 return false;
3476 if (CFP->getValueAPF().isNegative() &&
3477 (SignBitOnly || !CFP->getValueAPF().isZero()))
3478 return false;
3479 }
3480
3481 // All non-negative ConstantFPs.
3482 return true;
3483 }
3484 }
3485
3486 if (Depth == MaxAnalysisRecursionDepth)
3487 return false;
3488
3489 const Operator *I = dyn_cast<Operator>(V);
3490 if (!I)
3491 return false;
3492
3493 switch (I->getOpcode()) {
3494 default:
3495 break;
3496 // Unsigned integers are always nonnegative.
3497 case Instruction::UIToFP:
3498 return true;
3499 case Instruction::FMul:
3500 case Instruction::FDiv:
3501 // X * X is always non-negative or a NaN.
3502 // X / X is always exactly 1.0 or a NaN.
3503 if (I->getOperand(0) == I->getOperand(1) &&
3504 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3505 return true;
3506
3507 LLVM_FALLTHROUGH[[gnu::fallthrough]];
3508 case Instruction::FAdd:
3509 case Instruction::FRem:
3510 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3511 Depth + 1) &&
3512 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3513 Depth + 1);
3514 case Instruction::Select:
3515 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3516 Depth + 1) &&
3517 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3518 Depth + 1);
3519 case Instruction::FPExt:
3520 case Instruction::FPTrunc:
3521 // Widening/narrowing never change sign.
3522 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3523 Depth + 1);
3524 case Instruction::ExtractElement:
3525 // Look through extract element. At the moment we keep this simple and skip
3526 // tracking the specific element. But at least we might find information
3527 // valid for all elements of the vector.
3528 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3529 Depth + 1);
3530 case Instruction::Call:
3531 const auto *CI = cast<CallInst>(I);
3532 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3533 switch (IID) {
3534 default:
3535 break;
3536 case Intrinsic::maxnum: {
3537 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3538 auto isPositiveNum = [&](Value *V) {
3539 if (SignBitOnly) {
3540 // With SignBitOnly, this is tricky because the result of
3541 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3542 // a constant strictly greater than 0.0.
3543 const APFloat *C;
3544 return match(V, m_APFloat(C)) &&
3545 *C > APFloat::getZero(C->getSemantics());
3546 }
3547
3548 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3549 // maxnum can't be ordered-less-than-zero.
3550 return isKnownNeverNaN(V, TLI) &&
3551 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3552 };
3553
3554 // TODO: This could be improved. We could also check that neither operand
3555 // has its sign bit set (and at least 1 is not-NAN?).
3556 return isPositiveNum(V0) || isPositiveNum(V1);
3557 }
3558
3559 case Intrinsic::maximum:
3560 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3561 Depth + 1) ||
3562 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3563 Depth + 1);
3564 case Intrinsic::minnum:
3565 case Intrinsic::minimum:
3566 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3567 Depth + 1) &&
3568 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3569 Depth + 1);
3570 case Intrinsic::exp:
3571 case Intrinsic::exp2:
3572 case Intrinsic::fabs:
3573 return true;
3574
3575 case Intrinsic::sqrt:
3576 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3577 if (!SignBitOnly)
3578 return true;
3579 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3580 CannotBeNegativeZero(CI->getOperand(0), TLI));
3581
3582 case Intrinsic::powi:
3583 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3584 // powi(x,n) is non-negative if n is even.
3585 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3586 return true;
3587 }
3588 // TODO: This is not correct. Given that exp is an integer, here are the
3589 // ways that pow can return a negative value:
3590 //
3591 // pow(x, exp) --> negative if exp is odd and x is negative.
3592 // pow(-0, exp) --> -inf if exp is negative odd.
3593 // pow(-0, exp) --> -0 if exp is positive odd.
3594 // pow(-inf, exp) --> -0 if exp is negative odd.
3595 // pow(-inf, exp) --> -inf if exp is positive odd.
3596 //
3597 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3598 // but we must return false if x == -0. Unfortunately we do not currently
3599 // have a way of expressing this constraint. See details in
3600 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3601 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3602 Depth + 1);
3603
3604 case Intrinsic::fma:
3605 case Intrinsic::fmuladd:
3606 // x*x+y is non-negative if y is non-negative.
3607 return I->getOperand(0) == I->getOperand(1) &&
3608 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3609 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3610 Depth + 1);
3611 }
3612 break;
3613 }
3614 return false;
3615}
3616
3617bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3618 const TargetLibraryInfo *TLI) {
3619 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3620}
3621
3622bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3623 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3624}
3625
3626bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3627 unsigned Depth) {
3628 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3629
3630 // If we're told that infinities won't happen, assume they won't.
3631 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3632 if (FPMathOp->hasNoInfs())
3633 return true;
3634
3635 // Handle scalar constants.
3636 if (auto *CFP = dyn_cast<ConstantFP>(V))
3637 return !CFP->isInfinity();
3638
3639 if (Depth == MaxAnalysisRecursionDepth)
3640 return false;
3641
3642 if (auto *Inst = dyn_cast<Instruction>(V)) {
3643 switch (Inst->getOpcode()) {
3644 case Instruction::Select: {
3645 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3646 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3647 }
3648 case Instruction::SIToFP:
3649 case Instruction::UIToFP: {
3650 // Get width of largest magnitude integer (remove a bit if signed).
3651 // This still works for a signed minimum value because the largest FP
3652 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3653 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3654 if (Inst->getOpcode() == Instruction::SIToFP)
3655 --IntSize;
3656
3657 // If the exponent of the largest finite FP value can hold the largest
3658 // integer, the result of the cast must be finite.
3659 Type *FPTy = Inst->getType()->getScalarType();
3660 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3661 }
3662 default:
3663 break;
3664 }
3665 }
3666
3667 // try to handle fixed width vector constants
3668 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3669 if (VFVTy && isa<Constant>(V)) {
3670 // For vectors, verify that each element is not infinity.
3671 unsigned NumElts = VFVTy->getNumElements();
3672 for (unsigned i = 0; i != NumElts; ++i) {
3673 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3674 if (!Elt)
3675 return false;
3676 if (isa<UndefValue>(Elt))
3677 continue;
3678 auto *CElt = dyn_cast<ConstantFP>(Elt);
3679 if (!CElt || CElt->isInfinity())
3680 return false;
3681 }
3682 // All elements were confirmed non-infinity or undefined.
3683 return true;
3684 }
3685
3686 // was not able to prove that V never contains infinity
3687 return false;
3688}
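// Illustrative example (hypothetical IR): %f = uitofp i16 %x to float is never
// infinity: the exponent of the largest finite float (ilogb = 127) can hold a
// 16-bit magnitude, so the SIToFP/UIToFP case above returns true.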
3689
3690bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3691 unsigned Depth) {
3692 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3693
3694 // If we're told that NaNs won't happen, assume they won't.
3695 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3696 if (FPMathOp->hasNoNaNs())
3697 return true;
3698
3699 // Handle scalar constants.
3700 if (auto *CFP = dyn_cast<ConstantFP>(V))
3701 return !CFP->isNaN();
3702
3703 if (Depth == MaxAnalysisRecursionDepth)
3704 return false;
3705
3706 if (auto *Inst = dyn_cast<Instruction>(V)) {
3707 switch (Inst->getOpcode()) {
3708 case Instruction::FAdd:
3709 case Instruction::FSub:
3710 // Adding positive and negative infinity produces NaN.
3711 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3712 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3713 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3714 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3715
3716 case Instruction::FMul:
3717 // Zero multiplied with infinity produces NaN.
3718 // FIXME: If neither side can be zero fmul never produces NaN.
3719 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3720 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3721 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3722 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3723
3724 case Instruction::FDiv:
3725 case Instruction::FRem:
3726 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3727 return false;
3728
3729 case Instruction::Select: {
3730 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3731 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3732 }
3733 case Instruction::SIToFP:
3734 case Instruction::UIToFP:
3735 return true;
3736 case Instruction::FPTrunc:
3737 case Instruction::FPExt:
3738 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3739 default:
3740 break;
3741 }
3742 }
3743
3744 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3745 switch (II->getIntrinsicID()) {
3746 case Intrinsic::canonicalize:
3747 case Intrinsic::fabs:
3748 case Intrinsic::copysign:
3749 case Intrinsic::exp:
3750 case Intrinsic::exp2:
3751 case Intrinsic::floor:
3752 case Intrinsic::ceil:
3753 case Intrinsic::trunc:
3754 case Intrinsic::rint:
3755 case Intrinsic::nearbyint:
3756 case Intrinsic::round:
3757 case Intrinsic::roundeven:
3758 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3759 case Intrinsic::sqrt:
3760 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3761 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3762 case Intrinsic::minnum:
3763 case Intrinsic::maxnum:
3764 // If either operand is not NaN, the result is not NaN.
3765 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3766 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3767 default:
3768 return false;
3769 }
3770 }
3771
3772 // Try to handle fixed width vector constants
3773 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3774 if (VFVTy && isa<Constant>(V)) {
3775 // For vectors, verify that each element is not NaN.
3776 unsigned NumElts = VFVTy->getNumElements();
3777 for (unsigned i = 0; i != NumElts; ++i) {
3778 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3779 if (!Elt)
3780 return false;
3781 if (isa<UndefValue>(Elt))
3782 continue;
3783 auto *CElt = dyn_cast<ConstantFP>(Elt);
3784 if (!CElt || CElt->isNaN())
3785 return false;
3786 }
3787 // All elements were confirmed not-NaN or undefined.
3788 return true;
3789 }
3790
3791 // Was not able to prove that V never contains NaN
3792 return false;
3793}
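// Illustrative example (hypothetical IR): for
//   %m = call float @llvm.maxnum.f32(float %a, float 1.000000e+00)
// only one operand needs to be known non-NaN; the constant 1.0 satisfies that,
// so the call is known never to return NaN regardless of %a (maxnum returns
// the other operand when one input is NaN).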
3794
3795Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3796
3797 // All byte-wide stores are splatable, even of arbitrary variables.
3798 if (V->getType()->isIntegerTy(8))
3799 return V;
3800
3801 LLVMContext &Ctx = V->getContext();
3802
3803 // Undef don't care.
3804 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3805 if (isa<UndefValue>(V))
3806 return UndefInt8;
3807
3808 // Return Undef for zero-sized type.
3809 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3810 return UndefInt8;
3811
3812 Constant *C = dyn_cast<Constant>(V);
3813 if (!C) {
3814 // Conceptually, we could handle things like:
3815 // %a = zext i8 %X to i16
3816 // %b = shl i16 %a, 8
3817 // %c = or i16 %a, %b
3818 // but until there is an example that actually needs this, it doesn't seem
3819 // worth worrying about.
3820 return nullptr;
3821 }
3822
3823 // Handle 'null' ConstantArrayZero etc.
3824 if (C->isNullValue())
3825 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3826
3827 // Constant floating-point values can be handled as integer values if the
3828 // corresponding integer value is "byteable". An important case is 0.0.
3829 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3830 Type *Ty = nullptr;
3831 if (CFP->getType()->isHalfTy())
3832 Ty = Type::getInt16Ty(Ctx);
3833 else if (CFP->getType()->isFloatTy())
3834 Ty = Type::getInt32Ty(Ctx);
3835 else if (CFP->getType()->isDoubleTy())
3836 Ty = Type::getInt64Ty(Ctx);
3837 // Don't handle long double formats, which have strange constraints.
3838 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3839 : nullptr;
3840 }
3841
3842 // We can handle constant integers that are multiple of 8 bits.
3843 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3844 if (CI->getBitWidth() % 8 == 0) {
3845 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3846 if (!CI->getValue().isSplat(8))
3847 return nullptr;
3848 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3849 }
3850 }
3851
3852 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3853 if (CE->getOpcode() == Instruction::IntToPtr) {
3854 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3855 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3856 return isBytewiseValue(
3857 ConstantExpr::getIntegerCast(CE->getOperand(0),
3858 Type::getIntNTy(Ctx, BitWidth), false),
3859 DL);
3860 }
3861 }
3862 }
3863
3864 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3865 if (LHS == RHS)
3866 return LHS;
3867 if (!LHS || !RHS)
3868 return nullptr;
3869 if (LHS == UndefInt8)
3870 return RHS;
3871 if (RHS == UndefInt8)
3872 return LHS;
3873 return nullptr;
3874 };
3875
3876 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3877 Value *Val = UndefInt8;
3878 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3879 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3880 return nullptr;
3881 return Val;
3882 }
3883
3884 if (isa<ConstantAggregate>(C)) {
3885 Value *Val = UndefInt8;
3886 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3887 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3888 return nullptr;
3889 return Val;
3890 }
3891
3892 // Don't try to handle the handful of other constants.
3893 return nullptr;
3894}
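// Illustrative example (hypothetical): i32 0x01010101 splats to the byte value
// i8 1, and double 0.0 splats to i8 0 via the null-value case, so either can
// back a memset; i32 0x01020304 is not a byte splat and yields nullptr.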
3895
3896// This is the recursive version of BuildSubAggregate. It takes a few different
3897// arguments. Idxs is the index within the nested struct From that we are
3898// looking at now (which is of type IndexedType). IdxSkip is the number of
3899// indices from Idxs that should be left out when inserting into the resulting
3900// struct. To is the result struct built so far, new insertvalue instructions
3901 // struct. To is the result struct built so far; new insertvalue instructions
3902static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3903 SmallVectorImpl<unsigned> &Idxs,
3904 unsigned IdxSkip,
3905 Instruction *InsertBefore) {
3906 StructType *STy = dyn_cast<StructType>(IndexedType);
1: Assuming 'IndexedType' is a 'StructType'
3907 if (STy) {
1.1: 'STy' is non-null
2: Taking true branch
3908 // Save the original To argument so we can modify it
3909 Value *OrigTo = To;
3910 // General case, the type indexed by Idxs is a struct
3911 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3: Assuming 'i' is equal to 'e'
4: Loop condition is false. Execution continues on line 3930
3912 // Process each struct element recursively
3913 Idxs.push_back(i);
3914 Value *PrevTo = To;
3915 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3916 InsertBefore);
3917 Idxs.pop_back();
3918 if (!To) {
3919 // Couldn't find any inserted value for this index? Cleanup
3920 while (PrevTo != OrigTo) {
3921 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3922 PrevTo = Del->getAggregateOperand();
3923 Del->eraseFromParent();
3924 }
3925 // Stop processing elements
3926 break;
3927 }
3928 }
3929 // If we successfully found a value for each of our subaggregates
3930 if (To)
5: Assuming 'To' is null
6: Taking false branch
3931 return To;
3932 }
3933 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3934 // the struct's elements had a value that was inserted directly. In the latter
3935 // case, perhaps we can't determine each of the subelements individually, but
3936 // we might be able to find the complete struct somewhere.
3937
3938 // Find the value that is at that particular spot
3939 Value *V = FindInsertedValue(From, Idxs);
3940
3941 if (!V)
7
Assuming 'V' is non-null
8
Taking false branch
3942 return nullptr;
3943
3944 // Insert the value in the new (sub) aggregate
3945 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
9
Passing null pointer value via 1st parameter 'Agg'
10
Calling 'InsertValueInst::Create'
3946 "tmp", InsertBefore);
3947}
3948
3949// This helper takes a nested struct and extracts a part of it (which is again a
3950// struct) into a new value. For example, given the struct:
3951// { a, { b, { c, d }, e } }
3952// and the indices "1, 1" this returns
3953// { c, d }.
3954//
3955// It does this by inserting an insertvalue for each element in the resulting
3956// struct, as opposed to just inserting a single struct. This will only work if
3957// each of the elements of the substruct are known (ie, inserted into From by an
3958// insertvalue instruction somewhere).
3959//
3960// All inserted insertvalue instructions are inserted before InsertBefore
3961static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3962 Instruction *InsertBefore) {
3963  assert(InsertBefore && "Must have someplace to insert!");
3964 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3965 idx_range);
3966 Value *To = UndefValue::get(IndexedType);
3967 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3968 unsigned IdxSkip = Idxs.size();
3969
3970 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3971}
3972
3973/// Given an aggregate and a sequence of indices, see if the scalar value
3974/// indexed is already around as a register, for example if it was inserted
3975/// directly into the aggregate.
3976///
3977/// If InsertBefore is not null, this function will duplicate (modified)
3978/// insertvalues when a part of a nested struct is extracted.
3979Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3980 Instruction *InsertBefore) {
3981 // Nothing to index? Just return V then (this is useful at the end of our
3982 // recursion).
3983 if (idx_range.empty())
3984 return V;
3985 // We have indices, so V should have an indexable type.
3986  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3987         "Not looking at a struct or array?");
3988  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3989         "Invalid indices for type?");
3990
3991 if (Constant *C = dyn_cast<Constant>(V)) {
3992 C = C->getAggregateElement(idx_range[0]);
3993 if (!C) return nullptr;
3994 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3995 }
3996
3997 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3998 // Loop the indices for the insertvalue instruction in parallel with the
3999 // requested indices
4000 const unsigned *req_idx = idx_range.begin();
4001 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
4002 i != e; ++i, ++req_idx) {
4003 if (req_idx == idx_range.end()) {
4004 // We can't handle this without inserting insertvalues
4005 if (!InsertBefore)
4006 return nullptr;
4007
4008 // The requested index identifies a part of a nested aggregate. Handle
4009 // this specially. For example,
4010 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
4011 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
4012 // %C = extractvalue {i32, { i32, i32 } } %B, 1
4013 // This can be changed into
4014 // %A = insertvalue {i32, i32 } undef, i32 10, 0
4015 // %C = insertvalue {i32, i32 } %A, i32 11, 1
4016 // which allows the unused 0,0 element from the nested struct to be
4017 // removed.
4018 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
4019 InsertBefore);
4020 }
4021
4022      // This insertvalue inserts something other than what we are looking
4023      // for. In that case, see if the aggregate value it inserts into has the
4024      // value we are looking for instead.
4025 if (*req_idx != *i)
4026 return FindInsertedValue(I->getAggregateOperand(), idx_range,
4027 InsertBefore);
4028 }
4029 // If we end up here, the indices of the insertvalue match with those
4030 // requested (though possibly only partially). Now we recursively look at
4031 // the inserted value, passing any remaining indices.
4032 return FindInsertedValue(I->getInsertedValueOperand(),
4033 makeArrayRef(req_idx, idx_range.end()),
4034 InsertBefore);
4035 }
4036
4037 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4038 // If we're extracting a value from an aggregate that was extracted from
4039 // something else, we can extract from that something else directly instead.
4040 // However, we will need to chain I's indices with the requested indices.
4041
4042 // Calculate the number of indices required
4043 unsigned size = I->getNumIndices() + idx_range.size();
4044 // Allocate some space to put the new indices in
4045 SmallVector<unsigned, 5> Idxs;
4046 Idxs.reserve(size);
4047 // Add indices from the extract value instruction
4048 Idxs.append(I->idx_begin(), I->idx_end());
4049
4050 // Add requested indices
4051 Idxs.append(idx_range.begin(), idx_range.end());
4052
4053    assert(Idxs.size() == size
4054           && "Number of indices added not correct?");
4055
4056 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4057 }
4058  // Otherwise, we don't know (e.g., when extracting from a function return
4059  // value or a load instruction).
4060 return nullptr;
4061}
4062
4063bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4064 unsigned CharSize) {
4065 // Make sure the GEP has exactly three arguments.
4066 if (GEP->getNumOperands() != 3)
4067 return false;
4068
4069  // Make sure the index-ee is a pointer to an array of \p CharSize
4070  // integers.
4071 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4072 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4073 return false;
4074
4075 // Check to make sure that the first operand of the GEP is an integer and
4076 // has value 0 so that we are sure we're indexing into the initializer.
4077 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4078 if (!FirstIdx || !FirstIdx->isZero())
4079 return false;
4080
4081 return true;
4082}
4083
4084bool llvm::getConstantDataArrayInfo(const Value *V,
4085 ConstantDataArraySlice &Slice,
4086 unsigned ElementSize, uint64_t Offset) {
4087  assert(V);
4088
4089 // Look through bitcast instructions and geps.
4090 V = V->stripPointerCasts();
4091
4092 // If the value is a GEP instruction or constant expression, treat it as an
4093 // offset.
4094 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4095 // The GEP operator should be based on a pointer to string constant, and is
4096 // indexing into the string constant.
4097 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
4098 return false;
4099
4100 // If the second index isn't a ConstantInt, then this is a variable index
4101 // into the array. If this occurs, we can't say anything meaningful about
4102 // the string.
4103 uint64_t StartIdx = 0;
4104 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
4105 StartIdx = CI->getZExtValue();
4106 else
4107 return false;
4108 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
4109 StartIdx + Offset);
4110 }
4111
4112  // The GEP, whether a constant expression or an instruction, must reference
4113  // a global variable that is a constant and is initialized. The referenced
4114  // constant initializer is the array that we'll use for optimization.
4115 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
4116 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4117 return false;
4118
4119 const ConstantDataArray *Array;
4120 ArrayType *ArrayTy;
4121 if (GV->getInitializer()->isNullValue()) {
4122 Type *GVTy = GV->getValueType();
4123 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
4124 // A zeroinitializer for the array; there is no ConstantDataArray.
4125 Array = nullptr;
4126 } else {
4127 const DataLayout &DL = GV->getParent()->getDataLayout();
4128 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4129 uint64_t Length = SizeInBytes / (ElementSize / 8);
4130 if (Length <= Offset)
4131 return false;
4132
4133 Slice.Array = nullptr;
4134 Slice.Offset = 0;
4135 Slice.Length = Length - Offset;
4136 return true;
4137 }
4138 } else {
4139 // This must be a ConstantDataArray.
4140 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
4141 if (!Array)
4142 return false;
4143 ArrayTy = Array->getType();
4144 }
4145 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4146 return false;
4147
4148 uint64_t NumElts = ArrayTy->getArrayNumElements();
4149 if (Offset > NumElts)
4150 return false;
4151
4152 Slice.Array = Array;
4153 Slice.Offset = Offset;
4154 Slice.Length = NumElts - Offset;
4155 return true;
4156}
4157
4158/// This function extracts the bytes of the null-terminated C string pointed to
4159/// by V. If successful, it returns true and returns the string in Str.
4160/// If unsuccessful, it returns false.
4161bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4162 uint64_t Offset, bool TrimAtNul) {
4163 ConstantDataArraySlice Slice;
4164 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4165 return false;
4166
4167 if (Slice.Array == nullptr) {
4168 if (TrimAtNul) {
4169 Str = StringRef();
4170 return true;
4171 }
4172 if (Slice.Length == 1) {
4173 Str = StringRef("", 1);
4174 return true;
4175 }
4176 // We cannot instantiate a StringRef as we do not have an appropriate string
4177 // of 0s at hand.
4178 return false;
4179 }
4180
4181 // Start out with the entire array in the StringRef.
4182 Str = Slice.Array->getAsString();
4183 // Skip over 'offset' bytes.
4184 Str = Str.substr(Slice.Offset);
4185
4186 if (TrimAtNul) {
4187 // Trim off the \0 and anything after it. If the array is not nul
4188 // terminated, we just return the whole end of string. The client may know
4189 // some other way that the string is length-bound.
4190 Str = Str.substr(0, Str.find('\0'));
4191 }
4192 return true;
4193}
4194
4195// These next two are very similar to the above, but also look through PHI
4196// nodes.
4197// TODO: See if we can integrate these two together.
4198
4199/// If we can compute the length of the string pointed to by
4200/// the specified pointer, return 'len+1'. If we can't, return 0.
4201static uint64_t GetStringLengthH(const Value *V,
4202 SmallPtrSetImpl<const PHINode*> &PHIs,
4203 unsigned CharSize) {
4204 // Look through noop bitcast instructions.
4205 V = V->stripPointerCasts();
4206
4207 // If this is a PHI node, there are two cases: either we have already seen it
4208 // or we haven't.
4209 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4210 if (!PHIs.insert(PN).second)
4211 return ~0ULL; // already in the set.
4212
4213 // If it was new, see if all the input strings are the same length.
4214 uint64_t LenSoFar = ~0ULL;
4215 for (Value *IncValue : PN->incoming_values()) {
4216 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4217 if (Len == 0) return 0; // Unknown length -> unknown.
4218
4219 if (Len == ~0ULL) continue;
4220
4221 if (Len != LenSoFar && LenSoFar != ~0ULL)
4222 return 0; // Disagree -> unknown.
4223 LenSoFar = Len;
4224 }
4225
4226 // Success, all agree.
4227 return LenSoFar;
4228 }
4229
4230 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4231 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4232 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4233 if (Len1 == 0) return 0;
4234 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4235 if (Len2 == 0) return 0;
4236 if (Len1 == ~0ULL) return Len2;
4237 if (Len2 == ~0ULL) return Len1;
4238 if (Len1 != Len2) return 0;
4239 return Len1;
4240 }
4241
4242 // Otherwise, see if we can read the string.
4243 ConstantDataArraySlice Slice;
4244 if (!getConstantDataArrayInfo(V, Slice, CharSize))
4245 return 0;
4246
4247 if (Slice.Array == nullptr)
4248 return 1;
4249
4250 // Search for nul characters
4251 unsigned NullIndex = 0;
4252 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4253 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4254 break;
4255 }
4256
4257 return NullIndex + 1;
4258}
4259
4260/// If we can compute the length of the string pointed to by
4261/// the specified pointer, return 'len+1'. If we can't, return 0.
4262uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4263 if (!V->getType()->isPointerTy())
4264 return 0;
4265
4266 SmallPtrSet<const PHINode*, 32> PHIs;
4267 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4268 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4269 // an empty string as a length.
4270 return Len == ~0ULL ? 1 : Len;
4271}
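As a quick illustration of the convention documented above (length plus one on success, zero on failure), here is a standalone analog with no LLVM dependencies; the helper name is hypothetical and this is an editor's sketch, not the LLVM implementation.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical stand-in for the "len + 1 or 0" convention described above.
static uint64_t sketchGetStringLength(const char *S) {
  if (!S)
    return 0;                        // "could not compute" maps to 0
  return std::strlen(S) + 1;         // the terminating nul byte is counted
}

int main() {
  std::printf("%llu\n", (unsigned long long)sketchGetStringLength("foo"));   // 4
  std::printf("%llu\n", (unsigned long long)sketchGetStringLength(nullptr)); // 0
  return 0;
}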
4272
4273const Value *
4274llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4275 bool MustPreserveNullness) {
4276  assert(Call &&
4277         "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4278 if (const Value *RV = Call->getReturnedArgOperand())
4279 return RV;
4280  // This can be used only as an aliasing property.
4281 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4282 Call, MustPreserveNullness))
4283 return Call->getArgOperand(0);
4284 return nullptr;
4285}
4286
4287bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4288 const CallBase *Call, bool MustPreserveNullness) {
4289 switch (Call->getIntrinsicID()) {
4290 case Intrinsic::launder_invariant_group:
4291 case Intrinsic::strip_invariant_group:
4292 case Intrinsic::aarch64_irg:
4293 case Intrinsic::aarch64_tagp:
4294 return true;
4295 case Intrinsic::ptrmask:
4296 return !MustPreserveNullness;
4297 default:
4298 return false;
4299 }
4300}
4301
4302/// \p PN defines a loop-variant pointer to an object. Check if the
4303/// previous iteration of the loop was referring to the same object as \p PN.
4304static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4305 const LoopInfo *LI) {
4306 // Find the loop-defined value.
4307 Loop *L = LI->getLoopFor(PN->getParent());
4308 if (PN->getNumIncomingValues() != 2)
4309 return true;
4310
4311 // Find the value from previous iteration.
4312 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4313 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4314 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4315 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4316 return true;
4317
4318 // If a new pointer is loaded in the loop, the pointer references a different
4319 // object in every iteration. E.g.:
4320 // for (i)
4321 // int *p = a[i];
4322 // ...
4323 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4324 if (!L->isLoopInvariant(Load->getPointerOperand()))
4325 return false;
4326 return true;
4327}
4328
4329const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4330 if (!V->getType()->isPointerTy())
4331 return V;
4332 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4333 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4334 V = GEP->getPointerOperand();
4335 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4336 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4337 V = cast<Operator>(V)->getOperand(0);
4338 if (!V->getType()->isPointerTy())
4339 return V;
4340 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4341 if (GA->isInterposable())
4342 return V;
4343 V = GA->getAliasee();
4344 } else {
4345 if (auto *PHI = dyn_cast<PHINode>(V)) {
4346 // Look through single-arg phi nodes created by LCSSA.
4347 if (PHI->getNumIncomingValues() == 1) {
4348 V = PHI->getIncomingValue(0);
4349 continue;
4350 }
4351 } else if (auto *Call = dyn_cast<CallBase>(V)) {
4352        // CaptureTracking can know about special capturing properties of some
4353        // intrinsics like launder.invariant.group that can't be expressed with
4354        // the attributes, but that have properties like returning an aliasing
4355        // pointer. Because some analyses may assume that a nocapture pointer is
4356        // not returned from some special intrinsic (because the function would
4357        // have to be marked with the returns attribute), it is crucial to use
4358        // this function so that we stay in sync with CaptureTracking. Not using
4359        // it may cause weird miscompilations where two aliasing pointers are
4360        // assumed to be noalias.
4361 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4362 V = RP;
4363 continue;
4364 }
4365 }
4366
4367 return V;
4368 }
4369    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4370 }
4371 return V;
4372}
4373
4374void llvm::getUnderlyingObjects(const Value *V,
4375 SmallVectorImpl<const Value *> &Objects,
4376 LoopInfo *LI, unsigned MaxLookup) {
4377 SmallPtrSet<const Value *, 4> Visited;
4378 SmallVector<const Value *, 4> Worklist;
4379 Worklist.push_back(V);
4380 do {
4381 const Value *P = Worklist.pop_back_val();
4382 P = getUnderlyingObject(P, MaxLookup);
4383
4384 if (!Visited.insert(P).second)
4385 continue;
4386
4387 if (auto *SI = dyn_cast<SelectInst>(P)) {
4388 Worklist.push_back(SI->getTrueValue());
4389 Worklist.push_back(SI->getFalseValue());
4390 continue;
4391 }
4392
4393 if (auto *PN = dyn_cast<PHINode>(P)) {
4394 // If this PHI changes the underlying object in every iteration of the
4395 // loop, don't look through it. Consider:
4396 // int **A;
4397 // for (i) {
4398 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
4399 // Curr = A[i];
4400 // *Prev, *Curr;
4401 //
4402 // Prev is tracking Curr one iteration behind so they refer to different
4403 // underlying objects.
4404 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4405 isSameUnderlyingObjectInLoop(PN, LI))
4406 append_range(Worklist, PN->incoming_values());
4407 continue;
4408 }
4409
4410 Objects.push_back(P);
4411 } while (!Worklist.empty());
4412}
4413
4414/// This is the function that does the work of looking through basic
4415/// ptrtoint+arithmetic+inttoptr sequences.
4416static const Value *getUnderlyingObjectFromInt(const Value *V) {
4417 do {
4418 if (const Operator *U = dyn_cast<Operator>(V)) {
4419 // If we find a ptrtoint, we can transfer control back to the
4420 // regular getUnderlyingObjectFromInt.
4421 if (U->getOpcode() == Instruction::PtrToInt)
4422 return U->getOperand(0);
4423 // If we find an add of a constant, a multiplied value, or a phi, it's
4424 // likely that the other operand will lead us to the base
4425 // object. We don't have to worry about the case where the
4426 // object address is somehow being computed by the multiply,
4427 // because our callers only care when the result is an
4428 // identifiable object.
4429 if (U->getOpcode() != Instruction::Add ||
4430 (!isa<ConstantInt>(U->getOperand(1)) &&
4431 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4432 !isa<PHINode>(U->getOperand(1))))
4433 return V;
4434 V = U->getOperand(0);
4435 } else {
4436 return V;
4437 }
4438    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4439 } while (true);
4440}
4441
4442/// This is a wrapper around getUnderlyingObjects that adds support for basic
4443/// ptrtoint+arithmetic+inttoptr sequences.
4444/// It returns false if an unidentified object is found by getUnderlyingObjects.
4445bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4446 SmallVectorImpl<Value *> &Objects) {
4447 SmallPtrSet<const Value *, 16> Visited;
4448 SmallVector<const Value *, 4> Working(1, V);
4449 do {
4450 V = Working.pop_back_val();
4451
4452 SmallVector<const Value *, 4> Objs;
4453 getUnderlyingObjects(V, Objs);
4454
4455 for (const Value *V : Objs) {
4456 if (!Visited.insert(V).second)
4457 continue;
4458 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4459 const Value *O =
4460 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4461 if (O->getType()->isPointerTy()) {
4462 Working.push_back(O);
4463 continue;
4464 }
4465 }
4466 // If getUnderlyingObjects fails to find an identifiable object,
4467 // getUnderlyingObjectsForCodeGen also fails for safety.
4468 if (!isIdentifiedObject(V)) {
4469 Objects.clear();
4470 return false;
4471 }
4472 Objects.push_back(const_cast<Value *>(V));
4473 }
4474 } while (!Working.empty());
4475 return true;
4476}
4477
4478AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4479 AllocaInst *Result = nullptr;
4480 SmallPtrSet<Value *, 4> Visited;
4481 SmallVector<Value *, 4> Worklist;
4482
4483 auto AddWork = [&](Value *V) {
4484 if (Visited.insert(V).second)
4485 Worklist.push_back(V);
4486 };
4487
4488 AddWork(V);
4489 do {
4490 V = Worklist.pop_back_val();
4491    assert(Visited.count(V));
4492
4493 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4494 if (Result && Result != AI)
4495 return nullptr;
4496 Result = AI;
4497 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4498 AddWork(CI->getOperand(0));
4499 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4500 for (Value *IncValue : PN->incoming_values())
4501 AddWork(IncValue);
4502 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4503 AddWork(SI->getTrueValue());
4504 AddWork(SI->getFalseValue());
4505 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4506 if (OffsetZero && !GEP->hasAllZeroIndices())
4507 return nullptr;
4508 AddWork(GEP->getPointerOperand());
4509 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
4510 Value *Returned = CB->getReturnedArgOperand();
4511 if (Returned)
4512 AddWork(Returned);
4513 else
4514 return nullptr;
4515 } else {
4516 return nullptr;
4517 }
4518 } while (!Worklist.empty());
4519
4520 return Result;
4521}
4522
4523static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4524 const Value *V, bool AllowLifetime, bool AllowDroppable) {
4525 for (const User *U : V->users()) {
4526 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4527 if (!II)
4528 return false;
4529
4530 if (AllowLifetime && II->isLifetimeStartOrEnd())
4531 continue;
4532
4533 if (AllowDroppable && II->isDroppable())
4534 continue;
4535
4536 return false;
4537 }
4538 return true;
4539}
4540
4541bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4542 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4543 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4544}
4545bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4546 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4547 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4548}
4549
4550bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4551 if (!LI.isUnordered())
4552 return true;
4553 const Function &F = *LI.getFunction();
4554 // Speculative load may create a race that did not exist in the source.
4555 return F.hasFnAttribute(Attribute::SanitizeThread) ||
4556 // Speculative load may load data from dirty regions.
4557 F.hasFnAttribute(Attribute::SanitizeAddress) ||
4558 F.hasFnAttribute(Attribute::SanitizeHWAddress);
4559}
4560
4561
4562bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4563 const Instruction *CtxI,
4564 const DominatorTree *DT,
4565 const TargetLibraryInfo *TLI) {
4566 const Operator *Inst = dyn_cast<Operator>(V);
4567 if (!Inst)
4568 return false;
4569
4570 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4571 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4572 if (C->canTrap())
4573 return false;
4574
4575 switch (Inst->getOpcode()) {
4576 default:
4577 return true;
4578 case Instruction::UDiv:
4579 case Instruction::URem: {
4580 // x / y is undefined if y == 0.
4581 const APInt *V;
4582 if (match(Inst->getOperand(1), m_APInt(V)))
4583 return *V != 0;
4584 return false;
4585 }
4586 case Instruction::SDiv:
4587 case Instruction::SRem: {
4588 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4589 const APInt *Numerator, *Denominator;
4590 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4591 return false;
4592 // We cannot hoist this division if the denominator is 0.
4593 if (*Denominator == 0)
4594 return false;
4595 // It's safe to hoist if the denominator is not 0 or -1.
4596 if (!Denominator->isAllOnes())
4597 return true;
4598    // At this point we know that the denominator is -1. It is safe to hoist as
4599    // long as we know that the numerator is not INT_MIN.
4600 if (match(Inst->getOperand(0), m_APInt(Numerator)))
4601 return !Numerator->isMinSignedValue();
4602 // The numerator *might* be MinSignedValue.
4603 return false;
4604 }
4605 case Instruction::Load: {
4606 const LoadInst *LI = cast<LoadInst>(Inst);
4607 if (mustSuppressSpeculation(*LI))
4608 return false;
4609 const DataLayout &DL = LI->getModule()->getDataLayout();
4610 return isDereferenceableAndAlignedPointer(
4611 LI->getPointerOperand(), LI->getType(), LI->getAlign(), DL, CtxI, DT,
4612 TLI);
4613 }
4614 case Instruction::Call: {
4615 auto *CI = cast<const CallInst>(Inst);
4616 const Function *Callee = CI->getCalledFunction();
4617
4618 // The called function could have undefined behavior or side-effects, even
4619 // if marked readnone nounwind.
4620 return Callee && Callee->isSpeculatable();
4621 }
4622 case Instruction::VAArg:
4623 case Instruction::Alloca:
4624 case Instruction::Invoke:
4625 case Instruction::CallBr:
4626 case Instruction::PHI:
4627 case Instruction::Store:
4628 case Instruction::Ret:
4629 case Instruction::Br:
4630 case Instruction::IndirectBr:
4631 case Instruction::Switch:
4632 case Instruction::Unreachable:
4633 case Instruction::Fence:
4634 case Instruction::AtomicRMW:
4635 case Instruction::AtomicCmpXchg:
4636 case Instruction::LandingPad:
4637 case Instruction::Resume:
4638 case Instruction::CatchSwitch:
4639 case Instruction::CatchPad:
4640 case Instruction::CatchRet:
4641 case Instruction::CleanupPad:
4642 case Instruction::CleanupRet:
4643 return false; // Misc instructions which have effects
4644 }
4645}
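The SDiv/SRem case above refuses to speculate when the denominator could be -1 and the numerator could be INT_MIN. A small standalone demonstration of why that pairing is the hazard follows; it is an editor's sketch, not LLVM code, and it assumes the usual 32-bit int.

#include <climits>
#include <cstdio>

int main() {
  // Widen to long long first so the true mathematical quotient can be printed
  // (assuming 32-bit int, as on the x86_64 Linux target of this report).
  long long Numerator = INT_MIN;
  long long Denominator = -1;
  long long Quotient = Numerator / Denominator;     // 2147483648
  std::printf("true quotient %lld exceeds INT_MAX (%d)\n", Quotient, INT_MAX);
  // Evaluating `INT_MIN / -1` directly in int is undefined behavior, which is
  // why the function above only hoists when the numerator is known not to be
  // the minimum signed value (or the denominator is known not to be -1).
  return 0;
}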
4646
4647bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {
4648 if (I.mayReadOrWriteMemory())
4649 // Memory dependency possible
4650 return true;
4651 if (!isSafeToSpeculativelyExecute(&I))
4652 // Can't move above a maythrow call or infinite loop. Or if an
4653 // inalloca alloca, above a stacksave call.
4654 return true;
4655 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4656 // 1) Can't reorder two inf-loop calls, even if readonly
4657    // 2) Also can't reorder an inf-loop call below an instruction which isn't
4658    //    safe to speculatively execute. (Inverse of above)
4659 return true;
4660 return false;
4661}
4662
4663/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4664static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4665 switch (OR) {
4666 case ConstantRange::OverflowResult::MayOverflow:
4667 return OverflowResult::MayOverflow;
4668 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4669 return OverflowResult::AlwaysOverflowsLow;
4670 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4671 return OverflowResult::AlwaysOverflowsHigh;
4672 case ConstantRange::OverflowResult::NeverOverflows:
4673 return OverflowResult::NeverOverflows;
4674 }
4675  llvm_unreachable("Unknown OverflowResult");
4676}
4677
4678/// Combine constant ranges from computeConstantRange() and computeKnownBits().
4679static ConstantRange computeConstantRangeIncludingKnownBits(
4680 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4681 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4682 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4683 KnownBits Known = computeKnownBits(
4684 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4685 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4686 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4687 ConstantRange::PreferredRangeType RangeType =
4688 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4689 return CR1.intersectWith(CR2, RangeType);
4690}
4691
4692OverflowResult llvm::computeOverflowForUnsignedMul(
4693 const Value *LHS, const Value *RHS, const DataLayout &DL,
4694 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4695 bool UseInstrInfo) {
4696 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4697 nullptr, UseInstrInfo);
4698 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4699 nullptr, UseInstrInfo);
4700 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4701 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4702 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4703}
4704
4705OverflowResult
4706llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4707 const DataLayout &DL, AssumptionCache *AC,
4708 const Instruction *CxtI,
4709 const DominatorTree *DT, bool UseInstrInfo) {
4710 // Multiplying n * m significant bits yields a result of n + m significant
4711 // bits. If the total number of significant bits does not exceed the
4712 // result bit width (minus 1), there is no overflow.
4713 // This means if we have enough leading sign bits in the operands
4714 // we can guarantee that the result does not overflow.
4715 // Ref: "Hacker's Delight" by Henry Warren
4716 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4717
4718 // Note that underestimating the number of sign bits gives a more
4719 // conservative answer.
4720 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4721 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4722
4723 // First handle the easy case: if we have enough sign bits there's
4724 // definitely no overflow.
4725 if (SignBits > BitWidth + 1)
4726 return OverflowResult::NeverOverflows;
4727
4728 // There are two ambiguous cases where there can be no overflow:
4729 // SignBits == BitWidth + 1 and
4730 // SignBits == BitWidth
4731  // The second case is difficult to check; therefore we only handle the
4732  // first case.
4733 if (SignBits == BitWidth + 1) {
4734 // It overflows only when both arguments are negative and the true
4735 // product is exactly the minimum negative number.
4736 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4737 // For simplicity we just check if at least one side is not negative.
4738 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4739 nullptr, UseInstrInfo);
4740 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4741 nullptr, UseInstrInfo);
4742 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4743 return OverflowResult::NeverOverflows;
4744 }
4745 return OverflowResult::MayOverflow;
4746}
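To make the sign-bit bookkeeping above concrete, here is a standalone check of the i16 example from the comment (0xff00 * 0xff80). The countSignBits helper is a hypothetical stand-in for what ComputeNumSignBits would report for a constant; this is an editor's sketch, not LLVM code.

#include <cstdint>
#include <cstdio>

// Counts leading bits equal to the sign bit (hypothetical stand-in for
// ComputeNumSignBits on a constant; not the LLVM implementation).
static unsigned countSignBits(int16_t X) {
  uint16_t U = (uint16_t)X;
  unsigned Sign = (U >> 15) & 1;
  unsigned N = 0;
  for (int Bit = 15; Bit >= 0; --Bit) {
    if (((U >> Bit) & 1) != Sign)
      break;
    ++N;
  }
  return N;
}

int main() {
  int16_t LHS = (int16_t)0xff00;   // -256, 8 sign bits
  int16_t RHS = (int16_t)0xff80;   // -128, 9 sign bits
  unsigned SignBits = countSignBits(LHS) + countSignBits(RHS);  // 17 == 16 + 1
  int32_t Product = (int32_t)LHS * (int32_t)RHS;                // 32768 == 0x8000
  std::printf("sign bits: %u, product: %d, fits in i16: %d\n",
              SignBits, Product, Product <= INT16_MAX && Product >= INT16_MIN);
  // With SignBits == BitWidth + 1 this is exactly the ambiguous case the code
  // above resolves by checking that at least one operand is non-negative.
  return 0;
}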
4747
4748OverflowResult llvm::computeOverflowForUnsignedAdd(
4749 const Value *LHS, const Value *RHS, const DataLayout &DL,
4750 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4751 bool UseInstrInfo) {
4752 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4753 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4754 nullptr, UseInstrInfo);
4755 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4756 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4757 nullptr, UseInstrInfo);
4758 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4759}
4760
4761static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4762 const Value *RHS,
4763 const AddOperator *Add,
4764 const DataLayout &DL,
4765 AssumptionCache *AC,
4766 const Instruction *CxtI,
4767 const DominatorTree *DT) {
4768 if (Add && Add->hasNoSignedWrap()) {
4769 return OverflowResult::NeverOverflows;
4770 }
4771
4772 // If LHS and RHS each have at least two sign bits, the addition will look
4773 // like
4774 //
4775 // XX..... +
4776 // YY.....
4777 //
4778 // If the carry into the most significant position is 0, X and Y can't both
4779 // be 1 and therefore the carry out of the addition is also 0.
4780 //
4781 // If the carry into the most significant position is 1, X and Y can't both
4782 // be 0 and therefore the carry out of the addition is also 1.
4783 //
4784 // Since the carry into the most significant position is always equal to
4785 // the carry out of the addition, there is no signed overflow.
4786 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4787 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4788 return OverflowResult::NeverOverflows;
4789
4790 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4791 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4792 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4793 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4794 OverflowResult OR =
4795 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4796 if (OR != OverflowResult::MayOverflow)
4797 return OR;
4798
4799  // The remaining code needs Add to be available. Return early if it is not.
4800 if (!Add)
4801 return OverflowResult::MayOverflow;
4802
4803 // If the sign of Add is the same as at least one of the operands, this add
4804 // CANNOT overflow. If this can be determined from the known bits of the
4805 // operands the above signedAddMayOverflow() check will have already done so.
4806 // The only other way to improve on the known bits is from an assumption, so
4807 // call computeKnownBitsFromAssume() directly.
4808 bool LHSOrRHSKnownNonNegative =
4809 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4810 bool LHSOrRHSKnownNegative =
4811 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4812 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4813 KnownBits AddKnown(LHSRange.getBitWidth());
4814 computeKnownBitsFromAssume(
4815 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4816 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4817 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4818 return OverflowResult::NeverOverflows;
4819 }
4820
4821 return OverflowResult::MayOverflow;
4822}
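A small standalone confirmation of the two-sign-bit argument above (an editor's sketch, not LLVM code): i8 operands with at least two sign bits lie in [-64, 63], so every sum fits in [-128, 126] and never wraps.

#include <cstdint>
#include <cstdio>

int main() {
  bool OverflowFound = false;
  for (int A = -64; A <= 63; ++A)        // all i8 values with >= 2 sign bits
    for (int B = -64; B <= 63; ++B) {
      int Sum = A + B;
      if (Sum > INT8_MAX || Sum < INT8_MIN)
        OverflowFound = true;
    }
  std::printf("signed i8 add can overflow with 2+ sign bits each: %d\n",
              OverflowFound);            // prints 0
  return 0;
}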
4823
4824OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4825 const Value *RHS,
4826 const DataLayout &DL,
4827 AssumptionCache *AC,
4828 const Instruction *CxtI,
4829 const DominatorTree *DT) {
4830 // Checking for conditions implied by dominating conditions may be expensive.
4831 // Limit it to usub_with_overflow calls for now.
4832 if (match(CxtI,
4833 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4834 if (auto C =
4835 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4836 if (*C)
4837 return OverflowResult::NeverOverflows;
4838 return OverflowResult::AlwaysOverflowsLow;
4839 }
4840 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4841 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4842 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4843 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4844 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4845}
4846
4847OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4848 const Value *RHS,
4849 const DataLayout &DL,
4850 AssumptionCache *AC,
4851 const Instruction *CxtI,
4852 const DominatorTree *DT) {
4853 // If LHS and RHS each have at least two sign bits, the subtraction
4854 // cannot overflow.
4855 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4856 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4857 return OverflowResult::NeverOverflows;
4858
4859 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4860 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4861 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4862 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4863 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4864}
4865
4866bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4867 const DominatorTree &DT) {
4868 SmallVector<const BranchInst *, 2> GuardingBranches;
4869 SmallVector<const ExtractValueInst *, 2> Results;
4870
4871 for (const User *U : WO->users()) {
4872 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4873      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4874
4875 if (EVI->getIndices()[0] == 0)
4876 Results.push_back(EVI);
4877 else {
4878        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4879
4880 for (const auto *U : EVI->users())
4881 if (const auto *B = dyn_cast<BranchInst>(U)) {
4882            assert(B->isConditional() && "How else is it using an i1?");
4883 GuardingBranches.push_back(B);
4884 }
4885 }
4886 } else {
4887 // We are using the aggregate directly in a way we don't want to analyze
4888 // here (storing it to a global, say).
4889 return false;
4890 }
4891 }
4892
4893 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4894 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4895 if (!NoWrapEdge.isSingleEdge())
4896 return false;
4897
4898 // Check if all users of the add are provably no-wrap.
4899 for (const auto *Result : Results) {
4900      // If the extractvalue itself is not executed on overflow, then we don't
4901 // need to check each use separately, since domination is transitive.
4902 if (DT.dominates(NoWrapEdge, Result->getParent()))
4903 continue;
4904
4905 for (auto &RU : Result->uses())
4906 if (!DT.dominates(NoWrapEdge, RU))
4907 return false;
4908 }
4909
4910 return true;
4911 };
4912
4913 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4914}
4915
4916static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
4917 bool ConsiderFlags) {
4918
4919 if (ConsiderFlags && Op->hasPoisonGeneratingFlags())
4920 return true;
4921
4922 unsigned Opcode = Op->getOpcode();
4923
4924 // Check whether opcode is a poison/undef-generating operation
4925 switch (Opcode) {
4926 case Instruction::Shl:
4927 case Instruction::AShr:
4928 case Instruction::LShr: {
4929    // Shifts return poison if the shift amount is equal to or larger than the bitwidth.
4930 if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4931 SmallVector<Constant *, 4> ShiftAmounts;
4932 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4933 unsigned NumElts = FVTy->getNumElements();
4934 for (unsigned i = 0; i < NumElts; ++i)
4935 ShiftAmounts.push_back(C->getAggregateElement(i));
4936 } else if (isa<ScalableVectorType>(C->getType()))
4937 return true; // Can't tell, just return true to be safe
4938 else
4939 ShiftAmounts.push_back(C);
4940
4941 bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4942 auto *CI = dyn_cast_or_null<ConstantInt>(C);
4943 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4944 });
4945 return !Safe;
4946 }
4947 return true;
4948 }
4949 case Instruction::FPToSI:
4950 case Instruction::FPToUI:
4951 // fptosi/ui yields poison if the resulting value does not fit in the
4952 // destination type.
4953 return true;
4954 case Instruction::Call:
4955 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
4956 switch (II->getIntrinsicID()) {
4957 // TODO: Add more intrinsics.
4958 case Intrinsic::ctpop:
4959 case Intrinsic::sadd_with_overflow:
4960 case Intrinsic::ssub_with_overflow:
4961 case Intrinsic::smul_with_overflow:
4962 case Intrinsic::uadd_with_overflow:
4963 case Intrinsic::usub_with_overflow:
4964 case Intrinsic::umul_with_overflow:
4965 return false;
4966 }
4967 }
4968    LLVM_FALLTHROUGH;
4969 case Instruction::CallBr:
4970 case Instruction::Invoke: {
4971 const auto *CB = cast<CallBase>(Op);
4972 return !CB->hasRetAttr(Attribute::NoUndef);
4973 }
4974 case Instruction::InsertElement:
4975 case Instruction::ExtractElement: {
4976 // If index exceeds the length of the vector, it returns poison
4977 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4978 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4979 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4980 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
4981 return true;
4982 return false;
4983 }
4984 case Instruction::ShuffleVector: {
4985 // shufflevector may return undef.
4986 if (PoisonOnly)
4987 return false;
4988 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4989 ? cast<ConstantExpr>(Op)->getShuffleMask()
4990 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4991 return is_contained(Mask, UndefMaskElem);
4992 }
4993 case Instruction::FNeg:
4994 case Instruction::PHI:
4995 case Instruction::Select:
4996 case Instruction::URem:
4997 case Instruction::SRem:
4998 case Instruction::ExtractValue:
4999 case Instruction::InsertValue:
5000 case Instruction::Freeze:
5001 case Instruction::ICmp:
5002 case Instruction::FCmp:
5003 return false;
5004 case Instruction::GetElementPtr:
5005 // inbounds is handled above
5006 // TODO: what about inrange on constexpr?
5007 return false;
5008 default: {
5009 const auto *CE = dyn_cast<ConstantExpr>(Op);
5010 if (isa<CastInst>(Op) || (CE && CE->isCast()))
5011 return false;
5012 else if (Instruction::isBinaryOp(Opcode))
5013 return false;
5014 // Be conservative and return true.
5015 return true;
5016 }
5017 }
5018}
5019
5020bool llvm::canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlags) {
5021 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false, ConsiderFlags);
5022}
5023
5024bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlags) {
5025 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true, ConsiderFlags);
5026}
5027
5028static bool directlyImpliesPoison(const Value *ValAssumedPoison,
5029 const Value *V, unsigned Depth) {
5030 if (ValAssumedPoison == V)
5031 return true;
5032
5033 const unsigned MaxDepth = 2;
5034 if (Depth >= MaxDepth)
5035 return false;
5036
5037 if (const auto *I = dyn_cast<Instruction>(V)) {
5038 if (propagatesPoison(cast<Operator>(I)))
5039 return any_of(I->operands(), [=](const Value *Op) {
5040 return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
5041 });
5042
5043 // 'select ValAssumedPoison, _, _' is poison.
5044 if (const auto *SI = dyn_cast<SelectInst>(I))
5045 return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
5046 Depth + 1);
5047 // V = extractvalue V0, idx
5048 // V2 = extractvalue V0, idx2
5049 // V0's elements are all poison or not. (e.g., add_with_overflow)
5050 const WithOverflowInst *II;
5051 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
5052 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
5053 llvm::is_contained(II->args(), ValAssumedPoison)))
5054 return true;
5055 }
5056 return false;
5057}
5058
5059static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
5060 unsigned Depth) {
5061 if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
5062 return true;
5063
5064 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
5065 return true;
5066
5067 const unsigned MaxDepth = 2;
5068 if (Depth >= MaxDepth)
5069 return false;
5070
5071 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
5072 if (I && !canCreatePoison(cast<Operator>(I))) {
5073 return all_of(I->operands(), [=](const Value *Op) {
5074 return impliesPoison(Op, V, Depth + 1);
5075 });
5076 }
5077 return false;
5078}
5079
5080bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
5081 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
5082}
5083
5084static bool programUndefinedIfUndefOrPoison(const Value *V,
5085 bool PoisonOnly);
5086
5087static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
5088 AssumptionCache *AC,
5089 const Instruction *CtxI,
5090 const DominatorTree *DT,
5091 unsigned Depth, bool PoisonOnly) {
5092 if (Depth >= MaxAnalysisRecursionDepth)
5093 return false;
5094
5095 if (isa<MetadataAsValue>(V))
5096 return false;
5097
5098 if (const auto *A = dyn_cast<Argument>(V)) {
5099 if (A->hasAttribute(Attribute::NoUndef))
5100 return true;
5101 }
5102
5103 if (auto *C = dyn_cast<Constant>(V)) {
5104 if (isa<UndefValue>(C))
5105 return PoisonOnly && !isa<PoisonValue>(C);
5106
5107 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
5108 isa<ConstantPointerNull>(C) || isa<Function>(C))
5109 return true;
5110
5111 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
5112 return (PoisonOnly ? !C->containsPoisonElement()
5113 : !C->containsUndefOrPoisonElement()) &&
5114 !C->containsConstantExpression();
5115 }
5116
5117  // Strip cast operations from a pointer value.
5118  // Note that stripPointerCastsSameRepresentation can strip off getelementptr
5119  // inbounds with zero offset. To guarantee that the result isn't poison, the
5120  // stripped pointer is checked: it has to point into an allocated object or
5121  // be null, so that `inbounds` getelementptrs with a zero offset could not
5122  // produce poison.
5123  // It can strip off addrspacecasts that do not change the bit representation
5124  // as well. We believe that such an addrspacecast is equivalent to a no-op.
5125 auto *StrippedV = V->stripPointerCastsSameRepresentation();
5126 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
5127 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
5128 return true;
5129
5130 auto OpCheck = [&](const Value *V) {
5131 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
5132 PoisonOnly);
5133 };
5134
5135 if (auto *Opr = dyn_cast<Operator>(V)) {
5136 // If the value is a freeze instruction, then it can never
5137 // be undef or poison.
5138 if (isa<FreezeInst>(V))
5139 return true;
5140
5141 if (const auto *CB = dyn_cast<CallBase>(V)) {
5142 if (CB->hasRetAttr(Attribute::NoUndef))
5143 return true;
5144 }
5145
5146 if (const auto *PN = dyn_cast<PHINode>(V)) {
5147 unsigned Num = PN->getNumIncomingValues();
5148 bool IsWellDefined = true;
5149 for (unsigned i = 0; i < Num; ++i) {
5150 auto *TI = PN->getIncomingBlock(i)->getTerminator();
5151 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
5152 DT, Depth + 1, PoisonOnly)) {
5153 IsWellDefined = false;
5154 break;
5155 }
5156 }
5157 if (IsWellDefined)
5158 return true;
5159 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5160 return true;
5161 }
5162
5163 if (auto *I = dyn_cast<LoadInst>(V))
5164 if (I->getMetadata(LLVMContext::MD_noundef))
5165 return true;
5166
5167 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5168 return true;
5169
5170  // CtxI may be null or a cloned instruction.
5171 if (!CtxI || !CtxI->getParent() || !DT)
5172 return false;
5173
5174 auto *DNode = DT->getNode(CtxI->getParent());
5175 if (!DNode)
5176 // Unreachable block
5177 return false;
5178
5179 // If V is used as a branch condition before reaching CtxI, V cannot be
5180 // undef or poison.
5181 // br V, BB1, BB2
5182 // BB1:
5183 // CtxI ; V cannot be undef or poison here
5184 auto *Dominator = DNode->getIDom();
5185 while (Dominator) {
5186 auto *TI = Dominator->getBlock()->getTerminator();
5187
5188 Value *Cond = nullptr;
5189 if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
5190 if (BI->isConditional())
5191 Cond = BI->getCondition();
5192 } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
5193 Cond = SI->getCondition();
5194 }
5195
5196 if (Cond) {
5197 if (Cond == V)
5198 return true;
5199 else if (PoisonOnly && isa<Operator>(Cond)) {
5200 // For poison, we can analyze further
5201 auto *Opr = cast<Operator>(Cond);
5202 if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
5203 return true;
5204 }
5205 }
5206
5207 Dominator = Dominator->getIDom();
5208 }
5209
5210 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
5211 return true;
5212
5213 return false;
5214}
5215
5216bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5217 const Instruction *CtxI,
5218 const DominatorTree *DT,
5219 unsigned Depth) {
5220 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5221}
5222
5223bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5224 const Instruction *CtxI,
5225 const DominatorTree *DT, unsigned Depth) {
5226 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5227}
5228
5229OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5230 const DataLayout &DL,
5231 AssumptionCache *AC,
5232 const Instruction *CxtI,
5233 const DominatorTree *DT) {
5234 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5235 Add, DL, AC, CxtI, DT);
5236}
5237
5238OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5239 const Value *RHS,
5240 const DataLayout &DL,
5241 AssumptionCache *AC,
5242 const Instruction *CxtI,
5243 const DominatorTree *DT) {
5244 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5245}
5246
5247bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5248 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5249 // of time because it's possible for another thread to interfere with it for an
5250 // arbitrary length of time, but programs aren't allowed to rely on that.
5251
5252 // If there is no successor, then execution can't transfer to it.
5253 if (isa<ReturnInst>(I))
5254 return false;
5255 if (isa<UnreachableInst>(I))
5256 return false;
5257
5258 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
5259 // Instruction::willReturn.
5260 //
5261 // FIXME: Move this check into Instruction::willReturn.
5262 if (isa<CatchPadInst>(I)) {
5263 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
5264 default:
5265 // A catchpad may invoke exception object constructors and such, which
5266 // in some languages can be arbitrary code, so be conservative by default.
5267 return false;
5268 case EHPersonality::CoreCLR:
5269 // For CoreCLR, it just involves a type test.
5270 return true;
5271 }
5272 }
5273
5274 // An instruction that returns without throwing must transfer control flow
5275 // to a successor.
5276 return !I->mayThrow() && I->willReturn();
5277}
5278
5279bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5280  // TODO: This is slightly conservative for invoke instructions since exiting
5281  // via an exception *is* normal control flow for them.
5282 for (const Instruction &I : *BB)
5283 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5284 return false;
5285 return true;
5286}
5287
5288bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5289 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
5290 unsigned ScanLimit) {
5291 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
5292 ScanLimit);
5293}
5294
5295bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5296 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
5297  assert(ScanLimit && "scan limit must be non-zero");
5298 for (const Instruction &I : Range) {
5299 if (isa<DbgInfoIntrinsic>(I))
5300 continue;
5301 if (--ScanLimit == 0)
5302 return false;
5303 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5304 return false;
5305 }
5306 return true;
5307}
5308
5309bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5310 const Loop *L) {
5311 // The loop header is guaranteed to be executed for every iteration.
5312 //
5313 // FIXME: Relax this constraint to cover all basic blocks that are
5314 // guaranteed to be executed at every iteration.
5315 if (I->getParent() != L->getHeader()) return false;
5316
5317 for (const Instruction &LI : *L->getHeader()) {
5318 if (&LI == I) return true;
5319 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5320 }
5321  llvm_unreachable("Instruction not contained in its own parent basic block.");
5322}
5323
5324bool llvm::propagatesPoison(const Operator *I) {
5325 switch (I->getOpcode()) {
5326 case Instruction::Freeze:
5327 case Instruction::Select:
5328 case Instruction::PHI:
5329 case Instruction::Invoke:
5330 return false;
5331 case Instruction::Call:
5332 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5333 switch (II->getIntrinsicID()) {
5334 // TODO: Add more intrinsics.
5335 case Intrinsic::sadd_with_overflow:
5336 case Intrinsic::ssub_with_overflow:
5337 case Intrinsic::smul_with_overflow:
5338 case Intrinsic::uadd_with_overflow:
5339 case Intrinsic::usub_with_overflow:
5340 case Intrinsic::umul_with_overflow:
5341        // If an input is a vector containing a poison element, then the
5342        // corresponding lanes of both output vectors (the calculated results
5343        // and the overflow bits) are poison.
5344 return true;
5345 case Intrinsic::ctpop:
5346 return true;
5347 }
5348 }
5349 return false;
5350 case Instruction::ICmp:
5351 case Instruction::FCmp:
5352 case Instruction::GetElementPtr:
5353 return true;
5354 default:
5355 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5356 return true;
5357
5358 // Be conservative and return false.
5359 return false;
5360 }
5361}
5362
5363void llvm::getGuaranteedWellDefinedOps(
5364 const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5365 switch (I->getOpcode()) {
5366 case Instruction::Store:
5367 Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5368 break;
5369
5370 case Instruction::Load:
5371 Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5372 break;
5373
5374  // Since the dereferenceable attribute implies noundef, atomic operations
5375  // also implicitly have noundef pointers.
5376 case Instruction::AtomicCmpXchg:
5377 Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5378 break;
5379
5380 case Instruction::AtomicRMW:
5381 Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5382 break;
5383
5384 case Instruction::Call:
5385 case Instruction::Invoke: {
5386 const CallBase *CB = cast<CallBase>(I);
5387 if (CB->isIndirectCall())
5388 Operands.insert(CB->getCalledOperand());
5389 for (unsigned i = 0; i < CB->arg_size(); ++i) {
5390 if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5391 CB->paramHasAttr(i, Attribute::Dereferenceable))
5392 Operands.insert(CB->getArgOperand(i));
5393 }
5394 break;
5395 }
5396 case Instruction::Ret:
5397 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
5398 Operands.insert(I->getOperand(0));
5399 break;
5400 default:
5401 break;
5402 }
5403}
5404
5405void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5406 SmallPtrSetImpl<const Value *> &Operands) {
5407 getGuaranteedWellDefinedOps(I, Operands);
5408 switch (I->getOpcode()) {
5409 // Divisors of these operations are allowed to be partially undef.
5410 case Instruction::UDiv:
5411 case Instruction::SDiv:
5412 case Instruction::URem:
5413 case Instruction::SRem:
5414 Operands.insert(I->getOperand(1));
5415 break;
5416 case Instruction::Switch:
5417 if (BranchOnPoisonAsUB)
5418 Operands.insert(cast<SwitchInst>(I)->getCondition());
5419 break;
5420 case Instruction::Br: {
5421 auto *BR = cast<BranchInst>(I);
5422 if (BranchOnPoisonAsUB && BR->isConditional())
5423 Operands.insert(BR->getCondition());
5424 break;
5425 }
5426 default:
5427 break;
5428 }
5429}
5430
5431bool llvm::mustTriggerUB(const Instruction *I,
5432 const SmallSet<const Value *, 16>& KnownPoison) {
5433 SmallPtrSet<const Value *, 4> NonPoisonOps;
5434 getGuaranteedNonPoisonOps(I, NonPoisonOps);
5435
5436 for (const auto *V : NonPoisonOps)
5437 if (KnownPoison.count(V))
5438 return true;
5439
5440 return false;
5441}
5442
5443static bool programUndefinedIfUndefOrPoison(const Value *V,
5444 bool PoisonOnly) {
5445 // We currently only look for uses of values within the same basic
5446 // block, as that makes it easier to guarantee that the uses will be
5447 // executed given that Inst is executed.
5448 //
5449 // FIXME: Expand this to consider uses beyond the same basic block. To do
5450 // this, look out for the distinction between post-dominance and strong
5451 // post-dominance.
5452 const BasicBlock *BB = nullptr;
5453 BasicBlock::const_iterator Begin;
5454 if (const auto *Inst = dyn_cast<Instruction>(V)) {
5455 BB = Inst->getParent();
5456 Begin = Inst->getIterator();
5457 Begin++;
5458 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5459 BB = &Arg->getParent()->getEntryBlock();
5460 Begin = BB->begin();
5461 } else {
5462 return false;
5463 }
5464
5465 // Limit number of instructions we look at, to avoid scanning through large
5466 // blocks. The current limit is chosen arbitrarily.
5467 unsigned ScanLimit = 32;
5468 BasicBlock::const_iterator End = BB->end();
5469
5470 if (!PoisonOnly) {
5471 // Since undef does not propagate eagerly, be conservative & just check
5472 // whether a value is directly passed to an instruction that must take
5473 // well-defined operands.
5474
5475 for (auto &I : make_range(Begin, End)) {
5476 if (isa<DbgInfoIntrinsic>(I))
5477 continue;
5478 if (--ScanLimit == 0)
5479 break;
5480
5481 SmallPtrSet<const Value *, 4> WellDefinedOps;
5482 getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5483 if (WellDefinedOps.contains(V))
5484 return true;
5485
5486 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5487 break;
5488 }
5489 return false;
5490 }
5491
5492 // Set of instructions that we have proved will yield poison if Inst
5493 // does.
5494 SmallSet<const Value *, 16> YieldsPoison;
5495 SmallSet<const BasicBlock *, 4> Visited;
5496
5497 YieldsPoison.insert(V);
5498 auto Propagate = [&](const User *User) {
5499 if (propagatesPoison(cast<Operator>(User)))
5500 YieldsPoison.insert(User);
5501 };
5502 for_each(V->users(), Propagate);
5503 Visited.insert(BB);
5504
5505 while (true) {
5506 for (auto &I : make_range(Begin, End)) {
5507 if (isa<DbgInfoIntrinsic>(I))
5508 continue;
5509 if (--ScanLimit == 0)
5510 return false;
5511 if (mustTriggerUB(&I, YieldsPoison))
5512 return true;
5513 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5514 return false;
5515
5516 // Mark poison that propagates from I through uses of I.
5517 if (YieldsPoison.count(&I))
5518 for_each(I.users(), Propagate);
5519 }
5520
5521 BB = BB->getSingleSuccessor();
5522 if (!BB || !Visited.insert(BB).second)
5523 break;
5524
5525 Begin = BB->getFirstNonPHI()->getIterator();
5526 End = BB->end();
5527 }
5528 return false;
5529}
5530
5531bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5532 return ::programUndefinedIfUndefOrPoison(Inst, false);
5533}
5534
5535bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5536 return ::programUndefinedIfUndefOrPoison(Inst, true);
5537}
5538
5539static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5540 if (FMF.noNaNs())
5541 return true;
5542
5543 if (auto *C = dyn_cast<ConstantFP>(V))
5544 return !C->isNaN();
5545
5546 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5547 if (!C->getElementType()->isFloatingPointTy())
5548 return false;
5549 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5550 if (C->getElementAsAPFloat(I).isNaN())
5551 return false;
5552 }
5553 return true;
5554 }
5555
5556 if (isa<ConstantAggregateZero>(V))
5557 return true;
5558
5559 return false;
5560}
5561
5562static bool isKnownNonZero(const Value *V) {
5563 if (auto *C = dyn_cast<ConstantFP>(V))
5564 return !C->isZero();
5565
5566 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5567 if (!C->getElementType()->isFloatingPointTy())
5568 return false;
5569 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5570 if (C->getElementAsAPFloat(I).isZero())
5571 return false;
5572 }
5573 return true;
5574 }
5575
5576 return false;
5577}
5578
5579/// Match the clamp pattern for float types without caring about NaNs or signed zeros.
5580/// Given a non-min/max outer cmp/select from the clamp pattern, this
5581/// function recognizes whether it can be substituted by a "canonical" min/max
5582/// pattern.
5583static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5584 Value *CmpLHS, Value *CmpRHS,
5585 Value *TrueVal, Value *FalseVal,
5586 Value *&LHS, Value *&RHS) {
5587 // Try to match
5588 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5589 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5590 // and return description of the outer Max/Min.
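  // Illustrative instance (hypothetical constants): with C1 = 1.0 and C2 = 4.0,
  //   %r = (%x < 1.0) ? 1.0 : fmin(%x, 4.0)
  // clamps %x to [1.0, 4.0] and is reported as the outer max of
  // (1.0, fmin(%x, 4.0)), i.e. SPF_FMAXNUM, because 1.0 < 4.0.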
5591
5592 // First, check if select has inverse order:
5593 if (CmpRHS == FalseVal) {
5594 std::swap(TrueVal, FalseVal);
5595 Pred = CmpInst::getInversePredicate(Pred);
5596 }
5597
5598 // Assume success now. If there's no match, callers should not use these anyway.
5599 LHS = TrueVal;
5600 RHS = FalseVal;
5601
5602 const APFloat *FC1;
5603 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5604 return {SPF_UNKNOWN, SPNB_NA, false};
5605
5606 const APFloat *FC2;
5607 switch (Pred) {
5608 case CmpInst::FCMP_OLT:
5609 case CmpInst::FCMP_OLE:
5610 case CmpInst::FCMP_ULT:
5611 case CmpInst::FCMP_ULE:
5612 if (match(FalseVal,
5613 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5614 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5615 *FC1 < *FC2)
5616 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5617 break;
5618 case CmpInst::FCMP_OGT:
5619 case CmpInst::FCMP_OGE:
5620 case CmpInst::FCMP_UGT:
5621 case CmpInst::FCMP_UGE:
5622 if (match(FalseVal,
5623 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5624 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5625 *FC1 > *FC2)
5626 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5627 break;
5628 default:
5629 break;
5630 }
5631
5632 return {SPF_UNKNOWN, SPNB_NA, false};
5633}
5634
5635/// Recognize variations of:
5636/// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5637static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5638 Value *CmpLHS, Value *CmpRHS,
5639 Value *TrueVal, Value *FalseVal) {
5640 // Swap the select operands and predicate to match the patterns below.
5641 if (CmpRHS != TrueVal) {
5642 Pred = ICmpInst::getSwappedPredicate(Pred);
5643 std::swap(TrueVal, FalseVal);
5644 }
5645 const APInt *C1;
5646 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5647 const APInt *C2;
5648 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5649 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5650 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5651 return {SPF_SMAX, SPNB_NA, false};
5652
5653 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5654 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5655 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5656 return {SPF_SMIN, SPNB_NA, false};
5657
5658 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5659 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5660 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5661 return {SPF_UMAX, SPNB_NA, false};
5662
5663 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5664 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5665 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5666 return {SPF_UMIN, SPNB_NA, false};
5667 }
5668 return {SPF_UNKNOWN, SPNB_NA, false};
5669}
5670
5671/// Recognize variations of:
5672/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5673static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5674 Value *CmpLHS, Value *CmpRHS,
5675 Value *TVal, Value *FVal,
5676 unsigned Depth) {
5677 // TODO: Allow FP min/max with nnan/nsz.
5678  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5679
5680 Value *A = nullptr, *B = nullptr;
5681 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5682 if (!SelectPatternResult::isMinOrMax(L.Flavor))
5683 return {SPF_UNKNOWN, SPNB_NA, false};
5684
5685 Value *C = nullptr, *D = nullptr;
5686 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5687 if (L.Flavor != R.Flavor)
5688 return {SPF_UNKNOWN, SPNB_NA, false};
5689
5690 // We have something like: x Pred y ? min(a, b) : min(c, d).
5691 // Try to match the compare to the min/max operations of the select operands.
5692 // First, make sure we have the right compare predicate.
5693 switch (L.Flavor) {
5694 case SPF_SMIN:
5695 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5696 Pred = ICmpInst::getSwappedPredicate(Pred);
5697 std::swap(CmpLHS, CmpRHS);
5698 }
5699 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5700 break;
5701 return {SPF_UNKNOWN, SPNB_NA, false};
5702 case SPF_SMAX:
5703 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5704 Pred = ICmpInst::getSwappedPredicate(Pred);
5705 std::swap(CmpLHS, CmpRHS);
5706 }
5707 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5708 break;
5709 return {SPF_UNKNOWN, SPNB_NA, false};
5710 case SPF_UMIN:
5711 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5712 Pred = ICmpInst::getSwappedPredicate(Pred);
5713 std::swap(CmpLHS, CmpRHS);
5714 }
5715 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5716 break;
5717 return {SPF_UNKNOWN, SPNB_NA, false};
5718 case SPF_UMAX:
5719 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5720 Pred = ICmpInst::getSwappedPredicate(Pred);
5721 std::swap(CmpLHS, CmpRHS);
5722 }
5723 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5724 break;
5725 return {SPF_UNKNOWN, SPNB_NA, false};
5726 default:
5727 return {SPF_UNKNOWN, SPNB_NA, false};
5728 }
5729
5730 // If there is a common operand in the already matched min/max and the other
5731 // min/max operands match the compare operands (either directly or inverted),
5732 // then this is min/max of the same flavor.
5733
5734 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5735 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5736 if (D == B) {
5737 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5738 match(A, m_Not(m_Specific(CmpRHS)))))
5739 return {L.Flavor, SPNB_NA, false};
5740 }
5741 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5742 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5743 if (C == B) {
5744 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5745 match(A, m_Not(m_Specific(CmpRHS)))))
5746 return {L.Flavor, SPNB_NA, false};
5747 }
5748 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5749 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5750 if (D == A) {
5751 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5752 match(B, m_Not(m_Specific(CmpRHS)))))
5753 return {L.Flavor, SPNB_NA, false};
5754 }
5755 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5756 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5757 if (C == A) {
5758 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5759 match(B, m_Not(m_Specific(CmpRHS)))))
5760 return {L.Flavor, SPNB_NA, false};
5761 }
5762
5763 return {SPF_UNKNOWN, SPNB_NA, false};
5764}
5765
5766/// If the input value is the result of a 'not' op, constant integer, or vector
5767/// splat of a constant integer, return the bitwise-not source value.
5768/// TODO: This could be extended to handle non-splat vector integer constants.
5769static Value *getNotValue(Value *V) {
5770 Value *NotV;
5771 if (match(V, m_Not(m_Value(NotV))))
5772 return NotV;
5773
5774 const APInt *C;
5775 if (match(V, m_APInt(C)))
5776 return ConstantInt::get(V->getType(), ~(*C));
5777
5778 return nullptr;
5779}
5780
5781/// Match non-obvious integer minimum and maximum sequences.
5782static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5783 Value *CmpLHS, Value *CmpRHS,
5784 Value *TrueVal, Value *FalseVal,
5785 Value *&LHS, Value *&RHS,
5786 unsigned Depth) {
5787 // Assume success. If there's no match, callers should not use these anyway.
5788 LHS = TrueVal;
5789 RHS = FalseVal;
5790
5791 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5792 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5793 return SPR;
5794
5795 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5796 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5797 return SPR;
5798
5799 // Look through 'not' ops to find disguised min/max.
5800 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5801 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
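  // Illustrative instance (hypothetical i8 values): for X = 5, Y = 3,
  //   (5 >s 3) ? ~5 : ~3   selects ~5 = -6, which equals smin(~5, ~3) =
  //   smin(-6, -4), so the select is a disguised SMIN of the inverted values.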
5802 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5803 switch (Pred) {
5804 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5805 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5806 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5807 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5808 default: break;
5809 }
5810 }
5811
5812 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5813 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5814 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5815 switch (Pred) {
5816 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5817 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5818 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5819 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5820 default: break;
5821 }
5822 }
5823
5824 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5825 return {SPF_UNKNOWN, SPNB_NA, false};
5826
5827 const APInt *C1;
5828 if (!match(CmpRHS, m_APInt(C1)))
5829 return {SPF_UNKNOWN, SPNB_NA, false};
5830
5831 // An unsigned min/max can be written with a signed compare.
5832 const APInt *C2;
5833 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5834 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5835 // Is the sign bit set?
5836 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5837 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
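    // Illustrative instance (hypothetical i8 values): with C1 = 0 and
    // C2 = 127, (X <s 0) ? X : 127 picks X exactly when X, viewed as
    // unsigned, lies in [128, 255], i.e. whenever X >u 127 -- the same
    // result as umax(X, 127).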
5838 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
5839 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5840
5841 // Is the sign bit clear?
5842 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5843 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5844 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
5845 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5846 }
5847
5848 return {SPF_UNKNOWN, SPNB_NA, false};
5849}
5850
5851bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5852  assert(X && Y && "Invalid operand");
5853
5854 // X = sub (0, Y) || X = sub nsw (0, Y)
5855 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5856 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5857 return true;
5858
5859 // Y = sub (0, X) || Y = sub nsw (0, X)
5860 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5861 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5862 return true;
5863
5864 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5865 Value *A, *B;
5866 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5867 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5868 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5869 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5870}
5871
5872static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5873 FastMathFlags FMF,
5874 Value *CmpLHS, Value *CmpRHS,
5875 Value *TrueVal, Value *FalseVal,
5876 Value *&LHS, Value *&RHS,
5877 unsigned Depth) {
5878 if (CmpInst::isFPPredicate(Pred)) {
5879 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5880 // 0.0 operand, set the compare's 0.0 operands to that same value for the
5881 // purpose of identifying min/max. Disregard vector constants with undefined
5882 // elements because those cannot be back-propagated for analysis.
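    // Illustrative instance (hypothetical): in
    //   %r = select (fcmp olt %x, 0.0), %x, -0.0
    // the compare uses 0.0 while the select uses -0.0. Since the comparison
    // treats +0.0 and -0.0 as equal, the compare's zero can be replaced by
    // -0.0 so the min/max pattern matches on identical operands.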
5883 Value *OutputZeroVal = nullptr;
5884 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5885 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
5886 OutputZeroVal = TrueVal;
5887 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5888 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
5889 OutputZeroVal = FalseVal;
5890
5891 if (OutputZeroVal) {
5892 if (match(CmpLHS, m_AnyZeroFP()))
5893 CmpLHS = OutputZeroVal;
5894 if (match(CmpRHS, m_AnyZeroFP()))
5895 CmpRHS = OutputZeroVal;
5896 }
5897 }
5898
5899 LHS = CmpLHS;
5900 RHS = CmpRHS;
5901
5902 // Signed zero may return inconsistent results between implementations.
5903 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5904 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5905 // Therefore, we behave conservatively and only proceed if at least one of the
5906 // operands is known to not be zero or if we don't care about signed zero.
5907 switch (Pred) {
5908 default: break;
5909 // FIXME: Include OGT/OLT/UGT/ULT.
5910 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5911 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5912 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5913 !isKnownNonZero(CmpRHS))
5914 return {SPF_UNKNOWN, SPNB_NA, false};
5915 }
5916
5917 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5918 bool Ordered = false;
5919
5920 // When given one NaN and one non-NaN input:
5921 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5922 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5923 // ordered comparison fails), which could be NaN or non-NaN.
5924 // so here we discover exactly what NaN behavior is required/accepted.
5925 if (CmpInst::isFPPredicate(Pred)) {
5926 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5927 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5928
5929 if (LHSSafe && RHSSafe) {
5930 // Both operands are known non-NaN.
5931 NaNBehavior = SPNB_RETURNS_ANY;
5932 } else if (CmpInst::isOrdered(Pred)) {
5933 // An ordered comparison will return false when given a NaN, so it
5934 // returns the RHS.
5935 Ordered = true;
5936 if (LHSSafe)
5937 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5938 NaNBehavior = SPNB_RETURNS_NAN;
5939 else if (RHSSafe)
5940 NaNBehavior = SPNB_RETURNS_OTHER;
5941 else
5942 // Completely unsafe.
5943 return {SPF_UNKNOWN, SPNB_NA, false};
5944 } else {
5945 Ordered = false;
5946 // An unordered comparison will return true when given a NaN, so it
5947 // returns the LHS.
5948 if (LHSSafe)
5949 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5950 NaNBehavior = SPNB_RETURNS_OTHER;
5951 else if (RHSSafe)
5952 NaNBehavior = SPNB_RETURNS_NAN;
5953 else
5954 // Completely unsafe.
5955 return {SPF_UNKNOWN, SPNB_NA, false};
5956 }
5957 }
5958
5959 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5960 std::swap(CmpLHS, CmpRHS);
5961 Pred = CmpInst::getSwappedPredicate(Pred);
5962 if (NaNBehavior == SPNB_RETURNS_NAN)
5963 NaNBehavior = SPNB_RETURNS_OTHER;
5964 else if (NaNBehavior == SPNB_RETURNS_OTHER)
5965 NaNBehavior = SPNB_RETURNS_NAN;
5966 Ordered = !Ordered;
5967 }
5968
5969 // ([if]cmp X, Y) ? X : Y
5970 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5971 switch (Pred) {
5972 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5973 case ICmpInst::ICMP_UGT:
5974 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5975 case ICmpInst::ICMP_SGT:
5976 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5977 case ICmpInst::ICMP_ULT:
5978 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5979 case ICmpInst::ICMP_SLT:
5980 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5981 case FCmpInst::FCMP_UGT:
5982 case FCmpInst::FCMP_UGE:
5983 case FCmpInst::FCMP_OGT:
5984 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5985 case FCmpInst::FCMP_ULT:
5986 case FCmpInst::FCMP_ULE:
5987 case FCmpInst::FCMP_OLT:
5988 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5989 }
5990 }
5991
5992 if (isKnownNegation(TrueVal, FalseVal)) {
5993 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5994 // match against either LHS or sext(LHS).
5995 auto MaybeSExtCmpLHS =
5996 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5997 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5998 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5999 if (match(TrueVal, MaybeSExtCmpLHS)) {
6000 // Set the return values. If the compare uses the negated value (-X >s 0),
6001 // swap the return values because the negated value is always 'RHS'.
6002 LHS = TrueVal;
6003 RHS = FalseVal;
6004 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
6005 std::swap(LHS, RHS);
6006
6007 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
6008 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
6009 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6010 return {SPF_ABS, SPNB_NA, false};
6011
6012 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
6013 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
6014 return {SPF_ABS, SPNB_NA, false};
6015
6016 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
6017 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
6018 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6019 return {SPF_NABS, SPNB_NA, false};
6020 }
6021 else if (match(FalseVal, MaybeSExtCmpLHS)) {
6022 // Set the return values. If the compare uses the negated value (-X >s 0),
6023 // swap the return values because the negated value is always 'RHS'.
6024 LHS = FalseVal;
6025 RHS = TrueVal;
6026 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
6027 std::swap(LHS, RHS);
6028
6029 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
6030 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
6031 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6032 return {SPF_NABS, SPNB_NA, false};
6033
6034 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
6035 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
6036 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6037 return {SPF_ABS, SPNB_NA, false};
6038 }
6039 }
6040
6041 if (CmpInst::isIntPredicate(Pred))
6042 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
6043
6044 // According to IEEE 754-2008 5.3.1, minNum(0.0, -0.0) and similar
6045 // may return either -0.0 or 0.0, so the fcmp/select pair has stricter
6046 // semantics than minNum. Be conservative in such cases.
6047 if (NaNBehavior != SPNB_RETURNS_ANY ||
6048 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6049 !isKnownNonZero(CmpRHS)))
6050 return {SPF_UNKNOWN, SPNB_NA, false};
6051
6052 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
6053}
6054
6055/// Helps to match a select pattern in case of a type mismatch.
6056///
6057/// The function handles the case where the type of the true and false values
6058/// of a select instruction differs from the type of the cmp instruction operands
6059/// because of a cast instruction. It checks whether it is legal to move the cast
6060/// operation after the "select". If so, it returns the new second value of the
6061/// "select" (under the assumption that the cast is moved):
6062/// 1. As the operand of the cast instruction when both values of the "select"
6063/// are the same cast instruction.
6064/// 2. As the restored constant (obtained by applying the reverse cast operation)
6065/// when the first value of the "select" is a cast operation and the second
6066/// value is a constant.
6067/// NOTE: We return only the new second value because the first value can be
6068/// accessed as the operand of the cast instruction.
6069static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
6070 Instruction::CastOps *CastOp) {
6071 auto *Cast1 = dyn_cast<CastInst>(V1);
6072 if (!Cast1)
6073 return nullptr;
6074
6075 *CastOp = Cast1->getOpcode();
6076 Type *SrcTy = Cast1->getSrcTy();
6077 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
6078 // If V1 and V2 are both the same cast from the same type, look through V1.
6079 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
6080 return Cast2->getOperand(0);
6081 return nullptr;
6082 }
6083
6084 auto *C = dyn_cast<Constant>(V2);
6085 if (!C)
6086 return nullptr;
6087
6088 Constant *CastedTo = nullptr;
6089 switch (*CastOp) {
6090 case Instruction::ZExt:
6091 if (CmpI->isUnsigned())
6092 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
6093 break;
6094 case Instruction::SExt:
6095 if (CmpI->isSigned())
6096 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
6097 break;
6098 case Instruction::Trunc:
6099 Constant *CmpConst;
6100 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
6101 CmpConst->getType() == SrcTy) {
6102 // Here we have the following case:
6103 //
6104 // %cond = cmp iN %x, CmpConst
6105 // %tr = trunc iN %x to iK
6106 // %narrowsel = select i1 %cond, iK %t, iK C
6107 //
6108 // We can always move trunc after select operation:
6109 //
6110 // %cond = cmp iN %x, CmpConst
6111 // %widesel = select i1 %cond, iN %x, iN CmpConst
6112 // %tr = trunc iN %widesel to iK
6113 //
6114 // Note that C could be extended in any way because we don't care about
6115 // upper bits after truncation. It can't be abs pattern, because it would
6116 // look like:
6117 //
6118 // select i1 %cond, x, -x.
6119 //
6120 // So only the min/max pattern can be matched. Such a match requires the
6121 // widened C == CmpConst. That is why we set the widened C = CmpConst; the
6122 // condition trunc(CmpConst) == C is checked below.
6123 CastedTo = CmpConst;
6124 } else {
6125 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
6126 }
6127 break;
6128 case Instruction::FPTrunc:
6129 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
6130 break;
6131 case Instruction::FPExt:
6132 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
6133 break;
6134 case Instruction::FPToUI:
6135 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
6136 break;
6137 case Instruction::FPToSI:
6138 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
6139 break;
6140 case Instruction::UIToFP:
6141 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
6142 break;
6143 case Instruction::SIToFP:
6144 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
6145 break;
6146 default:
6147 break;
6148 }
6149
6150 if (!CastedTo)
6151 return nullptr;
6152
6153 // Make sure the cast doesn't lose any information.
6154 Constant *CastedBack =
6155 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
6156 if (CastedBack != C)
6157 return nullptr;
6158
6159 return CastedTo;
6160}
6161
6162SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
6163 Instruction::CastOps *CastOp,
6164 unsigned Depth) {
6165 if (Depth >= MaxAnalysisRecursionDepth)
6166 return {SPF_UNKNOWN, SPNB_NA, false};
6167
6168 SelectInst *SI = dyn_cast<SelectInst>(V);
6169 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
6170
6171 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
6172 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
6173
6174 Value *TrueVal = SI->getTrueValue();
6175 Value *FalseVal = SI->getFalseValue();
6176
6177 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
6178 CastOp, Depth);
6179}
6180
6181SelectPatternResult llvm::matchDecomposedSelectPattern(
6182 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
6183 Instruction::CastOps *CastOp, unsigned Depth) {
6184 CmpInst::Predicate Pred = CmpI->getPredicate();
6185 Value *CmpLHS = CmpI->getOperand(0);
6186 Value *CmpRHS = CmpI->getOperand(1);
6187 FastMathFlags FMF;
6188 if (isa<FPMathOperator>(CmpI))
6189 FMF = CmpI->getFastMathFlags();
6190
6191 // Bail out early.
6192 if (CmpI->isEquality())
6193 return {SPF_UNKNOWN, SPNB_NA, false};
6194
6195 // Deal with type mismatches.
6196 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
6197 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
6198 // If this is a potential fmin/fmax with a cast to integer, then ignore
6199 // -0.0 because there is no corresponding integer value.
6200 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6201 FMF.setNoSignedZeros();
6202 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6203 cast<CastInst>(TrueVal)->getOperand(0), C,
6204 LHS, RHS, Depth);
6205 }
6206 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
6207 // If this is a potential fmin/fmax with a cast to integer, then ignore
6208 // -0.0 because there is no corresponding integer value.
6209 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6210 FMF.setNoSignedZeros();
6211 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6212 C, cast<CastInst>(FalseVal)->getOperand(0),
6213 LHS, RHS, Depth);
6214 }
6215 }
6216 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
6217 LHS, RHS, Depth);
6218}
6219
6220CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
6221 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
6222 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
6223 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
6224 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
6225 if (SPF == SPF_FMINNUM)
6226 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
6227 if (SPF == SPF_FMAXNUM)
6228 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
6229  llvm_unreachable("unhandled!");
6230}
6231
6232SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
6233 if (SPF == SPF_SMIN) return SPF_SMAX;
6234 if (SPF == SPF_UMIN) return SPF_UMAX;
6235 if (SPF == SPF_SMAX) return SPF_SMIN;
6236 if (SPF == SPF_UMAX) return SPF_UMIN;
6237  llvm_unreachable("unhandled!");
6238}
6239
6240Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
6241 switch (MinMaxID) {
6242 case Intrinsic::smax: return Intrinsic::smin;
6243 case Intrinsic::smin: return Intrinsic::smax;
6244 case Intrinsic::umax: return Intrinsic::umin;
6245 case Intrinsic::umin: return Intrinsic::umax;
6246  default: llvm_unreachable("Unexpected intrinsic");
6247 }
6248}
6249
6250CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6251 return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6252}
6253
6254APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
6255 switch (SPF) {
6256 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
6257 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
6258 case SPF_UMAX: return APInt::getMaxValue(BitWidth);
6259 case SPF_UMIN: return APInt::getMinValue(BitWidth);
6260  default: llvm_unreachable("Unexpected flavor");
6261 }
6262}
6263
6264std::pair<Intrinsic::ID, bool>
6265llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
6266 // Check if VL contains select instructions that can be folded into a min/max
6267 // vector intrinsic and return the intrinsic if it is possible.
6268 // TODO: Support floating point min/max.
6269 bool AllCmpSingleUse = true;
6270 SelectPatternResult SelectPattern;
6271 SelectPattern.Flavor = SPF_UNKNOWN;
6272 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6273 Value *LHS, *RHS;
6274 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6275 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6276 CurrentPattern.Flavor == SPF_FMINNUM ||
6277 CurrentPattern.Flavor == SPF_FMAXNUM ||
6278 !I->getType()->isIntOrIntVectorTy())
6279 return false;
6280 if (SelectPattern.Flavor != SPF_UNKNOWN &&
6281 SelectPattern.Flavor != CurrentPattern.Flavor)
6282 return false;
6283 SelectPattern = CurrentPattern;
6284 AllCmpSingleUse &=
6285 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6286 return true;
6287 })) {
6288 switch (SelectPattern.Flavor) {
6289 case SPF_SMIN:
6290 return {Intrinsic::smin, AllCmpSingleUse};
6291 case SPF_UMIN:
6292 return {Intrinsic::umin, AllCmpSingleUse};
6293 case SPF_SMAX:
6294 return {Intrinsic::smax, AllCmpSingleUse};
6295 case SPF_UMAX:
6296 return {Intrinsic::umax, AllCmpSingleUse};
6297 default:
6298      llvm_unreachable("unexpected select pattern flavor");
6299 }
6300 }
6301 return {Intrinsic::not_intrinsic, false};
6302}
6303
6304bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6305 Value *&Start, Value *&Step) {
6306 // Handle the case of a simple two-predecessor recurrence PHI.
6307 // There's a lot more that could theoretically be done here, but
6308 // this is sufficient to catch some interesting cases.
6309 if (P->getNumIncomingValues() != 2)
6310 return false;
6311
6312 for (unsigned i = 0; i != 2; ++i) {
6313 Value *L = P->getIncomingValue(i);
6314 Value *R = P->getIncomingValue(!i);
6315 Operator *LU = dyn_cast<Operator>(L);
6316 if (!LU)
6317 continue;
6318 unsigned Opcode = LU->getOpcode();
6319
6320 switch (Opcode) {
6321 default:
6322 continue;
6323 // TODO: Expand list -- xor, div, gep, uaddo, etc..
6324 case Instruction::LShr:
6325 case Instruction::AShr:
6326 case Instruction::Shl:
6327 case Instruction::Add:
6328 case Instruction::Sub:
6329 case Instruction::And:
6330 case Instruction::Or:
6331 case Instruction::Mul: {
6332 Value *LL = LU->getOperand(0);
6333 Value *LR = LU->getOperand(1);
6334 // Find a recurrence.
6335 if (LL == P)
6336 L = LR;
6337 else if (LR == P)
6338 L = LL;
6339 else
6340 continue; // Check for recurrence with L and R flipped.
6341
6342 break; // Match!
6343 }
6344 };
6345
6346 // We have matched a recurrence of the form:
6347 // %iv = [R, %entry], [%iv.next, %backedge]
6348 // %iv.next = binop %iv, L
6349 // OR
6350 // %iv = [R, %entry], [%iv.next, %backedge]
6351 // %iv.next = binop L, %iv
6352 BO = cast<BinaryOperator>(LU);
6353 Start = R;
6354 Step = L;
6355 return true;
6356 }
6357 return false;
6358}
6359
6360bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6361 Value *&Start, Value *&Step) {
6362 BinaryOperator *BO = nullptr;
6363 P = dyn_cast<PHINode>(I->getOperand(0));
6364 if (!P)
6365 P = dyn_cast<PHINode>(I->getOperand(1));
6366 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6367}
6368
6369/// Return true if "icmp Pred LHS RHS" is always true.
6370static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6371 const Value *RHS, const DataLayout &DL,
6372 unsigned Depth) {
6373  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6374 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6375 return true;
6376
6377 switch (Pred) {
6378 default:
6379 return false;
6380
6381 case CmpInst::ICMP_SLE: {
6382 const APInt *C;
6383
6384 // LHS s<= LHS +_{nsw} C if C >= 0
6385 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6386 return !C->isNegative();
6387 return false;
6388 }
6389
6390 case CmpInst::ICMP_ULE: {
6391 const APInt *C;
6392
6393 // LHS u<= LHS +_{nuw} C for any C
6394 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6395 return true;
6396
6397 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6398 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6399 const Value *&X,
6400 const APInt *&CA, const APInt *&CB) {
6401 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6402 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6403 return true;
6404
6405 // If X & C == 0 then (X | C) == X +_{nuw} C
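      // Illustrative instance (hypothetical): if the low three bits of X are
      // known zero (say X = Y << 3) and C = 5 (0b101), then X | 5 only sets
      // bits that are zero in X, so it equals X + 5 and cannot wrap.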
6406 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6407 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6408 KnownBits Known(CA->getBitWidth());
6409 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6410 /*CxtI*/ nullptr, /*DT*/ nullptr);
6411 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6412 return true;
6413 }
6414
6415 return false;
6416 };
6417
6418 const Value *X;
6419 const APInt *CLHS, *CRHS;
6420 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6421 return CLHS->ule(*CRHS);
6422
6423 return false;
6424 }
6425 }
6426}
6427
6428/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6429/// ALHS ARHS" is true. Otherwise, return None.
6430static Optional<bool>
6431isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6432 const Value *ARHS, const Value *BLHS, const Value *BRHS,
6433 const DataLayout &DL, unsigned Depth) {
6434 switch (Pred) {
6435 default:
6436 return None;
6437
6438 case CmpInst::ICMP_SLT:
6439 case CmpInst::ICMP_SLE:
6440 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6441 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6442 return true;
6443 return None;
6444
6445 case CmpInst::ICMP_ULT:
6446 case CmpInst::ICMP_ULE:
6447 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6448 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6449 return true;
6450 return None;
6451 }
6452}
6453
6454/// Return true if the operands of the two compares match. IsSwappedOps is true
6455/// when the operands match, but are swapped.
6456static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6457 const Value *BLHS, const Value *BRHS,
6458 bool &IsSwappedOps) {
6459
6460 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6461 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6462 return IsMatchingOps || IsSwappedOps;
6463}
6464
6465/// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6466/// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6467/// Otherwise, return None if we can't infer anything.
6468static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6469 CmpInst::Predicate BPred,
6470 bool AreSwappedOps) {
6471 // Canonicalize the predicate as if the operands were not commuted.
6472 if (AreSwappedOps)
6473 BPred = ICmpInst::getSwappedPredicate(BPred);
6474
6475 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6476 return true;
6477 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6478 return false;
6479
6480 return None;
6481}
6482
6483/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6484/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6485/// Otherwise, return None if we can't infer anything.
6486static Optional<bool>
6487isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6488 const ConstantInt *C1,
6489 CmpInst::Predicate BPred,
6490 const ConstantInt *C2) {
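  // Illustrative instance (hypothetical): for "X u< 5" (APred, C1) and
  // "X u< 10" (BPred, C2), DomCR = [0,5) lies entirely inside CR = [0,10),
  // so the difference is empty and the implication holds (return true).
  // For "X u< 5" vs. "X u> 10" the regions do not intersect, so the first
  // condition implies the second is false (return false).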
6491 ConstantRange DomCR =
6492 ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6493 ConstantRange CR = ConstantRange::makeExactICmpRegion(BPred, C2->getValue());
6494 ConstantRange Intersection = DomCR.intersectWith(CR);
6495 ConstantRange Difference = DomCR.difference(CR);
6496 if (Intersection.isEmptySet())
6497 return false;
6498 if (Difference.isEmptySet())
6499 return true;
6500 return None;
6501}
6502
6503/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6504/// false. Otherwise, return None if we can't infer anything.
6505static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6506 CmpInst::Predicate BPred,
6507 const Value *BLHS, const Value *BRHS,
6508 const DataLayout &DL, bool LHSIsTrue,
6509 unsigned Depth) {
6510 Value *ALHS = LHS->getOperand(0);
6511 Value *ARHS = LHS->getOperand(1);
6512
6513 // The rest of the logic assumes the LHS condition is true. If that's not the
6514 // case, invert the predicate to make it so.
6515 CmpInst::Predicate APred =
6516 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6517
6518 // Can we infer anything when the two compares have matching operands?
6519 bool AreSwappedOps;
6520 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6521 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6522 APred, BPred, AreSwappedOps))
6523 return Implication;
6524 // No amount of additional analysis will infer the second condition, so
6525 // early exit.
6526 return None;
6527 }
6528
6529 // Can we infer anything when the LHS operands match and the RHS operands are
6530 // constants (not necessarily matching)?
6531 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6532 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6533 APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6534 return Implication;
6535 // No amount of additional analysis will infer the second condition, so
6536 // early exit.
6537 return None;
6538 }
6539
6540 if (APred == BPred)
6541 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6542 return None;
6543}
6544
6545/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6546/// false. Otherwise, return None if we can't infer anything. We expect the
6547/// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
6548static Optional<bool>
6549isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6550 const Value *RHSOp0, const Value *RHSOp1,
6551 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6552 // The LHS must be an 'or', 'and', or a 'select' instruction.
6553  assert((LHS->getOpcode() == Instruction::And ||
6554          LHS->getOpcode() == Instruction::Or ||
6555          LHS->getOpcode() == Instruction::Select) &&
6556         "Expected LHS to be 'and', 'or', or 'select'.");
6557
6558  assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6559
6560 // If the result of an 'or' is false, then we know both legs of the 'or' are
6561 // false. Similarly, if the result of an 'and' is true, then we know both
6562 // legs of the 'and' are true.
6563 const Value *ALHS, *ARHS;
6564 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6565 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6566    // FIXME: Make this non-recursive.
6567 if (Optional<bool> Implication = isImpliedCondition(
6568 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6569 return Implication;
6570 if (Optional<bool> Implication = isImpliedCondition(
6571 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6572 return Implication;
6573 return None;
6574 }
6575 return None;
6576}
6577
6578Optional<bool>
6579llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6580 const Value *RHSOp0, const Value *RHSOp1,
6581 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6582 // Bail out when we hit the limit.
6583 if (Depth == MaxAnalysisRecursionDepth)
6584 return None;
6585
6586 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6587 // example.
6588 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6589 return None;
6590
6591 Type *OpTy = LHS->getType();
6592  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6593
6594  // FIXME: Extend the code below to handle vectors.
6595 if (OpTy->isVectorTy())
6596 return None;
6597
6598  assert(OpTy->isIntegerTy(1) && "implied by above");
6599
6600 // Both LHS and RHS are icmps.
6601 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6602 if (LHSCmp)
6603 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6604 Depth);
6605
6606 /// The LHS should be an 'or', 'and', or a 'select' instruction. We expect
6607 /// the RHS to be an icmp.
6608 /// FIXME: Add support for and/or/select on the RHS.
6609 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6610 if ((LHSI->getOpcode() == Instruction::And ||
6611 LHSI->getOpcode() == Instruction::Or ||
6612 LHSI->getOpcode() == Instruction::Select))
6613 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6614 Depth);
6615 }
6616 return None;
6617}
6618
6619Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6620 const DataLayout &DL, bool LHSIsTrue,
6621 unsigned Depth) {
6622 // LHS ==> RHS by definition
6623 if (LHS == RHS)
6624 return LHSIsTrue;
6625
6626 const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6627 if (RHSCmp)
6628 return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6629 RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6630 LHSIsTrue, Depth);
6631 return None;
6632}
6633
6634// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6635// condition dominating ContextI or nullptr, if no condition is found.
6636static std::pair<Value *, bool>
6637getDomPredecessorCondition(const Instruction *ContextI) {
6638 if (!ContextI || !ContextI->getParent())
6639 return {nullptr, false};
6640
6641 // TODO: This is a poor/cheap way to determine dominance. Should we use a
6642 // dominator tree (eg, from a SimplifyQuery) instead?
6643 const BasicBlock *ContextBB = ContextI->getParent();
6644 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6645 if (!PredBB)
6646 return {nullptr, false};
6647
6648 // We need a conditional branch in the predecessor.
6649 Value *PredCond;
6650 BasicBlock *TrueBB, *FalseBB;
6651 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6652 return {nullptr, false};
6653
6654 // The branch should get simplified. Don't bother simplifying this condition.
6655 if (TrueBB == FalseBB)
6656 return {nullptr, false};
6657
6658  assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6659         "Predecessor block does not point to successor?");
6660
6661 // Is this condition implied by the predecessor condition?
6662 return {PredCond, TrueBB == ContextBB};
6663}
6664
6665Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6666 const Instruction *ContextI,
6667 const DataLayout &DL) {
6668  assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6669 auto PredCond = getDomPredecessorCondition(ContextI);
6670 if (PredCond.first)
6671 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6672 return None;
6673}
6674
6675Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6676 const Value *LHS, const Value *RHS,
6677 const Instruction *ContextI,
6678 const DataLayout &DL) {
6679 auto PredCond = getDomPredecessorCondition(ContextI);
6680 if (PredCond.first)
6681 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6682 PredCond.second);
6683 return None;
6684}
6685
6686static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6687 APInt &Upper, const InstrInfoQuery &IIQ,
6688 bool PreferSignedRange) {
6689 unsigned Width = Lower.getBitWidth();
6690 const APInt *C;
6691 switch (BO.getOpcode()) {
6692 case Instruction::Add:
6693 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
6694 bool HasNSW = IIQ.hasNoSignedWrap(&BO);
6695 bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
6696
6697 // If the caller expects a signed compare, then try to use a signed range.
6698 // Otherwise if both no-wraps are set, use the unsigned range because it
6699 // is never larger than the signed range. Example:
6700 // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
6701 if (PreferSignedRange && HasNSW && HasNUW)
6702 HasNUW = false;
6703
6704 if (HasNUW) {
6705 // 'add nuw x, C' produces [C, UINT_MAX].
6706 Lower = *C;
6707 } else if (HasNSW) {
6708 if (C->isNegative()) {
6709 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6710 Lower = APInt::getSignedMinValue(Width);
6711 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6712 } else {
6713 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6714 Lower = APInt::getSignedMinValue(Width) + *C;
6715 Upper = APInt::getSignedMaxValue(Width) + 1;
6716 }
6717 }
6718 }
6719 break;
6720
6721 case Instruction::And:
6722 if (match(BO.getOperand(1), m_APInt(C)))
6723 // 'and x, C' produces [0, C].
6724 Upper = *C + 1;
6725 break;
6726
6727 case Instruction::Or:
6728 if (match(BO.getOperand(1), m_APInt(C)))
6729 // 'or x, C' produces [C, UINT_MAX].
6730 Lower = *C;
6731 break;
6732
6733 case Instruction::AShr:
6734 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6735 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6736 Lower = APInt::getSignedMinValue(Width).ashr(*C);
6737 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
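      // Illustrative instance (hypothetical): for i8 and C = 2, the result of
      // 'ashr i8 %x, 2' lies in [-128 >> 2, 127 >> 2] = [-32, 31].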
6738 } else if (match(BO.getOperand(0), m_APInt(C))) {
6739 unsigned ShiftAmount = Width - 1;
6740 if (!C->isZero() && IIQ.isExact(&BO))
6741 ShiftAmount = C->countTrailingZeros();
6742 if (C->isNegative()) {
6743 // 'ashr C, x' produces [C, C >> (Width-1)]
6744 Lower = *C;
6745 Upper = C->ashr(ShiftAmount) + 1;
6746 } else {
6747 // 'ashr C, x' produces [C >> (Width-1), C]
6748 Lower = C->ashr(ShiftAmount);
6749 Upper = *C + 1;
6750 }
6751 }
6752 break;
6753
6754 case Instruction::LShr:
6755 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6756 // 'lshr x, C' produces [0, UINT_MAX >> C].
6757 Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
6758 } else if (match(BO.getOperand(0), m_APInt(C))) {
6759 // 'lshr C, x' produces [C >> (Width-1), C].
6760 unsigned ShiftAmount = Width - 1;
6761 if (!C->isZero() && IIQ.isExact(&BO))
6762 ShiftAmount = C->countTrailingZeros();
6763 Lower = C->lshr(ShiftAmount);
6764 Upper = *C + 1;
6765 }
6766 break;
6767
6768 case Instruction::Shl:
6769 if (match(BO.getOperand(0), m_APInt(C))) {
6770 if (IIQ.hasNoUnsignedWrap(&BO)) {
6771 // 'shl nuw C, x' produces [C, C << CLZ(C)]
6772 Lower = *C;
6773 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6774 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6775 if (C->isNegative()) {
6776 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6777 unsigned ShiftAmount = C->countLeadingOnes() - 1;
6778 Lower = C->shl(ShiftAmount);
6779 Upper = *C + 1;
6780 } else {
6781 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6782 unsigned ShiftAmount = C->countLeadingZeros() - 1;
6783 Lower = *C;
6784 Upper = C->shl(ShiftAmount) + 1;
6785 }
6786 }
6787 }
6788 break;
6789
6790 case Instruction::SDiv:
6791 if (match(BO.getOperand(1), m_APInt(C))) {
6792 APInt IntMin = APInt::getSignedMinValue(Width);
6793 APInt IntMax = APInt::getSignedMaxValue(Width);
6794 if (C->isAllOnes()) {
6795        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6796        // (INT_MIN is excluded because negating it would overflow).
6797 Lower = IntMin + 1;
6798 Upper = IntMax + 1;
6799 } else if (C->countLeadingZeros() < Width - 1) {
6800 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6801 // where C != -1 and C != 0 and C != 1
6802 Lower = IntMin.sdiv(*C);
6803 Upper = IntMax.sdiv(*C);
6804 if (Lower.sgt(Upper))
6805 std::swap(Lower, Upper);
6806 Upper = Upper + 1;
6807 assert(Upper != Lower && "Upper part of range has wrapped!");
6808 }
6809 } else if (match(BO.getOperand(0), m_APInt(C))) {
6810 if (C->isMinSignedValue()) {
6811 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6812 Lower = *C;
6813 Upper = Lower.lshr(1) + 1;
6814 } else {
6815 // 'sdiv C, x' produces [-|C|, |C|].
6816 Upper = C->abs() + 1;
6817 Lower = (-Upper) + 1;
6818 }
6819 }
6820 break;
6821
6822 case Instruction::UDiv:
6823 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
6824 // 'udiv x, C' produces [0, UINT_MAX / C].
6825 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6826 } else if (match(BO.getOperand(0), m_APInt(C))) {
6827 // 'udiv C, x' produces [0, C].
6828 Upper = *C + 1;
6829 }
6830 break;
6831
6832 case Instruction::SRem:
6833 if (match(BO.getOperand(1), m_APInt(C))) {
6834 // 'srem x, C' produces (-|C|, |C|).
6835 Upper = C->abs();
6836 Lower = (-Upper) + 1;
6837 }
6838 break;
6839
6840 case Instruction::URem:
6841 if (match(BO.getOperand(1), m_APInt(C)))
6842 // 'urem x, C' produces [0, C).
6843 Upper = *C;
6844 break;
6845
6846 default:
6847 break;
6848 }
6849}
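As a quick sanity check of the 'add nuw' rule in the switch above, here is a minimal standalone C++ sketch (plain integers rather than llvm::APInt; the constant 5 is an arbitrary sample): with no unsigned wrap, x + C can never be smaller than C, so for an i8 operand and C = 5 the resulting range is [5, 255].

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned C = 5;
  const unsigned Lower = C;         // 'add nuw x, C' produces [C, UINT_MAX].
  const unsigned Upper = UINT8_MAX; // 255 for an 8-bit value.
  std::printf("add nuw i8 x, %u -> [%u, %u]\n", C, Lower, Upper);
  return 0;
}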
6850
6851static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6852 APInt &Upper) {
6853 unsigned Width = Lower.getBitWidth();
6854 const APInt *C;
6855 switch (II.getIntrinsicID()) {
6856 case Intrinsic::ctpop:
6857 case Intrinsic::ctlz:
6858 case Intrinsic::cttz:
6859 // Maximum of set/clear bits is the bit width.
6860 assert(Lower == 0 && "Expected lower bound to be zero");
6861 Upper = Width + 1;
6862 break;
6863 case Intrinsic::uadd_sat:
6864 // uadd.sat(x, C) produces [C, UINT_MAX].
6865 if (match(II.getOperand(0), m_APInt(C)) ||
6866 match(II.getOperand(1), m_APInt(C)))
6867 Lower = *C;
6868 break;
6869 case Intrinsic::sadd_sat:
6870 if (match(II.getOperand(0), m_APInt(C)) ||
6871 match(II.getOperand(1), m_APInt(C))) {
6872 if (C->isNegative()) {
6873 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6874 Lower = APInt::getSignedMinValue(Width);
6875 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6876 } else {
6877 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6878 Lower = APInt::getSignedMinValue(Width) + *C;
6879 Upper = APInt::getSignedMaxValue(Width) + 1;
6880 }
6881 }
6882 break;
6883 case Intrinsic::usub_sat:
6884 // usub.sat(C, x) produces [0, C].
6885 if (match(II.getOperand(0), m_APInt(C)))
6886 Upper = *C + 1;
6887 // usub.sat(x, C) produces [0, UINT_MAX - C].
6888 else if (match(II.getOperand(1), m_APInt(C)))
6889 Upper = APInt::getMaxValue(Width) - *C + 1;
6890 break;
6891 case Intrinsic::ssub_sat:
6892 if (match(II.getOperand(0), m_APInt(C))) {
6893 if (C->isNegative()) {
6894 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6895 Lower = APInt::getSignedMinValue(Width);
6896 Upper = *C - APInt::getSignedMinValue(Width) + 1;
6897 } else {
6898 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6899 Lower = *C - APInt::getSignedMaxValue(Width);
6900 Upper = APInt::getSignedMaxValue(Width) + 1;
6901 }
6902 } else if (match(II.getOperand(1), m_APInt(C))) {
6903 if (C->isNegative()) {
6904 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6905 Lower = APInt::getSignedMinValue(Width) - *C;
6906 Upper = APInt::getSignedMaxValue(Width) + 1;
6907 } else {
6908 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6909 Lower = APInt::getSignedMinValue(Width);
6910 Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6911 }
6912 }
6913 break;
6914 case Intrinsic::umin:
6915 case Intrinsic::umax:
6916 case Intrinsic::smin:
6917 case Intrinsic::smax:
6918 if (!match(II.getOperand(0), m_APInt(C)) &&
6919 !match(II.getOperand(1), m_APInt(C)))
6920 break;
6921
6922 switch (II.getIntrinsicID()) {
6923 case Intrinsic::umin:
6924 Upper = *C + 1;
6925 break;
6926 case Intrinsic::umax:
6927 Lower = *C;
6928 break;
6929 case Intrinsic::smin:
6930 Lower = APInt::getSignedMinValue(Width);
6931 Upper = *C + 1;
6932 break;
6933 case Intrinsic::smax:
6934 Lower = *C;
6935 Upper = APInt::getSignedMaxValue(Width) + 1;
6936 break;
6937 default:
6938 llvm_unreachable("Must be min/max intrinsic");
6939 }
6940 break;
6941 case Intrinsic::abs:
6942 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6943 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6944 if (match(II.getOperand(1), m_One()))
6945 Upper = APInt::getSignedMaxValue(Width) + 1;
6946 else
6947 Upper = APInt::getSignedMinValue(Width) + 1;
6948 break;
6949 default:
6950 break;
6951 }
6952}
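To illustrate the ctpop/ctlz/cttz bound set at the top of this switch, the following standalone sketch (std::bitset instead of the LLVM intrinsic; the sample values are arbitrary) shows that the population count of a 32-bit value never exceeds the bit width, which is why Upper is Width + 1 in the half-open range.

#include <bitset>
#include <cstdio>

int main() {
  const unsigned Width = 32;
  for (unsigned V : {0x0u, 0x1u, 0x80000001u, 0xFFFFFFFFu}) {
    unsigned Pop = std::bitset<32>(V).count();
    std::printf("ctpop(0x%08X) = %u (never exceeds %u)\n", V, Pop, Width);
  }
  return 0;
}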
6953
6954static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6955 APInt &Upper, const InstrInfoQuery &IIQ) {
6956 const Value *LHS = nullptr, *RHS = nullptr;
6957 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6958 if (R.Flavor == SPF_UNKNOWN)
6959 return;
6960
6961 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6962
6963 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6964 // If the negation part of the abs (in RHS) has the NSW flag,
6965 // then the result of abs(X) is [0..SIGNED_MAX],
6966 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6967 Lower = APInt::getZero(BitWidth);
6968 if (match(RHS, m_Neg(m_Specific(LHS))) &&
6969 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6970 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6971 else
6972 Upper = APInt::getSignedMinValue(BitWidth) + 1;
6973 return;
6974 }
6975
6976 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6977 // The result of -abs(X) is <= 0.
6978 Lower = APInt::getSignedMinValue(BitWidth);
6979 Upper = APInt(BitWidth, 1);
6980 return;
6981 }
6982
6983 const APInt *C;
6984 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6985 return;
6986
6987 switch (R.Flavor) {
6988 case SPF_UMIN:
6989 Upper = *C + 1;
6990 break;
6991 case SPF_UMAX:
6992 Lower = *C;
6993 break;
6994 case SPF_SMIN:
6995 Lower = APInt::getSignedMinValue(BitWidth);
6996 Upper = *C + 1;
6997 break;
6998 case SPF_SMAX:
6999 Lower = *C;
7000 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
7001 break;
7002 default:
7003 break;
7004 }
7005}
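A plain C++ stand-in for the SPF_UMIN case above (the sample values are arbitrary): a select that implements umin(x, C) can never exceed C, which is why the code sets Upper to C + 1 in the half-open range.

#include <algorithm>
#include <cstdio>

int main() {
  const unsigned C = 100;
  for (unsigned X : {0u, 50u, 100u, 4000000000u}) {
    unsigned R = std::min(X, C); // stands in for: select (x ult C), x, C
    std::printf("umin(%u, %u) = %u\n", X, C, R);
  }
  return 0;
}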
7006
7007static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
7008 // The maximum representable value of a half is 65504. For floats the maximum
7009 // value is 3.4e38 which requires roughly 129 bits.
7010 unsigned BitWidth = I->getType()->getScalarSizeInBits();
7011 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
7012 return;
7013 if (isa<FPToSIInst>(I) && BitWidth >= 17) {
7014 Lower = APInt(BitWidth, -65504);
7015 Upper = APInt(BitWidth, 65505);
7016 }
7017
7018 if (isa<FPToUIInst>(I) && BitWidth >= 16) {
7019 // For a fptoui the lower limit is left as 0.
7020 Upper = APInt(BitWidth, 65505);
7021 }
7022}
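The 65504 constant above can be checked with a short standalone computation: the largest finite IEEE half value is (2 - 2^-10) * 2^15, so a sufficiently wide fptosi/fptoui from half can never leave [-65504, 65504].

#include <cmath>
#include <cstdio>

int main() {
  double MaxHalf = (2.0 - std::ldexp(1.0, -10)) * std::ldexp(1.0, 15);
  std::printf("largest finite half = %.1f\n", MaxHalf); // prints 65504.0
  return 0;
}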
7023
7024ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
7025 bool UseInstrInfo, AssumptionCache *AC,
7026 const Instruction *CtxI,
7027 const DominatorTree *DT,
7028 unsigned Depth) {
7029 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
7030
7031 if (Depth == MaxAnalysisRecursionDepth)
7032 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
7033
7034 const APInt *C;
7035 if (match(V, m_APInt(C)))
7036 return ConstantRange(*C);
7037
7038 InstrInfoQuery IIQ(UseInstrInfo);
7039 unsigned BitWidth = V->getType()->getScalarSizeInBits();
7040 APInt Lower = APInt(BitWidth, 0);
7041 APInt Upper = APInt(BitWidth, 0);
7042 if (auto *BO = dyn_cast<BinaryOperator>(V))
7043 setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
7044 else if (auto *II = dyn_cast<IntrinsicInst>(V))
7045 setLimitsForIntrinsic(*II, Lower, Upper);
7046 else if (auto *SI = dyn_cast<SelectInst>(V))
7047 setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
7048 else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V))
7049 setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
7050
7051 ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
7052
7053 if (auto *I = dyn_cast<Instruction>(V))
7054 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
7055 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
7056
7057 if (CtxI && AC) {
7058 // Try to restrict the range based on information from assumptions.
7059 for (auto &AssumeVH : AC->assumptionsFor(V)) {
7060 if (!AssumeVH)
7061 continue;
7062 CallInst *I = cast<CallInst>(AssumeVH);
7063 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
7064 "Got assumption for the wrong function!");
7065 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
7066 "must be an assume intrinsic");
7067
7068 if (!isValidAssumeForContext(I, CtxI, DT))
7069 continue;
7070 Value *Arg = I->getArgOperand(0);
7071 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
7072 // Currently we just use information from comparisons.
7073 if (!Cmp || Cmp->getOperand(0) != V)
7074 continue;
7075 // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
7076 ConstantRange RHS =
7077 computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
7078 UseInstrInfo, AC, I, DT, Depth + 1);
7079 CR = CR.intersectWith(
7080 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
7081 }
7082 }
7083
7084 return CR;
7085}
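A minimal call sketch for the entry point defined above, relying only on the signature shown in this file; how the caller obtains the Value* (for example by walking a parsed Module) is assumed and not shown.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Value.h"

llvm::ConstantRange rangeOf(const llvm::Value *V) {
  // Mirror the parameter list above: unsigned interpretation, honour IR
  // flags, no assumption cache, no context instruction, no dominator tree,
  // start at recursion depth 0.
  return llvm::computeConstantRange(V, /*ForSigned=*/false,
                                    /*UseInstrInfo=*/true,
                                    /*AC=*/nullptr, /*CtxI=*/nullptr,
                                    /*DT=*/nullptr, /*Depth=*/0);
}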
7086
7087static Optional<int64_t>
7088getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
7089 // Skip over the first indices.
7090 gep_type_iterator GTI = gep_type_begin(GEP);
7091 for (unsigned i = 1; i != Idx; ++i, ++GTI)
7092 /*skip along*/;
7093
7094 // Compute the offset implied by the rest of the indices.
7095 int64_t Offset = 0;
7096 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
7097 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
7098 if (!OpC)
7099 return None;
7100 if (OpC->isZero())
7101 continue; // No offset.
7102
7103 // Handle struct indices, which add their field offset to the pointer.
7104 if (StructType *STy = GTI.getStructTypeOrNull()) {
7105 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
7106 continue;
7107 }
7108
7109 // Otherwise, we have a sequential type like an array or fixed-length
7110 // vector. Multiply the index by the ElementSize.
7111 TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
7112 if (Size.isScalable())
7113 return None;
7114 Offset += Size.getFixedSize() * OpC->getSExtValue();
7115 }
7116
7117 return Offset;
7118}
7119
7120Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
7121 const DataLayout &DL) {
7122 APInt Offset1(DL.getIndexTypeSizeInBits(Ptr1->getType()), 0);
7123 APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);
7124 Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset1, true);
7125 Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true);
7126
7127 // Handle the trivial case first.
7128 if (Ptr1 == Ptr2)
7129 return Offset2.getSExtValue() - Offset1.getSExtValue();
7130
7131 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
7132 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
7133
7134 // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
7135 // base. After that base, they may have some number of common (and
7136 // potentially variable) indices. After that they handle some constant
7137 // offset, which determines their offset from each other. At this point, we
7138 // handle no other case.
7139 if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0) ||
7140 GEP1->getSourceElementType() != GEP2->getSourceElementType())
7141 return None;
7142
7143 // Skip any common indices and track the GEP types.
7144 unsigned Idx = 1;
7145 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
7146 if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
7147 break;
7148
7149 auto IOffset1 = getOffsetFromIndex(GEP1, Idx, DL);
7150 auto IOffset2 = getOffsetFromIndex(GEP2, Idx, DL);
7151 if (!IOffset1 || !IOffset2)
7152 return None;
7153 return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
7154 Offset1.getSExtValue();
7155}
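As an everyday analogy for what isPointerOffset reports (ordinary C++ objects rather than the LLVM API; the struct and indices are made up): two pointers that hang off the same base object differ by a compile-time constant number of bytes.

#include <cstdio>

struct Elem { int Tag; double Payload; };

int main() {
  Elem Arr[8];
  const char *P1 = reinterpret_cast<const char *>(&Arr[2].Tag);
  const char *P2 = reinterpret_cast<const char *>(&Arr[5].Payload);
  // Offset = (5 - 2) * sizeof(Elem) + offsetof(Elem, Payload).
  std::printf("constant offset = %td bytes\n", P2 - P1);
  return 0;
}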

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/CFG.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/InstrTypes.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/OperandTraits.h"
33#include "llvm/IR/Use.h"
34#include "llvm/IR/User.h"
35#include "llvm/Support/AtomicOrdering.h"
36#include "llvm/Support/ErrorHandling.h"
37#include <cassert>
38#include <cstddef>
39#include <cstdint>
40#include <iterator>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class ConstantInt;
48class DataLayout;
49class StringRef;
50class Type;
51class Value;
52
53//===----------------------------------------------------------------------===//
54// AllocaInst Class
55//===----------------------------------------------------------------------===//
56
57/// an instruction to allocate memory on the stack
58class AllocaInst : public UnaryInstruction {
59 Type *AllocatedType;
60
61 using AlignmentField = AlignmentBitfieldElementT<0>;
62 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
63 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
64 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
65 SwiftErrorField>(),
66 "Bitfields must be contiguous");
67
68protected:
69 // Note: Instruction needs to be a friend here to call cloneImpl.
70 friend class Instruction;
71
72 AllocaInst *cloneImpl() const;
73
74public:
75 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
76 const Twine &Name, Instruction *InsertBefore);
77 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
78 const Twine &Name, BasicBlock *InsertAtEnd);
79
80 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
81 Instruction *InsertBefore);
82 AllocaInst(Type *Ty, unsigned AddrSpace,
83 const Twine &Name, BasicBlock *InsertAtEnd);
84
85 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
86 const Twine &Name = "", Instruction *InsertBefore = nullptr);
87 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
88 const Twine &Name, BasicBlock *InsertAtEnd);
89
90 /// Return true if there is an allocation size parameter to the allocation
91 /// instruction that is not 1.
92 bool isArrayAllocation() const;
93
94 /// Get the number of elements allocated. For a simple allocation of a single
95 /// element, this will return a constant 1 value.
96 const Value *getArraySize() const { return getOperand(0); }
97 Value *getArraySize() { return getOperand(0); }
98
99 /// Overload to return most specific pointer type.
100 PointerType *getType() const {
101 return cast<PointerType>(Instruction::getType());
102 }
103
104 /// Return the address space for the allocation.
105 unsigned getAddressSpace() const {
106 return getType()->getAddressSpace();
107 }
108
109 /// Get allocation size in bits. Returns None if size can't be determined,
110 /// e.g. in case of a VLA.
111 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
112
113 /// Return the type that is being allocated by the instruction.
114 Type *getAllocatedType() const { return AllocatedType; }
115 /// for use only in special circumstances that need to generically
116 /// transform a whole instruction (eg: IR linking and vectorization).
117 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
118
119 /// Return the alignment of the memory that is being allocated by the
120 /// instruction.
121 Align getAlign() const {
122 return Align(1ULL << getSubclassData<AlignmentField>());
123 }
124
125 void setAlignment(Align Align) {
126 setSubclassData<AlignmentField>(Log2(Align));
127 }
128
129 // FIXME: Remove this once transition to Align is over.
130 uint64_t getAlignment() const { return getAlign().value(); }
131
132 /// Return true if this alloca is in the entry block of the function and is a
133 /// constant size. If so, the code generator will fold it into the
134 /// prolog/epilog code, so it is basically free.
135 bool isStaticAlloca() const;
136
137 /// Return true if this alloca is used as an inalloca argument to a call. Such
138 /// allocas are never considered static even if they are in the entry block.
139 bool isUsedWithInAlloca() const {
140 return getSubclassData<UsedWithInAllocaField>();
141 }
142
143 /// Specify whether this alloca is used to represent the arguments to a call.
144 void setUsedWithInAlloca(bool V) {
145 setSubclassData<UsedWithInAllocaField>(V);
146 }
147
148 /// Return true if this alloca is used as a swifterror argument to a call.
149 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
150 /// Specify whether this alloca is used to represent a swifterror.
151 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
152
153 // Methods for support type inquiry through isa, cast, and dyn_cast:
154 static bool classof(const Instruction *I) {
155 return (I->getOpcode() == Instruction::Alloca);
156 }
157 static bool classof(const Value *V) {
158 return isa<Instruction>(V) && classof(cast<Instruction>(V));
159 }
160
161private:
162 // Shadow Instruction::setInstructionSubclassData with a private forwarding
163 // method so that subclasses cannot accidentally use it.
164 template <typename Bitfield>
165 void setSubclassData(typename Bitfield::Type Value) {
166 Instruction::setSubclassData<Bitfield>(Value);
167 }
168};
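The getAlign()/setAlignment() pair above stores only log2 of the alignment in the bitfield and rebuilds the value as 1 << field; the same encoding is reused by LoadInst, StoreInst, AtomicCmpXchgInst and AtomicRMWInst further down. A standalone sketch of that round trip (plain integers, not llvm::Align):

#include <cstdint>
#include <cstdio>

int main() {
  for (uint64_t Alignment : {1ull, 8ull, 4096ull}) {
    unsigned Field = 0;
    while ((1ull << Field) < Alignment) // log2 of a power of two
      ++Field;
    uint64_t Decoded = 1ull << Field;   // what getAlign() reconstructs
    std::printf("align %llu -> field %u -> align %llu\n",
                (unsigned long long)Alignment, Field,
                (unsigned long long)Decoded);
  }
  return 0;
}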
169
170//===----------------------------------------------------------------------===//
171// LoadInst Class
172//===----------------------------------------------------------------------===//
173
174/// An instruction for reading from memory. This uses the SubclassData field in
175/// Value to store whether or not the load is volatile.
176class LoadInst : public UnaryInstruction {
177 using VolatileField = BoolBitfieldElementT<0>;
178 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
179 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
180 static_assert(
181 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
182 "Bitfields must be contiguous");
183
184 void AssertOK();
185
186protected:
187 // Note: Instruction needs to be a friend here to call cloneImpl.
188 friend class Instruction;
189
190 LoadInst *cloneImpl() const;
191
192public:
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
194 Instruction *InsertBefore);
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 Instruction *InsertBefore);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 BasicBlock *InsertAtEnd);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, Instruction *InsertBefore = nullptr);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, BasicBlock *InsertAtEnd);
204 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
205 Align Align, AtomicOrdering Order,
206 SyncScope::ID SSID = SyncScope::System,
207 Instruction *InsertBefore = nullptr);
208 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
209 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
210 BasicBlock *InsertAtEnd);
211
212 /// Return true if this is a load from a volatile memory location.
213 bool isVolatile() const { return getSubclassData<VolatileField>(); }
214
215 /// Specify whether this is a volatile load or not.
216 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
217
218 /// Return the alignment of the access that is being performed.
219 /// FIXME: Remove this function once transition to Align is over.
220 /// Use getAlign() instead.
221 uint64_t getAlignment() const { return getAlign().value(); }
222
223 /// Return the alignment of the access that is being performed.
224 Align getAlign() const {
225 return Align(1ULL << (getSubclassData<AlignmentField>()));
226 }
227
228 void setAlignment(Align Align) {
229 setSubclassData<AlignmentField>(Log2(Align));
230 }
231
232 /// Returns the ordering constraint of this load instruction.
233 AtomicOrdering getOrdering() const {
234 return getSubclassData<OrderingField>();
235 }
236 /// Sets the ordering constraint of this load instruction. May not be Release
237 /// or AcquireRelease.
238 void setOrdering(AtomicOrdering Ordering) {
239 setSubclassData<OrderingField>(Ordering);
240 }
241
242 /// Returns the synchronization scope ID of this load instruction.
243 SyncScope::ID getSyncScopeID() const {
244 return SSID;
245 }
246
247 /// Sets the synchronization scope ID of this load instruction.
248 void setSyncScopeID(SyncScope::ID SSID) {
249 this->SSID = SSID;
250 }
251
252 /// Sets the ordering constraint and the synchronization scope ID of this load
253 /// instruction.
254 void setAtomic(AtomicOrdering Ordering,
255 SyncScope::ID SSID = SyncScope::System) {
256 setOrdering(Ordering);
257 setSyncScopeID(SSID);
258 }
259
260 bool isSimple() const { return !isAtomic() && !isVolatile(); }
261
262 bool isUnordered() const {
263 return (getOrdering() == AtomicOrdering::NotAtomic ||
264 getOrdering() == AtomicOrdering::Unordered) &&
265 !isVolatile();
266 }
267
268 Value *getPointerOperand() { return getOperand(0); }
269 const Value *getPointerOperand() const { return getOperand(0); }
270 static unsigned getPointerOperandIndex() { return 0U; }
271 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
272
273 /// Returns the address space of the pointer operand.
274 unsigned getPointerAddressSpace() const {
275 return getPointerOperandType()->getPointerAddressSpace();
276 }
277
278 // Methods for support type inquiry through isa, cast, and dyn_cast:
279 static bool classof(const Instruction *I) {
280 return I->getOpcode() == Instruction::Load;
281 }
282 static bool classof(const Value *V) {
283 return isa<Instruction>(V) && classof(cast<Instruction>(V));
284 }
285
286private:
287 // Shadow Instruction::setInstructionSubclassData with a private forwarding
288 // method so that subclasses cannot accidentally use it.
289 template <typename Bitfield>
290 void setSubclassData(typename Bitfield::Type Value) {
291 Instruction::setSubclassData<Bitfield>(Value);
292 }
293
294 /// The synchronization scope ID of this load instruction. Not quite enough
295 /// room in SubClassData for everything, so synchronization scope ID gets its
296 /// own field.
297 SyncScope::ID SSID;
298};
299
300//===----------------------------------------------------------------------===//
301// StoreInst Class
302//===----------------------------------------------------------------------===//
303
304/// An instruction for storing to memory.
305class StoreInst : public Instruction {
306 using VolatileField = BoolBitfieldElementT<0>;
307 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
308 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
309 static_assert(
310 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
311 "Bitfields must be contiguous");
312
313 void AssertOK();
314
315protected:
316 // Note: Instruction needs to be a friend here to call cloneImpl.
317 friend class Instruction;
318
319 StoreInst *cloneImpl() const;
320
321public:
322 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
323 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 Instruction *InsertBefore = nullptr);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
329 BasicBlock *InsertAtEnd);
330 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
331 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
332 Instruction *InsertBefore = nullptr);
333 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
334 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
335
336 // allocate space for exactly two operands
337 void *operator new(size_t S) { return User::operator new(S, 2); }
338 void operator delete(void *Ptr) { User::operator delete(Ptr); }
339
340 /// Return true if this is a store to a volatile memory location.
341 bool isVolatile() const { return getSubclassData<VolatileField>(); }
342
343 /// Specify whether this is a volatile store or not.
344 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
345
346 /// Transparently provide more efficient getOperand methods.
347 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
348
349 /// Return the alignment of the access that is being performed
350 /// FIXME: Remove this function once transition to Align is over.
351 /// Use getAlign() instead.
352 uint64_t getAlignment() const { return getAlign().value(); }
353
354 Align getAlign() const {
355 return Align(1ULL << (getSubclassData<AlignmentField>()));
356 }
357
358 void setAlignment(Align Align) {
359 setSubclassData<AlignmentField>(Log2(Align));
360 }
361
362 /// Returns the ordering constraint of this store instruction.
363 AtomicOrdering getOrdering() const {
364 return getSubclassData<OrderingField>();
365 }
366
367 /// Sets the ordering constraint of this store instruction. May not be
368 /// Acquire or AcquireRelease.
369 void setOrdering(AtomicOrdering Ordering) {
370 setSubclassData<OrderingField>(Ordering);
371 }
372
373 /// Returns the synchronization scope ID of this store instruction.
374 SyncScope::ID getSyncScopeID() const {
375 return SSID;
376 }
377
378 /// Sets the synchronization scope ID of this store instruction.
379 void setSyncScopeID(SyncScope::ID SSID) {
380 this->SSID = SSID;
381 }
382
383 /// Sets the ordering constraint and the synchronization scope ID of this
384 /// store instruction.
385 void setAtomic(AtomicOrdering Ordering,
386 SyncScope::ID SSID = SyncScope::System) {
387 setOrdering(Ordering);
388 setSyncScopeID(SSID);
389 }
390
391 bool isSimple() const { return !isAtomic() && !isVolatile(); }
392
393 bool isUnordered() const {
394 return (getOrdering() == AtomicOrdering::NotAtomic ||
395 getOrdering() == AtomicOrdering::Unordered) &&
396 !isVolatile();
397 }
398
399 Value *getValueOperand() { return getOperand(0); }
400 const Value *getValueOperand() const { return getOperand(0); }
401
402 Value *getPointerOperand() { return getOperand(1); }
403 const Value *getPointerOperand() const { return getOperand(1); }
404 static unsigned getPointerOperandIndex() { return 1U; }
405 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
406
407 /// Returns the address space of the pointer operand.
408 unsigned getPointerAddressSpace() const {
409 return getPointerOperandType()->getPointerAddressSpace();
410 }
411
412 // Methods for support type inquiry through isa, cast, and dyn_cast:
413 static bool classof(const Instruction *I) {
414 return I->getOpcode() == Instruction::Store;
415 }
416 static bool classof(const Value *V) {
417 return isa<Instruction>(V) && classof(cast<Instruction>(V));
418 }
419
420private:
421 // Shadow Instruction::setInstructionSubclassData with a private forwarding
422 // method so that subclasses cannot accidentally use it.
423 template <typename Bitfield>
424 void setSubclassData(typename Bitfield::Type Value) {
425 Instruction::setSubclassData<Bitfield>(Value);
426 }
427
428 /// The synchronization scope ID of this store instruction. Not quite enough
429 /// room in SubClassData for everything, so synchronization scope ID gets its
430 /// own field.
431 SyncScope::ID SSID;
432};
433
434template <>
435struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
436};
437
438 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
439
440//===----------------------------------------------------------------------===//
441// FenceInst Class
442//===----------------------------------------------------------------------===//
443
444/// An instruction for ordering other memory operations.
445class FenceInst : public Instruction {
446 using OrderingField = AtomicOrderingBitfieldElementT<0>;
447
448 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
449
450protected:
451 // Note: Instruction needs to be a friend here to call cloneImpl.
452 friend class Instruction;
453
454 FenceInst *cloneImpl() const;
455
456public:
457 // Ordering may only be Acquire, Release, AcquireRelease, or
458 // SequentiallyConsistent.
459 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
460 SyncScope::ID SSID = SyncScope::System,
461 Instruction *InsertBefore = nullptr);
462 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
463 BasicBlock *InsertAtEnd);
464
465 // allocate space for exactly zero operands
466 void *operator new(size_t S) { return User::operator new(S, 0); }
467 void operator delete(void *Ptr) { User::operator delete(Ptr); }
468
469 /// Returns the ordering constraint of this fence instruction.
470 AtomicOrdering getOrdering() const {
471 return getSubclassData<OrderingField>();
472 }
473
474 /// Sets the ordering constraint of this fence instruction. May only be
475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476 void setOrdering(AtomicOrdering Ordering) {
477 setSubclassData<OrderingField>(Ordering);
478 }
479
480 /// Returns the synchronization scope ID of this fence instruction.
481 SyncScope::ID getSyncScopeID() const {
482 return SSID;
483 }
484
485 /// Sets the synchronization scope ID of this fence instruction.
486 void setSyncScopeID(SyncScope::ID SSID) {
487 this->SSID = SSID;
488 }
489
490 // Methods for support type inquiry through isa, cast, and dyn_cast:
491 static bool classof(const Instruction *I) {
492 return I->getOpcode() == Instruction::Fence;
493 }
494 static bool classof(const Value *V) {
495 return isa<Instruction>(V) && classof(cast<Instruction>(V));
496 }
497
498private:
499 // Shadow Instruction::setInstructionSubclassData with a private forwarding
500 // method so that subclasses cannot accidentally use it.
501 template <typename Bitfield>
502 void setSubclassData(typename Bitfield::Type Value) {
503 Instruction::setSubclassData<Bitfield>(Value);
504 }
505
506 /// The synchronization scope ID of this fence instruction. Not quite enough
507 /// room in SubClassData for everything, so synchronization scope ID gets its
508 /// own field.
509 SyncScope::ID SSID;
510};
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
516/// An instruction that atomically checks whether a
517/// specified value is in a memory location, and, if it is, stores a new value
518/// there. The value returned by this instruction is a pair containing the
519/// original value as first element, and an i1 indicating success (true) or
520/// failure (false) as second element.
521///
522class AtomicCmpXchgInst : public Instruction {
523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525 SyncScope::ID SSID);
526
527 template <unsigned Offset>
528 using AtomicOrderingBitfieldElement =
529 typename Bitfield::Element<AtomicOrdering, Offset, 3,
530 AtomicOrdering::LAST>;
531
532protected:
533 // Note: Instruction needs to be a friend here to call cloneImpl.
534 friend class Instruction;
535
536 AtomicCmpXchgInst *cloneImpl() const;
537
538public:
539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540 AtomicOrdering SuccessOrdering,
541 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542 Instruction *InsertBefore = nullptr);
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 BasicBlock *InsertAtEnd);
547
548 // allocate space for exactly three operands
549 void *operator new(size_t S) { return User::operator new(S, 3); }
550 void operator delete(void *Ptr) { User::operator delete(Ptr); }
551
552 using VolatileField = BoolBitfieldElementT<0>;
553 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
554 using SuccessOrderingField =
555 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
556 using FailureOrderingField =
557 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
558 using AlignmentField =
559 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
560 static_assert(
561 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
562 FailureOrderingField, AlignmentField>(),
563 "Bitfields must be contiguous");
564
565 /// Return the alignment of the memory that is being allocated by the
566 /// instruction.
567 Align getAlign() const {
568 return Align(1ULL << getSubclassData<AlignmentField>());
569 }
570
571 void setAlignment(Align Align) {
572 setSubclassData<AlignmentField>(Log2(Align));
573 }
574
575 /// Return true if this is a cmpxchg from a volatile memory
576 /// location.
577 ///
578 bool isVolatile() const { return getSubclassData<VolatileField>(); }
579
580 /// Specify whether this is a volatile cmpxchg.
581 ///
582 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
583
584 /// Return true if this cmpxchg may spuriously fail.
585 bool isWeak() const { return getSubclassData<WeakField>(); }
586
587 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
588
589 /// Transparently provide more efficient getOperand methods.
590 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
591
592 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
593 return Ordering != AtomicOrdering::NotAtomic &&
594 Ordering != AtomicOrdering::Unordered;
595 }
596
597 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
598 return Ordering != AtomicOrdering::NotAtomic &&
599 Ordering != AtomicOrdering::Unordered &&
600 Ordering != AtomicOrdering::AcquireRelease &&
601 Ordering != AtomicOrdering::Release;
602 }
603
604 /// Returns the success ordering constraint of this cmpxchg instruction.
605 AtomicOrdering getSuccessOrdering() const {
606 return getSubclassData<SuccessOrderingField>();
607 }
608
609 /// Sets the success ordering constraint of this cmpxchg instruction.
610 void setSuccessOrdering(AtomicOrdering Ordering) {
611 assert(isValidSuccessOrdering(Ordering) &&
612 "invalid CmpXchg success ordering");
613 setSubclassData<SuccessOrderingField>(Ordering);
614 }
615
616 /// Returns the failure ordering constraint of this cmpxchg instruction.
617 AtomicOrdering getFailureOrdering() const {
618 return getSubclassData<FailureOrderingField>();
619 }
620
621 /// Sets the failure ordering constraint of this cmpxchg instruction.
622 void setFailureOrdering(AtomicOrdering Ordering) {
623 assert(isValidFailureOrdering(Ordering) &&
624 "invalid CmpXchg failure ordering");
625 setSubclassData<FailureOrderingField>(Ordering);
626 }
627
628 /// Returns a single ordering which is at least as strong as both the
629 /// success and failure orderings for this cmpxchg.
630 AtomicOrdering getMergedOrdering() const {
631 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
632 return AtomicOrdering::SequentiallyConsistent;
633 if (getFailureOrdering() == AtomicOrdering::Acquire) {
634 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
635 return AtomicOrdering::Acquire;
636 if (getSuccessOrdering() == AtomicOrdering::Release)
637 return AtomicOrdering::AcquireRelease;
638 }
639 return getSuccessOrdering();
640 }
641
642 /// Returns the synchronization scope ID of this cmpxchg instruction.
643 SyncScope::ID getSyncScopeID() const {
644 return SSID;
645 }
646
647 /// Sets the synchronization scope ID of this cmpxchg instruction.
648 void setSyncScopeID(SyncScope::ID SSID) {
649 this->SSID = SSID;
650 }
651
652 Value *getPointerOperand() { return getOperand(0); }
653 const Value *getPointerOperand() const { return getOperand(0); }
654 static unsigned getPointerOperandIndex() { return 0U; }
655
656 Value *getCompareOperand() { return getOperand(1); }
657 const Value *getCompareOperand() const { return getOperand(1); }
658
659 Value *getNewValOperand() { return getOperand(2); }
660 const Value *getNewValOperand() const { return getOperand(2); }
661
662 /// Returns the address space of the pointer operand.
663 unsigned getPointerAddressSpace() const {
664 return getPointerOperand()->getType()->getPointerAddressSpace();
665 }
666
667 /// Returns the strongest permitted ordering on failure, given the
668 /// desired ordering on success.
669 ///
670 /// If the comparison in a cmpxchg operation fails, there is no atomic store
671 /// so release semantics cannot be provided. So this function drops explicit
672 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
673 /// operation would remain SequentiallyConsistent.
674 static AtomicOrdering
675 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
676 switch (SuccessOrdering) {
677 default:
678 llvm_unreachable("invalid cmpxchg success ordering");
679 case AtomicOrdering::Release:
680 case AtomicOrdering::Monotonic:
681 return AtomicOrdering::Monotonic;
682 case AtomicOrdering::AcquireRelease:
683 case AtomicOrdering::Acquire:
684 return AtomicOrdering::Acquire;
685 case AtomicOrdering::SequentiallyConsistent:
686 return AtomicOrdering::SequentiallyConsistent;
687 }
688 }
689
690 // Methods for support type inquiry through isa, cast, and dyn_cast:
691 static bool classof(const Instruction *I) {
692 return I->getOpcode() == Instruction::AtomicCmpXchg;
693 }
694 static bool classof(const Value *V) {
695 return isa<Instruction>(V) && classof(cast<Instruction>(V));
696 }
697
698private:
699 // Shadow Instruction::setInstructionSubclassData with a private forwarding
700 // method so that subclasses cannot accidentally use it.
701 template <typename Bitfield>
702 void setSubclassData(typename Bitfield::Type Value) {
703 Instruction::setSubclassData<Bitfield>(Value);
704 }
705
706 /// The synchronization scope ID of this cmpxchg instruction. Not quite
707 /// enough room in SubClassData for everything, so synchronization scope ID
708 /// gets its own field.
709 SyncScope::ID SSID;
710};
711
712template <>
713struct OperandTraits<AtomicCmpXchgInst> :
714 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
715};
716
717 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
718
719//===----------------------------------------------------------------------===//
720// AtomicRMWInst Class
721//===----------------------------------------------------------------------===//
722
723/// an instruction that atomically reads a memory location,
724/// combines it with another value, and then stores the result back. Returns
725/// the old value.
726///
727class AtomicRMWInst : public Instruction {
728protected:
729 // Note: Instruction needs to be a friend here to call cloneImpl.
730 friend class Instruction;
731
732 AtomicRMWInst *cloneImpl() const;
733
734public:
735 /// This enumeration lists the possible modifications atomicrmw can make. In
736 /// the descriptions, 'p' is the pointer to the instruction's memory location,
737 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
738 /// instruction. These instructions always return 'old'.
739 enum BinOp : unsigned {
740 /// *p = v
741 Xchg,
742 /// *p = old + v
743 Add,
744 /// *p = old - v
745 Sub,
746 /// *p = old & v
747 And,
748 /// *p = ~(old & v)
749 Nand,
750 /// *p = old | v
751 Or,
752 /// *p = old ^ v
753 Xor,
754 /// *p = old >signed v ? old : v
755 Max,
756 /// *p = old <signed v ? old : v
757 Min,
758 /// *p = old >unsigned v ? old : v
759 UMax,
760 /// *p = old <unsigned v ? old : v
761 UMin,
762
763 /// *p = old + v
764 FAdd,
765
766 /// *p = old - v
767 FSub,
768
769 FIRST_BINOP = Xchg,
770 LAST_BINOP = FSub,
771 BAD_BINOP
772 };
773
774private:
775 template <unsigned Offset>
776 using AtomicOrderingBitfieldElement =
777 typename Bitfield::Element<AtomicOrdering, Offset, 3,
778 AtomicOrdering::LAST>;
779
780 template <unsigned Offset>
781 using BinOpBitfieldElement =
782 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
783
784public:
785 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
786 AtomicOrdering Ordering, SyncScope::ID SSID,
787 Instruction *InsertBefore = nullptr);
788 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
789 AtomicOrdering Ordering, SyncScope::ID SSID,
790 BasicBlock *InsertAtEnd);
791
792 // allocate space for exactly two operands
793 void *operator new(size_t S) { return User::operator new(S, 2); }
794 void operator delete(void *Ptr) { User::operator delete(Ptr); }
795
796 using VolatileField = BoolBitfieldElementT<0>;
797 using AtomicOrderingField =
798 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
799 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
800 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
801 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
802 OperationField, AlignmentField>(),
803 "Bitfields must be contiguous");
804
805 BinOp getOperation() const { return getSubclassData<OperationField>(); }
806
807 static StringRef getOperationName(BinOp Op);
808
809 static bool isFPOperation(BinOp Op) {
810 switch (Op) {
811 case AtomicRMWInst::FAdd:
812 case AtomicRMWInst::FSub:
813 return true;
814 default:
815 return false;
816 }
817 }
818
819 void setOperation(BinOp Operation) {
820 setSubclassData<OperationField>(Operation);
821 }
822
823 /// Return the alignment of the memory that is being allocated by the
824 /// instruction.
825 Align getAlign() const {
826 return Align(1ULL << getSubclassData<AlignmentField>());
827 }
828
829 void setAlignment(Align Align) {
830 setSubclassData<AlignmentField>(Log2(Align));
831 }
832
833 /// Return true if this is a RMW on a volatile memory location.
834 ///
835 bool isVolatile() const { return getSubclassData<VolatileField>(); }
836
837 /// Specify whether this is a volatile RMW or not.
838 ///
839 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
840
841 /// Transparently provide more efficient getOperand methods.
842 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
843
844 /// Returns the ordering constraint of this rmw instruction.
845 AtomicOrdering getOrdering() const {
846 return getSubclassData<AtomicOrderingField>();
847 }
848
849 /// Sets the ordering constraint of this rmw instruction.
850 void setOrdering(AtomicOrdering Ordering) {
851 assert(Ordering != AtomicOrdering::NotAtomic &&
852 "atomicrmw instructions can only be atomic.");
853 setSubclassData<AtomicOrderingField>(Ordering);
854 }
855
856 /// Returns the synchronization scope ID of this rmw instruction.
857 SyncScope::ID getSyncScopeID() const {
858 return SSID;
859 }
860
861 /// Sets the synchronization scope ID of this rmw instruction.
862 void setSyncScopeID(SyncScope::ID SSID) {
863 this->SSID = SSID;
864 }
865
866 Value *getPointerOperand() { return getOperand(0); }
867 const Value *getPointerOperand() const { return getOperand(0); }
868 static unsigned getPointerOperandIndex() { return 0U; }
869
870 Value *getValOperand() { return getOperand(1); }
871 const Value *getValOperand() const { return getOperand(1); }
872
873 /// Returns the address space of the pointer operand.
874 unsigned getPointerAddressSpace() const {
875 return getPointerOperand()->getType()->getPointerAddressSpace();
876 }
877
878 bool isFloatingPointOperation() const {
879 return isFPOperation(getOperation());
880 }
881
882 // Methods for support type inquiry through isa, cast, and dyn_cast:
883 static bool classof(const Instruction *I) {
884 return I->getOpcode() == Instruction::AtomicRMW;
885 }
886 static bool classof(const Value *V) {
887 return isa<Instruction>(V) && classof(cast<Instruction>(V));
888 }
889
890private:
891 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
892 AtomicOrdering Ordering, SyncScope::ID SSID);
893
894 // Shadow Instruction::setInstructionSubclassData with a private forwarding
895 // method so that subclasses cannot accidentally use it.
896 template <typename Bitfield>
897 void setSubclassData(typename Bitfield::Type Value) {
898 Instruction::setSubclassData<Bitfield>(Value);
899 }
900
901 /// The synchronization scope ID of this rmw instruction. Not quite enough
902 /// room in SubClassData for everything, so synchronization scope ID gets its
903 /// own field.
904 SyncScope::ID SSID;
905};
906
907template <>
908struct OperandTraits<AtomicRMWInst>
909 : public FixedNumOperandTraits<AtomicRMWInst,2> {
910};
911
912 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
913
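// --- Illustrative usage sketch (editorial addition, not part of Instructions.h) ---
// A minimal example of how client code typically inspects an atomicrmw through the
// accessors declared above. The helper name `describeAtomicRMW` is hypothetical; it
// assumes <llvm/IR/Instructions.h> and the usual dyn_cast<> machinery are available.
static void describeAtomicRMW(const Instruction *I) {
  if (const auto *RMW = dyn_cast<AtomicRMWInst>(I)) { // uses classof() above
    const Value *Ptr = RMW->getPointerOperand();      // operand 0
    const Value *Val = RMW->getValOperand();          // operand 1
    unsigned AS = RMW->getPointerAddressSpace();      // address space of Ptr
    bool FP = RMW->isFloatingPointOperation();        // e.g. an 'fadd' rmw
    (void)Ptr; (void)Val; (void)AS; (void)FP;
  }
}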
914//===----------------------------------------------------------------------===//
915// GetElementPtrInst Class
916//===----------------------------------------------------------------------===//
917
918// checkGEPType - Simple wrapper function to give a better assertion failure
919// message on bad indexes for a gep instruction.
920//
921inline Type *checkGEPType(Type *Ty) {
922   assert(Ty && "Invalid GetElementPtrInst indices for type!");
923 return Ty;
924}
925
926/// An instruction for type-safe pointer arithmetic to
927/// access elements of arrays and structs
928///
929class GetElementPtrInst : public Instruction {
930 Type *SourceElementType;
931 Type *ResultElementType;
932
933 GetElementPtrInst(const GetElementPtrInst &GEPI);
934
935   /// Constructors - Create a getelementptr instruction with a base pointer and
936   /// a list of indices. The first ctor can optionally insert before an existing
937 /// instruction, the second appends the new instruction to the specified
938 /// BasicBlock.
939 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
940 ArrayRef<Value *> IdxList, unsigned Values,
941 const Twine &NameStr, Instruction *InsertBefore);
942 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
943 ArrayRef<Value *> IdxList, unsigned Values,
944 const Twine &NameStr, BasicBlock *InsertAtEnd);
945
946 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
947
948protected:
949 // Note: Instruction needs to be a friend here to call cloneImpl.
950 friend class Instruction;
951
952 GetElementPtrInst *cloneImpl() const;
953
954public:
955 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
956 ArrayRef<Value *> IdxList,
957 const Twine &NameStr = "",
958 Instruction *InsertBefore = nullptr) {
959 unsigned Values = 1 + unsigned(IdxList.size());
960     assert(PointeeType && "Must specify element type");
961     assert(cast<PointerType>(Ptr->getType()->getScalarType())
962                ->isOpaqueOrPointeeTypeMatches(PointeeType));
963 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
964 NameStr, InsertBefore);
965 }
966
967 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
968 ArrayRef<Value *> IdxList,
969 const Twine &NameStr,
970 BasicBlock *InsertAtEnd) {
971 unsigned Values = 1 + unsigned(IdxList.size());
972     assert(PointeeType && "Must specify element type");
973     assert(cast<PointerType>(Ptr->getType()->getScalarType())
974                ->isOpaqueOrPointeeTypeMatches(PointeeType));
975 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
976 NameStr, InsertAtEnd);
977 }
978
979 /// Create an "inbounds" getelementptr. See the documentation for the
980 /// "inbounds" flag in LangRef.html for details.
981 static GetElementPtrInst *
982 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
983 const Twine &NameStr = "",
984 Instruction *InsertBefore = nullptr) {
985 GetElementPtrInst *GEP =
986 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
987 GEP->setIsInBounds(true);
988 return GEP;
989 }
990
991 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
992 ArrayRef<Value *> IdxList,
993 const Twine &NameStr,
994 BasicBlock *InsertAtEnd) {
995 GetElementPtrInst *GEP =
996 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
997 GEP->setIsInBounds(true);
998 return GEP;
999 }
1000
1001 /// Transparently provide more efficient getOperand methods.
1002  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1003
1004 Type *getSourceElementType() const { return SourceElementType; }
1005
1006 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1007 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1008
1009 Type *getResultElementType() const {
1010    assert(cast<PointerType>(getType()->getScalarType())
1011               ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1012 return ResultElementType;
1013 }
1014
1015 /// Returns the address space of this instruction's pointer type.
1016 unsigned getAddressSpace() const {
1017 // Note that this is always the same as the pointer operand's address space
1018 // and that is cheaper to compute, so cheat here.
1019 return getPointerAddressSpace();
1020 }
1021
1022 /// Returns the result type of a getelementptr with the given source
1023 /// element type and indexes.
1024 ///
1025 /// Null is returned if the indices are invalid for the specified
1026 /// source element type.
1027 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1028 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1029 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1030
1031 /// Return the type of the element at the given index of an indexable
1032 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1033 ///
1034 /// Returns null if the type can't be indexed, or the given index is not
1035 /// legal for the given type.
1036 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1037 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1038
1039 inline op_iterator idx_begin() { return op_begin()+1; }
1040 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1041 inline op_iterator idx_end() { return op_end(); }
1042 inline const_op_iterator idx_end() const { return op_end(); }
1043
1044 inline iterator_range<op_iterator> indices() {
1045 return make_range(idx_begin(), idx_end());
1046 }
1047
1048 inline iterator_range<const_op_iterator> indices() const {
1049 return make_range(idx_begin(), idx_end());
1050 }
1051
1052 Value *getPointerOperand() {
1053 return getOperand(0);
1054 }
1055 const Value *getPointerOperand() const {
1056 return getOperand(0);
1057 }
1058 static unsigned getPointerOperandIndex() {
1059 return 0U; // get index for modifying correct operand.
1060 }
1061
1062 /// Method to return the pointer operand as a
1063 /// PointerType.
1064 Type *getPointerOperandType() const {
1065 return getPointerOperand()->getType();
1066 }
1067
1068 /// Returns the address space of the pointer operand.
1069 unsigned getPointerAddressSpace() const {
1070 return getPointerOperandType()->getPointerAddressSpace();
1071 }
1072
1073 /// Returns the pointer type returned by the GEP
1074 /// instruction, which may be a vector of pointers.
1075 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1076 ArrayRef<Value *> IdxList) {
1077 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1078 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1079 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1080 Type *PtrTy = OrigPtrTy->isOpaque()
1081 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1082 : PointerType::get(ResultElemTy, AddrSpace);
1083 // Vector GEP
1084 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1085 ElementCount EltCount = PtrVTy->getElementCount();
1086 return VectorType::get(PtrTy, EltCount);
1087 }
1088 for (Value *Index : IdxList)
1089 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1090 ElementCount EltCount = IndexVTy->getElementCount();
1091 return VectorType::get(PtrTy, EltCount);
1092 }
1093 // Scalar GEP
1094 return PtrTy;
1095 }
1096
1097 unsigned getNumIndices() const { // Note: always non-negative
1098 return getNumOperands() - 1;
1099 }
1100
1101 bool hasIndices() const {
1102 return getNumOperands() > 1;
1103 }
1104
1105 /// Return true if all of the indices of this GEP are
1106 /// zeros. If so, the result pointer and the first operand have the same
1107 /// value, just potentially different types.
1108 bool hasAllZeroIndices() const;
1109
1110 /// Return true if all of the indices of this GEP are
1111 /// constant integers. If so, the result pointer and the first operand have
1112 /// a constant offset between them.
1113 bool hasAllConstantIndices() const;
1114
1115 /// Set or clear the inbounds flag on this GEP instruction.
1116 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1117 void setIsInBounds(bool b = true);
1118
1119 /// Determine whether the GEP has the inbounds flag.
1120 bool isInBounds() const;
1121
1122 /// Accumulate the constant address offset of this GEP if possible.
1123 ///
1124 /// This routine accepts an APInt into which it will accumulate the constant
1125 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1126 /// all-constant, it returns false and the value of the offset APInt is
1127 /// undefined (it is *not* preserved!). The APInt passed into this routine
1128 /// must be at least as wide as the IntPtr type for the address space of
1129 /// the base GEP pointer.
1130 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1131 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1132 MapVector<Value *, APInt> &VariableOffsets,
1133 APInt &ConstantOffset) const;
1134 // Methods for support type inquiry through isa, cast, and dyn_cast:
1135 static bool classof(const Instruction *I) {
1136 return (I->getOpcode() == Instruction::GetElementPtr);
1137 }
1138 static bool classof(const Value *V) {
1139 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1140 }
1141};
1142
1143template <>
1144struct OperandTraits<GetElementPtrInst> :
1145 public VariadicOperandTraits<GetElementPtrInst, 1> {
1146};
1147
1148GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1149 ArrayRef<Value *> IdxList, unsigned Values,
1150 const Twine &NameStr,
1151 Instruction *InsertBefore)
1152 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1153 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1154 Values, InsertBefore),
1155 SourceElementType(PointeeType),
1156 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1157  assert(cast<PointerType>(getType()->getScalarType())
1158             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1159 init(Ptr, IdxList, NameStr);
1160}
1161
1162GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1163 ArrayRef<Value *> IdxList, unsigned Values,
1164 const Twine &NameStr,
1165 BasicBlock *InsertAtEnd)
1166 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1167 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1168 Values, InsertAtEnd),
1169 SourceElementType(PointeeType),
1170 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1171  assert(cast<PointerType>(getType()->getScalarType())
1172             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1173 init(Ptr, IdxList, NameStr);
1174}
1175
1176DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1177
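// --- Illustrative usage sketch (editorial addition, not part of Instructions.h) ---
// A small example of the factory methods above: build an inbounds GEP over an i32
// base pointer and query the indexed type. `emitElementAddr`, `BasePtr` and `InsertPt`
// are hypothetical names; assumes <llvm/IR/Constants.h> for ConstantInt.
static Value *emitElementAddr(Value *BasePtr, Instruction *InsertPt, LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Value *Idx = ConstantInt::get(I32, 4);
  // With a single index, the indexed type is the source element type itself.
  assert(GetElementPtrInst::getIndexedType(I32, {Idx}) == I32);
  // Emits: %elt.addr = getelementptr inbounds i32, i32* %BasePtr, i32 4
  return GetElementPtrInst::CreateInBounds(I32, BasePtr, {Idx}, "elt.addr", InsertPt);
}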
1178//===----------------------------------------------------------------------===//
1179// ICmpInst Class
1180//===----------------------------------------------------------------------===//
1181
1182/// This instruction compares its operands according to the predicate given
1183/// to the constructor. It only operates on integers or pointers. The operands
1184/// must be of identical types.
1185/// Represent an integer comparison operator.
1186class ICmpInst: public CmpInst {
1187 void AssertOK() {
1188    assert(isIntPredicate() &&
1189           "Invalid ICmp predicate value");
1190    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1191           "Both operands to ICmp instruction are not of the same type!");
1192 // Check that the operands are the right type
1193    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1194            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1195           "Invalid operand types for ICmp instruction");
1196 }
1197
1198protected:
1199 // Note: Instruction needs to be a friend here to call cloneImpl.
1200 friend class Instruction;
1201
1202 /// Clone an identical ICmpInst
1203 ICmpInst *cloneImpl() const;
1204
1205public:
1206 /// Constructor with insert-before-instruction semantics.
1207 ICmpInst(
1208 Instruction *InsertBefore, ///< Where to insert
1209 Predicate pred, ///< The predicate to use for the comparison
1210 Value *LHS, ///< The left-hand-side of the expression
1211 Value *RHS, ///< The right-hand-side of the expression
1212 const Twine &NameStr = "" ///< Name of the instruction
1213 ) : CmpInst(makeCmpResultType(LHS->getType()),
1214 Instruction::ICmp, pred, LHS, RHS, NameStr,
1215 InsertBefore) {
1216#ifndef NDEBUG
1217 AssertOK();
1218#endif
1219 }
1220
1221 /// Constructor with insert-at-end semantics.
1222 ICmpInst(
1223 BasicBlock &InsertAtEnd, ///< Block to insert into.
1224 Predicate pred, ///< The predicate to use for the comparison
1225 Value *LHS, ///< The left-hand-side of the expression
1226 Value *RHS, ///< The right-hand-side of the expression
1227 const Twine &NameStr = "" ///< Name of the instruction
1228 ) : CmpInst(makeCmpResultType(LHS->getType()),
1229 Instruction::ICmp, pred, LHS, RHS, NameStr,
1230 &InsertAtEnd) {
1231#ifndef NDEBUG
1232 AssertOK();
1233#endif
1234 }
1235
1236 /// Constructor with no-insertion semantics
1237 ICmpInst(
1238 Predicate pred, ///< The predicate to use for the comparison
1239 Value *LHS, ///< The left-hand-side of the expression
1240 Value *RHS, ///< The right-hand-side of the expression
1241 const Twine &NameStr = "" ///< Name of the instruction
1242 ) : CmpInst(makeCmpResultType(LHS->getType()),
1243 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1244#ifndef NDEBUG
1245 AssertOK();
1246#endif
1247 }
1248
1249 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1250 /// @returns the predicate that would be the result if the operand were
1251 /// regarded as signed.
1252 /// Return the signed version of the predicate
1253 Predicate getSignedPredicate() const {
1254 return getSignedPredicate(getPredicate());
1255 }
1256
1257 /// This is a static version that you can use without an instruction.
1258 /// Return the signed version of the predicate.
1259 static Predicate getSignedPredicate(Predicate pred);
1260
1261 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1262 /// @returns the predicate that would be the result if the operand were
1263 /// regarded as unsigned.
1264 /// Return the unsigned version of the predicate
1265 Predicate getUnsignedPredicate() const {
1266 return getUnsignedPredicate(getPredicate());
1267 }
1268
1269 /// This is a static version that you can use without an instruction.
1270 /// Return the unsigned version of the predicate.
1271 static Predicate getUnsignedPredicate(Predicate pred);
1272
1273 /// Return true if this predicate is either EQ or NE. This also
1274 /// tests for commutativity.
1275 static bool isEquality(Predicate P) {
1276 return P == ICMP_EQ || P == ICMP_NE;
1277 }
1278
1279 /// Return true if this predicate is either EQ or NE. This also
1280 /// tests for commutativity.
1281 bool isEquality() const {
1282 return isEquality(getPredicate());
1283 }
1284
1285 /// @returns true if the predicate of this ICmpInst is commutative
1286 /// Determine if this relation is commutative.
1287 bool isCommutative() const { return isEquality(); }
1288
1289 /// Return true if the predicate is relational (not EQ or NE).
1290 ///
1291 bool isRelational() const {
1292 return !isEquality();
1293 }
1294
1295 /// Return true if the predicate is relational (not EQ or NE).
1296 ///
1297 static bool isRelational(Predicate P) {
1298 return !isEquality(P);
1299 }
1300
1301 /// Return true if the predicate is SGT or UGT.
1302 ///
1303 static bool isGT(Predicate P) {
1304 return P == ICMP_SGT || P == ICMP_UGT;
1305 }
1306
1307 /// Return true if the predicate is SLT or ULT.
1308 ///
1309 static bool isLT(Predicate P) {
1310 return P == ICMP_SLT || P == ICMP_ULT;
1311 }
1312
1313 /// Return true if the predicate is SGE or UGE.
1314 ///
1315 static bool isGE(Predicate P) {
1316 return P == ICMP_SGE || P == ICMP_UGE;
1317 }
1318
1319 /// Return true if the predicate is SLE or ULE.
1320 ///
1321 static bool isLE(Predicate P) {
1322 return P == ICMP_SLE || P == ICMP_ULE;
1323 }
1324
1325 /// Returns the sequence of all ICmp predicates.
1326 ///
1327 static auto predicates() { return ICmpPredicates(); }
1328
1329 /// Exchange the two operands to this instruction in such a way that it does
1330 /// not modify the semantics of the instruction. The predicate value may be
1331 /// changed to retain the same result if the predicate is order dependent
1332 /// (e.g. ult).
1333 /// Swap operands and adjust predicate.
1334 void swapOperands() {
1335 setPredicate(getSwappedPredicate());
1336 Op<0>().swap(Op<1>());
1337 }
1338
1339 /// Return result of `LHS Pred RHS` comparison.
1340 static bool compare(const APInt &LHS, const APInt &RHS,
1341 ICmpInst::Predicate Pred);
1342
1343 // Methods for support type inquiry through isa, cast, and dyn_cast:
1344 static bool classof(const Instruction *I) {
1345 return I->getOpcode() == Instruction::ICmp;
1346 }
1347 static bool classof(const Value *V) {
1348 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1349 }
1350};
1351
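// --- Illustrative usage sketch (editorial addition, not part of Instructions.h) ---
// The static helpers declared above can be exercised without building any IR.
// `demoICmpPredicates` is a hypothetical name; assumes <llvm/ADT/APInt.h>.
static bool demoICmpPredicates() {
  APInt A(/*numBits=*/32, /*val=*/3), B(32, 7);
  bool Less = ICmpInst::compare(A, B, ICmpInst::ICMP_SLT);   // 3 s< 7 -> true
  // ICMP_ULT regarded as signed becomes ICMP_SLT; it is relational, not an equality.
  CmpInst::Predicate P = ICmpInst::getSignedPredicate(ICmpInst::ICMP_ULT);
  return Less && ICmpInst::isLT(P) && ICmpInst::isRelational(P);
}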
1352//===----------------------------------------------------------------------===//
1353// FCmpInst Class
1354//===----------------------------------------------------------------------===//
1355
1356/// This instruction compares its operands according to the predicate given
1357/// to the constructor. It only operates on floating point values or packed
1358/// vectors of floating point values. The operands must be of identical types.
1359/// Represents a floating point comparison operator.
1360class FCmpInst: public CmpInst {
1361 void AssertOK() {
1362    assert(isFPPredicate() && "Invalid FCmp predicate value");
1363    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1364           "Both operands to FCmp instruction are not of the same type!");
1365 // Check that the operands are the right type
1366    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1367           "Invalid operand types for FCmp instruction");
1368 }
1369
1370protected:
1371 // Note: Instruction needs to be a friend here to call cloneImpl.
1372 friend class Instruction;
1373
1374 /// Clone an identical FCmpInst
1375 FCmpInst *cloneImpl() const;
1376
1377public:
1378 /// Constructor with insert-before-instruction semantics.
1379 FCmpInst(
1380 Instruction *InsertBefore, ///< Where to insert
1381 Predicate pred, ///< The predicate to use for the comparison
1382 Value *LHS, ///< The left-hand-side of the expression
1383 Value *RHS, ///< The right-hand-side of the expression
1384 const Twine &NameStr = "" ///< Name of the instruction
1385 ) : CmpInst(makeCmpResultType(LHS->getType()),
1386 Instruction::FCmp, pred, LHS, RHS, NameStr,
1387 InsertBefore) {
1388 AssertOK();
1389 }
1390
1391 /// Constructor with insert-at-end semantics.
1392 FCmpInst(
1393 BasicBlock &InsertAtEnd, ///< Block to insert into.
1394 Predicate pred, ///< The predicate to use for the comparison
1395 Value *LHS, ///< The left-hand-side of the expression
1396 Value *RHS, ///< The right-hand-side of the expression
1397 const Twine &NameStr = "" ///< Name of the instruction
1398 ) : CmpInst(makeCmpResultType(LHS->getType()),
1399 Instruction::FCmp, pred, LHS, RHS, NameStr,
1400 &InsertAtEnd) {
1401 AssertOK();
1402 }
1403
1404 /// Constructor with no-insertion semantics
1405 FCmpInst(
1406 Predicate Pred, ///< The predicate to use for the comparison
1407 Value *LHS, ///< The left-hand-side of the expression
1408 Value *RHS, ///< The right-hand-side of the expression
1409 const Twine &NameStr = "", ///< Name of the instruction
1410 Instruction *FlagsSource = nullptr
1411 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1412 RHS, NameStr, nullptr, FlagsSource) {
1413 AssertOK();
1414 }
1415
1416 /// @returns true if the predicate of this instruction is EQ or NE.
1417 /// Determine if this is an equality predicate.
1418 static bool isEquality(Predicate Pred) {
1419 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1420 Pred == FCMP_UNE;
1421 }
1422
1423 /// @returns true if the predicate of this instruction is EQ or NE.
1424 /// Determine if this is an equality predicate.
1425 bool isEquality() const { return isEquality(getPredicate()); }
1426
1427 /// @returns true if the predicate of this instruction is commutative.
1428 /// Determine if this is a commutative predicate.
1429 bool isCommutative() const {
1430 return isEquality() ||
1431 getPredicate() == FCMP_FALSE ||
1432 getPredicate() == FCMP_TRUE ||
1433 getPredicate() == FCMP_ORD ||
1434 getPredicate() == FCMP_UNO;
1435 }
1436
1437 /// @returns true if the predicate is relational (not EQ or NE).
1438 /// Determine if this a relational predicate.
1439 bool isRelational() const { return !isEquality(); }
1440
1441 /// Exchange the two operands to this instruction in such a way that it does
1442 /// not modify the semantics of the instruction. The predicate value may be
1443 /// changed to retain the same result if the predicate is order dependent
1444/// (e.g. olt).
1445 /// Swap operands and adjust predicate.
1446 void swapOperands() {
1447 setPredicate(getSwappedPredicate());
1448 Op<0>().swap(Op<1>());
1449 }
1450
1451 /// Returns the sequence of all FCmp predicates.
1452 ///
1453 static auto predicates() { return FCmpPredicates(); }
1454
1455 /// Return result of `LHS Pred RHS` comparison.
1456 static bool compare(const APFloat &LHS, const APFloat &RHS,
1457 FCmpInst::Predicate Pred);
1458
1459 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1460 static bool classof(const Instruction *I) {
1461 return I->getOpcode() == Instruction::FCmp;
1462 }
1463 static bool classof(const Value *V) {
1464 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1465 }
1466};
1467
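// --- Illustrative usage sketch (editorial addition, not part of Instructions.h) ---
// Same idea for FCmpInst: the static `compare` and predicate queries above, used on
// concrete APFloats. `demoFCmpPredicates` is hypothetical; assumes <llvm/ADT/APFloat.h>.
static bool demoFCmpPredicates() {
  APFloat X(1.0), Y(2.0);
  bool Less = FCmpInst::compare(X, Y, FCmpInst::FCMP_OLT); // ordered, 1.0 < 2.0 -> true
  // FCMP_OEQ/FCMP_ONE/FCMP_UEQ/FCMP_UNE count as equality predicates; FCMP_OLT does not.
  return Less && FCmpInst::isEquality(FCmpInst::FCMP_OEQ) &&
         !FCmpInst::isEquality(FCmpInst::FCMP_OLT);
}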
1468//===----------------------------------------------------------------------===//
1469/// This class represents a function call, abstracting a target
1470/// machine's calling convention. This class uses the low bit of the SubClassData
1471/// field to indicate whether or not this is a tail call. The rest of the bits
1472/// hold the calling convention of the call.
1473///
1474class CallInst : public CallBase {
1475 CallInst(const CallInst &CI);
1476
1477 /// Construct a CallInst given a range of arguments.
1478 /// Construct a CallInst from a range of arguments
1479 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1480 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1481 Instruction *InsertBefore);
1482
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 const Twine &NameStr, Instruction *InsertBefore)
1485 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1486
1487 /// Construct a CallInst given a range of arguments.
1488 /// Construct a CallInst from a range of arguments
1489 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1490 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1491 BasicBlock *InsertAtEnd);
1492
1493 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1494 Instruction *InsertBefore);
1495
1496 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1497 BasicBlock *InsertAtEnd);
1498
1499 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1500 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1501 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1502
1503 /// Compute the number of operands to allocate.
1504 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1505 // We need one operand for the called function, plus the input operand
1506 // counts provided.
1507 return 1 + NumArgs + NumBundleInputs;
1508 }
1509
1510protected:
1511 // Note: Instruction needs to be a friend here to call cloneImpl.
1512 friend class Instruction;
1513
1514 CallInst *cloneImpl() const;
1515
1516public:
1517 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1518 Instruction *InsertBefore = nullptr) {
1519 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1520 }
1521
1522 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1523 const Twine &NameStr,
1524 Instruction *InsertBefore = nullptr) {
1525 return new (ComputeNumOperands(Args.size()))
1526 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1527 }
1528
1529 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1530 ArrayRef<OperandBundleDef> Bundles = None,
1531 const Twine &NameStr = "",
1532 Instruction *InsertBefore = nullptr) {
1533 const int NumOperands =
1534 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1535 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1536
1537 return new (NumOperands, DescriptorBytes)
1538 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1539 }
1540
1541 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1542 BasicBlock *InsertAtEnd) {
1543 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1544 }
1545
1546 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1547 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1548 return new (ComputeNumOperands(Args.size()))
1549 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1550 }
1551
1552 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1553 ArrayRef<OperandBundleDef> Bundles,
1554 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1555 const int NumOperands =
1556 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1557 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1558
1559 return new (NumOperands, DescriptorBytes)
1560 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1561 }
1562
1563 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1564 Instruction *InsertBefore = nullptr) {
1565 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1566 InsertBefore);
1567 }
1568
1569 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1570 ArrayRef<OperandBundleDef> Bundles = None,
1571 const Twine &NameStr = "",
1572 Instruction *InsertBefore = nullptr) {
1573 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1574 NameStr, InsertBefore);
1575 }
1576
1577 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1578 const Twine &NameStr,
1579 Instruction *InsertBefore = nullptr) {
1580 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1581 InsertBefore);
1582 }
1583
1584 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1585 BasicBlock *InsertAtEnd) {
1586 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1587 InsertAtEnd);
1588 }
1589
1590 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1591 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1592 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1593 InsertAtEnd);
1594 }
1595
1596 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1597 ArrayRef<OperandBundleDef> Bundles,
1598 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1599 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1600 NameStr, InsertAtEnd);
1601 }
1602
1603 /// Create a clone of \p CI with a different set of operand bundles and
1604 /// insert it before \p InsertPt.
1605 ///
1606/// The returned call instruction is identical to \p CI in every way except that
1607 /// the operand bundles for the new instruction are set to the operand bundles
1608 /// in \p Bundles.
1609 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1610 Instruction *InsertPt = nullptr);
1611
1612 /// Generate the IR for a call to malloc:
1613 /// 1. Compute the malloc call's argument as the specified type's size,
1614 /// possibly multiplied by the array size if the array size is not
1615 /// constant 1.
1616 /// 2. Call malloc with that argument.
1617 /// 3. Bitcast the result of the malloc call to the specified type.
1618 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1619 Type *AllocTy, Value *AllocSize,
1620 Value *ArraySize = nullptr,
1621 Function *MallocF = nullptr,
1622 const Twine &Name = "");
1623 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1624 Type *AllocTy, Value *AllocSize,
1625 Value *ArraySize = nullptr,
1626 Function *MallocF = nullptr,
1627 const Twine &Name = "");
1628 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1629 Type *AllocTy, Value *AllocSize,
1630 Value *ArraySize = nullptr,
1631 ArrayRef<OperandBundleDef> Bundles = None,
1632 Function *MallocF = nullptr,
1633 const Twine &Name = "");
1634 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1635 Type *AllocTy, Value *AllocSize,
1636 Value *ArraySize = nullptr,
1637 ArrayRef<OperandBundleDef> Bundles = None,
1638 Function *MallocF = nullptr,
1639 const Twine &Name = "");
1640 /// Generate the IR for a call to the builtin free function.
1641 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1642 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1643 static Instruction *CreateFree(Value *Source,
1644 ArrayRef<OperandBundleDef> Bundles,
1645 Instruction *InsertBefore);
1646 static Instruction *CreateFree(Value *Source,
1647 ArrayRef<OperandBundleDef> Bundles,
1648 BasicBlock *InsertAtEnd);
1649
1650 // Note that 'musttail' implies 'tail'.
1651 enum TailCallKind : unsigned {
1652 TCK_None = 0,
1653 TCK_Tail = 1,
1654 TCK_MustTail = 2,
1655 TCK_NoTail = 3,
1656 TCK_LAST = TCK_NoTail
1657 };
1658
1659 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1660 static_assert(
1661 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1662 "Bitfields must be contiguous");
1663
1664 TailCallKind getTailCallKind() const {
1665 return getSubclassData<TailCallKindField>();
1666 }
1667
1668 bool isTailCall() const {
1669 TailCallKind Kind = getTailCallKind();
1670 return Kind == TCK_Tail || Kind == TCK_MustTail;
1671 }
1672
1673 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1674
1675 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1676
1677 void setTailCallKind(TailCallKind TCK) {
1678 setSubclassData<TailCallKindField>(TCK);
1679 }
1680
1681 void setTailCall(bool IsTc = true) {
1682 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1683 }
1684
1685 /// Return true if the call can return twice
1686 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1687 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1688
1689 // Methods for support type inquiry through isa, cast, and dyn_cast:
1690 static bool classof(const Instruction *I) {
1691 return I->getOpcode() == Instruction::Call;
1692 }
1693 static bool classof(const Value *V) {
1694 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1695 }
1696
1697 /// Updates profile metadata by scaling it by \p S / \p T.
1698 void updateProfWeight(uint64_t S, uint64_t T);
1699
1700private:
1701 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1702 // method so that subclasses cannot accidentally use it.
1703 template <typename Bitfield>
1704 void setSubclassData(typename Bitfield::Type Value) {
1705 Instruction::setSubclassData<Bitfield>(Value);
1706 }
1707};
1708
1709CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1710 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1711 BasicBlock *InsertAtEnd)
1712 : CallBase(Ty->getReturnType(), Instruction::Call,
1713 OperandTraits<CallBase>::op_end(this) -
1714 (Args.size() + CountBundleInputs(Bundles) + 1),
1715 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1716 InsertAtEnd) {
1717 init(Ty, Func, Args, Bundles, NameStr);
1718}
1719
1720CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1721 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1722 Instruction *InsertBefore)
1723 : CallBase(Ty->getReturnType(), Instruction::Call,
1724 OperandTraits<CallBase>::op_end(this) -
1725 (Args.size() + CountBundleInputs(Bundles) + 1),
1726 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1727 InsertBefore) {
1728 init(Ty, Func, Args, Bundles, NameStr);
1729}
1730
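// --- Illustrative usage sketch (editorial addition, not part of Instructions.h) ---
// Creating a call through the FunctionCallee overload of Create() above and tagging
// it as a tail call. `emitTailCall`, `Callee`, `Arg` and `InsertPt` are hypothetical
// names supplied by the caller.
static CallInst *emitTailCall(FunctionCallee Callee, Value *Arg, Instruction *InsertPt) {
  CallInst *CI = CallInst::Create(Callee, {Arg}, "call", InsertPt);
  CI->setTailCallKind(CallInst::TCK_Tail); // a 'musttail' call would use TCK_MustTail
  assert(CI->isTailCall() && !CI->isMustTailCall());
  return CI;
}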
1731//===----------------------------------------------------------------------===//
1732// SelectInst Class
1733//===----------------------------------------------------------------------===//
1734
1735/// This class represents the LLVM 'select' instruction.
1736///
1737class SelectInst : public Instruction {
1738 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1739 Instruction *InsertBefore)
1740 : Instruction(S1->getType(), Instruction::Select,
1741 &Op<0>(), 3, InsertBefore) {
1742 init(C, S1, S2);
1743 setName(NameStr);
1744 }
1745
1746 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1747 BasicBlock *InsertAtEnd)
1748 : Instruction(S1->getType(), Instruction::Select,
1749 &Op<0>(), 3, InsertAtEnd) {
1750 init(C, S1, S2);
1751 setName(NameStr);
1752 }
1753
1754 void init(Value *C, Value *S1, Value *S2) {
1755    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1756 Op<0>() = C;
1757 Op<1>() = S1;
1758 Op<2>() = S2;
1759 }
1760
1761protected:
1762 // Note: Instruction needs to be a friend here to call cloneImpl.
1763 friend class Instruction;
1764
1765 SelectInst *cloneImpl() const;
1766
1767public:
1768 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1769 const Twine &NameStr = "",
1770 Instruction *InsertBefore = nullptr,
1771 Instruction *MDFrom = nullptr) {
1772 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1773 if (MDFrom)
1774 Sel->copyMetadata(*MDFrom);
1775 return Sel;
1776 }
1777
1778 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1779 const Twine &NameStr,
1780 BasicBlock *InsertAtEnd) {
1781 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1782 }
1783
1784 const Value *getCondition() const { return Op<0>(); }
1785 const Value *getTrueValue() const { return Op<1>(); }
1786 const Value *getFalseValue() const { return Op<2>(); }
1787 Value *getCondition() { return Op<0>(); }
1788 Value *getTrueValue() { return Op<1>(); }
1789 Value *getFalseValue() { return Op<2>(); }
1790
1791 void setCondition(Value *V) { Op<0>() = V; }
1792 void setTrueValue(Value *V) { Op<1>() = V; }
1793 void setFalseValue(Value *V) { Op<2>() = V; }
1794
1795 /// Swap the true and false values of the select instruction.
1796 /// This doesn't swap prof metadata.
1797 void swapValues() { Op<1>().swap(Op<2>()); }
1798
1799 /// Return a string if the specified operands are invalid
1800 /// for a select operation, otherwise return null.
1801 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1802
1803 /// Transparently provide more efficient getOperand methods.
1804  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1805
1806 OtherOps getOpcode() const {
1807 return static_cast<OtherOps>(Instruction::getOpcode());
1808 }
1809
1810 // Methods for support type inquiry through isa, cast, and dyn_cast:
1811 static bool classof(const Instruction *I) {
1812 return I->getOpcode() == Instruction::Select;
1813 }
1814 static bool classof(const Value *V) {
1815 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1816 }
1817};
1818
1819template <>
1820struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1821};
1822
1823DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1824
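// --- Illustrative usage sketch (editorial addition, not part of Instructions.h) ---
// Building a select with the factory above. `emitSelect`, `Cond` (an i1), `A`, `B`
// (values of matching type) and `InsertPt` are hypothetical caller-provided names.
static Value *emitSelect(Value *Cond, Value *A, Value *B, Instruction *InsertPt) {
  // areInvalidOperands() returns a diagnostic string, or null when the operands are fine.
  assert(!SelectInst::areInvalidOperands(Cond, A, B));
  SelectInst *Sel = SelectInst::Create(Cond, A, B, "sel", InsertPt);
  // getTrueValue()/getFalseValue() now return A and B; swapValues() would exchange them.
  return Sel;
}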
1825//===----------------------------------------------------------------------===//
1826// VAArgInst Class
1827//===----------------------------------------------------------------------===//
1828
1829/// This class represents the va_arg llvm instruction, which returns
1830/// an argument of the specified type given a va_list and increments that list.
1831///
1832class VAArgInst : public UnaryInstruction {
1833protected:
1834 // Note: Instruction needs to be a friend here to call cloneImpl.
1835 friend class Instruction;
1836
1837 VAArgInst *cloneImpl() const;
1838
1839public:
1840 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1841 Instruction *InsertBefore = nullptr)
1842 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1843 setName(NameStr);
1844 }
1845
1846 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1847 BasicBlock *InsertAtEnd)
1848 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1849 setName(NameStr);
1850 }
1851
1852 Value *getPointerOperand() { return getOperand(0); }
1853 const Value *getPointerOperand() const { return getOperand(0); }
1854 static unsigned getPointerOperandIndex() { return 0U; }
1855
1856 // Methods for support type inquiry through isa, cast, and dyn_cast:
1857 static bool classof(const Instruction *I) {
1858 return I->getOpcode() == VAArg;
1859 }
1860 static bool classof(const Value *V) {
1861 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1862 }
1863};
1864
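// --- Illustrative usage sketch (editorial addition, not part of Instructions.h) ---
// Reading an i32 from a va_list pointer with the constructor above. `emitVAArgI32`,
// `VAListPtr` and `InsertPt` are hypothetical; UnaryInstruction provides its own
// operator new, so constructing the instruction directly with `new` is the usual idiom.
static Value *emitVAArgI32(Value *VAListPtr, Instruction *InsertPt) {
  return new VAArgInst(VAListPtr, Type::getInt32Ty(VAListPtr->getContext()),
                       "va.i32", InsertPt);
}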
1865//===----------------------------------------------------------------------===//
1866// ExtractElementInst Class
1867//===----------------------------------------------------------------------===//
1868
1869/// This instruction extracts a single (scalar)
1870/// element from a VectorType value
1871///
1872class ExtractElementInst : public Instruction {
1873 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1874 Instruction *InsertBefore = nullptr);
1875 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1876 BasicBlock *InsertAtEnd);
1877
1878protected:
1879 // Note: Instruction needs to be a friend here to call cloneImpl.
1880 friend class Instruction;
1881
1882 ExtractElementInst *cloneImpl() const;
1883
1884public:
1885 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1886 const Twine &NameStr = "",
1887 Instruction *InsertBefore = nullptr) {
1888 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1889 }
1890
1891 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1892 const Twine &NameStr,
1893 BasicBlock *InsertAtEnd) {
1894 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1895 }
1896
1897 /// Return true if an extractelement instruction can be
1898 /// formed with the specified operands.
1899 static bool isValidOperands(const Value *Vec, const Value *Idx);
1900
1901 Value *getVectorOperand() { return Op<0>(); }
1902 Value *getIndexOperand() { return Op<1>(); }
1903 const Value *getVectorOperand() const { return Op<0>(); }
1904 const Value *getIndexOperand() const { return Op<1>(); }
1905
1906 VectorType *getVectorOperandType() const {
1907 return cast<VectorType>(getVectorOperand()->getType());
1908 }
1909
1910 /// Transparently provide more efficient getOperand methods.
1911  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1912
1913 // Methods for support type inquiry through isa, cast, and dyn_cast:
1914 static bool classof(const Instruction *I) {
1915 return I->getOpcode() == Instruction::ExtractElement;
1916 }
1917 static bool classof(const Value *V) {
1918 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1919 }
1920};
1921
1922template <>
1923struct OperandTraits<ExtractElementInst> :
1924 public FixedNumOperandTraits<ExtractElementInst, 2> {
1925};
1926
1927DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1928
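// --- Illustrative usage sketch (editorial addition, not part of Instructions.h) ---
// Extracting lane 0 of a vector with the factory above. `emitExtractLane0`, `Vec`
// and `InsertPt` are hypothetical; assumes <llvm/IR/Constants.h> for ConstantInt.
static Value *emitExtractLane0(Value *Vec, Instruction *InsertPt) {
  Value *Zero = ConstantInt::get(Type::getInt64Ty(Vec->getContext()), 0);
  assert(ExtractElementInst::isValidOperands(Vec, Zero));
  return ExtractElementInst::Create(Vec, Zero, "lane0", InsertPt);
}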
1929//===----------------------------------------------------------------------===//
1930// InsertElementInst Class
1931//===----------------------------------------------------------------------===//
1932
1933/// This instruction inserts a single (scalar)
1934/// element into a VectorType value
1935///
1936class InsertElementInst : public Instruction {
1937 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1938 const Twine &NameStr = "",
1939 Instruction *InsertBefore = nullptr);
1940 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1941 BasicBlock *InsertAtEnd);
1942
1943protected:
1944 // Note: Instruction needs to be a friend here to call cloneImpl.
1945 friend class Instruction;
1946
1947 InsertElementInst *cloneImpl() const;
1948
1949public:
1950 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1951 const Twine &NameStr = "",
1952 Instruction *InsertBefore = nullptr) {
1953 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1954 }
1955
1956 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1957 const Twine &NameStr,
1958 BasicBlock *InsertAtEnd) {
1959 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1960 }
1961
1962 /// Return true if an insertelement instruction can be
1963 /// formed with the specified operands.
1964 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1965 const Value *Idx);
1966
1967 /// Overload to return most specific vector type.
1968 ///
1969 VectorType *getType() const {
1970 return cast<VectorType>(Instruction::getType());
1971 }
1972
1973 /// Transparently provide more efficient getOperand methods.
1974  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1975
1976 // Methods for support type inquiry through isa, cast, and dyn_cast:
1977 static bool classof(const Instruction *I) {
1978 return I->getOpcode() == Instruction::InsertElement;
1979 }
1980 static bool classof(const Value *V) {
1981 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1982 }
1983};
1984
1985template <>
1986struct OperandTraits<InsertElementInst> :
1987 public FixedNumOperandTraits<InsertElementInst, 3> {
1988};
1989
1990DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1990, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<InsertElementInst
>::op_begin(const_cast<InsertElementInst*>(this))[i_nocapture
].get()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1990, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<InsertElementInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned InsertElementInst::getNumOperands
() const { return OperandTraits<InsertElementInst>::operands
(this); } template <int Idx_nocapture> Use &InsertElementInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &InsertElementInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1991
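// Editor's note: a minimal usage sketch for the InsertElementInst API above,
// not part of the analyzed source. It assumes `Vec` has vector type, `Elt`
// matches its element type, and `InsertPt` is an existing instruction; the
// helper name `insertAtLaneZero` is illustrative only.
static Value *insertAtLaneZero(Value *Vec, Value *Elt, Instruction *InsertPt) {
  LLVMContext &Ctx = Vec->getContext();
  Value *Idx = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
  // isValidOperands rejects mismatched element types and non-integer indices.
  if (!InsertElementInst::isValidOperands(Vec, Elt, Idx))
    return nullptr;
  return InsertElementInst::Create(Vec, Elt, Idx, "ins0", InsertPt);
}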
1992//===----------------------------------------------------------------------===//
1993// ShuffleVectorInst Class
1994//===----------------------------------------------------------------------===//
1995
1996constexpr int UndefMaskElem = -1;
1997
1998/// This instruction constructs a fixed permutation of two
1999/// input vectors.
2000///
2001/// For each element of the result vector, the shuffle mask selects an element
2002/// from one of the input vectors to copy to the result. Non-negative elements
2003/// in the mask represent an index into the concatenated pair of input vectors.
2004/// UndefMaskElem (-1) specifies that the result element is undefined.
2005///
2006/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2007/// requirement may be relaxed in the future.
2008class ShuffleVectorInst : public Instruction {
2009 SmallVector<int, 4> ShuffleMask;
2010 Constant *ShuffleMaskForBitcode;
2011
2012protected:
2013 // Note: Instruction needs to be a friend here to call cloneImpl.
2014 friend class Instruction;
2015
2016 ShuffleVectorInst *cloneImpl() const;
2017
2018public:
2019 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2020 Instruction *InsertBefore = nullptr);
2021 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2022 BasicBlock *InsertAtEnd);
2023 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2024 Instruction *InsertBefore = nullptr);
2025 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2026 BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2028 const Twine &NameStr = "",
2029                     Instruction *InsertBefore = nullptr);
2030 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2031 const Twine &NameStr, BasicBlock *InsertAtEnd);
2032 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2033 const Twine &NameStr = "",
2034                     Instruction *InsertBefore = nullptr);
2035 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2036 const Twine &NameStr, BasicBlock *InsertAtEnd);
2037
2038 void *operator new(size_t S) { return User::operator new(S, 2); }
2039 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2040
2041 /// Swap the operands and adjust the mask to preserve the semantics
2042 /// of the instruction.
2043 void commute();
2044
2045 /// Return true if a shufflevector instruction can be
2046 /// formed with the specified operands.
2047 static bool isValidOperands(const Value *V1, const Value *V2,
2048 const Value *Mask);
2049 static bool isValidOperands(const Value *V1, const Value *V2,
2050 ArrayRef<int> Mask);
2051
2052 /// Overload to return most specific vector type.
2053 ///
2054 VectorType *getType() const {
2055 return cast<VectorType>(Instruction::getType());
2056 }
2057
2058 /// Transparently provide more efficient getOperand methods.
2059 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2060
2061 /// Return the shuffle mask value of this instruction for the given element
2062 /// index. Return UndefMaskElem if the element is undef.
2063 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2064
2065 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 static void getShuffleMask(const Constant *Mask,
2068 SmallVectorImpl<int> &Result);
2069
2070 /// Return the mask for this instruction as a vector of integers. Undefined
2071 /// elements of the mask are returned as UndefMaskElem.
2072 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2073 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2074 }
2075
2076 /// Return the mask for this instruction, for use in bitcode.
2077 ///
2078 /// TODO: This is temporary until we decide a new bitcode encoding for
2079 /// shufflevector.
2080 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2081
2082 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2083 Type *ResultTy);
2084
2085 void setShuffleMask(ArrayRef<int> Mask);
2086
2087 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2088
2089 /// Return true if this shuffle returns a vector with a different number of
2090 /// elements than its source vectors.
2091 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2092 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2093 bool changesLength() const {
2094 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2095 ->getElementCount()
2096 .getKnownMinValue();
2097 unsigned NumMaskElts = ShuffleMask.size();
2098 return NumSourceElts != NumMaskElts;
2099 }
2100
2101 /// Return true if this shuffle returns a vector with a greater number of
2102 /// elements than its source vectors.
2103 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2104 bool increasesLength() const {
2105 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2106 ->getElementCount()
2107 .getKnownMinValue();
2108 unsigned NumMaskElts = ShuffleMask.size();
2109 return NumSourceElts < NumMaskElts;
2110 }
2111
2112 /// Return true if this shuffle mask chooses elements from exactly one source
2113 /// vector.
2114 /// Example: <7,5,undef,7>
2115 /// This assumes that vector operands are the same length as the mask.
2116 static bool isSingleSourceMask(ArrayRef<int> Mask);
2117 static bool isSingleSourceMask(const Constant *Mask) {
2118 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2118, __extension__ __PRETTY_FUNCTION__
))
;
2119 SmallVector<int, 16> MaskAsInts;
2120 getShuffleMask(Mask, MaskAsInts);
2121 return isSingleSourceMask(MaskAsInts);
2122 }
2123
2124 /// Return true if this shuffle chooses elements from exactly one source
2125 /// vector without changing the length of that vector.
2126 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2127 /// TODO: Optionally allow length-changing shuffles.
2128 bool isSingleSource() const {
2129 return !changesLength() && isSingleSourceMask(ShuffleMask);
2130 }
2131
2132 /// Return true if this shuffle mask chooses elements from exactly one source
2133 /// vector without lane crossings. A shuffle using this mask is not
2134 /// necessarily a no-op because it may change the number of elements from its
2135 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2136 /// Example: <undef,undef,2,3>
2137 static bool isIdentityMask(ArrayRef<int> Mask);
2138 static bool isIdentityMask(const Constant *Mask) {
2139 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2139, __extension__ __PRETTY_FUNCTION__
))
;
2140 SmallVector<int, 16> MaskAsInts;
2141 getShuffleMask(Mask, MaskAsInts);
2142 return isIdentityMask(MaskAsInts);
2143 }
2144
2145 /// Return true if this shuffle chooses elements from exactly one source
2146 /// vector without lane crossings and does not change the number of elements
2147 /// from its input vectors.
2148 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2149 bool isIdentity() const {
2150 return !changesLength() && isIdentityMask(ShuffleMask);
2151 }
2152
2153 /// Return true if this shuffle lengthens exactly one source vector with
2154 /// undefs in the high elements.
2155 bool isIdentityWithPadding() const;
2156
2157 /// Return true if this shuffle extracts the first N elements of exactly one
2158 /// source vector.
2159 bool isIdentityWithExtract() const;
2160
2161 /// Return true if this shuffle concatenates its 2 source vectors. This
2162 /// returns false if either input is undefined. In that case, the shuffle is
2163  /// better classified as an identity with padding operation.
2164 bool isConcat() const;
2165
2166 /// Return true if this shuffle mask chooses elements from its source vectors
2167 /// without lane crossings. A shuffle using this mask would be
2168 /// equivalent to a vector select with a constant condition operand.
2169 /// Example: <4,1,6,undef>
2170 /// This returns false if the mask does not choose from both input vectors.
2171 /// In that case, the shuffle is better classified as an identity shuffle.
2172 /// This assumes that vector operands are the same length as the mask
2173 /// (a length-changing shuffle can never be equivalent to a vector select).
2174 static bool isSelectMask(ArrayRef<int> Mask);
2175 static bool isSelectMask(const Constant *Mask) {
2176 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2176, __extension__ __PRETTY_FUNCTION__
))
;
2177 SmallVector<int, 16> MaskAsInts;
2178 getShuffleMask(Mask, MaskAsInts);
2179 return isSelectMask(MaskAsInts);
2180 }
2181
2182 /// Return true if this shuffle chooses elements from its source vectors
2183 /// without lane crossings and all operands have the same number of elements.
2184 /// In other words, this shuffle is equivalent to a vector select with a
2185 /// constant condition operand.
2186 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2187 /// This returns false if the mask does not choose from both input vectors.
2188 /// In that case, the shuffle is better classified as an identity shuffle.
2189 /// TODO: Optionally allow length-changing shuffles.
2190 bool isSelect() const {
2191 return !changesLength() && isSelectMask(ShuffleMask);
2192 }
2193
2194 /// Return true if this shuffle mask swaps the order of elements from exactly
2195 /// one source vector.
2196 /// Example: <7,6,undef,4>
2197 /// This assumes that vector operands are the same length as the mask.
2198 static bool isReverseMask(ArrayRef<int> Mask);
2199 static bool isReverseMask(const Constant *Mask) {
2200 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2200, __extension__ __PRETTY_FUNCTION__
))
;
2201 SmallVector<int, 16> MaskAsInts;
2202 getShuffleMask(Mask, MaskAsInts);
2203 return isReverseMask(MaskAsInts);
2204 }
2205
2206 /// Return true if this shuffle swaps the order of elements from exactly
2207 /// one source vector.
2208 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2209 /// TODO: Optionally allow length-changing shuffles.
2210 bool isReverse() const {
2211 return !changesLength() && isReverseMask(ShuffleMask);
2212 }
2213
2214 /// Return true if this shuffle mask chooses all elements with the same value
2215 /// as the first element of exactly one source vector.
2216 /// Example: <4,undef,undef,4>
2217 /// This assumes that vector operands are the same length as the mask.
2218 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2219 static bool isZeroEltSplatMask(const Constant *Mask) {
2220 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2220, __extension__ __PRETTY_FUNCTION__
))
;
2221 SmallVector<int, 16> MaskAsInts;
2222 getShuffleMask(Mask, MaskAsInts);
2223 return isZeroEltSplatMask(MaskAsInts);
2224 }
2225
2226 /// Return true if all elements of this shuffle are the same value as the
2227 /// first element of exactly one source vector without changing the length
2228 /// of that vector.
2229 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2230 /// TODO: Optionally allow length-changing shuffles.
2231 /// TODO: Optionally allow splats from other elements.
2232 bool isZeroEltSplat() const {
2233 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2234 }
2235
2236 /// Return true if this shuffle mask is a transpose mask.
2237 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2238 /// even- or odd-numbered vector elements from two n-dimensional source
2239 /// vectors and write each result into consecutive elements of an
2240 /// n-dimensional destination vector. Two shuffles are necessary to complete
2241 /// the transpose, one for the even elements and another for the odd elements.
2242 /// This description closely follows how the TRN1 and TRN2 AArch64
2243 /// instructions operate.
2244 ///
2245 /// For example, a simple 2x2 matrix can be transposed with:
2246 ///
2247 /// ; Original matrix
2248 /// m0 = < a, b >
2249 /// m1 = < c, d >
2250 ///
2251 /// ; Transposed matrix
2252 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2253 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2254 ///
2255 /// For matrices having greater than n columns, the resulting nx2 transposed
2256 /// matrix is stored in two result vectors such that one vector contains
2257 /// interleaved elements from all the even-numbered rows and the other vector
2258 /// contains interleaved elements from all the odd-numbered rows. For example,
2259 /// a 2x4 matrix can be transposed with:
2260 ///
2261 /// ; Original matrix
2262 /// m0 = < a, b, c, d >
2263 /// m1 = < e, f, g, h >
2264 ///
2265 /// ; Transposed matrix
2266 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2267 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2268 static bool isTransposeMask(ArrayRef<int> Mask);
2269 static bool isTransposeMask(const Constant *Mask) {
2270 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2270, __extension__ __PRETTY_FUNCTION__
))
;
2271 SmallVector<int, 16> MaskAsInts;
2272 getShuffleMask(Mask, MaskAsInts);
2273 return isTransposeMask(MaskAsInts);
2274 }
2275
2276 /// Return true if this shuffle transposes the elements of its inputs without
2277 /// changing the length of the vectors. This operation may also be known as a
2278 /// merge or interleave. See the description for isTransposeMask() for the
2279 /// exact specification.
2280 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2281 bool isTranspose() const {
2282 return !changesLength() && isTransposeMask(ShuffleMask);
2283 }
2284
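// Editor's note: a small check illustrating the transpose-mask documentation
// above; not from the analyzed source. The two masks are the <0,4,2,6> and
// <1,5,3,7> examples given in the comment for isTransposeMask().
static bool transposeMaskExamplesHold() {
  SmallVector<int, 4> EvenMask = {0, 4, 2, 6};
  SmallVector<int, 4> OddMask = {1, 5, 3, 7};
  return ShuffleVectorInst::isTransposeMask(EvenMask) &&
         ShuffleVectorInst::isTransposeMask(OddMask);
}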
2285 /// Return true if this shuffle mask is an extract subvector mask.
2286 /// A valid extract subvector mask returns a smaller vector from a single
2287 /// source operand. The base extraction index is returned as well.
2288 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2289 int &Index);
2290 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2291 int &Index) {
2292 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2292, __extension__ __PRETTY_FUNCTION__
))
;
2293 // Not possible to express a shuffle mask for a scalable vector for this
2294 // case.
2295 if (isa<ScalableVectorType>(Mask->getType()))
2296 return false;
2297 SmallVector<int, 16> MaskAsInts;
2298 getShuffleMask(Mask, MaskAsInts);
2299 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2300 }
2301
2302 /// Return true if this shuffle mask is an extract subvector mask.
2303 bool isExtractSubvectorMask(int &Index) const {
2304 // Not possible to express a shuffle mask for a scalable vector for this
2305 // case.
2306 if (isa<ScalableVectorType>(getType()))
2307 return false;
2308
2309 int NumSrcElts =
2310 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2311 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2312 }
2313
2314 /// Return true if this shuffle mask is an insert subvector mask.
2315 /// A valid insert subvector mask inserts the lowest elements of a second
2316  /// source operand into an in-place first source operand.
2317  /// Both the subvector width and the insertion index are returned.
2318 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2319 int &NumSubElts, int &Index);
2320 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2321 int &NumSubElts, int &Index) {
2322 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2322, __extension__ __PRETTY_FUNCTION__
))
;
2323 // Not possible to express a shuffle mask for a scalable vector for this
2324 // case.
2325 if (isa<ScalableVectorType>(Mask->getType()))
2326 return false;
2327 SmallVector<int, 16> MaskAsInts;
2328 getShuffleMask(Mask, MaskAsInts);
2329 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2330 }
2331
2332 /// Return true if this shuffle mask is an insert subvector mask.
2333 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2334 // Not possible to express a shuffle mask for a scalable vector for this
2335 // case.
2336 if (isa<ScalableVectorType>(getType()))
2337 return false;
2338
2339 int NumSrcElts =
2340 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2341 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2342 }
2343
2344 /// Return true if this shuffle mask replicates each of the \p VF elements
2345 /// in a vector \p ReplicationFactor times.
2346 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2347 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2348 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2349 int &VF);
2350 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2351 int &VF) {
2352 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2352, __extension__ __PRETTY_FUNCTION__
))
;
2353 // Not possible to express a shuffle mask for a scalable vector for this
2354 // case.
2355 if (isa<ScalableVectorType>(Mask->getType()))
2356 return false;
2357 SmallVector<int, 16> MaskAsInts;
2358 getShuffleMask(Mask, MaskAsInts);
2359 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2360 }
2361
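// Editor's note: illustrative only. For the documented example mask
// <0,0,0,1,1,1,2,2,2,3,3,3>, isReplicationMask() is expected to report
// ReplicationFactor == 3 and VF == 4.
static void checkReplicationExample() {
  SmallVector<int, 12> Mask = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3};
  int ReplicationFactor = 0, VF = 0;
  bool IsRep =
      ShuffleVectorInst::isReplicationMask(Mask, ReplicationFactor, VF);
  (void)IsRep; // expected: true, with ReplicationFactor == 3 and VF == 4
}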
2362 /// Return true if this shuffle mask is a replication mask.
2363 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2364
2365 /// Change values in a shuffle permute mask assuming the two vector operands
2366 /// of length InVecNumElts have swapped position.
2367 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2368 unsigned InVecNumElts) {
2369 for (int &Idx : Mask) {
2370 if (Idx == -1)
2371 continue;
2372 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2373 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "llvm/include/llvm/IR/Instructions.h", 2374, __extension__ __PRETTY_FUNCTION__
))
2374 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "llvm/include/llvm/IR/Instructions.h", 2374, __extension__ __PRETTY_FUNCTION__
))
;
2375 }
2376 }
2377
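// Editor's note: worked example for commuteShuffleMask(), not part of the
// analyzed source. With two 4-element operands, indices below 4 move to the
// second vector and indices 4..7 move to the first; undef (-1) lanes stay.
static void commuteExample() {
  SmallVector<int, 4> Mask = {0, 5, -1, 7};
  ShuffleVectorInst::commuteShuffleMask(Mask, /*InVecNumElts=*/4);
  // Mask is now {4, 1, -1, 3}.
}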
2378 // Methods for support type inquiry through isa, cast, and dyn_cast:
2379 static bool classof(const Instruction *I) {
2380 return I->getOpcode() == Instruction::ShuffleVector;
2381 }
2382 static bool classof(const Value *V) {
2383 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2384 }
2385};
2386
2387template <>
2388struct OperandTraits<ShuffleVectorInst>
2389 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2390
2391DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2391, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<ShuffleVectorInst
>::op_begin(const_cast<ShuffleVectorInst*>(this))[i_nocapture
].get()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2391, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<ShuffleVectorInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ShuffleVectorInst::getNumOperands
() const { return OperandTraits<ShuffleVectorInst>::operands
(this); } template <int Idx_nocapture> Use &ShuffleVectorInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &ShuffleVectorInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2392
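// Editor's note: a hedged construction sketch, not code from ValueTracking.cpp.
// It assumes V1 and V2 share a 4-element fixed vector type and InsertPt is an
// existing instruction; the helper name `makeReverse4` is invented here.
static ShuffleVectorInst *makeReverse4(Value *V1, Value *V2,
                                       Instruction *InsertPt) {
  int Mask[] = {3, 2, 1, 0};
  if (!ShuffleVectorInst::isValidOperands(V1, V2, Mask))
    return nullptr;
  auto *Shuf = new ShuffleVectorInst(V1, V2, Mask, "rev", InsertPt);
  // For this mask the instruction reports isSingleSource() and isReverse().
  return Shuf;
}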
2393//===----------------------------------------------------------------------===//
2394// ExtractValueInst Class
2395//===----------------------------------------------------------------------===//
2396
2397/// This instruction extracts a struct member or array
2398/// element value from an aggregate value.
2399///
2400class ExtractValueInst : public UnaryInstruction {
2401 SmallVector<unsigned, 4> Indices;
2402
2403 ExtractValueInst(const ExtractValueInst &EVI);
2404
2405  /// Constructors - Create an extractvalue instruction with a base aggregate
2406 /// value and a list of indices. The first ctor can optionally insert before
2407 /// an existing instruction, the second appends the new instruction to the
2408 /// specified BasicBlock.
2409 inline ExtractValueInst(Value *Agg,
2410 ArrayRef<unsigned> Idxs,
2411 const Twine &NameStr,
2412 Instruction *InsertBefore);
2413 inline ExtractValueInst(Value *Agg,
2414 ArrayRef<unsigned> Idxs,
2415 const Twine &NameStr, BasicBlock *InsertAtEnd);
2416
2417 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2418
2419protected:
2420 // Note: Instruction needs to be a friend here to call cloneImpl.
2421 friend class Instruction;
2422
2423 ExtractValueInst *cloneImpl() const;
2424
2425public:
2426 static ExtractValueInst *Create(Value *Agg,
2427 ArrayRef<unsigned> Idxs,
2428 const Twine &NameStr = "",
2429 Instruction *InsertBefore = nullptr) {
2430 return new
2431 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2432 }
2433
2434 static ExtractValueInst *Create(Value *Agg,
2435 ArrayRef<unsigned> Idxs,
2436 const Twine &NameStr,
2437 BasicBlock *InsertAtEnd) {
2438 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2439 }
2440
2441 /// Returns the type of the element that would be extracted
2442 /// with an extractvalue instruction with the specified parameters.
2443 ///
2444 /// Null is returned if the indices are invalid for the specified type.
2445 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2446
2447 using idx_iterator = const unsigned*;
2448
2449 inline idx_iterator idx_begin() const { return Indices.begin(); }
2450 inline idx_iterator idx_end() const { return Indices.end(); }
2451 inline iterator_range<idx_iterator> indices() const {
2452 return make_range(idx_begin(), idx_end());
2453 }
2454
2455 Value *getAggregateOperand() {
2456 return getOperand(0);
2457 }
2458 const Value *getAggregateOperand() const {
2459 return getOperand(0);
2460 }
2461 static unsigned getAggregateOperandIndex() {
2462 return 0U; // get index for modifying correct operand
2463 }
2464
2465 ArrayRef<unsigned> getIndices() const {
2466 return Indices;
2467 }
2468
2469 unsigned getNumIndices() const {
2470 return (unsigned)Indices.size();
2471 }
2472
2473 bool hasIndices() const {
2474 return true;
2475 }
2476
2477 // Methods for support type inquiry through isa, cast, and dyn_cast:
2478 static bool classof(const Instruction *I) {
2479 return I->getOpcode() == Instruction::ExtractValue;
2480 }
2481 static bool classof(const Value *V) {
2482 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2483 }
2484};
2485
2486ExtractValueInst::ExtractValueInst(Value *Agg,
2487 ArrayRef<unsigned> Idxs,
2488 const Twine &NameStr,
2489 Instruction *InsertBefore)
2490 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2491 ExtractValue, Agg, InsertBefore) {
2492 init(Idxs, NameStr);
2493}
2494
2495ExtractValueInst::ExtractValueInst(Value *Agg,
2496 ArrayRef<unsigned> Idxs,
2497 const Twine &NameStr,
2498 BasicBlock *InsertAtEnd)
2499 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2500 ExtractValue, Agg, InsertAtEnd) {
2501 init(Idxs, NameStr);
2502}
2503
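// Editor's note: usage sketch only. It relies on the documented contract that
// getIndexedType() returns null for invalid indices; `extractField` and its
// parameters are illustrative names.
static ExtractValueInst *extractField(Value *Agg, unsigned FieldNo,
                                      Instruction *InsertPt) {
  unsigned Idxs[] = {FieldNo};
  if (!ExtractValueInst::getIndexedType(Agg->getType(), Idxs))
    return nullptr; // the indices do not address a member of Agg's type
  return ExtractValueInst::Create(Agg, Idxs, "field", InsertPt);
}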
2504//===----------------------------------------------------------------------===//
2505// InsertValueInst Class
2506//===----------------------------------------------------------------------===//
2507
2508/// This instruction inserts a struct field or array element
2509/// value into an aggregate value.
2510///
2511class InsertValueInst : public Instruction {
2512 SmallVector<unsigned, 4> Indices;
2513
2514 InsertValueInst(const InsertValueInst &IVI);
2515
2516  /// Constructors - Create an insertvalue instruction with a base aggregate
2517 /// value, a value to insert, and a list of indices. The first ctor can
2518 /// optionally insert before an existing instruction, the second appends
2519 /// the new instruction to the specified BasicBlock.
2520 inline InsertValueInst(Value *Agg, Value *Val,
2521 ArrayRef<unsigned> Idxs,
2522 const Twine &NameStr,
2523 Instruction *InsertBefore);
2524 inline InsertValueInst(Value *Agg, Value *Val,
2525 ArrayRef<unsigned> Idxs,
2526 const Twine &NameStr, BasicBlock *InsertAtEnd);
2527
2528 /// Constructors - These two constructors are convenience methods because one
2529 /// and two index insertvalue instructions are so common.
2530 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2531 const Twine &NameStr = "",
2532 Instruction *InsertBefore = nullptr);
2533 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2534 BasicBlock *InsertAtEnd);
2535
2536 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2537 const Twine &NameStr);
2538
2539protected:
2540 // Note: Instruction needs to be a friend here to call cloneImpl.
2541 friend class Instruction;
2542
2543 InsertValueInst *cloneImpl() const;
2544
2545public:
2546 // allocate space for exactly two operands
2547 void *operator new(size_t S) { return User::operator new(S, 2); }
2548 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2549
2550 static InsertValueInst *Create(Value *Agg, Value *Val,
2551 ArrayRef<unsigned> Idxs,
2552 const Twine &NameStr = "",
2553 Instruction *InsertBefore = nullptr) {
2554 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
11. Passing null pointer value via 1st parameter 'Agg'
12. Calling constructor for 'InsertValueInst'
2555 }
2556
2557 static InsertValueInst *Create(Value *Agg, Value *Val,
2558 ArrayRef<unsigned> Idxs,
2559 const Twine &NameStr,
2560 BasicBlock *InsertAtEnd) {
2561 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2562 }
2563
2564 /// Transparently provide more efficient getOperand methods.
2565 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2566
2567 using idx_iterator = const unsigned*;
2568
2569 inline idx_iterator idx_begin() const { return Indices.begin(); }
2570 inline idx_iterator idx_end() const { return Indices.end(); }
2571 inline iterator_range<idx_iterator> indices() const {
2572 return make_range(idx_begin(), idx_end());
2573 }
2574
2575 Value *getAggregateOperand() {
2576 return getOperand(0);
2577 }
2578 const Value *getAggregateOperand() const {
2579 return getOperand(0);
2580 }
2581 static unsigned getAggregateOperandIndex() {
2582 return 0U; // get index for modifying correct operand
2583 }
2584
2585 Value *getInsertedValueOperand() {
2586 return getOperand(1);
2587 }
2588 const Value *getInsertedValueOperand() const {
2589 return getOperand(1);
2590 }
2591 static unsigned getInsertedValueOperandIndex() {
2592 return 1U; // get index for modifying correct operand
2593 }
2594
2595 ArrayRef<unsigned> getIndices() const {
2596 return Indices;
2597 }
2598
2599 unsigned getNumIndices() const {
2600 return (unsigned)Indices.size();
2601 }
2602
2603 bool hasIndices() const {
2604 return true;
2605 }
2606
2607 // Methods for support type inquiry through isa, cast, and dyn_cast:
2608 static bool classof(const Instruction *I) {
2609 return I->getOpcode() == Instruction::InsertValue;
2610 }
2611 static bool classof(const Value *V) {
2612 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2613 }
2614};
2615
2616template <>
2617struct OperandTraits<InsertValueInst> :
2618 public FixedNumOperandTraits<InsertValueInst, 2> {
2619};
2620
2621InsertValueInst::InsertValueInst(Value *Agg,
2622 Value *Val,
2623 ArrayRef<unsigned> Idxs,
2624 const Twine &NameStr,
2625 Instruction *InsertBefore)
2626 : Instruction(Agg->getType(), InsertValue,
13. Called C++ object pointer is null
2627 OperandTraits<InsertValueInst>::op_begin(this),
2628 2, InsertBefore) {
2629 init(Agg, Val, Idxs, NameStr);
2630}
2631
2632InsertValueInst::InsertValueInst(Value *Agg,
2633 Value *Val,
2634 ArrayRef<unsigned> Idxs,
2635 const Twine &NameStr,
2636 BasicBlock *InsertAtEnd)
2637 : Instruction(Agg->getType(), InsertValue,
2638 OperandTraits<InsertValueInst>::op_begin(this),
2639 2, InsertAtEnd) {
2640 init(Agg, Val, Idxs, NameStr);
2641}
2642
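// Editor's note: the constructor at line 2626 above dereferences
// Agg->getType(), which is the null dereference reported by the analyzer.
// The sketch below is a hypothetical caller-side guard, not code from
// ValueTracking.cpp: it refuses to build the instruction when Agg is null so
// that InsertValueInst::Create is never reached with a null aggregate.
static InsertValueInst *insertFieldIfPossible(Value *Agg, Value *Val,
                                              unsigned FieldNo,
                                              Instruction *InsertPt) {
  if (!Agg || !Val)
    return nullptr; // avoids the path flagged at line 2626
  unsigned Idxs[] = {FieldNo};
  return InsertValueInst::Create(Agg, Val, Idxs, "ins", InsertPt);
}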
2643DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InsertValueInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2643, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<InsertValueInst
>::op_begin(const_cast<InsertValueInst*>(this))[i_nocapture
].get()); } void InsertValueInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertValueInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2643, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<InsertValueInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned InsertValueInst::getNumOperands
() const { return OperandTraits<InsertValueInst>::operands
(this); } template <int Idx_nocapture> Use &InsertValueInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &InsertValueInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2644
2645//===----------------------------------------------------------------------===//
2646// PHINode Class
2647//===----------------------------------------------------------------------===//
2648
2649// PHINode - The PHINode class is used to represent the magical mystical PHI
2650// node, that can not exist in nature, but can be synthesized in a computer
2651// scientist's overactive imagination.
2652//
2653class PHINode : public Instruction {
2654 /// The number of operands actually allocated. NumOperands is
2655 /// the number actually in use.
2656 unsigned ReservedSpace;
2657
2658 PHINode(const PHINode &PN);
2659
2660 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2661 const Twine &NameStr = "",
2662 Instruction *InsertBefore = nullptr)
2663 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2664 ReservedSpace(NumReservedValues) {
2665 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "llvm/include/llvm/IR/Instructions.h", 2665, __extension__ __PRETTY_FUNCTION__
))
;
2666 setName(NameStr);
2667 allocHungoffUses(ReservedSpace);
2668 }
2669
2670 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2671 BasicBlock *InsertAtEnd)
2672 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2673 ReservedSpace(NumReservedValues) {
2674 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "llvm/include/llvm/IR/Instructions.h", 2674, __extension__ __PRETTY_FUNCTION__
))
;
2675 setName(NameStr);
2676 allocHungoffUses(ReservedSpace);
2677 }
2678
2679protected:
2680 // Note: Instruction needs to be a friend here to call cloneImpl.
2681 friend class Instruction;
2682
2683 PHINode *cloneImpl() const;
2684
2685 // allocHungoffUses - this is more complicated than the generic
2686 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2687 // values and pointers to the incoming blocks, all in one allocation.
2688 void allocHungoffUses(unsigned N) {
2689 User::allocHungoffUses(N, /* IsPhi */ true);
2690 }
2691
2692public:
2693 /// Constructors - NumReservedValues is a hint for the number of incoming
2694 /// edges that this phi node will have (use 0 if you really have no idea).
2695 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2696 const Twine &NameStr = "",
2697 Instruction *InsertBefore = nullptr) {
2698 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2699 }
2700
2701 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2702 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2703 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2704 }
2705
2706 /// Provide fast operand accessors
2707 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2708
2709 // Block iterator interface. This provides access to the list of incoming
2710 // basic blocks, which parallels the list of incoming values.
2711
2712 using block_iterator = BasicBlock **;
2713 using const_block_iterator = BasicBlock * const *;
2714
2715 block_iterator block_begin() {
2716 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2717 }
2718
2719 const_block_iterator block_begin() const {
2720 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2721 }
2722
2723 block_iterator block_end() {
2724 return block_begin() + getNumOperands();
2725 }
2726
2727 const_block_iterator block_end() const {
2728 return block_begin() + getNumOperands();
2729 }
2730
2731 iterator_range<block_iterator> blocks() {
2732 return make_range(block_begin(), block_end());
2733 }
2734
2735 iterator_range<const_block_iterator> blocks() const {
2736 return make_range(block_begin(), block_end());
2737 }
2738
2739 op_range incoming_values() { return operands(); }
2740
2741 const_op_range incoming_values() const { return operands(); }
2742
2743 /// Return the number of incoming edges
2744 ///
2745 unsigned getNumIncomingValues() const { return getNumOperands(); }
2746
2747 /// Return incoming value number x
2748 ///
2749 Value *getIncomingValue(unsigned i) const {
2750 return getOperand(i);
2751 }
2752 void setIncomingValue(unsigned i, Value *V) {
2753 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "llvm/include/llvm/IR/Instructions.h", 2753, __extension__ __PRETTY_FUNCTION__
))
;
2754 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "llvm/include/llvm/IR/Instructions.h", 2755, __extension__ __PRETTY_FUNCTION__
))
2755 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "llvm/include/llvm/IR/Instructions.h", 2755, __extension__ __PRETTY_FUNCTION__
))
;
2756 setOperand(i, V);
2757 }
2758
2759 static unsigned getOperandNumForIncomingValue(unsigned i) {
2760 return i;
2761 }
2762
2763 static unsigned getIncomingValueNumForOperand(unsigned i) {
2764 return i;
2765 }
2766
2767 /// Return incoming basic block number @p i.
2768 ///
2769 BasicBlock *getIncomingBlock(unsigned i) const {
2770 return block_begin()[i];
2771 }
2772
2773 /// Return incoming basic block corresponding
2774 /// to an operand of the PHI.
2775 ///
2776 BasicBlock *getIncomingBlock(const Use &U) const {
2777 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "llvm/include/llvm/IR/Instructions.h", 2777, __extension__ __PRETTY_FUNCTION__
))
;
2778 return getIncomingBlock(unsigned(&U - op_begin()));
2779 }
2780
2781 /// Return incoming basic block corresponding
2782 /// to value use iterator.
2783 ///
2784 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2785 return getIncomingBlock(I.getUse());
2786 }
2787
2788 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2789 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "llvm/include/llvm/IR/Instructions.h", 2789, __extension__ __PRETTY_FUNCTION__
))
;
2790 block_begin()[i] = BB;
2791 }
2792
2793 /// Replace every incoming basic block \p Old to basic block \p New.
2794 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2795 assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "llvm/include/llvm/IR/Instructions.h", 2795, __extension__ __PRETTY_FUNCTION__
))
;
2796 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2797 if (getIncomingBlock(Op) == Old)
2798 setIncomingBlock(Op, New);
2799 }
2800
2801 /// Add an incoming value to the end of the PHI list
2802 ///
2803 void addIncoming(Value *V, BasicBlock *BB) {
2804 if (getNumOperands() == ReservedSpace)
2805 growOperands(); // Get more space!
2806 // Initialize some new operands.
2807 setNumHungOffUseOperands(getNumOperands() + 1);
2808 setIncomingValue(getNumOperands() - 1, V);
2809 setIncomingBlock(getNumOperands() - 1, BB);
2810 }
2811
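// Editor's note: a minimal sketch of the PHI-building pattern enabled by
// Create() and addIncoming(); names are illustrative and the incoming values
// are assumed to share the PHI's type, as the asserts in setIncomingValue()
// require.
static PHINode *mergeTwoValues(Value *A, BasicBlock *FromA, Value *B,
                               BasicBlock *FromB, Instruction *InsertPt) {
  PHINode *PN = PHINode::Create(A->getType(), /*NumReservedValues=*/2,
                                "merge", InsertPt);
  PN->addIncoming(A, FromA);
  PN->addIncoming(B, FromB);
  return PN;
}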
2812 /// Remove an incoming value. This is useful if a
2813 /// predecessor basic block is deleted. The value removed is returned.
2814 ///
2815 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2816 /// is true), the PHI node is destroyed and any uses of it are replaced with
2817 /// dummy values. The only time there should be zero incoming values to a PHI
2818 /// node is when the block is dead, so this strategy is sound.
2819 ///
2820 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2821
2822 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2823 int Idx = getBasicBlockIndex(BB);
2824 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "llvm/include/llvm/IR/Instructions.h", 2824, __extension__ __PRETTY_FUNCTION__
))
;
2825 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2826 }
2827
2828 /// Return the first index of the specified basic
2829 /// block in the value list for this PHI. Returns -1 if no instance.
2830 ///
2831 int getBasicBlockIndex(const BasicBlock *BB) const {
2832 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2833 if (block_begin()[i] == BB)
2834 return i;
2835 return -1;
2836 }
2837
2838 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2839 int Idx = getBasicBlockIndex(BB);
2840 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "llvm/include/llvm/IR/Instructions.h", 2840, __extension__ __PRETTY_FUNCTION__
))
;
2841 return getIncomingValue(Idx);
2842 }
2843
2844 /// Set every incoming value(s) for block \p BB to \p V.
2845 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2846 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "llvm/include/llvm/IR/Instructions.h", 2846, __extension__ __PRETTY_FUNCTION__
))
;
2847 bool Found = false;
2848 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2849 if (getIncomingBlock(Op) == BB) {
2850 Found = true;
2851 setIncomingValue(Op, V);
2852 }
2853 (void)Found;
2854 assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!"
) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "llvm/include/llvm/IR/Instructions.h", 2854, __extension__ __PRETTY_FUNCTION__
))
;
2855 }
2856
2857 /// If the specified PHI node always merges together the
2858 /// same value, return the value, otherwise return null.
2859 Value *hasConstantValue() const;
2860
2861 /// Whether the specified PHI node always merges
2862 /// together the same value, assuming undefs are equal to a unique
2863 /// non-undef value.
2864 bool hasConstantOrUndefValue() const;
2865
2866  /// If the PHI node is complete, meaning all of its parent's predecessors
2867  /// have an incoming value in this PHI, return true; otherwise return false.
2868 bool isComplete() const {
2869 return llvm::all_of(predecessors(getParent()),
2870 [this](const BasicBlock *Pred) {
2871 return getBasicBlockIndex(Pred) >= 0;
2872 });
2873 }
2874
2875 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2876 static bool classof(const Instruction *I) {
2877 return I->getOpcode() == Instruction::PHI;
2878 }
2879 static bool classof(const Value *V) {
2880 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2881 }
2882
2883private:
2884 void growOperands();
2885};
2886
2887template <>
2888struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2889};
2890
2891DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<PHINode>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2891, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<PHINode
>::op_begin(const_cast<PHINode*>(this))[i_nocapture]
.get()); } void PHINode::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<PHINode>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2891, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<PHINode>::op_begin(this)[i_nocapture]
= Val_nocapture; } unsigned PHINode::getNumOperands() const {
return OperandTraits<PHINode>::operands(this); } template
<int Idx_nocapture> Use &PHINode::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &PHINode::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
2892
2893//===----------------------------------------------------------------------===//
2894// LandingPadInst Class
2895//===----------------------------------------------------------------------===//
2896
2897//===---------------------------------------------------------------------------
2898/// The landingpad instruction holds all of the information
2899/// necessary to generate correct exception handling. The landingpad instruction
2900/// cannot be moved from the top of a landing pad block, which itself is
2901/// accessible only from the 'unwind' edge of an invoke. This uses the
2902/// SubclassData field in Value to store whether or not the landingpad is a
2903/// cleanup.
2904///
2905class LandingPadInst : public Instruction {
2906 using CleanupField = BoolBitfieldElementT<0>;
2907
2908 /// The number of operands actually allocated. NumOperands is
2909 /// the number actually in use.
2910 unsigned ReservedSpace;
2911
2912 LandingPadInst(const LandingPadInst &LP);
2913
2914public:
2915 enum ClauseType { Catch, Filter };
2916
2917private:
2918 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2919 const Twine &NameStr, Instruction *InsertBefore);
2920 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2921 const Twine &NameStr, BasicBlock *InsertAtEnd);
2922
2923 // Allocate space for exactly zero operands.
2924 void *operator new(size_t S) { return User::operator new(S); }
2925
2926 void growOperands(unsigned Size);
2927 void init(unsigned NumReservedValues, const Twine &NameStr);
2928
2929protected:
2930 // Note: Instruction needs to be a friend here to call cloneImpl.
2931 friend class Instruction;
2932
2933 LandingPadInst *cloneImpl() const;
2934
2935public:
2936 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2937
2938  /// Constructors - NumReservedClauses is a hint for the number of
2939 /// clauses that this landingpad will have (use 0 if you really have no idea).
2940 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2941 const Twine &NameStr = "",
2942 Instruction *InsertBefore = nullptr);
2943 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2944 const Twine &NameStr, BasicBlock *InsertAtEnd);
2945
2946 /// Provide fast operand accessors
2947 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2948
2949 /// Return 'true' if this landingpad instruction is a
2950 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2951 /// doesn't catch the exception.
2952 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2953
2954 /// Indicate that this landingpad instruction is a cleanup.
2955 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2956
2957 /// Add a catch or filter clause to the landing pad.
2958 void addClause(Constant *ClauseVal);
2959
2960 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2961 /// determine what type of clause this is.
2962 Constant *getClause(unsigned Idx) const {
2963 return cast<Constant>(getOperandList()[Idx]);
2964 }
2965
2966 /// Return 'true' if the clause and index Idx is a catch clause.
2967 bool isCatch(unsigned Idx) const {
2968 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2969 }
2970
2971 /// Return 'true' if the clause and index Idx is a filter clause.
2972 bool isFilter(unsigned Idx) const {
2973 return isa<ArrayType>(getOperandList()[Idx]->getType());
2974 }
2975
2976 /// Get the number of clauses for this landing pad.
2977 unsigned getNumClauses() const { return getNumOperands(); }
2978
2979 /// Grow the size of the operand list to accommodate the new
2980 /// number of clauses.
2981 void reserveClauses(unsigned Size) { growOperands(Size); }
2982
2983 // Methods for support type inquiry through isa, cast, and dyn_cast:
2984 static bool classof(const Instruction *I) {
2985 return I->getOpcode() == Instruction::LandingPad;
2986 }
2987 static bool classof(const Value *V) {
2988 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2989 }
2990};
2991
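// Editor's note: an illustrative cleanup landingpad, not from the analyzed
// source. It assumes `ExnTy` is the usual {i8*, i32} exception structure type
// and that the enclosing function already has a personality function, which
// the IR verifier requires for landingpad instructions.
static LandingPadInst *makeCleanupPad(Type *ExnTy, BasicBlock *PadBB) {
  LandingPadInst *LP = LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/0,
                                              "lpad", PadBB);
  LP->setCleanup(true); // run during unwinding even if no clause matches
  return LP;
}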
2992template <>
2993struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2994};
2995
2996DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
2997
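// Illustrative sketch (not part of Instructions.h): counting the catch clauses of an
// existing landingpad via the accessors above. `LP` is an assumed, already-constructed
// LandingPadInst; this is a usage example only.
static inline unsigned countCatchClauses(const LandingPadInst *LP) {
  unsigned NumCatches = 0;
  for (unsigned I = 0, E = LP->getNumClauses(); I != E; ++I) {
    // isCatch/isFilter distinguish the clause kind: filter clauses have array type.
    if (LP->isCatch(I))
      ++NumCatches;
  }
  return NumCatches;
}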
2998//===----------------------------------------------------------------------===//
2999// ReturnInst Class
3000//===----------------------------------------------------------------------===//
3001
3002//===---------------------------------------------------------------------------
3003/// Return a value (possibly void), from a function. Execution
3004/// does not continue in this function any longer.
3005///
3006class ReturnInst : public Instruction {
3007 ReturnInst(const ReturnInst &RI);
3008
3009private:
3010 // ReturnInst constructors:
3011 // ReturnInst() - 'ret void' instruction
3012 // ReturnInst( null) - 'ret void' instruction
3013 // ReturnInst(Value* X) - 'ret X' instruction
3014 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3015 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3016 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3017 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3018 //
3019 // NOTE: If the Value* passed is of type void then the constructor behaves as
3020 // if it was passed NULL.
3021 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3022 Instruction *InsertBefore = nullptr);
3023 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3024 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3025
3026protected:
3027 // Note: Instruction needs to be a friend here to call cloneImpl.
3028 friend class Instruction;
3029
3030 ReturnInst *cloneImpl() const;
3031
3032public:
3033 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3034 Instruction *InsertBefore = nullptr) {
3035 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3036 }
3037
3038 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3039 BasicBlock *InsertAtEnd) {
3040 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3041 }
3042
3043 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3044 return new(0) ReturnInst(C, InsertAtEnd);
3045 }
3046
3047 /// Provide fast operand accessors
3048 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3049
3050 /// Convenience accessor. Returns null if there is no return value.
3051 Value *getReturnValue() const {
3052 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3053 }
3054
3055 unsigned getNumSuccessors() const { return 0; }
3056
3057 // Methods for support type inquiry through isa, cast, and dyn_cast:
3058 static bool classof(const Instruction *I) {
3059 return (I->getOpcode() == Instruction::Ret);
3060 }
3061 static bool classof(const Value *V) {
3062 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3063 }
3064
3065private:
3066 BasicBlock *getSuccessor(unsigned idx) const {
3067 llvm_unreachable("ReturnInst has no successors!");
3068 }
3069
3070 void setSuccessor(unsigned idx, BasicBlock *B) {
3071 llvm_unreachable("ReturnInst has no successors!");
3072 }
3073};
3074
3075template <>
3076struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3077};
3078
3079DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3080
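// Illustrative sketch (not part of Instructions.h): emitting a return with the Create
// overloads above. `Ctx`, `RetVal`, and `BB` are assumed inputs.
static inline ReturnInst *emitReturn(LLVMContext &Ctx, Value *RetVal, BasicBlock *BB) {
  // A null RetVal produces 'ret void'; otherwise 'ret RetVal'. Operand 0 holds the
  // returned value, and getReturnValue() hands it back (or nullptr for the void form).
  return ReturnInst::Create(Ctx, RetVal, BB);
}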
3081//===----------------------------------------------------------------------===//
3082// BranchInst Class
3083//===----------------------------------------------------------------------===//
3084
3085//===---------------------------------------------------------------------------
3086/// Conditional or Unconditional Branch instruction.
3087///
3088class BranchInst : public Instruction {
3089 /// Ops list - Branches are strange. The operands are ordered:
3090 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3091 /// they don't have to check for cond/uncond branchness. These are mostly
3092 /// accessed relative from op_end().
3093 BranchInst(const BranchInst &BI);
3094 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3095 // BranchInst(BB *B) - 'br B'
3096 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3097 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3098 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3099 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3100 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3101 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3102 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3103 Instruction *InsertBefore = nullptr);
3104 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3105 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3106 BasicBlock *InsertAtEnd);
3107
3108 void AssertOK();
3109
3110protected:
3111 // Note: Instruction needs to be a friend here to call cloneImpl.
3112 friend class Instruction;
3113
3114 BranchInst *cloneImpl() const;
3115
3116public:
3117 /// Iterator type that casts an operand to a basic block.
3118 ///
3119 /// This only makes sense because the successors are stored as adjacent
3120 /// operands for branch instructions.
3121 struct succ_op_iterator
3122 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3123 std::random_access_iterator_tag, BasicBlock *,
3124 ptrdiff_t, BasicBlock *, BasicBlock *> {
3125 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3126
3127 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3128 BasicBlock *operator->() const { return operator*(); }
3129 };
3130
3131 /// The const version of `succ_op_iterator`.
3132 struct const_succ_op_iterator
3133 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3134 std::random_access_iterator_tag,
3135 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3136 const BasicBlock *> {
3137 explicit const_succ_op_iterator(const_value_op_iterator I)
3138 : iterator_adaptor_base(I) {}
3139
3140 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3141 const BasicBlock *operator->() const { return operator*(); }
3142 };
3143
3144 static BranchInst *Create(BasicBlock *IfTrue,
3145 Instruction *InsertBefore = nullptr) {
3146 return new(1) BranchInst(IfTrue, InsertBefore);
3147 }
3148
3149 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3150 Value *Cond, Instruction *InsertBefore = nullptr) {
3151 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3152 }
3153
3154 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3155 return new(1) BranchInst(IfTrue, InsertAtEnd);
3156 }
3157
3158 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3159 Value *Cond, BasicBlock *InsertAtEnd) {
3160 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3161 }
3162
3163 /// Transparently provide more efficient getOperand methods.
3164 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3165
3166 bool isUnconditional() const { return getNumOperands() == 1; }
3167 bool isConditional() const { return getNumOperands() == 3; }
3168
3169 Value *getCondition() const {
3170 assert(isConditional() && "Cannot get condition of an uncond branch!");
3171 return Op<-3>();
3172 }
3173
3174 void setCondition(Value *V) {
3175 assert(isConditional() && "Cannot set condition of unconditional branch!");
3176 Op<-3>() = V;
3177 }
3178
3179 unsigned getNumSuccessors() const { return 1+isConditional(); }
3180
3181 BasicBlock *getSuccessor(unsigned i) const {
3182 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3183 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3184 }
3185
3186 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3187 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3188 *(&Op<-1>() - idx) = NewSucc;
3189 }
3190
3191 /// Swap the successors of this branch instruction.
3192 ///
3193 /// Swaps the successors of the branch instruction. This also swaps any
3194 /// branch weight metadata associated with the instruction so that it
3195 /// continues to map correctly to each operand.
3196 void swapSuccessors();
3197
3198 iterator_range<succ_op_iterator> successors() {
3199 return make_range(
3200 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3201 succ_op_iterator(value_op_end()));
3202 }
3203
3204 iterator_range<const_succ_op_iterator> successors() const {
3205 return make_range(const_succ_op_iterator(
3206 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3207 const_succ_op_iterator(value_op_end()));
3208 }
3209
3210 // Methods for support type inquiry through isa, cast, and dyn_cast:
3211 static bool classof(const Instruction *I) {
3212 return (I->getOpcode() == Instruction::Br);
3213 }
3214 static bool classof(const Value *V) {
3215 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3216 }
3217};
3218
3219template <>
3220struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3221};
3222
3223DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3224
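// Illustrative sketch (not part of Instructions.h): creating a conditional branch and
// reading it back through the accessors above. `Cond`, `ThenBB`, `ElseBB`, and `CurBB`
// are assumed, pre-existing values/blocks.
static inline BranchInst *emitCondBr(Value *Cond, BasicBlock *ThenBB,
                                     BasicBlock *ElseBB, BasicBlock *CurBB) {
  // Appends 'br Cond, ThenBB, ElseBB' to CurBB. Operands are stored as
  // [Cond, FalseDest, TrueDest], which is why getCondition() reads Op<-3>().
  BranchInst *Br = BranchInst::Create(ThenBB, ElseBB, Cond, CurBB);
  assert(Br->isConditional() && Br->getCondition() == Cond &&
         Br->getSuccessor(0) == ThenBB && Br->getSuccessor(1) == ElseBB);
  return Br;
}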
3225//===----------------------------------------------------------------------===//
3226// SwitchInst Class
3227//===----------------------------------------------------------------------===//
3228
3229//===---------------------------------------------------------------------------
3230/// Multiway switch
3231///
3232class SwitchInst : public Instruction {
3233 unsigned ReservedSpace;
3234
3235 // Operand[0] = Value to switch on
3236 // Operand[1] = Default basic block destination
3237 // Operand[2n ] = Value to match
3238 // Operand[2n+1] = BasicBlock to go to on match
3239 SwitchInst(const SwitchInst &SI);
3240
3241 /// Create a new switch instruction, specifying a value to switch on and a
3242 /// default destination. The number of additional cases can be specified here
3243 /// to make memory allocation more efficient. This constructor can also
3244 /// auto-insert before another instruction.
3245 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3246 Instruction *InsertBefore);
3247
3248 /// Create a new switch instruction, specifying a value to switch on and a
3249 /// default destination. The number of additional cases can be specified here
3250 /// to make memory allocation more efficient. This constructor also
3251 /// auto-inserts at the end of the specified BasicBlock.
3252 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3253 BasicBlock *InsertAtEnd);
3254
3255 // allocate space for exactly zero operands
3256 void *operator new(size_t S) { return User::operator new(S); }
3257
3258 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3259 void growOperands();
3260
3261protected:
3262 // Note: Instruction needs to be a friend here to call cloneImpl.
3263 friend class Instruction;
3264
3265 SwitchInst *cloneImpl() const;
3266
3267public:
3268 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3269
3270 // -2
3271 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3272
3273 template <typename CaseHandleT> class CaseIteratorImpl;
3274
3275 /// A handle to a particular switch case. It exposes a convenient interface
3276 /// to both the case value and the successor block.
3277 ///
3278 /// We define this as a template and instantiate it to form both a const and
3279 /// non-const handle.
3280 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3281 class CaseHandleImpl {
3282 // Directly befriend both const and non-const iterators.
3283 friend class SwitchInst::CaseIteratorImpl<
3284 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3285
3286 protected:
3287 // Expose the switch type we're parameterized with to the iterator.
3288 using SwitchInstType = SwitchInstT;
3289
3290 SwitchInstT *SI;
3291 ptrdiff_t Index;
3292
3293 CaseHandleImpl() = default;
3294 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3295
3296 public:
3297 /// Resolves case value for current case.
3298 ConstantIntT *getCaseValue() const {
3299 assert((unsigned)Index < SI->getNumCases() &&
3300 "Index out the number of cases.");
3301 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3302 }
3303
3304 /// Resolves successor for current case.
3305 BasicBlockT *getCaseSuccessor() const {
3306 assert(((unsigned)Index < SI->getNumCases() ||
3307 (unsigned)Index == DefaultPseudoIndex) &&
3308 "Index out the number of cases.");
3309 return SI->getSuccessor(getSuccessorIndex());
3310 }
3311
3312 /// Returns number of current case.
3313 unsigned getCaseIndex() const { return Index; }
3314
3315 /// Returns successor index for current case successor.
3316 unsigned getSuccessorIndex() const {
3317 assert(((unsigned)Index == DefaultPseudoIndex ||
3318 (unsigned)Index < SI->getNumCases()) &&
3319 "Index out the number of cases.");
3320 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3321 }
3322
3323 bool operator==(const CaseHandleImpl &RHS) const {
3324 assert(SI == RHS.SI && "Incompatible operators.");
3325 return Index == RHS.Index;
3326 }
3327 };
3328
3329 using ConstCaseHandle =
3330 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3331
3332 class CaseHandle
3333 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3334 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3335
3336 public:
3337 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3338
3339 /// Sets the new value for current case.
3340 void setValue(ConstantInt *V) const {
3341 assert((unsigned)Index < SI->getNumCases() &&
3342 "Index out the number of cases.");
3343 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3344 }
3345
3346 /// Sets the new successor for current case.
3347 void setSuccessor(BasicBlock *S) const {
3348 SI->setSuccessor(getSuccessorIndex(), S);
3349 }
3350 };
3351
3352 template <typename CaseHandleT>
3353 class CaseIteratorImpl
3354 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3355 std::random_access_iterator_tag,
3356 const CaseHandleT> {
3357 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3358
3359 CaseHandleT Case;
3360
3361 public:
3362 /// Default constructed iterator is in an invalid state until assigned to
3363 /// a case for a particular switch.
3364 CaseIteratorImpl() = default;
3365
3366 /// Initializes case iterator for given SwitchInst and for given
3367 /// case number.
3368 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3369
3370 /// Initializes case iterator for given SwitchInst and for given
3371 /// successor index.
3372 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3373 unsigned SuccessorIndex) {
3374 assert(SuccessorIndex < SI->getNumSuccessors() &&
3375 "Successor index # out of range!");
3376 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3377 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3378 }
3379
3380 /// Support converting to the const variant. This will be a no-op for const
3381 /// variant.
3382 operator CaseIteratorImpl<ConstCaseHandle>() const {
3383 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3384 }
3385
3386 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3387 // Check index correctness after addition.
3388 // Note: Index == getNumCases() means end().
3389 assert(Case.Index + N >= 0 &&
3390 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3391 "Case.Index out the number of cases.");
3392 Case.Index += N;
3393 return *this;
3394 }
3395 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3396 // Check index correctness after subtraction.
3397 // Note: Case.Index == getNumCases() means end().
3398 assert(Case.Index - N >= 0 &&
3399 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3400 "Case.Index out the number of cases.");
3401 Case.Index -= N;
3402 return *this;
3403 }
3404 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3405 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3406 return Case.Index - RHS.Case.Index;
3407 }
3408 bool operator==(const CaseIteratorImpl &RHS) const {
3409 return Case == RHS.Case;
3410 }
3411 bool operator<(const CaseIteratorImpl &RHS) const {
3412 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3413 return Case.Index < RHS.Case.Index;
3414 }
3415 const CaseHandleT &operator*() const { return Case; }
3416 };
3417
3418 using CaseIt = CaseIteratorImpl<CaseHandle>;
3419 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3420
3421 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3422 unsigned NumCases,
3423 Instruction *InsertBefore = nullptr) {
3424 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3425 }
3426
3427 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3428 unsigned NumCases, BasicBlock *InsertAtEnd) {
3429 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3430 }
3431
3432 /// Provide fast operand accessors
3433 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3434
3435 // Accessor Methods for Switch stmt
3436 Value *getCondition() const { return getOperand(0); }
3437 void setCondition(Value *V) { setOperand(0, V); }
3438
3439 BasicBlock *getDefaultDest() const {
3440 return cast<BasicBlock>(getOperand(1));
3441 }
3442
3443 void setDefaultDest(BasicBlock *DefaultCase) {
3444 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3445 }
3446
3447 /// Return the number of 'cases' in this switch instruction, excluding the
3448 /// default case.
3449 unsigned getNumCases() const {
3450 return getNumOperands()/2 - 1;
3451 }
3452
3453 /// Returns a read/write iterator that points to the first case in the
3454 /// SwitchInst.
3455 CaseIt case_begin() {
3456 return CaseIt(this, 0);
3457 }
3458
3459 /// Returns a read-only iterator that points to the first case in the
3460 /// SwitchInst.
3461 ConstCaseIt case_begin() const {
3462 return ConstCaseIt(this, 0);
3463 }
3464
3465 /// Returns a read/write iterator that points one past the last in the
3466 /// SwitchInst.
3467 CaseIt case_end() {
3468 return CaseIt(this, getNumCases());
3469 }
3470
3471 /// Returns a read-only iterator that points one past the last in the
3472 /// SwitchInst.
3473 ConstCaseIt case_end() const {
3474 return ConstCaseIt(this, getNumCases());
3475 }
3476
3477 /// Iteration adapter for range-for loops.
3478 iterator_range<CaseIt> cases() {
3479 return make_range(case_begin(), case_end());
3480 }
3481
3482 /// Constant iteration adapter for range-for loops.
3483 iterator_range<ConstCaseIt> cases() const {
3484 return make_range(case_begin(), case_end());
3485 }
3486
3487 /// Returns an iterator that points to the default case.
3488 /// Note: this iterator only allows resolving the successor. Attempting
3489 /// to resolve the case value causes an assertion.
3490 /// Also note that incrementing or decrementing this iterator causes an
3491 /// assertion and makes the iterator invalid.
3492 CaseIt case_default() {
3493 return CaseIt(this, DefaultPseudoIndex);
3494 }
3495 ConstCaseIt case_default() const {
3496 return ConstCaseIt(this, DefaultPseudoIndex);
3497 }
3498
3499 /// Search all of the case values for the specified constant. If it is
3500 /// explicitly handled, return its case iterator; otherwise return the
3501 /// default case iterator to indicate that it is handled by the default
3502 /// handler.
3503 CaseIt findCaseValue(const ConstantInt *C) {
3504 return CaseIt(
3505 this,
3506 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3507 }
3508 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3509 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3510 return Case.getCaseValue() == C;
3511 });
3512 if (I != case_end())
3513 return I;
3514
3515 return case_default();
3516 }
3517
3518 /// Finds the unique case value for a given successor. Returns null if the
3519 /// successor is not found, not unique, or is the default case.
3520 ConstantInt *findCaseDest(BasicBlock *BB) {
3521 if (BB == getDefaultDest())
3522 return nullptr;
3523
3524 ConstantInt *CI = nullptr;
3525 for (auto Case : cases()) {
3526 if (Case.getCaseSuccessor() != BB)
3527 continue;
3528
3529 if (CI)
3530 return nullptr; // Multiple cases lead to BB.
3531
3532 CI = Case.getCaseValue();
3533 }
3534
3535 return CI;
3536 }
3537
3538 /// Add an entry to the switch instruction.
3539 /// Note:
3540 /// This action invalidates case_end(). Old case_end() iterator will
3541 /// point to the added case.
3542 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3543
3544 /// This method removes the specified case and its successor from the switch
3545 /// instruction. Note that this operation may reorder the remaining cases at
3546 /// index idx and above.
3547 /// Note:
3548 /// This action invalidates iterators for all cases following the one removed,
3549 /// including the case_end() iterator. It returns an iterator for the next
3550 /// case.
3551 CaseIt removeCase(CaseIt I);
3552
3553 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3554 BasicBlock *getSuccessor(unsigned idx) const {
3555 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3556 return cast<BasicBlock>(getOperand(idx*2+1));
3557 }
3558 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3559 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3560 setOperand(idx * 2 + 1, NewSucc);
3561 }
3562
3563 // Methods for support type inquiry through isa, cast, and dyn_cast:
3564 static bool classof(const Instruction *I) {
3565 return I->getOpcode() == Instruction::Switch;
3566 }
3567 static bool classof(const Value *V) {
3568 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3569 }
3570};
3571
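// Illustrative sketch (not part of Instructions.h): building a one-case switch and
// looking the case back up. `Cond` (an i32 value), `DefaultBB`, `CaseBB`, and `CurBB`
// are assumed inputs; ConstantInt and Type are declared elsewhere in llvm/IR headers.
static inline BasicBlock *emitSmallSwitch(Value *Cond, BasicBlock *DefaultBB,
                                          BasicBlock *CaseBB, BasicBlock *CurBB) {
  SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, CurBB);
  ConstantInt *Zero = ConstantInt::get(Type::getInt32Ty(CurBB->getContext()), 0);
  SI->addCase(Zero, CaseBB); // note: invalidates the old case_end() iterator
  // findCaseValue() returns the matching case, or case_default() when unhandled,
  // so getCaseSuccessor() yields either CaseBB or DefaultBB here.
  return SI->findCaseValue(Zero)->getCaseSuccessor();
}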
3572/// A wrapper class to simplify modification of SwitchInst cases along with
3573/// their prof branch_weights metadata.
3574class SwitchInstProfUpdateWrapper {
3575 SwitchInst &SI;
3576 Optional<SmallVector<uint32_t, 8> > Weights = None;
3577 bool Changed = false;
3578
3579protected:
3580 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3581
3582 MDNode *buildProfBranchWeightsMD();
3583
3584 void init();
3585
3586public:
3587 using CaseWeightOpt = Optional<uint32_t>;
3588 SwitchInst *operator->() { return &SI; }
3589 SwitchInst &operator*() { return SI; }
3590 operator SwitchInst *() { return &SI; }
3591
3592 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3593
3594 ~SwitchInstProfUpdateWrapper() {
3595 if (Changed)
3596 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3597 }
3598
3599 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3600 /// correspondent branch weight.
3601 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3602
3603 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3604 /// specified branch weight for the added case.
3605 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3606
3607 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3608 /// this object to not touch the underlying SwitchInst in destructor.
3609 SymbolTableList<Instruction>::iterator eraseFromParent();
3610
3611 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3612 CaseWeightOpt getSuccessorWeight(unsigned idx);
3613
3614 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3615};
3616
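// Illustrative sketch (not part of Instructions.h): removing a case through the
// wrapper so any !prof branch_weights metadata stays in sync. `SI` is an assumed
// SwitchInst that may carry profile metadata.
static inline void dropFirstCase(SwitchInst &SI) {
  SwitchInstProfUpdateWrapper SIW(SI);
  if (SIW->getNumCases() != 0)
    SIW.removeCase(SIW->case_begin()); // also removes the matching branch weight
  // Any updated weights are written back to MD_prof when SIW goes out of scope.
}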
3617template <>
3618struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3619};
3620
3621DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3622
3623//===----------------------------------------------------------------------===//
3624// IndirectBrInst Class
3625//===----------------------------------------------------------------------===//
3626
3627//===---------------------------------------------------------------------------
3628/// Indirect Branch Instruction.
3629///
3630class IndirectBrInst : public Instruction {
3631 unsigned ReservedSpace;
3632
3633 // Operand[0] = Address to jump to
3634 // Operand[n+1] = n-th destination
3635 IndirectBrInst(const IndirectBrInst &IBI);
3636
3637 /// Create a new indirectbr instruction, specifying an
3638 /// Address to jump to. The number of expected destinations can be specified
3639 /// here to make memory allocation more efficient. This constructor can also
3640 /// autoinsert before another instruction.
3641 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3642
3643 /// Create a new indirectbr instruction, specifying an
3644 /// Address to jump to. The number of expected destinations can be specified
3645 /// here to make memory allocation more efficient. This constructor also
3646 /// autoinserts at the end of the specified BasicBlock.
3647 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3648
3649 // allocate space for exactly zero operands
3650 void *operator new(size_t S) { return User::operator new(S); }
3651
3652 void init(Value *Address, unsigned NumDests);
3653 void growOperands();
3654
3655protected:
3656 // Note: Instruction needs to be a friend here to call cloneImpl.
3657 friend class Instruction;
3658
3659 IndirectBrInst *cloneImpl() const;
3660
3661public:
3662 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3663
3664 /// Iterator type that casts an operand to a basic block.
3665 ///
3666 /// This only makes sense because the successors are stored as adjacent
3667 /// operands for indirectbr instructions.
3668 struct succ_op_iterator
3669 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3670 std::random_access_iterator_tag, BasicBlock *,
3671 ptrdiff_t, BasicBlock *, BasicBlock *> {
3672 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3673
3674 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3675 BasicBlock *operator->() const { return operator*(); }
3676 };
3677
3678 /// The const version of `succ_op_iterator`.
3679 struct const_succ_op_iterator
3680 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3681 std::random_access_iterator_tag,
3682 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3683 const BasicBlock *> {
3684 explicit const_succ_op_iterator(const_value_op_iterator I)
3685 : iterator_adaptor_base(I) {}
3686
3687 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3688 const BasicBlock *operator->() const { return operator*(); }
3689 };
3690
3691 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3692 Instruction *InsertBefore = nullptr) {
3693 return new IndirectBrInst(Address, NumDests, InsertBefore);
3694 }
3695
3696 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3697 BasicBlock *InsertAtEnd) {
3698 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3699 }
3700
3701 /// Provide fast operand accessors.
3702 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3703
3704 // Accessor Methods for IndirectBrInst instruction.
3705 Value *getAddress() { return getOperand(0); }
3706 const Value *getAddress() const { return getOperand(0); }
3707 void setAddress(Value *V) { setOperand(0, V); }
3708
3709 /// return the number of possible destinations in this
3710 /// indirectbr instruction.
3711 unsigned getNumDestinations() const { return getNumOperands()-1; }
3712
3713 /// Return the specified destination.
3714 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3715 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3716
3717 /// Add a destination.
3718 ///
3719 void addDestination(BasicBlock *Dest);
3720
3721 /// This method removes the specified successor from the
3722 /// indirectbr instruction.
3723 void removeDestination(unsigned i);
3724
3725 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3726 BasicBlock *getSuccessor(unsigned i) const {
3727 return cast<BasicBlock>(getOperand(i+1));
3728 }
3729 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3730 setOperand(i + 1, NewSucc);
3731 }
3732
3733 iterator_range<succ_op_iterator> successors() {
3734 return make_range(succ_op_iterator(std::next(value_op_begin())),
3735 succ_op_iterator(value_op_end()));
3736 }
3737
3738 iterator_range<const_succ_op_iterator> successors() const {
3739 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3740 const_succ_op_iterator(value_op_end()));
3741 }
3742
3743 // Methods for support type inquiry through isa, cast, and dyn_cast:
3744 static bool classof(const Instruction *I) {
3745 return I->getOpcode() == Instruction::IndirectBr;
3746 }
3747 static bool classof(const Value *V) {
3748 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3749 }
3750};
3751
3752template <>
3753struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3754};
3755
3756DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3757
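// Illustrative sketch (not part of Instructions.h): building an indirectbr from the
// pieces above. `Address` is an assumed blockaddress-typed value; `Dest` and `CurBB`
// are assumed blocks.
static inline IndirectBrInst *emitIndirectBr(Value *Address, BasicBlock *Dest,
                                             BasicBlock *CurBB) {
  IndirectBrInst *IBI = IndirectBrInst::Create(Address, /*NumDests=*/1, CurBB);
  IBI->addDestination(Dest); // destination i becomes operand i+1 / successor i
  assert(IBI->getNumDestinations() == 1 && IBI->getDestination(0) == Dest);
  return IBI;
}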
3758//===----------------------------------------------------------------------===//
3759// InvokeInst Class
3760//===----------------------------------------------------------------------===//
3761
3762/// Invoke instruction. The SubclassData field is used to hold the
3763/// calling convention of the call.
3764///
3765class InvokeInst : public CallBase {
3766 /// The number of operands for this call beyond the called function,
3767 /// arguments, and operand bundles.
3768 static constexpr int NumExtraOperands = 2;
3769
3770 /// The index from the end of the operand array to the normal destination.
3771 static constexpr int NormalDestOpEndIdx = -3;
3772
3773 /// The index from the end of the operand array to the unwind destination.
3774 static constexpr int UnwindDestOpEndIdx = -2;
3775
3776 InvokeInst(const InvokeInst &BI);
3777
3778 /// Construct an InvokeInst given a range of arguments.
3779 ///
3780 /// Construct an InvokeInst from a range of arguments
3781 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3782 BasicBlock *IfException, ArrayRef<Value *> Args,
3783 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3784 const Twine &NameStr, Instruction *InsertBefore);
3785
3786 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3787 BasicBlock *IfException, ArrayRef<Value *> Args,
3788 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3789 const Twine &NameStr, BasicBlock *InsertAtEnd);
3790
3791 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3792 BasicBlock *IfException, ArrayRef<Value *> Args,
3793 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3794
3795 /// Compute the number of operands to allocate.
3796 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3797 // We need one operand for the called function, plus our extra operands and
3798 // the input operand counts provided.
3799 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3800 }
3801
3802protected:
3803 // Note: Instruction needs to be a friend here to call cloneImpl.
3804 friend class Instruction;
3805
3806 InvokeInst *cloneImpl() const;
3807
3808public:
3809 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3810 BasicBlock *IfException, ArrayRef<Value *> Args,
3811 const Twine &NameStr,
3812 Instruction *InsertBefore = nullptr) {
3813 int NumOperands = ComputeNumOperands(Args.size());
3814 return new (NumOperands)
3815 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3816 NameStr, InsertBefore);
3817 }
3818
3819 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3820 BasicBlock *IfException, ArrayRef<Value *> Args,
3821 ArrayRef<OperandBundleDef> Bundles = None,
3822 const Twine &NameStr = "",
3823 Instruction *InsertBefore = nullptr) {
3824 int NumOperands =
3825 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3826 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3827
3828 return new (NumOperands, DescriptorBytes)
3829 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3830 NameStr, InsertBefore);
3831 }
3832
3833 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3834 BasicBlock *IfException, ArrayRef<Value *> Args,
3835 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3836 int NumOperands = ComputeNumOperands(Args.size());
3837 return new (NumOperands)
3838 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3839 NameStr, InsertAtEnd);
3840 }
3841
3842 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3843 BasicBlock *IfException, ArrayRef<Value *> Args,
3844 ArrayRef<OperandBundleDef> Bundles,
3845 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3846 int NumOperands =
3847 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3848 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3849
3850 return new (NumOperands, DescriptorBytes)
3851 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3852 NameStr, InsertAtEnd);
3853 }
3854
3855 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3856 BasicBlock *IfException, ArrayRef<Value *> Args,
3857 const Twine &NameStr,
3858 Instruction *InsertBefore = nullptr) {
3859 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3860 IfException, Args, None, NameStr, InsertBefore);
3861 }
3862
3863 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3864 BasicBlock *IfException, ArrayRef<Value *> Args,
3865 ArrayRef<OperandBundleDef> Bundles = None,
3866 const Twine &NameStr = "",
3867 Instruction *InsertBefore = nullptr) {
3868 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3869 IfException, Args, Bundles, NameStr, InsertBefore);
3870 }
3871
3872 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3873 BasicBlock *IfException, ArrayRef<Value *> Args,
3874 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3875 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3876 IfException, Args, NameStr, InsertAtEnd);
3877 }
3878
3879 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3880 BasicBlock *IfException, ArrayRef<Value *> Args,
3881 ArrayRef<OperandBundleDef> Bundles,
3882 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3883 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3884 IfException, Args, Bundles, NameStr, InsertAtEnd);
3885 }
3886
3887 /// Create a clone of \p II with a different set of operand bundles and
3888 /// insert it before \p InsertPt.
3889 ///
3890 /// The returned invoke instruction is identical to \p II in every way except
3891 /// that the operand bundles for the new instruction are set to the operand
3892 /// bundles in \p Bundles.
3893 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3894 Instruction *InsertPt = nullptr);
3895
3896 // get*Dest - Return the destination basic blocks...
3897 BasicBlock *getNormalDest() const {
3898 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3899 }
3900 BasicBlock *getUnwindDest() const {
3901 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3902 }
3903 void setNormalDest(BasicBlock *B) {
3904 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3905 }
3906 void setUnwindDest(BasicBlock *B) {
3907 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3908 }
3909
3910 /// Get the landingpad instruction from the landing pad
3911 /// block (the unwind destination).
3912 LandingPadInst *getLandingPadInst() const;
3913
3914 BasicBlock *getSuccessor(unsigned i) const {
3915 assert(i < 2 && "Successor # out of range for invoke!");
3916 return i == 0 ? getNormalDest() : getUnwindDest();
3917 }
3918
3919 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3920 assert(i < 2 && "Successor # out of range for invoke!");
3921 if (i == 0)
3922 setNormalDest(NewSucc);
3923 else
3924 setUnwindDest(NewSucc);
3925 }
3926
3927 unsigned getNumSuccessors() const { return 2; }
3928
3929 // Methods for support type inquiry through isa, cast, and dyn_cast:
3930 static bool classof(const Instruction *I) {
3931 return (I->getOpcode() == Instruction::Invoke);
3932 }
3933 static bool classof(const Value *V) {
3934 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3935 }
3936
3937private:
3938 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3939 // method so that subclasses cannot accidentally use it.
3940 template <typename Bitfield>
3941 void setSubclassData(typename Bitfield::Type Value) {
3942 Instruction::setSubclassData<Bitfield>(Value);
3943 }
3944};
3945
3946InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3947 BasicBlock *IfException, ArrayRef<Value *> Args,
3948 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3949 const Twine &NameStr, Instruction *InsertBefore)
3950 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3951 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3952 InsertBefore) {
3953 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3954}
3955
3956InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3957 BasicBlock *IfException, ArrayRef<Value *> Args,
3958 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3959 const Twine &NameStr, BasicBlock *InsertAtEnd)
3960 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3961 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3962 InsertAtEnd) {
3963 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3964}
3965
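// Illustrative sketch (not part of Instructions.h): creating an invoke through the
// FunctionCallee overload above. `Callee`, `Args`, `NormalBB`, `UnwindBB`, and `CurBB`
// are assumed inputs; UnwindBB is expected to begin with a landingpad, and Callee is
// assumed to return a non-void value (a name cannot be attached to void results).
static inline InvokeInst *emitInvoke(FunctionCallee Callee, ArrayRef<Value *> Args,
                                     BasicBlock *NormalBB, BasicBlock *UnwindBB,
                                     BasicBlock *CurBB) {
  InvokeInst *II =
      InvokeInst::Create(Callee, NormalBB, UnwindBB, Args, "invoke.res", CurBB);
  // Successor 0 is the normal destination, successor 1 the unwind destination.
  assert(II->getNormalDest() == NormalBB && II->getUnwindDest() == UnwindBB);
  return II;
}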
3966//===----------------------------------------------------------------------===//
3967// CallBrInst Class
3968//===----------------------------------------------------------------------===//
3969
3970/// CallBr instruction, tracking function calls that may not return control but
3971/// instead transfer it to a third location. The SubclassData field is used to
3972/// hold the calling convention of the call.
3973///
3974class CallBrInst : public CallBase {
3975
3976 unsigned NumIndirectDests;
3977
3978 CallBrInst(const CallBrInst &BI);
3979
3980 /// Construct a CallBrInst given a range of arguments.
3981 ///
3982 /// Construct a CallBrInst from a range of arguments
3983 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3984 ArrayRef<BasicBlock *> IndirectDests,
3985 ArrayRef<Value *> Args,
3986 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3987 const Twine &NameStr, Instruction *InsertBefore);
3988
3989 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3990 ArrayRef<BasicBlock *> IndirectDests,
3991 ArrayRef<Value *> Args,
3992 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3993 const Twine &NameStr, BasicBlock *InsertAtEnd);
3994
3995 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3996 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3997 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3998
3999 /// Should the Indirect Destinations change, scan + update the Arg list.
4000 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
4001
4002 /// Compute the number of operands to allocate.
4003 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4004 int NumBundleInputs = 0) {
4005 // We need one operand for the called function, plus our extra operands and
4006 // the input operand counts provided.
4007 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4008 }
4009
4010protected:
4011 // Note: Instruction needs to be a friend here to call cloneImpl.
4012 friend class Instruction;
4013
4014 CallBrInst *cloneImpl() const;
4015
4016public:
4017 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4018 BasicBlock *DefaultDest,
4019 ArrayRef<BasicBlock *> IndirectDests,
4020 ArrayRef<Value *> Args, const Twine &NameStr,
4021 Instruction *InsertBefore = nullptr) {
4022 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4023 return new (NumOperands)
4024 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4025 NumOperands, NameStr, InsertBefore);
4026 }
4027
4028 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4029 BasicBlock *DefaultDest,
4030 ArrayRef<BasicBlock *> IndirectDests,
4031 ArrayRef<Value *> Args,
4032 ArrayRef<OperandBundleDef> Bundles = None,
4033 const Twine &NameStr = "",
4034 Instruction *InsertBefore = nullptr) {
4035 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4036 CountBundleInputs(Bundles));
4037 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4038
4039 return new (NumOperands, DescriptorBytes)
4040 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4041 NumOperands, NameStr, InsertBefore);
4042 }
4043
4044 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4045 BasicBlock *DefaultDest,
4046 ArrayRef<BasicBlock *> IndirectDests,
4047 ArrayRef<Value *> Args, const Twine &NameStr,
4048 BasicBlock *InsertAtEnd) {
4049 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4050 return new (NumOperands)
4051 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4052 NumOperands, NameStr, InsertAtEnd);
4053 }
4054
4055 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4056 BasicBlock *DefaultDest,
4057 ArrayRef<BasicBlock *> IndirectDests,
4058 ArrayRef<Value *> Args,
4059 ArrayRef<OperandBundleDef> Bundles,
4060 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4061 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4062 CountBundleInputs(Bundles));
4063 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4064
4065 return new (NumOperands, DescriptorBytes)
4066 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4067 NumOperands, NameStr, InsertAtEnd);
4068 }
4069
4070 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4071 ArrayRef<BasicBlock *> IndirectDests,
4072 ArrayRef<Value *> Args, const Twine &NameStr,
4073 Instruction *InsertBefore = nullptr) {
4074 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4075 IndirectDests, Args, NameStr, InsertBefore);
4076 }
4077
4078 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4079 ArrayRef<BasicBlock *> IndirectDests,
4080 ArrayRef<Value *> Args,
4081 ArrayRef<OperandBundleDef> Bundles = None,
4082 const Twine &NameStr = "",
4083 Instruction *InsertBefore = nullptr) {
4084 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4085 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4086 }
4087
4088 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4089 ArrayRef<BasicBlock *> IndirectDests,
4090 ArrayRef<Value *> Args, const Twine &NameStr,
4091 BasicBlock *InsertAtEnd) {
4092 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4093 IndirectDests, Args, NameStr, InsertAtEnd);
4094 }
4095
4096 static CallBrInst *Create(FunctionCallee Func,
4097 BasicBlock *DefaultDest,
4098 ArrayRef<BasicBlock *> IndirectDests,
4099 ArrayRef<Value *> Args,
4100 ArrayRef<OperandBundleDef> Bundles,
4101 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4102 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4103 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4104 }
4105
4106 /// Create a clone of \p CBI with a different set of operand bundles and
4107 /// insert it before \p InsertPt.
4108 ///
4109 /// The returned callbr instruction is identical to \p CBI in every way
4110 /// except that the operand bundles for the new instruction are set to the
4111 /// operand bundles in \p Bundles.
4112 static CallBrInst *Create(CallBrInst *CBI,
4113 ArrayRef<OperandBundleDef> Bundles,
4114 Instruction *InsertPt = nullptr);
4115
4116 /// Return the number of callbr indirect dest labels.
4117 ///
4118 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4119
4120 /// getIndirectDestLabel - Return the i-th indirect dest label.
4121 ///
4122 Value *getIndirectDestLabel(unsigned i) const {
4123 assert(i < getNumIndirectDests() && "Out of bounds!");
4124 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4125 }
4126
4127 Value *getIndirectDestLabelUse(unsigned i) const {
4128 assert(i < getNumIndirectDests() && "Out of bounds!");
4129 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4130 }
4131
4132 // Return the destination basic blocks...
4133 BasicBlock *getDefaultDest() const {
4134 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4135 }
4136 BasicBlock *getIndirectDest(unsigned i) const {
4137 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4138 }
4139 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4140 SmallVector<BasicBlock *, 16> IndirectDests;
4141 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4142 IndirectDests.push_back(getIndirectDest(i));
4143 return IndirectDests;
4144 }
4145 void setDefaultDest(BasicBlock *B) {
4146 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4147 }
4148 void setIndirectDest(unsigned i, BasicBlock *B) {
4149 updateArgBlockAddresses(i, B);
4150 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4151 }
4152
4153 BasicBlock *getSuccessor(unsigned i) const {
4154 assert(i < getNumSuccessors() + 1 &&
4155        "Successor # out of range for callbr!");
4156 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4157 }
4158
4159 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4160 assert(i < getNumIndirectDests() + 1 &&
4161        "Successor # out of range for callbr!");
4162 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4163 }
4164
4165 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4166
4167 // Methods for support type inquiry through isa, cast, and dyn_cast:
4168 static bool classof(const Instruction *I) {
4169 return (I->getOpcode() == Instruction::CallBr);
4170 }
4171 static bool classof(const Value *V) {
4172 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4173 }
4174
4175private:
4176 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4177 // method so that subclasses cannot accidentally use it.
4178 template <typename Bitfield>
4179 void setSubclassData(typename Bitfield::Type Value) {
4180 Instruction::setSubclassData<Bitfield>(Value);
4181 }
4182};
4183
4184CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4185 ArrayRef<BasicBlock *> IndirectDests,
4186 ArrayRef<Value *> Args,
4187 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4188 const Twine &NameStr, Instruction *InsertBefore)
4189 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4190 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4191 InsertBefore) {
4192 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4193}
4194
4195CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4196 ArrayRef<BasicBlock *> IndirectDests,
4197 ArrayRef<Value *> Args,
4198 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4199 const Twine &NameStr, BasicBlock *InsertAtEnd)
4200 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4201 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4202 InsertAtEnd) {
4203 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4204}
4205
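As a usage sketch (not part of the header above; Callee, Fallthrough, Indirect and CurBB are illustrative placeholders), the FunctionCallee convenience overload of CallBrInst::Create shown earlier can be used roughly like this:

#include "llvm/IR/Instructions.h"

// Hedged sketch: build a callbr terminator in CurBB that falls through to
// Fallthrough and may instead branch to one indirect destination, Indirect.
// No call arguments and no operand bundles are passed.
static llvm::CallBrInst *emitCallBr(llvm::FunctionCallee Callee,
                                    llvm::BasicBlock *Fallthrough,
                                    llvm::BasicBlock *Indirect,
                                    llvm::BasicBlock *CurBB) {
  return llvm::CallBrInst::Create(Callee, Fallthrough,
                                  /*IndirectDests=*/{Indirect},
                                  /*Args=*/{}, /*NameStr=*/"",
                                  /*InsertAtEnd=*/CurBB);
}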
4206//===----------------------------------------------------------------------===//
4207// ResumeInst Class
4208//===----------------------------------------------------------------------===//
4209
4210//===---------------------------------------------------------------------------
4211/// Resume the propagation of an exception.
4212///
4213class ResumeInst : public Instruction {
4214 ResumeInst(const ResumeInst &RI);
4215
4216 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4217 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4218
4219protected:
4220 // Note: Instruction needs to be a friend here to call cloneImpl.
4221 friend class Instruction;
4222
4223 ResumeInst *cloneImpl() const;
4224
4225public:
4226 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4227 return new(1) ResumeInst(Exn, InsertBefore);
4228 }
4229
4230 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4231 return new(1) ResumeInst(Exn, InsertAtEnd);
4232 }
4233
4234 /// Provide fast operand accessors
4235 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4236
4237 /// Convenience accessor.
4238 Value *getValue() const { return Op<0>(); }
4239
4240 unsigned getNumSuccessors() const { return 0; }
4241
4242 // Methods for support type inquiry through isa, cast, and dyn_cast:
4243 static bool classof(const Instruction *I) {
4244 return I->getOpcode() == Instruction::Resume;
4245 }
4246 static bool classof(const Value *V) {
4247 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4248 }
4249
4250private:
4251 BasicBlock *getSuccessor(unsigned idx) const {
4252 llvm_unreachable("ResumeInst has no successors!");
4253 }
4254
4255 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4256 llvm_unreachable("ResumeInst has no successors!");
4257 }
4258};
4259
4260template <>
4261struct OperandTraits<ResumeInst> :
4262 public FixedNumOperandTraits<ResumeInst, 1> {
4263};
4264
4265 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4266
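As a usage sketch (not part of the header; Exn and BB are illustrative names), ResumeInst::Create re-raises an in-flight exception value; a resume has one operand and no successors:

#include "llvm/IR/Instructions.h"

// Hedged sketch: re-raise the exception value Exn at the end of BB.
static llvm::ResumeInst *emitResume(llvm::Value *Exn, llvm::BasicBlock *BB) {
  return llvm::ResumeInst::Create(Exn, /*InsertAtEnd=*/BB);
}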
4267//===----------------------------------------------------------------------===//
4268// CatchSwitchInst Class
4269//===----------------------------------------------------------------------===//
4270class CatchSwitchInst : public Instruction {
4271 using UnwindDestField = BoolBitfieldElementT<0>;
4272
4273 /// The number of operands actually allocated. NumOperands is
4274 /// the number actually in use.
4275 unsigned ReservedSpace;
4276
4277 // Operand[0] = Outer scope
4278 // Operand[1] = Unwind block destination
4279 // Operand[n] = BasicBlock to go to on match
4280 CatchSwitchInst(const CatchSwitchInst &CSI);
4281
4282 /// Create a new switch instruction, specifying a
4283 /// default destination. The number of additional handlers can be specified
4284 /// here to make memory allocation more efficient.
4285 /// This constructor can also autoinsert before another instruction.
4286 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4287 unsigned NumHandlers, const Twine &NameStr,
4288 Instruction *InsertBefore);
4289
4290 /// Create a new switch instruction, specifying a
4291 /// default destination. The number of additional handlers can be specified
4292 /// here to make memory allocation more efficient.
4293 /// This constructor also autoinserts at the end of the specified BasicBlock.
4294 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4295 unsigned NumHandlers, const Twine &NameStr,
4296 BasicBlock *InsertAtEnd);
4297
4298 // allocate space for exactly zero operands
4299 void *operator new(size_t S) { return User::operator new(S); }
4300
4301 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4302 void growOperands(unsigned Size);
4303
4304protected:
4305 // Note: Instruction needs to be a friend here to call cloneImpl.
4306 friend class Instruction;
4307
4308 CatchSwitchInst *cloneImpl() const;
4309
4310public:
4311 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4312
4313 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4314 unsigned NumHandlers,
4315 const Twine &NameStr = "",
4316 Instruction *InsertBefore = nullptr) {
4317 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4318 InsertBefore);
4319 }
4320
4321 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4322 unsigned NumHandlers, const Twine &NameStr,
4323 BasicBlock *InsertAtEnd) {
4324 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4325 InsertAtEnd);
4326 }
4327
4328 /// Provide fast operand accessors
4329 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4330
4331 // Accessor Methods for CatchSwitch stmt
4332 Value *getParentPad() const { return getOperand(0); }
4333 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4334
4335 // Accessor Methods for CatchSwitch stmt
4336 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4337 bool unwindsToCaller() const { return !hasUnwindDest(); }
4338 BasicBlock *getUnwindDest() const {
4339 if (hasUnwindDest())
4340 return cast<BasicBlock>(getOperand(1));
4341 return nullptr;
4342 }
4343 void setUnwindDest(BasicBlock *UnwindDest) {
4344 assert(UnwindDest);
4345 assert(hasUnwindDest());
4346 setOperand(1, UnwindDest);
4347 }
4348
4349 /// return the number of 'handlers' in this catchswitch
4350 /// instruction, except the default handler
4351 unsigned getNumHandlers() const {
4352 if (hasUnwindDest())
4353 return getNumOperands() - 2;
4354 return getNumOperands() - 1;
4355 }
4356
4357private:
4358 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4359 static const BasicBlock *handler_helper(const Value *V) {
4360 return cast<BasicBlock>(V);
4361 }
4362
4363public:
4364 using DerefFnTy = BasicBlock *(*)(Value *);
4365 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4366 using handler_range = iterator_range<handler_iterator>;
4367 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4368 using const_handler_iterator =
4369 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4370 using const_handler_range = iterator_range<const_handler_iterator>;
4371
4372 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4373 handler_iterator handler_begin() {
4374 op_iterator It = op_begin() + 1;
4375 if (hasUnwindDest())
4376 ++It;
4377 return handler_iterator(It, DerefFnTy(handler_helper));
4378 }
4379
4380 /// Returns an iterator that points to the first handler in the
4381 /// CatchSwitchInst.
4382 const_handler_iterator handler_begin() const {
4383 const_op_iterator It = op_begin() + 1;
4384 if (hasUnwindDest())
4385 ++It;
4386 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4387 }
4388
4389 /// Returns a read-only iterator that points one past the last
4390 /// handler in the CatchSwitchInst.
4391 handler_iterator handler_end() {
4392 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4393 }
4394
4395 /// Returns an iterator that points one past the last handler in the
4396 /// CatchSwitchInst.
4397 const_handler_iterator handler_end() const {
4398 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4399 }
4400
4401 /// iteration adapter for range-for loops.
4402 handler_range handlers() {
4403 return make_range(handler_begin(), handler_end());
4404 }
4405
4406 /// iteration adapter for range-for loops.
4407 const_handler_range handlers() const {
4408 return make_range(handler_begin(), handler_end());
4409 }
4410
4411 /// Add an entry to the switch instruction...
4412 /// Note:
4413 /// This action invalidates handler_end(). Old handler_end() iterator will
4414 /// point to the added handler.
4415 void addHandler(BasicBlock *Dest);
4416
4417 void removeHandler(handler_iterator HI);
4418
4419 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4420 BasicBlock *getSuccessor(unsigned Idx) const {
4421 assert(Idx < getNumSuccessors() &&
4422        "Successor # out of range for catchswitch!");
4423 return cast<BasicBlock>(getOperand(Idx + 1));
4424 }
4425 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4426 assert(Idx < getNumSuccessors() &&
4427        "Successor # out of range for catchswitch!");
4428 setOperand(Idx + 1, NewSucc);
4429 }
4430
4431 // Methods for support type inquiry through isa, cast, and dyn_cast:
4432 static bool classof(const Instruction *I) {
4433 return I->getOpcode() == Instruction::CatchSwitch;
4434 }
4435 static bool classof(const Value *V) {
4436 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4437 }
4438};
4439
4440template <>
4441struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4442
4443 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4444
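As a usage sketch (not part of the header; CS and NewHandler are illustrative names), the handler iterators and addHandler() declared above can be combined like this:

#include "llvm/IR/Instructions.h"

// Hedged sketch: inspect the existing handler blocks of a catchswitch, then
// append another handler to its hung-off operand list.
static void appendHandler(llvm::CatchSwitchInst *CS,
                          llvm::BasicBlock *NewHandler) {
  for (llvm::BasicBlock *Handler : CS->handlers())
    (void)Handler;            // examine existing handlers here if needed
  CS->addHandler(NewHandler); // note: this invalidates handler_end()
}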
4445//===----------------------------------------------------------------------===//
4446// CleanupPadInst Class
4447//===----------------------------------------------------------------------===//
4448class CleanupPadInst : public FuncletPadInst {
4449private:
4450 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4451 unsigned Values, const Twine &NameStr,
4452 Instruction *InsertBefore)
4453 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4454 NameStr, InsertBefore) {}
4455 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4456 unsigned Values, const Twine &NameStr,
4457 BasicBlock *InsertAtEnd)
4458 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4459 NameStr, InsertAtEnd) {}
4460
4461public:
4462 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4463 const Twine &NameStr = "",
4464 Instruction *InsertBefore = nullptr) {
4465 unsigned Values = 1 + Args.size();
4466 return new (Values)
4467 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4468 }
4469
4470 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4471 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4472 unsigned Values = 1 + Args.size();
4473 return new (Values)
4474 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4475 }
4476
4477 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4478 static bool classof(const Instruction *I) {
4479 return I->getOpcode() == Instruction::CleanupPad;
4480 }
4481 static bool classof(const Value *V) {
4482 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4483 }
4484};
4485
4486//===----------------------------------------------------------------------===//
4487// CatchPadInst Class
4488//===----------------------------------------------------------------------===//
4489class CatchPadInst : public FuncletPadInst {
4490private:
4491 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4492 unsigned Values, const Twine &NameStr,
4493 Instruction *InsertBefore)
4494 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4495 NameStr, InsertBefore) {}
4496 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4497 unsigned Values, const Twine &NameStr,
4498 BasicBlock *InsertAtEnd)
4499 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4500 NameStr, InsertAtEnd) {}
4501
4502public:
4503 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4504 const Twine &NameStr = "",
4505 Instruction *InsertBefore = nullptr) {
4506 unsigned Values = 1 + Args.size();
4507 return new (Values)
4508 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4509 }
4510
4511 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4512 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4513 unsigned Values = 1 + Args.size();
4514 return new (Values)
4515 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4516 }
4517
4518 /// Convenience accessors
4519 CatchSwitchInst *getCatchSwitch() const {
4520 return cast<CatchSwitchInst>(Op<-1>());
4521 }
4522 void setCatchSwitch(Value *CatchSwitch) {
4523 assert(CatchSwitch);
4524 Op<-1>() = CatchSwitch;
4525 }
4526
4527 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4528 static bool classof(const Instruction *I) {
4529 return I->getOpcode() == Instruction::CatchPad;
4530 }
4531 static bool classof(const Value *V) {
4532 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4533 }
4534};
4535
4536//===----------------------------------------------------------------------===//
4537// CatchReturnInst Class
4538//===----------------------------------------------------------------------===//
4539
4540class CatchReturnInst : public Instruction {
4541 CatchReturnInst(const CatchReturnInst &RI);
4542 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4543 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4544
4545 void init(Value *CatchPad, BasicBlock *BB);
4546
4547protected:
4548 // Note: Instruction needs to be a friend here to call cloneImpl.
4549 friend class Instruction;
4550
4551 CatchReturnInst *cloneImpl() const;
4552
4553public:
4554 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4555 Instruction *InsertBefore = nullptr) {
4556 assert(CatchPad);
4557 assert(BB);
4558 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4559 }
4560
4561 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4562 BasicBlock *InsertAtEnd) {
4563 assert(CatchPad);
4564 assert(BB);
4565 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4566 }
4567
4568 /// Provide fast operand accessors
4569 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4570
4571 /// Convenience accessors.
4572 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4573 void setCatchPad(CatchPadInst *CatchPad) {
4574 assert(CatchPad);
4575 Op<0>() = CatchPad;
4576 }
4577
4578 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4579 void setSuccessor(BasicBlock *NewSucc) {
4580 assert(NewSucc);
4581 Op<1>() = NewSucc;
4582 }
4583 unsigned getNumSuccessors() const { return 1; }
4584
4585 /// Get the parentPad of this catchret's catchpad's catchswitch.
4586 /// The successor block is implicitly a member of this funclet.
4587 Value *getCatchSwitchParentPad() const {
4588 return getCatchPad()->getCatchSwitch()->getParentPad();
4589 }
4590
4591 // Methods for support type inquiry through isa, cast, and dyn_cast:
4592 static bool classof(const Instruction *I) {
4593 return (I->getOpcode() == Instruction::CatchRet);
4594 }
4595 static bool classof(const Value *V) {
4596 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4597 }
4598
4599private:
4600 BasicBlock *getSuccessor(unsigned Idx) const {
4601 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4602 return getSuccessor();
4603 }
4604
4605 void setSuccessor(unsigned Idx, BasicBlock *B) {
4606 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4607 setSuccessor(B);
4608 }
4609};
4610
4611template <>
4612struct OperandTraits<CatchReturnInst>
4613 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4614
4615 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
4616
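As a usage sketch (not part of the header; CPI, Target and HandlerBB are illustrative names), a catch handler is typically terminated with CatchReturnInst::Create:

#include "llvm/IR/Instructions.h"

// Hedged sketch: leave the catch handler block HandlerBB, transferring
// control to Target; the catchpad CPI supplies the funclet token operand.
static llvm::CatchReturnInst *emitCatchRet(llvm::CatchPadInst *CPI,
                                           llvm::BasicBlock *Target,
                                           llvm::BasicBlock *HandlerBB) {
  llvm::CatchReturnInst *CRI =
      llvm::CatchReturnInst::Create(CPI, Target, /*InsertAtEnd=*/HandlerBB);
  // getCatchSwitchParentPad() walks catchret -> catchpad -> catchswitch.
  (void)CRI->getCatchSwitchParentPad();
  return CRI;
}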
4617//===----------------------------------------------------------------------===//
4618// CleanupReturnInst Class
4619//===----------------------------------------------------------------------===//
4620
4621class CleanupReturnInst : public Instruction {
4622 using UnwindDestField = BoolBitfieldElementT<0>;
4623
4624private:
4625 CleanupReturnInst(const CleanupReturnInst &RI);
4626 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4627 Instruction *InsertBefore = nullptr);
4628 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4629 BasicBlock *InsertAtEnd);
4630
4631 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4632
4633protected:
4634 // Note: Instruction needs to be a friend here to call cloneImpl.
4635 friend class Instruction;
4636
4637 CleanupReturnInst *cloneImpl() const;
4638
4639public:
4640 static CleanupReturnInst *Create(Value *CleanupPad,
4641 BasicBlock *UnwindBB = nullptr,
4642 Instruction *InsertBefore = nullptr) {
4643 assert(CleanupPad);
4644 unsigned Values = 1;
4645 if (UnwindBB)
4646 ++Values;
4647 return new (Values)
4648 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4649 }
4650
4651 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4652 BasicBlock *InsertAtEnd) {
4653 assert(CleanupPad);
4654 unsigned Values = 1;
4655 if (UnwindBB)
4656 ++Values;
4657 return new (Values)
4658 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4659 }
4660
4661 /// Provide fast operand accessors
4662 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4663
4664 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4665 bool unwindsToCaller() const { return !hasUnwindDest(); }
4666
4667 /// Convenience accessor.
4668 CleanupPadInst *getCleanupPad() const {
4669 return cast<CleanupPadInst>(Op<0>());
4670 }
4671 void setCleanupPad(CleanupPadInst *CleanupPad) {
4672 assert(CleanupPad);
4673 Op<0>() = CleanupPad;
4674 }
4675
4676 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4677
4678 BasicBlock *getUnwindDest() const {
4679 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4680 }
4681 void setUnwindDest(BasicBlock *NewDest) {
4682 assert(NewDest);
4683 assert(hasUnwindDest());
4684 Op<1>() = NewDest;
4685 }
4686
4687 // Methods for support type inquiry through isa, cast, and dyn_cast:
4688 static bool classof(const Instruction *I) {
4689 return (I->getOpcode() == Instruction::CleanupRet);
4690 }
4691 static bool classof(const Value *V) {
4692 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4693 }
4694
4695private:
4696 BasicBlock *getSuccessor(unsigned Idx) const {
4697 assert(Idx == 0);
4698 return getUnwindDest();
4699 }
4700
4701 void setSuccessor(unsigned Idx, BasicBlock *B) {
4702 assert(Idx == 0);
4703 setUnwindDest(B);
4704 }
4705
4706 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4707 // method so that subclasses cannot accidentally use it.
4708 template <typename Bitfield>
4709 void setSubclassData(typename Bitfield::Type Value) {
4710 Instruction::setSubclassData<Bitfield>(Value);
4711 }
4712};
4713
4714template <>
4715struct OperandTraits<CleanupReturnInst>
4716 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4717
4718 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
4719
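As a usage sketch (not part of the header; Pad, UnwindBB and BB are illustrative names), CleanupReturnInst::Create allocates one operand when UnwindBB is null, in which case the cleanupret unwinds to the caller:

#include "llvm/IR/Instructions.h"

// Hedged sketch: terminate the cleanup funclet rooted at Pad, either
// unwinding to UnwindBB or, when UnwindBB is null, to the caller.
static llvm::CleanupReturnInst *emitCleanupRet(llvm::CleanupPadInst *Pad,
                                               llvm::BasicBlock *UnwindBB,
                                               llvm::BasicBlock *BB) {
  return llvm::CleanupReturnInst::Create(Pad, UnwindBB, /*InsertAtEnd=*/BB);
}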
4720//===----------------------------------------------------------------------===//
4721// UnreachableInst Class
4722//===----------------------------------------------------------------------===//
4723
4724//===---------------------------------------------------------------------------
4725/// This function has undefined behavior. In particular, the
4726/// presence of this instruction indicates some higher level knowledge that the
4727/// end of the block cannot be reached.
4728///
4729class UnreachableInst : public Instruction {
4730protected:
4731 // Note: Instruction needs to be a friend here to call cloneImpl.
4732 friend class Instruction;
4733
4734 UnreachableInst *cloneImpl() const;
4735
4736public:
4737 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4738 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4739
4740 // allocate space for exactly zero operands
4741 void *operator new(size_t S) { return User::operator new(S, 0); }
4742 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4743
4744 unsigned getNumSuccessors() const { return 0; }
4745
4746 // Methods for support type inquiry through isa, cast, and dyn_cast:
4747 static bool classof(const Instruction *I) {
4748 return I->getOpcode() == Instruction::Unreachable;
4749 }
4750 static bool classof(const Value *V) {
4751 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4752 }
4753
4754private:
4755 BasicBlock *getSuccessor(unsigned idx) const {
4756 llvm_unreachable("UnreachableInst has no successors!");
4757 }
4758
4759 void setSuccessor(unsigned idx, BasicBlock *B) {
4760 llvm_unreachable("UnreachableInst has no successors!");
4761 }
4762};
4763
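As a usage sketch (not part of the header; BB is an illustrative name), the public constructor above can be used directly; the instruction allocates zero operands and has no successors:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

// Hedged sketch: mark the end of BB as unreachable.
static llvm::UnreachableInst *emitUnreachable(llvm::BasicBlock *BB) {
  return new llvm::UnreachableInst(BB->getContext(), /*InsertAtEnd=*/BB);
}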
4764//===----------------------------------------------------------------------===//
4765// TruncInst Class
4766//===----------------------------------------------------------------------===//
4767
4768/// This class represents a truncation of integer types.
4769class TruncInst : public CastInst {
4770protected:
4771 // Note: Instruction needs to be a friend here to call cloneImpl.
4772 friend class Instruction;
4773
4774 /// Clone an identical TruncInst
4775 TruncInst *cloneImpl() const;
4776
4777public:
4778 /// Constructor with insert-before-instruction semantics
4779 TruncInst(
4780 Value *S, ///< The value to be truncated
4781 Type *Ty, ///< The (smaller) type to truncate to
4782 const Twine &NameStr = "", ///< A name for the new instruction
4783 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4784 );
4785
4786 /// Constructor with insert-at-end-of-block semantics
4787 TruncInst(
4788 Value *S, ///< The value to be truncated
4789 Type *Ty, ///< The (smaller) type to truncate to
4790 const Twine &NameStr, ///< A name for the new instruction
4791 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4792 );
4793
4794 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4795 static bool classof(const Instruction *I) {
4796 return I->getOpcode() == Trunc;
4797 }
4798 static bool classof(const Value *V) {
4799 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4800 }
4801};
4802
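As a usage sketch (not part of the header; V and BB are illustrative names), TruncInst and the other cast subclasses below all expose the same two constructors; here the insert-at-end form truncates an integer value to i8:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

// Hedged sketch: truncate the integer value V to i8 at the end of BB.
// ZExtInst, SExtInst and the remaining cast classes are created the same way.
static llvm::TruncInst *truncToI8(llvm::Value *V, llvm::BasicBlock *BB) {
  return new llvm::TruncInst(V, llvm::Type::getInt8Ty(BB->getContext()),
                             "lo8", /*InsertAtEnd=*/BB);
}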
4803//===----------------------------------------------------------------------===//
4804// ZExtInst Class
4805//===----------------------------------------------------------------------===//
4806
4807/// This class represents zero extension of integer types.
4808class ZExtInst : public CastInst {
4809protected:
4810 // Note: Instruction needs to be a friend here to call cloneImpl.
4811 friend class Instruction;
4812
4813 /// Clone an identical ZExtInst
4814 ZExtInst *cloneImpl() const;
4815
4816public:
4817 /// Constructor with insert-before-instruction semantics
4818 ZExtInst(
4819 Value *S, ///< The value to be zero extended
4820 Type *Ty, ///< The type to zero extend to
4821 const Twine &NameStr = "", ///< A name for the new instruction
4822 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4823 );
4824
4825 /// Constructor with insert-at-end semantics.
4826 ZExtInst(
4827 Value *S, ///< The value to be zero extended
4828 Type *Ty, ///< The type to zero extend to
4829 const Twine &NameStr, ///< A name for the new instruction
4830 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4831 );
4832
4833 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4834 static bool classof(const Instruction *I) {
4835 return I->getOpcode() == ZExt;
4836 }
4837 static bool classof(const Value *V) {
4838 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4839 }
4840};
4841
4842//===----------------------------------------------------------------------===//
4843// SExtInst Class
4844//===----------------------------------------------------------------------===//
4845
4846/// This class represents a sign extension of integer types.
4847class SExtInst : public CastInst {
4848protected:
4849 // Note: Instruction needs to be a friend here to call cloneImpl.
4850 friend class Instruction;
4851
4852 /// Clone an identical SExtInst
4853 SExtInst *cloneImpl() const;
4854
4855public:
4856 /// Constructor with insert-before-instruction semantics
4857 SExtInst(
4858 Value *S, ///< The value to be sign extended
4859 Type *Ty, ///< The type to sign extend to
4860 const Twine &NameStr = "", ///< A name for the new instruction
4861 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4862 );
4863
4864 /// Constructor with insert-at-end-of-block semantics
4865 SExtInst(
4866 Value *S, ///< The value to be sign extended
4867 Type *Ty, ///< The type to sign extend to
4868 const Twine &NameStr, ///< A name for the new instruction
4869 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4870 );
4871
4872 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4873 static bool classof(const Instruction *I) {
4874 return I->getOpcode() == SExt;
4875 }
4876 static bool classof(const Value *V) {
4877 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4878 }
4879};
4880
4881//===----------------------------------------------------------------------===//
4882// FPTruncInst Class
4883//===----------------------------------------------------------------------===//
4884
4885/// This class represents a truncation of floating point types.
4886class FPTruncInst : public CastInst {
4887protected:
4888 // Note: Instruction needs to be a friend here to call cloneImpl.
4889 friend class Instruction;
4890
4891 /// Clone an identical FPTruncInst
4892 FPTruncInst *cloneImpl() const;
4893
4894public:
4895 /// Constructor with insert-before-instruction semantics
4896 FPTruncInst(
4897 Value *S, ///< The value to be truncated
4898 Type *Ty, ///< The type to truncate to
4899 const Twine &NameStr = "", ///< A name for the new instruction
4900 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4901 );
4902
4903 /// Constructor with insert-at-end-of-block semantics
4904 FPTruncInst(
4905 Value *S, ///< The value to be truncated
4906 Type *Ty, ///< The type to truncate to
4907 const Twine &NameStr, ///< A name for the new instruction
4908 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4909 );
4910
4911 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4912 static bool classof(const Instruction *I) {
4913 return I->getOpcode() == FPTrunc;
4914 }
4915 static bool classof(const Value *V) {
4916 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4917 }
4918};
4919
4920//===----------------------------------------------------------------------===//
4921// FPExtInst Class
4922//===----------------------------------------------------------------------===//
4923
4924/// This class represents an extension of floating point types.
4925class FPExtInst : public CastInst {
4926protected:
4927 // Note: Instruction needs to be a friend here to call cloneImpl.
4928 friend class Instruction;
4929
4930 /// Clone an identical FPExtInst
4931 FPExtInst *cloneImpl() const;
4932
4933public:
4934 /// Constructor with insert-before-instruction semantics
4935 FPExtInst(
4936 Value *S, ///< The value to be extended
4937 Type *Ty, ///< The type to extend to
4938 const Twine &NameStr = "", ///< A name for the new instruction
4939 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4940 );
4941
4942 /// Constructor with insert-at-end-of-block semantics
4943 FPExtInst(
4944 Value *S, ///< The value to be extended
4945 Type *Ty, ///< The type to extend to
4946 const Twine &NameStr, ///< A name for the new instruction
4947 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4948 );
4949
4950 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4951 static bool classof(const Instruction *I) {
4952 return I->getOpcode() == FPExt;
4953 }
4954 static bool classof(const Value *V) {
4955 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4956 }
4957};
4958
4959//===----------------------------------------------------------------------===//
4960// UIToFPInst Class
4961//===----------------------------------------------------------------------===//
4962
4963/// This class represents a cast unsigned integer to floating point.
4964class UIToFPInst : public CastInst {
4965protected:
4966 // Note: Instruction needs to be a friend here to call cloneImpl.
4967 friend class Instruction;
4968
4969 /// Clone an identical UIToFPInst
4970 UIToFPInst *cloneImpl() const;
4971
4972public:
4973 /// Constructor with insert-before-instruction semantics
4974 UIToFPInst(
4975 Value *S, ///< The value to be converted
4976 Type *Ty, ///< The type to convert to
4977 const Twine &NameStr = "", ///< A name for the new instruction
4978 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4979 );
4980
4981 /// Constructor with insert-at-end-of-block semantics
4982 UIToFPInst(
4983 Value *S, ///< The value to be converted
4984 Type *Ty, ///< The type to convert to
4985 const Twine &NameStr, ///< A name for the new instruction
4986 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4987 );
4988
4989 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4990 static bool classof(const Instruction *I) {
4991 return I->getOpcode() == UIToFP;
4992 }
4993 static bool classof(const Value *V) {
4994 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4995 }
4996};
4997
4998//===----------------------------------------------------------------------===//
4999// SIToFPInst Class
5000//===----------------------------------------------------------------------===//
5001
5002/// This class represents a cast from signed integer to floating point.
5003class SIToFPInst : public CastInst {
5004protected:
5005 // Note: Instruction needs to be a friend here to call cloneImpl.
5006 friend class Instruction;
5007
5008 /// Clone an identical SIToFPInst
5009 SIToFPInst *cloneImpl() const;
5010
5011public:
5012 /// Constructor with insert-before-instruction semantics
5013 SIToFPInst(
5014 Value *S, ///< The value to be converted
5015 Type *Ty, ///< The type to convert to
5016 const Twine &NameStr = "", ///< A name for the new instruction
5017 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5018 );
5019
5020 /// Constructor with insert-at-end-of-block semantics
5021 SIToFPInst(
5022 Value *S, ///< The value to be converted
5023 Type *Ty, ///< The type to convert to
5024 const Twine &NameStr, ///< A name for the new instruction
5025 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5026 );
5027
5028 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5029 static bool classof(const Instruction *I) {
5030 return I->getOpcode() == SIToFP;
5031 }
5032 static bool classof(const Value *V) {
5033 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5034 }
5035};
5036
5037//===----------------------------------------------------------------------===//
5038// FPToUIInst Class
5039//===----------------------------------------------------------------------===//
5040
5041/// This class represents a cast from floating point to unsigned integer
5042class FPToUIInst : public CastInst {
5043protected:
5044 // Note: Instruction needs to be a friend here to call cloneImpl.
5045 friend class Instruction;
5046
5047 /// Clone an identical FPToUIInst
5048 FPToUIInst *cloneImpl() const;
5049
5050public:
5051 /// Constructor with insert-before-instruction semantics
5052 FPToUIInst(
5053 Value *S, ///< The value to be converted
5054 Type *Ty, ///< The type to convert to
5055 const Twine &NameStr = "", ///< A name for the new instruction
5056 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5057 );
5058
5059 /// Constructor with insert-at-end-of-block semantics
5060 FPToUIInst(
5061 Value *S, ///< The value to be converted
5062 Type *Ty, ///< The type to convert to
5063 const Twine &NameStr, ///< A name for the new instruction
5064 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
5065 );
5066
5067 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5068 static bool classof(const Instruction *I) {
5069 return I->getOpcode() == FPToUI;
5070 }
5071 static bool classof(const Value *V) {
5072 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5073 }
5074};
5075
5076//===----------------------------------------------------------------------===//
5077// FPToSIInst Class
5078//===----------------------------------------------------------------------===//
5079
5080/// This class represents a cast from floating point to signed integer.
5081class FPToSIInst : public CastInst {
5082protected:
5083 // Note: Instruction needs to be a friend here to call cloneImpl.
5084 friend class Instruction;
5085
5086 /// Clone an identical FPToSIInst
5087 FPToSIInst *cloneImpl() const;
5088
5089public:
5090 /// Constructor with insert-before-instruction semantics
5091 FPToSIInst(
5092 Value *S, ///< The value to be converted
5093 Type *Ty, ///< The type to convert to
5094 const Twine &NameStr = "", ///< A name for the new instruction
5095 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5096 );
5097
5098 /// Constructor with insert-at-end-of-block semantics
5099 FPToSIInst(
5100 Value *S, ///< The value to be converted
5101 Type *Ty, ///< The type to convert to
5102 const Twine &NameStr, ///< A name for the new instruction
5103 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5104 );
5105
5106 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5107 static bool classof(const Instruction *I) {
5108 return I->getOpcode() == FPToSI;
5109 }
5110 static bool classof(const Value *V) {
5111 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5112 }
5113};
5114
5115//===----------------------------------------------------------------------===//
5116// IntToPtrInst Class
5117//===----------------------------------------------------------------------===//
5118
5119/// This class represents a cast from an integer to a pointer.
5120class IntToPtrInst : public CastInst {
5121public:
5122 // Note: Instruction needs to be a friend here to call cloneImpl.
5123 friend class Instruction;
5124
5125 /// Constructor with insert-before-instruction semantics
5126 IntToPtrInst(
5127 Value *S, ///< The value to be converted
5128 Type *Ty, ///< The type to convert to
5129 const Twine &NameStr = "", ///< A name for the new instruction
5130 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5131 );
5132
5133 /// Constructor with insert-at-end-of-block semantics
5134 IntToPtrInst(
5135 Value *S, ///< The value to be converted
5136 Type *Ty, ///< The type to convert to
5137 const Twine &NameStr, ///< A name for the new instruction
5138 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5139 );
5140
5141 /// Clone an identical IntToPtrInst.
5142 IntToPtrInst *cloneImpl() const;
5143
5144 /// Returns the address space of this instruction's pointer type.
5145 unsigned getAddressSpace() const {
5146 return getType()->getPointerAddressSpace();
5147 }
5148
5149 // Methods to support type inquiry through isa, cast, and dyn_cast:
5150 static bool classof(const Instruction *I) {
5151 return I->getOpcode() == IntToPtr;
5152 }
5153 static bool classof(const Value *V) {
5154 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5155 }
5156};
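
A hedged usage sketch (the function name, value, and block are assumptions, not part of the header): the insert-at-end constructor appends the new cast to a basic block, and getAddressSpace() then reports the address space of the resulting pointer type.

#include "llvm/IR/Instructions.h"

// Illustrative only: cast an integer value to a pointer type at the end of BB
// and return the address space of the produced pointer.
static unsigned castIntToPtr(llvm::Value *IntVal, llvm::PointerType *PtrTy,
                             llvm::BasicBlock *BB) {
  auto *Cast = new llvm::IntToPtrInst(IntVal, PtrTy, "int.as.ptr", BB);
  return Cast->getAddressSpace();
}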
5157
5158//===----------------------------------------------------------------------===//
5159// PtrToIntInst Class
5160//===----------------------------------------------------------------------===//
5161
5162/// This class represents a cast from a pointer to an integer.
5163class PtrToIntInst : public CastInst {
5164protected:
5165 // Note: Instruction needs to be a friend here to call cloneImpl.
5166 friend class Instruction;
5167
5168 /// Clone an identical PtrToIntInst.
5169 PtrToIntInst *cloneImpl() const;
5170
5171public:
5172 /// Constructor with insert-before-instruction semantics
5173 PtrToIntInst(
5174 Value *S, ///< The value to be converted
5175 Type *Ty, ///< The type to convert to
5176 const Twine &NameStr = "", ///< A name for the new instruction
5177 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5178 );
5179
5180 /// Constructor with insert-at-end-of-block semantics
5181 PtrToIntInst(
5182 Value *S, ///< The value to be converted
5183 Type *Ty, ///< The type to convert to
5184 const Twine &NameStr, ///< A name for the new instruction
5185 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5186 );
5187
5188 /// Gets the pointer operand.
5189 Value *getPointerOperand() { return getOperand(0); }
5190 /// Gets the pointer operand.
5191 const Value *getPointerOperand() const { return getOperand(0); }
5192 /// Gets the operand index of the pointer operand.
5193 static unsigned getPointerOperandIndex() { return 0U; }
5194
5195 /// Returns the address space of the pointer operand.
5196 unsigned getPointerAddressSpace() const {
5197 return getPointerOperand()->getType()->getPointerAddressSpace();
5198 }
5199
5200 // Methods to support type inquiry through isa, cast, and dyn_cast:
5201 static bool classof(const Instruction *I) {
5202 return I->getOpcode() == PtrToInt;
5203 }
5204 static bool classof(const Value *V) {
5205 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5206 }
5207};
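
A hedged sketch of the accessors (PTI is assumed to be a valid PtrToIntInst obtained elsewhere):

#include "llvm/IR/Instructions.h"

// Illustrative only: the source pointer is operand 0, and its pointer type,
// not the integer result type, carries the address space.
static unsigned sourceAddressSpace(const llvm::PtrToIntInst *PTI) {
  return PTI->getPointerAddressSpace();
}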
5208
5209//===----------------------------------------------------------------------===//
5210// BitCastInst Class
5211//===----------------------------------------------------------------------===//
5212
5213/// This class represents a no-op cast from one type to another.
5214class BitCastInst : public CastInst {
5215protected:
5216 // Note: Instruction needs to be a friend here to call cloneImpl.
5217 friend class Instruction;
5218
5219 /// Clone an identical BitCastInst.
5220 BitCastInst *cloneImpl() const;
5221
5222public:
5223 /// Constructor with insert-before-instruction semantics
5224 BitCastInst(
5225 Value *S, ///< The value to be cast
5226 Type *Ty, ///< The type to cast to
5227 const Twine &NameStr = "", ///< A name for the new instruction
5228 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5229 );
5230
5231 /// Constructor with insert-at-end-of-block semantics
5232 BitCastInst(
5233 Value *S, ///< The value to be cast
5234 Type *Ty, ///< The type to cast to
5235 const Twine &NameStr, ///< A name for the new instruction
5236 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5237 );
5238
5239 // Methods to support type inquiry through isa, cast, and dyn_cast:
5240 static bool classof(const Instruction *I) {
5241 return I->getOpcode() == BitCast;
5242 }
5243 static bool classof(const Value *V) {
5244 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5245 }
5246};
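
A hedged sketch (V, DestTy, and InsertPt are placeholders): a bitcast reinterprets the bits of a value as another type of the same size, so it is commonly created immediately before the instruction that needs the new type.

#include "llvm/IR/Instructions.h"

// Illustrative only: reinterpret V as DestTy, inserting the cast before InsertPt.
static llvm::Value *reinterpretAs(llvm::Value *V, llvm::Type *DestTy,
                                  llvm::Instruction *InsertPt) {
  return new llvm::BitCastInst(V, DestTy, "bc", InsertPt);
}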
5247
5248//===----------------------------------------------------------------------===//
5249// AddrSpaceCastInst Class
5250//===----------------------------------------------------------------------===//
5251
5252/// This class represents a conversion between pointers from one address space
5253/// to another.
5254class AddrSpaceCastInst : public CastInst {
5255protected:
5256 // Note: Instruction needs to be a friend here to call cloneImpl.
5257 friend class Instruction;
5258
5259 /// Clone an identical AddrSpaceCastInst.
5260 AddrSpaceCastInst *cloneImpl() const;
5261
5262public:
5263 /// Constructor with insert-before-instruction semantics
5264 AddrSpaceCastInst(
5265 Value *S, ///< The value to be cast
5266 Type *Ty, ///< The type to cast to
5267 const Twine &NameStr = "", ///< A name for the new instruction
5268 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5269 );
5270
5271 /// Constructor with insert-at-end-of-block semantics
5272 AddrSpaceCastInst(
5273 Value *S, ///< The value to be cast
5274 Type *Ty, ///< The type to cast to
5275 const Twine &NameStr, ///< A name for the new instruction
5276 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5277 );
5278
5279 // Methods to support type inquiry through isa, cast, and dyn_cast:
5280 static bool classof(const Instruction *I) {
5281 return I->getOpcode() == AddrSpaceCast;
5282 }
5283 static bool classof(const Value *V) {
5284 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5285 }
5286
5287 /// Gets the pointer operand.
5288 Value *getPointerOperand() {
5289 return getOperand(0);
5290 }
5291
5292 /// Gets the pointer operand.
5293 const Value *getPointerOperand() const {
5294 return getOperand(0);
5295 }
5296
5297 /// Gets the operand index of the pointer operand.
5298 static unsigned getPointerOperandIndex() {
5299 return 0U;
5300 }
5301
5302 /// Returns the address space of the pointer operand.
5303 unsigned getSrcAddressSpace() const {
5304 return getPointerOperand()->getType()->getPointerAddressSpace();
5305 }
5306
5307 /// Returns the address space of the result.
5308 unsigned getDestAddressSpace() const {
5309 return getType()->getPointerAddressSpace();
5310 }
5311};
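
A hedged sketch using the two address-space accessors (ASC is assumed to be a valid AddrSpaceCastInst):

#include "llvm/IR/Instructions.h"

// Illustrative only: an addrspacecast whose source and destination address
// spaces already match performs no real conversion.
static bool isNoopAddrSpaceCast(const llvm::AddrSpaceCastInst *ASC) {
  return ASC->getSrcAddressSpace() == ASC->getDestAddressSpace();
}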
5312
5313//===----------------------------------------------------------------------===//
5314// Helper functions
5315//===----------------------------------------------------------------------===//
5316
5317/// A helper function that returns the pointer operand of a load or store
5318/// instruction. Returns nullptr if the value is not a load or store.
5319inline const Value *getLoadStorePointerOperand(const Value *V) {
5320 if (auto *Load = dyn_cast<LoadInst>(V))
5321 return Load->getPointerOperand();
5322 if (auto *Store = dyn_cast<StoreInst>(V))
5323 return Store->getPointerOperand();
5324 return nullptr;
5325}
5326inline Value *getLoadStorePointerOperand(Value *V) {
5327 return const_cast<Value *>(
5328 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5329}
5330
5331/// A helper function that returns the pointer operand of a load, store
5332/// or GEP instruction. Returns nullptr if the value is not a load, store, or GEP.
5333inline const Value *getPointerOperand(const Value *V) {
5334 if (auto *Ptr = getLoadStorePointerOperand(V))
5335 return Ptr;
5336 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5337 return Gep->getPointerOperand();
5338 return nullptr;
5339}
5340inline Value *getPointerOperand(Value *V) {
5341 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5342}
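
A hedged sketch of how these helpers are typically combined (the wrapper name and the incoming instruction are assumptions):

#include "llvm/IR/Instructions.h"

// Illustrative only: report which address an instruction accesses, if any.
// Loads and stores are handled via getLoadStorePointerOperand(); GEPs fall
// through to the GEP case; anything else yields nullptr.
static const llvm::Value *accessedPointer(const llvm::Instruction *I) {
  return llvm::getPointerOperand(I);
}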
5343
5344/// A helper function that returns the alignment of a load or store instruction.
5345inline Align getLoadStoreAlignment(Value *I) {
5346 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5347        "Expected Load or Store instruction");
5348 if (auto *LI = dyn_cast<LoadInst>(I))
5349 return LI->getAlign();
5350 return cast<StoreInst>(I)->getAlign();
5351}
5352
5353/// A helper function that returns the address space of the pointer operand of
5354/// a load or store instruction.
5355inline unsigned getLoadStoreAddressSpace(Value *I) {
5356 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5357        "Expected Load or Store instruction");
5358 if (auto *LI = dyn_cast<LoadInst>(I))
5359 return LI->getPointerAddressSpace();
5360 return cast<StoreInst>(I)->getPointerAddressSpace();
5361}
5362
5363/// A helper function that returns the type of the value loaded or stored by a load or store instruction.
5364inline Type *getLoadStoreType(Value *I) {
5365 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5366        "Expected Load or Store instruction");
5367 if (auto *LI = dyn_cast<LoadInst>(I))
5368 return LI->getType();
5369 return cast<StoreInst>(I)->getValueOperand()->getType();
5370}
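
A hedged sketch that bundles the three accessors above (the struct and function names are illustrative; the argument must already be a LoadInst or StoreInst, otherwise the asserts fire):

#include "llvm/IR/Instructions.h"

// Illustrative only: collect the access properties of a known load or store.
struct MemAccessInfo {
  llvm::Align Alignment;
  unsigned AddrSpace;
  llvm::Type *AccessTy;
};

static MemAccessInfo describeAccess(llvm::Value *I) {
  return {llvm::getLoadStoreAlignment(I), llvm::getLoadStoreAddressSpace(I),
          llvm::getLoadStoreType(I)};
}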
5371
5372/// A helper function that returns an atomic operation's sync scope; returns
5373/// None if it is not an atomic operation.
5374inline Optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5375 if (!I->isAtomic())
5376 return None;
5377 if (auto *AI = dyn_cast<LoadInst>(I))
5378 return AI->getSyncScopeID();
5379 if (auto *AI = dyn_cast<StoreInst>(I))
5380 return AI->getSyncScopeID();
5381 if (auto *AI = dyn_cast<FenceInst>(I))
5382 return AI->getSyncScopeID();
5383 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5384 return AI->getSyncScopeID();
5385 if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5386 return AI->getSyncScopeID();
5387 llvm_unreachable("unhandled atomic operation");
5388}
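
A hedged sketch (the predicate name is an assumption): because the helper returns None for non-atomic instructions, callers can test the Optional before comparing against a known scope such as SyncScope::SingleThread.

#include "llvm/IR/Instructions.h"

// Illustrative only: true iff I is an atomic operation whose effects are only
// ordered with respect to the current thread.
static bool isSingleThreadAtomic(const llvm::Instruction *I) {
  if (llvm::Optional<llvm::SyncScope::ID> SSID = llvm::getAtomicSyncScopeID(I))
    return *SSID == llvm::SyncScope::SingleThread;
  return false;
}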
5389
5390//===----------------------------------------------------------------------===//
5391// FreezeInst Class
5392//===----------------------------------------------------------------------===//
5393
5394/// This class represents a freeze instruction, which returns an arbitrary but
5395/// fixed concrete value if its operand is either a poison value or an undef value.
5396class FreezeInst : public UnaryInstruction {
5397protected:
5398 // Note: Instruction needs to be a friend here to call cloneImpl.
5399 friend class Instruction;
5400
5401 /// Clone an identical FreezeInst
5402 FreezeInst *cloneImpl() const;
5403
5404public:
5405 explicit FreezeInst(Value *S,
5406 const Twine &NameStr = "",
5407 Instruction *InsertBefore = nullptr);
5408 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5409
5410 // Methods to support type inquiry through isa, cast, and dyn_cast:
5411 static inline bool classof(const Instruction *I) {
5412 return I->getOpcode() == Freeze;
5413 }
5414 static inline bool classof(const Value *V) {
5415 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5416 }
5417};
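
A hedged sketch (Cond and InsertPt are placeholders): freezing a possibly poison or undef condition before branching on it pins the value to one concrete, if arbitrary, result for all of its uses.

#include "llvm/IR/Instructions.h"

// Illustrative only: insert a freeze of Cond immediately before InsertPt and
// use the frozen value from then on.
static llvm::Value *freezeCondition(llvm::Value *Cond, llvm::Instruction *InsertPt) {
  return new llvm::FreezeInst(Cond, "frozen", InsertPt);
}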
5418
5419} // end namespace llvm
5420
5421#endif // LLVM_IR_INSTRUCTIONS_H