Bug Summary

File: llvm/include/llvm/IR/Instructions.h
Warning: line 2514, column 17
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ValueTracking.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Analysis -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-12-11-181444-25759-1 -x c++ /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Analysis/ValueTracking.cpp

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Analysis/ValueTracking.cpp

1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains routines that help analyze properties that chains of
10// computations have.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/ValueTracking.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Analysis/AssumptionCache.h"
28#include "llvm/Analysis/GuardUtils.h"
29#include "llvm/Analysis/InstructionSimplify.h"
30#include "llvm/Analysis/Loads.h"
31#include "llvm/Analysis/LoopInfo.h"
32#include "llvm/Analysis/OptimizationRemarkEmitter.h"
33#include "llvm/Analysis/TargetLibraryInfo.h"
34#include "llvm/IR/Argument.h"
35#include "llvm/IR/Attributes.h"
36#include "llvm/IR/BasicBlock.h"
37#include "llvm/IR/CallSite.h"
38#include "llvm/IR/Constant.h"
39#include "llvm/IR/ConstantRange.h"
40#include "llvm/IR/Constants.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/DiagnosticInfo.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalAlias.h"
47#include "llvm/IR/GlobalValue.h"
48#include "llvm/IR/GlobalVariable.h"
49#include "llvm/IR/InstrTypes.h"
50#include "llvm/IR/Instruction.h"
51#include "llvm/IR/Instructions.h"
52#include "llvm/IR/IntrinsicInst.h"
53#include "llvm/IR/Intrinsics.h"
54#include "llvm/IR/LLVMContext.h"
55#include "llvm/IR/Metadata.h"
56#include "llvm/IR/Module.h"
57#include "llvm/IR/Operator.h"
58#include "llvm/IR/PatternMatch.h"
59#include "llvm/IR/Type.h"
60#include "llvm/IR/User.h"
61#include "llvm/IR/Value.h"
62#include "llvm/Support/Casting.h"
63#include "llvm/Support/CommandLine.h"
64#include "llvm/Support/Compiler.h"
65#include "llvm/Support/ErrorHandling.h"
66#include "llvm/Support/KnownBits.h"
67#include "llvm/Support/MathExtras.h"
68#include <algorithm>
69#include <array>
70#include <cassert>
71#include <cstdint>
72#include <iterator>
73#include <utility>
74
75using namespace llvm;
76using namespace llvm::PatternMatch;
77
78const unsigned MaxDepth = 6;
79
80// Controls the number of uses of the value searched for possible
81// dominating comparisons.
82static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
83 cl::Hidden, cl::init(20));
84
85/// Returns the bitwidth of the given scalar or pointer type. For vector types,
86/// returns the element type's bitwidth.
87static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
88 if (unsigned BitWidth = Ty->getScalarSizeInBits())
89 return BitWidth;
90
91 return DL.getIndexTypeSizeInBits(Ty);
92}
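// Illustrative sketch (editorial, not part of ValueTracking.cpp): for i32,
// getScalarSizeInBits() returns 32 and is used directly; for a pointer type it
// returns 0, so the DataLayout's index width is used instead:
//   getBitWidth(Type::getInt32Ty(Ctx), DL);    // 32
//   getBitWidth(Type::getInt8PtrTy(Ctx), DL);  // e.g. 64 on x86_64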
93
94namespace {
95
96// Simplifying using an assume can only be done in a particular control-flow
97// context (the context instruction provides that context). If an assume and
98// the context instruction are not in the same block then the DT helps in
99// figuring out if we can use it.
100struct Query {
101 const DataLayout &DL;
102 AssumptionCache *AC;
103 const Instruction *CxtI;
104 const DominatorTree *DT;
105
106 // Unlike the other analyses, this may be a nullptr because not all clients
107 // provide it currently.
108 OptimizationRemarkEmitter *ORE;
109
110 /// Set of assumptions that should be excluded from further queries.
111 /// This is because of the potential for mutual recursion to cause
112 /// computeKnownBits to repeatedly visit the same assume intrinsic. The
 113 /// classic case of this is assume(x == y), which will attempt to determine
114 /// bits in x from bits in y, which will attempt to determine bits in y from
115 /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
116 /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
117 /// (all of which can call computeKnownBits), and so on.
118 std::array<const Value *, MaxDepth> Excluded;
119
120 /// If true, it is safe to use metadata during simplification.
121 InstrInfoQuery IIQ;
122
123 unsigned NumExcluded = 0;
124
125 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
126 const DominatorTree *DT, bool UseInstrInfo,
127 OptimizationRemarkEmitter *ORE = nullptr)
128 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
129
130 Query(const Query &Q, const Value *NewExcl)
131 : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
132 NumExcluded(Q.NumExcluded) {
133 Excluded = Q.Excluded;
134 Excluded[NumExcluded++] = NewExcl;
 135 assert(NumExcluded <= Excluded.size());
136 }
137
138 bool isExcluded(const Value *Value) const {
139 if (NumExcluded == 0)
140 return false;
141 auto End = Excluded.begin() + NumExcluded;
142 return std::find(Excluded.begin(), End, Value) != End;
143 }
144};
145
146} // end anonymous namespace
147
148// Given the provided Value and, potentially, a context instruction, return
149// the preferred context instruction (if any).
150static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
151 // If we've been provided with a context instruction, then use that (provided
152 // it has been inserted).
153 if (CxtI && CxtI->getParent())
154 return CxtI;
155
156 // If the value is really an already-inserted instruction, then use that.
157 CxtI = dyn_cast<Instruction>(V);
158 if (CxtI && CxtI->getParent())
159 return CxtI;
160
161 return nullptr;
162}
163
164static void computeKnownBits(const Value *V, KnownBits &Known,
165 unsigned Depth, const Query &Q);
166
167void llvm::computeKnownBits(const Value *V, KnownBits &Known,
168 const DataLayout &DL, unsigned Depth,
169 AssumptionCache *AC, const Instruction *CxtI,
170 const DominatorTree *DT,
171 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
172 ::computeKnownBits(V, Known, Depth,
173 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
174}
175
176static KnownBits computeKnownBits(const Value *V, unsigned Depth,
177 const Query &Q);
178
179KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
180 unsigned Depth, AssumptionCache *AC,
181 const Instruction *CxtI,
182 const DominatorTree *DT,
183 OptimizationRemarkEmitter *ORE,
184 bool UseInstrInfo) {
185 return ::computeKnownBits(
186 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
187}
188
189bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
190 const DataLayout &DL, AssumptionCache *AC,
191 const Instruction *CxtI, const DominatorTree *DT,
192 bool UseInstrInfo) {
 193 assert(LHS->getType() == RHS->getType() &&
 194 "LHS and RHS should have the same type");
 195 assert(LHS->getType()->isIntOrIntVectorTy() &&
 196 "LHS and RHS should be integers");
197 // Look for an inverted mask: (X & ~M) op (Y & M).
198 Value *M;
199 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
200 match(RHS, m_c_And(m_Specific(M), m_Value())))
201 return true;
202 if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
203 match(LHS, m_c_And(m_Specific(M), m_Value())))
204 return true;
205 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
206 KnownBits LHSKnown(IT->getBitWidth());
207 KnownBits RHSKnown(IT->getBitWidth());
208 computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
209 computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
210 return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
211}
212
213bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
214 for (const User *U : CxtI->users()) {
215 if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
216 if (IC->isEquality())
217 if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
218 if (C->isNullValue())
219 continue;
220 return false;
221 }
222 return true;
223}
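// For illustration (sketch): this returns true when every user of CxtI is an
// equality compare against zero, e.g. the canonical memcmp idiom
//   %res = call i32 @memcmp(i8* %p, i8* %q, i64 %n)
//   %cmp = icmp eq i32 %res, 0
// and false as soon as any other kind of use is seen.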
224
225static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
226 const Query &Q);
227
228bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
229 bool OrZero, unsigned Depth,
230 AssumptionCache *AC, const Instruction *CxtI,
231 const DominatorTree *DT, bool UseInstrInfo) {
232 return ::isKnownToBeAPowerOfTwo(
233 V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
234}
235
236static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
237
238bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
239 AssumptionCache *AC, const Instruction *CxtI,
240 const DominatorTree *DT, bool UseInstrInfo) {
241 return ::isKnownNonZero(V, Depth,
242 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
243}
244
245bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
246 unsigned Depth, AssumptionCache *AC,
247 const Instruction *CxtI, const DominatorTree *DT,
248 bool UseInstrInfo) {
249 KnownBits Known =
250 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
251 return Known.isNonNegative();
252}
253
254bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
255 AssumptionCache *AC, const Instruction *CxtI,
256 const DominatorTree *DT, bool UseInstrInfo) {
257 if (auto *CI = dyn_cast<ConstantInt>(V))
258 return CI->getValue().isStrictlyPositive();
259
 260 // TODO: We're doing two recursive queries here. We should factor this such
261 // that only a single query is needed.
262 return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
263 isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
264}
265
266bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
267 AssumptionCache *AC, const Instruction *CxtI,
268 const DominatorTree *DT, bool UseInstrInfo) {
269 KnownBits Known =
270 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
271 return Known.isNegative();
272}
273
274static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
275
276bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
277 const DataLayout &DL, AssumptionCache *AC,
278 const Instruction *CxtI, const DominatorTree *DT,
279 bool UseInstrInfo) {
280 return ::isKnownNonEqual(V1, V2,
281 Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
282 UseInstrInfo, /*ORE=*/nullptr));
283}
284
285static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
286 const Query &Q);
287
288bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
289 const DataLayout &DL, unsigned Depth,
290 AssumptionCache *AC, const Instruction *CxtI,
291 const DominatorTree *DT, bool UseInstrInfo) {
292 return ::MaskedValueIsZero(
293 V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
294}
295
296static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
297 const Query &Q);
298
299unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
300 unsigned Depth, AssumptionCache *AC,
301 const Instruction *CxtI,
302 const DominatorTree *DT, bool UseInstrInfo) {
303 return ::ComputeNumSignBits(
304 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
305}
306
307static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
308 bool NSW,
309 KnownBits &KnownOut, KnownBits &Known2,
310 unsigned Depth, const Query &Q) {
311 unsigned BitWidth = KnownOut.getBitWidth();
312
313 // If an initial sequence of bits in the result is not needed, the
314 // corresponding bits in the operands are not needed.
315 KnownBits LHSKnown(BitWidth);
316 computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
317 computeKnownBits(Op1, Known2, Depth + 1, Q);
318
319 KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
320}
321
322static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
323 KnownBits &Known, KnownBits &Known2,
324 unsigned Depth, const Query &Q) {
325 unsigned BitWidth = Known.getBitWidth();
326 computeKnownBits(Op1, Known, Depth + 1, Q);
327 computeKnownBits(Op0, Known2, Depth + 1, Q);
328
329 bool isKnownNegative = false;
330 bool isKnownNonNegative = false;
331 // If the multiplication is known not to overflow, compute the sign bit.
332 if (NSW) {
333 if (Op0 == Op1) {
334 // The product of a number with itself is non-negative.
335 isKnownNonNegative = true;
336 } else {
337 bool isKnownNonNegativeOp1 = Known.isNonNegative();
338 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
339 bool isKnownNegativeOp1 = Known.isNegative();
340 bool isKnownNegativeOp0 = Known2.isNegative();
341 // The product of two numbers with the same sign is non-negative.
342 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
343 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
344 // The product of a negative number and a non-negative number is either
345 // negative or zero.
346 if (!isKnownNonNegative)
347 isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
348 isKnownNonZero(Op0, Depth, Q)) ||
349 (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
350 isKnownNonZero(Op1, Depth, Q));
351 }
352 }
353
 354 assert(!Known.hasConflict() && !Known2.hasConflict());
355 // Compute a conservative estimate for high known-0 bits.
356 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
357 Known2.countMinLeadingZeros(),
358 BitWidth) - BitWidth;
359 LeadZ = std::min(LeadZ, BitWidth);
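// Worked example (illustrative): with BitWidth = 8, if Op1 is known to have at
// least 5 leading zeros (Op1 < 2^3) and Op0 at least 4 (Op0 < 2^4), the product
// is < 2^7, so LeadZ = max(5 + 4, 8) - 8 = 1 high bit is known zero.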
360
361 // The result of the bottom bits of an integer multiply can be
362 // inferred by looking at the bottom bits of both operands and
363 // multiplying them together.
364 // We can infer at least the minimum number of known trailing bits
365 // of both operands. Depending on number of trailing zeros, we can
366 // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
367 // a and b are divisible by m and n respectively.
368 // We then calculate how many of those bits are inferrable and set
369 // the output. For example, the i8 mul:
370 // a = XXXX1100 (12)
371 // b = XXXX1110 (14)
372 // We know the bottom 3 bits are zero since the first can be divided by
373 // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
374 // Applying the multiplication to the trimmed arguments gets:
375 // XX11 (3)
376 // X111 (7)
377 // -------
378 // XX11
379 // XX11
380 // XX11
381 // XX11
382 // -------
383 // XXXXX01
384 // Which allows us to infer the 2 LSBs. Since we're multiplying the result
385 // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
386 // The proof for this can be described as:
387 // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
388 // (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
389 // umin(countTrailingZeros(C2), C6) +
390 // umin(C5 - umin(countTrailingZeros(C1), C5),
391 // C6 - umin(countTrailingZeros(C2), C6)))) - 1)
392 // %aa = shl i8 %a, C5
393 // %bb = shl i8 %b, C6
394 // %aaa = or i8 %aa, C1
395 // %bbb = or i8 %bb, C2
396 // %mul = mul i8 %aaa, %bbb
397 // %mask = and i8 %mul, C7
398 // =>
399 // %mask = i8 ((C1*C2)&C7)
400 // Where C5, C6 describe the known bits of %a, %b
401 // C1, C2 describe the known bottom bits of %a, %b.
402 // C7 describes the mask of the known bits of the result.
403 APInt Bottom0 = Known.One;
404 APInt Bottom1 = Known2.One;
405
406 // How many times we'd be able to divide each argument by 2 (shr by 1).
407 // This gives us the number of trailing zeros on the multiplication result.
408 unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
409 unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
410 unsigned TrailZero0 = Known.countMinTrailingZeros();
411 unsigned TrailZero1 = Known2.countMinTrailingZeros();
412 unsigned TrailZ = TrailZero0 + TrailZero1;
413
414 // Figure out the fewest known-bits operand.
415 unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
416 TrailBitsKnown1 - TrailZero1);
417 unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
418
419 APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
420 Bottom1.getLoBits(TrailBitsKnown1);
421
422 Known.resetAll();
423 Known.Zero.setHighBits(LeadZ);
424 Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
425 Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
426
427 // Only make use of no-wrap flags if we failed to compute the sign bit
428 // directly. This matters if the multiplication always overflows, in
429 // which case we prefer to follow the result of the direct computation,
430 // though as the program is invoking undefined behaviour we can choose
431 // whatever we like here.
432 if (isKnownNonNegative && !Known.isNegative())
433 Known.makeNonNegative();
434 else if (isKnownNegative && !Known.isNonNegative())
435 Known.makeNegative();
436}
437
438void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
439 KnownBits &Known) {
440 unsigned BitWidth = Known.getBitWidth();
441 unsigned NumRanges = Ranges.getNumOperands() / 2;
 442 assert(NumRanges >= 1);
443
444 Known.Zero.setAllBits();
445 Known.One.setAllBits();
446
447 for (unsigned i = 0; i < NumRanges; ++i) {
448 ConstantInt *Lower =
449 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
450 ConstantInt *Upper =
451 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
452 ConstantRange Range(Lower->getValue(), Upper->getValue());
453
454 // The first CommonPrefixBits of all values in Range are equal.
455 unsigned CommonPrefixBits =
456 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
457
458 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
459 Known.One &= Range.getUnsignedMax() & Mask;
460 Known.Zero &= ~Range.getUnsignedMax() & Mask;
461 }
462}
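// Worked example (illustrative): for an i8 load with !range [8, 16),
// UnsignedMax = 15 (0b00001111) and UnsignedMin = 8 (0b00001000), so
// Max ^ Min = 0b00000111 and CommonPrefixBits = 5. Every value in the range
// therefore shares its top five bits: bits 7..4 become known zero and bit 3
// becomes known one.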
463
464static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
465 SmallVector<const Value *, 16> WorkSet(1, I);
466 SmallPtrSet<const Value *, 32> Visited;
467 SmallPtrSet<const Value *, 16> EphValues;
468
469 // The instruction defining an assumption's condition itself is always
470 // considered ephemeral to that assumption (even if it has other
471 // non-ephemeral users). See r246696's test case for an example.
472 if (is_contained(I->operands(), E))
473 return true;
474
475 while (!WorkSet.empty()) {
476 const Value *V = WorkSet.pop_back_val();
477 if (!Visited.insert(V).second)
478 continue;
479
480 // If all uses of this value are ephemeral, then so is this value.
481 if (llvm::all_of(V->users(), [&](const User *U) {
482 return EphValues.count(U);
483 })) {
484 if (V == E)
485 return true;
486
487 if (V == I || isSafeToSpeculativelyExecute(V)) {
488 EphValues.insert(V);
489 if (const User *U = dyn_cast<User>(V))
490 for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
491 J != JE; ++J)
492 WorkSet.push_back(*J);
493 }
494 }
495 }
496
497 return false;
498}
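// For illustration (sketch): given
//   %c = icmp ugt i32 %x, 10
//   call void @llvm.assume(i1 %c)
// %c is ephemeral to the assume call: it exists only to feed the assumption,
// so using the assume to "prove" %c would be circular.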
499
500// Is this an intrinsic that cannot be speculated but also cannot trap?
501bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
502 if (const CallInst *CI = dyn_cast<CallInst>(I))
503 if (Function *F = CI->getCalledFunction())
504 switch (F->getIntrinsicID()) {
505 default: break;
506 // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
507 case Intrinsic::assume:
508 case Intrinsic::sideeffect:
509 case Intrinsic::dbg_declare:
510 case Intrinsic::dbg_value:
511 case Intrinsic::dbg_label:
512 case Intrinsic::invariant_start:
513 case Intrinsic::invariant_end:
514 case Intrinsic::lifetime_start:
515 case Intrinsic::lifetime_end:
516 case Intrinsic::objectsize:
517 case Intrinsic::ptr_annotation:
518 case Intrinsic::var_annotation:
519 return true;
520 }
521
522 return false;
523}
524
525bool llvm::isValidAssumeForContext(const Instruction *Inv,
526 const Instruction *CxtI,
527 const DominatorTree *DT) {
528 // There are two restrictions on the use of an assume:
529 // 1. The assume must dominate the context (or the control flow must
530 // reach the assume whenever it reaches the context).
531 // 2. The context must not be in the assume's set of ephemeral values
532 // (otherwise we will use the assume to prove that the condition
533 // feeding the assume is trivially true, thus causing the removal of
534 // the assume).
535
536 if (DT) {
537 if (DT->dominates(Inv, CxtI))
538 return true;
539 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
540 // We don't have a DT, but this trivially dominates.
541 return true;
542 }
543
544 // With or without a DT, the only remaining case we will check is if the
545 // instructions are in the same BB. Give up if that is not the case.
546 if (Inv->getParent() != CxtI->getParent())
547 return false;
548
549 // If we have a dom tree, then we now know that the assume doesn't dominate
550 // the other instruction. If we don't have a dom tree then we can check if
551 // the assume is first in the BB.
552 if (!DT) {
553 // Search forward from the assume until we reach the context (or the end
554 // of the block); the common case is that the assume will come first.
555 for (auto I = std::next(BasicBlock::const_iterator(Inv)),
556 IE = Inv->getParent()->end(); I != IE; ++I)
557 if (&*I == CxtI)
558 return true;
559 }
560
561 // Don't let an assume affect itself - this would cause the problems
562 // `isEphemeralValueOf` is trying to prevent, and it would also make
563 // the loop below go out of bounds.
564 if (Inv == CxtI)
565 return false;
566
567 // The context comes first, but they're both in the same block. Make sure
568 // there is nothing in between that might interrupt the control flow.
569 for (BasicBlock::const_iterator I =
570 std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
571 I != IE; ++I)
572 if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
573 return false;
574
575 return !isEphemeralValueOf(Inv, CxtI);
576}
577
578static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
579 unsigned Depth, const Query &Q) {
580 // Use of assumptions is context-sensitive. If we don't have a context, we
581 // cannot use them!
582 if (!Q.AC || !Q.CxtI)
583 return;
584
585 unsigned BitWidth = Known.getBitWidth();
586
587 // Note that the patterns below need to be kept in sync with the code
588 // in AssumptionCache::updateAffectedValues.
589
590 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
591 if (!AssumeVH)
592 continue;
593 CallInst *I = cast<CallInst>(AssumeVH);
 594 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
 595 "Got assumption for the wrong function!");
596 if (Q.isExcluded(I))
597 continue;
598
599 // Warning: This loop can end up being somewhat performance sensitive.
 600 // We're running this loop once for each value queried, resulting in a
601 // runtime of ~O(#assumes * #values).
602
 603 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
 604 "must be an assume intrinsic");
605
606 Value *Arg = I->getArgOperand(0);
607
608 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
 609 assert(BitWidth == 1 && "assume operand is not i1?");
610 Known.setAllOnes();
611 return;
612 }
613 if (match(Arg, m_Not(m_Specific(V))) &&
614 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
 615 assert(BitWidth == 1 && "assume operand is not i1?");
616 Known.setAllZero();
617 return;
618 }
619
620 // The remaining tests are all recursive, so bail out if we hit the limit.
621 if (Depth == MaxDepth)
622 continue;
623
624 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
625 if (!Cmp)
626 continue;
627
628 Value *A, *B;
629 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
630
631 CmpInst::Predicate Pred;
632 uint64_t C;
633 switch (Cmp->getPredicate()) {
634 default:
635 break;
636 case ICmpInst::ICMP_EQ:
637 // assume(v = a)
638 if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
639 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
640 KnownBits RHSKnown(BitWidth);
641 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
642 Known.Zero |= RHSKnown.Zero;
643 Known.One |= RHSKnown.One;
644 // assume(v & b = a)
645 } else if (match(Cmp,
646 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
647 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
648 KnownBits RHSKnown(BitWidth);
649 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
650 KnownBits MaskKnown(BitWidth);
651 computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
652
653 // For those bits in the mask that are known to be one, we can propagate
654 // known bits from the RHS to V.
655 Known.Zero |= RHSKnown.Zero & MaskKnown.One;
656 Known.One |= RHSKnown.One & MaskKnown.One;
657 // assume(~(v & b) = a)
658 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
659 m_Value(A))) &&
660 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
661 KnownBits RHSKnown(BitWidth);
662 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
663 KnownBits MaskKnown(BitWidth);
664 computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
665
666 // For those bits in the mask that are known to be one, we can propagate
667 // inverted known bits from the RHS to V.
668 Known.Zero |= RHSKnown.One & MaskKnown.One;
669 Known.One |= RHSKnown.Zero & MaskKnown.One;
670 // assume(v | b = a)
671 } else if (match(Cmp,
672 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
673 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
674 KnownBits RHSKnown(BitWidth);
675 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
676 KnownBits BKnown(BitWidth);
677 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
678
679 // For those bits in B that are known to be zero, we can propagate known
680 // bits from the RHS to V.
681 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
682 Known.One |= RHSKnown.One & BKnown.Zero;
683 // assume(~(v | b) = a)
684 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
685 m_Value(A))) &&
686 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
687 KnownBits RHSKnown(BitWidth);
688 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
689 KnownBits BKnown(BitWidth);
690 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
691
692 // For those bits in B that are known to be zero, we can propagate
693 // inverted known bits from the RHS to V.
694 Known.Zero |= RHSKnown.One & BKnown.Zero;
695 Known.One |= RHSKnown.Zero & BKnown.Zero;
696 // assume(v ^ b = a)
697 } else if (match(Cmp,
698 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
699 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
700 KnownBits RHSKnown(BitWidth);
701 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
702 KnownBits BKnown(BitWidth);
703 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
704
705 // For those bits in B that are known to be zero, we can propagate known
706 // bits from the RHS to V. For those bits in B that are known to be one,
707 // we can propagate inverted known bits from the RHS to V.
708 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
709 Known.One |= RHSKnown.One & BKnown.Zero;
710 Known.Zero |= RHSKnown.One & BKnown.One;
711 Known.One |= RHSKnown.Zero & BKnown.One;
712 // assume(~(v ^ b) = a)
713 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
714 m_Value(A))) &&
715 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
716 KnownBits RHSKnown(BitWidth);
717 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
718 KnownBits BKnown(BitWidth);
719 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
720
721 // For those bits in B that are known to be zero, we can propagate
722 // inverted known bits from the RHS to V. For those bits in B that are
723 // known to be one, we can propagate known bits from the RHS to V.
724 Known.Zero |= RHSKnown.One & BKnown.Zero;
725 Known.One |= RHSKnown.Zero & BKnown.Zero;
726 Known.Zero |= RHSKnown.Zero & BKnown.One;
727 Known.One |= RHSKnown.One & BKnown.One;
728 // assume(v << c = a)
729 } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
730 m_Value(A))) &&
731 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
732 KnownBits RHSKnown(BitWidth);
733 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
734 // For those bits in RHS that are known, we can propagate them to known
735 // bits in V shifted to the right by C.
736 RHSKnown.Zero.lshrInPlace(C);
737 Known.Zero |= RHSKnown.Zero;
738 RHSKnown.One.lshrInPlace(C);
739 Known.One |= RHSKnown.One;
740 // assume(~(v << c) = a)
741 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
742 m_Value(A))) &&
743 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
744 KnownBits RHSKnown(BitWidth);
745 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
746 // For those bits in RHS that are known, we can propagate them inverted
747 // to known bits in V shifted to the right by C.
748 RHSKnown.One.lshrInPlace(C);
749 Known.Zero |= RHSKnown.One;
750 RHSKnown.Zero.lshrInPlace(C);
751 Known.One |= RHSKnown.Zero;
752 // assume(v >> c = a)
753 } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
754 m_Value(A))) &&
755 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
756 KnownBits RHSKnown(BitWidth);
757 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
758 // For those bits in RHS that are known, we can propagate them to known
759 // bits in V shifted to the right by C.
760 Known.Zero |= RHSKnown.Zero << C;
761 Known.One |= RHSKnown.One << C;
762 // assume(~(v >> c) = a)
763 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
764 m_Value(A))) &&
765 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
766 KnownBits RHSKnown(BitWidth);
767 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
768 // For those bits in RHS that are known, we can propagate them inverted
769 // to known bits in V shifted to the right by C.
770 Known.Zero |= RHSKnown.One << C;
771 Known.One |= RHSKnown.Zero << C;
772 }
773 break;
774 case ICmpInst::ICMP_SGE:
775 // assume(v >=_s c) where c is non-negative
776 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
777 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
778 KnownBits RHSKnown(BitWidth);
779 computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
780
781 if (RHSKnown.isNonNegative()) {
782 // We know that the sign bit is zero.
783 Known.makeNonNegative();
784 }
785 }
786 break;
787 case ICmpInst::ICMP_SGT:
788 // assume(v >_s c) where c is at least -1.
789 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
790 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
791 KnownBits RHSKnown(BitWidth);
792 computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
793
794 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
795 // We know that the sign bit is zero.
796 Known.makeNonNegative();
797 }
798 }
799 break;
800 case ICmpInst::ICMP_SLE:
801 // assume(v <=_s c) where c is negative
802 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
803 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
804 KnownBits RHSKnown(BitWidth);
805 computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
806
807 if (RHSKnown.isNegative()) {
808 // We know that the sign bit is one.
809 Known.makeNegative();
810 }
811 }
812 break;
813 case ICmpInst::ICMP_SLT:
814 // assume(v <_s c) where c is non-positive
815 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
816 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
817 KnownBits RHSKnown(BitWidth);
818 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
819
820 if (RHSKnown.isZero() || RHSKnown.isNegative()) {
821 // We know that the sign bit is one.
822 Known.makeNegative();
823 }
824 }
825 break;
826 case ICmpInst::ICMP_ULE:
827 // assume(v <=_u c)
828 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
829 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
830 KnownBits RHSKnown(BitWidth);
831 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
832
833 // Whatever high bits in c are zero are known to be zero.
834 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
835 }
836 break;
837 case ICmpInst::ICMP_ULT:
838 // assume(v <_u c)
839 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
840 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
841 KnownBits RHSKnown(BitWidth);
842 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
843
844 // If the RHS is known zero, then this assumption must be wrong (nothing
845 // is unsigned less than zero). Signal a conflict and get out of here.
846 if (RHSKnown.isZero()) {
847 Known.Zero.setAllBits();
848 Known.One.setAllBits();
849 break;
850 }
851
852 // Whatever high bits in c are zero are known to be zero (if c is a power
853 // of 2, then one more).
854 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
855 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
856 else
857 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
858 }
859 break;
860 }
861 }
862
863 // If assumptions conflict with each other or previous known bits, then we
864 // have a logical fallacy. It's possible that the assumption is not reachable,
865 // so this isn't a real bug. On the other hand, the program may have undefined
866 // behavior, or we might have a bug in the compiler. We can't assert/crash, so
867 // clear out the known bits, try to warn the user, and hope for the best.
868 if (Known.Zero.intersects(Known.One)) {
869 Known.resetAll();
870
871 if (Q.ORE)
872 Q.ORE->emit([&]() {
873 auto *CxtI = const_cast<Instruction *>(Q.CxtI);
874 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
875 CxtI)
876 << "Detected conflicting code assumptions. Program may "
877 "have undefined behavior, or compiler may have "
878 "internal error.";
879 });
880 }
881}
882
883/// Compute known bits from a shift operator, including those with a
884/// non-constant shift amount. Known is the output of this function. Known2 is a
885/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
886/// operator-specific functions that, given the known-zero or known-one bits
887/// respectively, and a shift amount, compute the implied known-zero or
888/// known-one bits of the shift operator's result respectively for that shift
889/// amount. The results from calling KZF and KOF are conservatively combined for
890/// all permitted shift amounts.
891static void computeKnownBitsFromShiftOperator(
892 const Operator *I, KnownBits &Known, KnownBits &Known2,
893 unsigned Depth, const Query &Q,
894 function_ref<APInt(const APInt &, unsigned)> KZF,
895 function_ref<APInt(const APInt &, unsigned)> KOF) {
896 unsigned BitWidth = Known.getBitWidth();
897
898 if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
899 unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
900
901 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
902 Known.Zero = KZF(Known.Zero, ShiftAmt);
903 Known.One = KOF(Known.One, ShiftAmt);
904 // If the known bits conflict, this must be an overflowing left shift, so
905 // the shift result is poison. We can return anything we want. Choose 0 for
906 // the best folding opportunity.
907 if (Known.hasConflict())
908 Known.setAllZero();
909
910 return;
911 }
912
913 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
914
915 // If the shift amount could be greater than or equal to the bit-width of the
916 // LHS, the value could be poison, but bail out because the check below is
917 // expensive. TODO: Should we just carry on?
918 if ((~Known.Zero).uge(BitWidth)) {
919 Known.resetAll();
920 return;
921 }
922
923 // Note: We cannot use Known.Zero.getLimitedValue() here, because if
924 // BitWidth > 64 and any upper bits are known, we'll end up returning the
925 // limit value (which implies all bits are known).
926 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
927 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
928
929 // It would be more-clearly correct to use the two temporaries for this
930 // calculation. Reusing the APInts here to prevent unnecessary allocations.
931 Known.resetAll();
932
933 // If we know the shifter operand is nonzero, we can sometimes infer more
934 // known bits. However this is expensive to compute, so be lazy about it and
935 // only compute it when absolutely necessary.
936 Optional<bool> ShifterOperandIsNonZero;
937
938 // Early exit if we can't constrain any well-defined shift amount.
939 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
940 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
941 ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
942 if (!*ShifterOperandIsNonZero)
943 return;
944 }
945
946 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
947
948 Known.Zero.setAllBits();
949 Known.One.setAllBits();
950 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
951 // Combine the shifted known input bits only for those shift amounts
952 // compatible with its known constraints.
953 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
954 continue;
955 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
956 continue;
957 // If we know the shifter is nonzero, we may be able to infer more known
958 // bits. This check is sunk down as far as possible to avoid the expensive
959 // call to isKnownNonZero if the cheaper checks above fail.
960 if (ShiftAmt == 0) {
961 if (!ShifterOperandIsNonZero.hasValue())
962 ShifterOperandIsNonZero =
963 isKnownNonZero(I->getOperand(1), Depth + 1, Q);
964 if (*ShifterOperandIsNonZero)
965 continue;
966 }
967
968 Known.Zero &= KZF(Known2.Zero, ShiftAmt);
969 Known.One &= KOF(Known2.One, ShiftAmt);
970 }
971
972 // If the known bits conflict, the result is poison. Return a 0 and hope the
973 // caller can further optimize that.
974 if (Known.hasConflict())
975 Known.setAllZero();
976}
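// Illustrative note: the loop above only unions in shift amounts compatible
// with the known bits of operand 1. E.g. if the shift amount is known to have
// its low bit zero, every odd ShiftAmt fails the
// (ShiftAmt & ~ShiftAmtKZ) != ShiftAmt test, so only even shift amounts are
// combined into the conservative result.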
977
978static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
979 unsigned Depth, const Query &Q) {
980 unsigned BitWidth = Known.getBitWidth();
981
982 KnownBits Known2(Known);
983 switch (I->getOpcode()) {
984 default: break;
985 case Instruction::Load:
986 if (MDNode *MD =
987 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
988 computeKnownBitsFromRangeMetadata(*MD, Known);
989 break;
990 case Instruction::And: {
991 // If either the LHS or the RHS are Zero, the result is zero.
992 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
993 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
994
995 // Output known-1 bits are only known if set in both the LHS & RHS.
996 Known.One &= Known2.One;
997 // Output known-0 are known to be clear if zero in either the LHS | RHS.
998 Known.Zero |= Known2.Zero;
999
1000 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1001 // here we handle the more general case of adding any odd number by
1002 // matching the form add(x, add(x, y)) where y is odd.
1003 // TODO: This could be generalized to clearing any bit set in y where the
1004 // following bit is known to be unset in y.
1005 Value *X = nullptr, *Y = nullptr;
1006 if (!Known.Zero[0] && !Known.One[0] &&
1007 match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1008 Known2.resetAll();
1009 computeKnownBits(Y, Known2, Depth + 1, Q);
1010 if (Known2.countMinTrailingOnes() > 0)
1011 Known.Zero.setBit(0);
1012 }
1013 break;
1014 }
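// Illustrative note: adding an odd y always flips bit 0, so x and x + y differ
// in their lowest bit and x & (x + y) has bit 0 clear; x & (x - 1) (clearing
// the lowest set bit) is the common special case with y = -1.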
1015 case Instruction::Or:
1016 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1017 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1018
1019 // Output known-0 bits are only known if clear in both the LHS & RHS.
1020 Known.Zero &= Known2.Zero;
1021 // Output known-1 are known to be set if set in either the LHS | RHS.
1022 Known.One |= Known2.One;
1023 break;
1024 case Instruction::Xor: {
1025 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1026 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1027
1028 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1029 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
1030 // Output known-1 are known to be set if set in only one of the LHS, RHS.
1031 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
1032 Known.Zero = std::move(KnownZeroOut);
1033 break;
1034 }
1035 case Instruction::Mul: {
1036 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1037 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
1038 Known2, Depth, Q);
1039 break;
1040 }
1041 case Instruction::UDiv: {
1042 // For the purposes of computing leading zeros we can conservatively
1043 // treat a udiv as a logical right shift by the power of 2 known to
1044 // be less than the denominator.
1045 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1046 unsigned LeadZ = Known2.countMinLeadingZeros();
1047
1048 Known2.resetAll();
1049 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1050 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1051 if (RHSMaxLeadingZeros != BitWidth)
1052 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1053
1054 Known.Zero.setHighBits(LeadZ);
1055 break;
1056 }
1057 case Instruction::Select: {
1058 const Value *LHS = nullptr, *RHS = nullptr;
1059 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1060 if (SelectPatternResult::isMinOrMax(SPF)) {
1061 computeKnownBits(RHS, Known, Depth + 1, Q);
1062 computeKnownBits(LHS, Known2, Depth + 1, Q);
1063 } else {
1064 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1065 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1066 }
1067
1068 unsigned MaxHighOnes = 0;
1069 unsigned MaxHighZeros = 0;
1070 if (SPF == SPF_SMAX) {
1071 // If both sides are negative, the result is negative.
1072 if (Known.isNegative() && Known2.isNegative())
1073 // We can derive a lower bound on the result by taking the max of the
1074 // leading one bits.
1075 MaxHighOnes =
1076 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1077 // If either side is non-negative, the result is non-negative.
1078 else if (Known.isNonNegative() || Known2.isNonNegative())
1079 MaxHighZeros = 1;
1080 } else if (SPF == SPF_SMIN) {
1081 // If both sides are non-negative, the result is non-negative.
1082 if (Known.isNonNegative() && Known2.isNonNegative())
1083 // We can derive an upper bound on the result by taking the max of the
1084 // leading zero bits.
1085 MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1086 Known2.countMinLeadingZeros());
1087 // If either side is negative, the result is negative.
1088 else if (Known.isNegative() || Known2.isNegative())
1089 MaxHighOnes = 1;
1090 } else if (SPF == SPF_UMAX) {
1091 // We can derive a lower bound on the result by taking the max of the
1092 // leading one bits.
1093 MaxHighOnes =
1094 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1095 } else if (SPF == SPF_UMIN) {
1096 // We can derive an upper bound on the result by taking the max of the
1097 // leading zero bits.
1098 MaxHighZeros =
1099 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1100 } else if (SPF == SPF_ABS) {
1101 // RHS from matchSelectPattern returns the negation part of abs pattern.
1102 // If the negate has an NSW flag we can assume the sign bit of the result
1103 // will be 0 because that makes abs(INT_MIN) undefined.
1104 if (match(RHS, m_Neg(m_Specific(LHS))) &&
1105 Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1106 MaxHighZeros = 1;
1107 }
1108
1109 // Only known if known in both the LHS and RHS.
1110 Known.One &= Known2.One;
1111 Known.Zero &= Known2.Zero;
1112 if (MaxHighOnes > 0)
1113 Known.One.setHighBits(MaxHighOnes);
1114 if (MaxHighZeros > 0)
1115 Known.Zero.setHighBits(MaxHighZeros);
1116 break;
1117 }
1118 case Instruction::FPTrunc:
1119 case Instruction::FPExt:
1120 case Instruction::FPToUI:
1121 case Instruction::FPToSI:
1122 case Instruction::SIToFP:
1123 case Instruction::UIToFP:
1124 break; // Can't work with floating point.
1125 case Instruction::PtrToInt:
1126 case Instruction::IntToPtr:
1127 // Fall through and handle them the same as zext/trunc.
1128 LLVM_FALLTHROUGH;
1129 case Instruction::ZExt:
1130 case Instruction::Trunc: {
1131 Type *SrcTy = I->getOperand(0)->getType();
1132
1133 unsigned SrcBitWidth;
1134 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1135 // which fall through here.
1136 Type *ScalarTy = SrcTy->getScalarType();
1137 SrcBitWidth = ScalarTy->isPointerTy() ?
1138 Q.DL.getIndexTypeSizeInBits(ScalarTy) :
1139 Q.DL.getTypeSizeInBits(ScalarTy);
1140
1141 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1142 Known = Known.zextOrTrunc(SrcBitWidth, false);
1143 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1144 Known = Known.zextOrTrunc(BitWidth, true /* ExtendedBitsAreKnownZero */);
1145 break;
1146 }
1147 case Instruction::BitCast: {
1148 Type *SrcTy = I->getOperand(0)->getType();
1149 if (SrcTy->isIntOrPtrTy() &&
1150 // TODO: For now, not handling conversions like:
1151 // (bitcast i64 %x to <2 x i32>)
1152 !I->getType()->isVectorTy()) {
1153 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1154 break;
1155 }
1156 break;
1157 }
1158 case Instruction::SExt: {
1159 // Compute the bits in the result that are not present in the input.
1160 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1161
1162 Known = Known.trunc(SrcBitWidth);
1163 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1164 // If the sign bit of the input is known set or clear, then we know the
1165 // top bits of the result.
1166 Known = Known.sext(BitWidth);
1167 break;
1168 }
1169 case Instruction::Shl: {
1170 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
1171 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1172 auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1173 APInt KZResult = KnownZero << ShiftAmt;
1174 KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1175 // If this shift has "nsw" keyword, then the result is either a poison
1176 // value or has the same sign bit as the first operand.
1177 if (NSW && KnownZero.isSignBitSet())
1178 KZResult.setSignBit();
1179 return KZResult;
1180 };
1181
1182 auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1183 APInt KOResult = KnownOne << ShiftAmt;
1184 if (NSW && KnownOne.isSignBitSet())
1185 KOResult.setSignBit();
1186 return KOResult;
1187 };
1188
1189 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1190 break;
1191 }
1192 case Instruction::LShr: {
1193 // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1194 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1195 APInt KZResult = KnownZero.lshr(ShiftAmt);
1196 // High bits known zero.
1197 KZResult.setHighBits(ShiftAmt);
1198 return KZResult;
1199 };
1200
1201 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1202 return KnownOne.lshr(ShiftAmt);
1203 };
1204
1205 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1206 break;
1207 }
1208 case Instruction::AShr: {
1209 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1210 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1211 return KnownZero.ashr(ShiftAmt);
1212 };
1213
1214 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1215 return KnownOne.ashr(ShiftAmt);
1216 };
1217
1218 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1219 break;
1220 }
1221 case Instruction::Sub: {
1222 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1223 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1224 Known, Known2, Depth, Q);
1225 break;
1226 }
1227 case Instruction::Add: {
1228 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1229 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1230 Known, Known2, Depth, Q);
1231 break;
1232 }
1233 case Instruction::SRem:
1234 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1235 APInt RA = Rem->getValue().abs();
1236 if (RA.isPowerOf2()) {
1237 APInt LowBits = RA - 1;
1238 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1239
1240 // The low bits of the first operand are unchanged by the srem.
1241 Known.Zero = Known2.Zero & LowBits;
1242 Known.One = Known2.One & LowBits;
1243
1244 // If the first operand is non-negative or has all low bits zero, then
1245 // the upper bits are all zero.
1246 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1247 Known.Zero |= ~LowBits;
1248
1249 // If the first operand is negative and not all low bits are zero, then
1250 // the upper bits are all one.
1251 if (Known2.isNegative() && LowBits.intersects(Known2.One))
1252 Known.One |= ~LowBits;
1253
1254 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1255 break;
1256 }
1257 }
1258
1259 // The sign bit is the LHS's sign bit, except when the result of the
1260 // remainder is zero.
1261 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1262 // If it's known zero, our sign bit is also zero.
1263 if (Known2.isNonNegative())
1264 Known.makeNonNegative();
1265
1266 break;
1267 case Instruction::URem: {
1268 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1269 const APInt &RA = Rem->getValue();
1270 if (RA.isPowerOf2()) {
1271 APInt LowBits = (RA - 1);
1272 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1273 Known.Zero |= ~LowBits;
1274 Known.One &= LowBits;
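        // Worked example: (urem X, 16) keeps only the low four bits of X, so
        // bits 4 and above are known zero and any known ones of X below bit 4
        // carry over.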
1275 break;
1276 }
1277 }
1278
1279 // Since the result is less than or equal to either operand, any leading
1280 // zero bits in either operand must also exist in the result.
1281 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1282 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1283
1284 unsigned Leaders =
1285 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1286 Known.resetAll();
1287 Known.Zero.setHighBits(Leaders);
1288 break;
1289 }
1290
1291 case Instruction::Alloca: {
1292 const AllocaInst *AI = cast<AllocaInst>(I);
1293 unsigned Align = AI->getAlignment();
1294 if (Align == 0)
1295 Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1296
1297 if (Align > 0)
1298 Known.Zero.setLowBits(countTrailingZeros(Align));
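    // Worked example: an alloca with 16-byte alignment has
    // countTrailingZeros(16) = 4 known-zero low bits in the resulting pointer.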
1299 break;
1300 }
1301 case Instruction::GetElementPtr: {
1302 // Analyze all of the subscripts of this getelementptr instruction
1303 // to determine if we can prove known low zero bits.
1304 KnownBits LocalKnown(BitWidth);
1305 computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1306 unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1307
1308 gep_type_iterator GTI = gep_type_begin(I);
1309 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1310 Value *Index = I->getOperand(i);
1311 if (StructType *STy = GTI.getStructTypeOrNull()) {
1312 // Handle struct member offset arithmetic.
1313
1314         // Handle the case where the index is a vector zeroinitializer.
1315 Constant *CIndex = cast<Constant>(Index);
1316 if (CIndex->isZeroValue())
1317 continue;
1318
1319 if (CIndex->getType()->isVectorTy())
1320 Index = CIndex->getSplatValue();
1321
1322 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1323 const StructLayout *SL = Q.DL.getStructLayout(STy);
1324 uint64_t Offset = SL->getElementOffset(Idx);
1325 TrailZ = std::min<unsigned>(TrailZ,
1326 countTrailingZeros(Offset));
1327 } else {
1328 // Handle array index arithmetic.
1329 Type *IndexedTy = GTI.getIndexedType();
1330 if (!IndexedTy->isSized()) {
1331 TrailZ = 0;
1332 break;
1333 }
1334 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1335 uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1336 LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1337 computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1338 TrailZ = std::min(TrailZ,
1339 unsigned(countTrailingZeros(TypeSize) +
1340 LocalKnown.countMinTrailingZeros()));
1341 }
1342 }
1343
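    // Worked example: indexing an array of i32 with an unknown index adds a
    // multiple of 4, so a base pointer with at least two trailing zero bits
    // still yields two known-zero low bits in the result.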
1344 Known.Zero.setLowBits(TrailZ);
1345 break;
1346 }
1347 case Instruction::PHI: {
1348 const PHINode *P = cast<PHINode>(I);
1349 // Handle the case of a simple two-predecessor recurrence PHI.
1350 // There's a lot more that could theoretically be done here, but
1351 // this is sufficient to catch some interesting cases.
1352 if (P->getNumIncomingValues() == 2) {
1353 for (unsigned i = 0; i != 2; ++i) {
1354 Value *L = P->getIncomingValue(i);
1355 Value *R = P->getIncomingValue(!i);
1356 Operator *LU = dyn_cast<Operator>(L);
1357 if (!LU)
1358 continue;
1359 unsigned Opcode = LU->getOpcode();
1360 // Check for operations that have the property that if
1361 // both their operands have low zero bits, the result
1362 // will have low zero bits.
1363 if (Opcode == Instruction::Add ||
1364 Opcode == Instruction::Sub ||
1365 Opcode == Instruction::And ||
1366 Opcode == Instruction::Or ||
1367 Opcode == Instruction::Mul) {
1368 Value *LL = LU->getOperand(0);
1369 Value *LR = LU->getOperand(1);
1370 // Find a recurrence.
1371 if (LL == I)
1372 L = LR;
1373 else if (LR == I)
1374 L = LL;
1375 else
1376 continue; // Check for recurrence with L and R flipped.
1377 // Ok, we have a PHI of the form L op= R. Check for low
1378 // zero bits.
1379 computeKnownBits(R, Known2, Depth + 1, Q);
1380
1381 // We need to take the minimum number of known bits
1382 KnownBits Known3(Known);
1383 computeKnownBits(L, Known3, Depth + 1, Q);
1384
1385 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1386 Known3.countMinTrailingZeros()));
1387
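          // Worked example: for %i = phi [0, %pre], [%i.next, %loop] with
          // %i.next = add i32 %i, 4, the start (0) and the step (4) each have
          // at least two trailing zeros, so %i is known to be a multiple of 4.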
1388 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1389 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1390 // If initial value of recurrence is nonnegative, and we are adding
1391 // a nonnegative number with nsw, the result can only be nonnegative
1392 // or poison value regardless of the number of times we execute the
1393 // add in phi recurrence. If initial value is negative and we are
1394 // adding a negative number with nsw, the result can only be
1395 // negative or poison value. Similar arguments apply to sub and mul.
1396 //
1397 // (add non-negative, non-negative) --> non-negative
1398 // (add negative, negative) --> negative
1399 if (Opcode == Instruction::Add) {
1400 if (Known2.isNonNegative() && Known3.isNonNegative())
1401 Known.makeNonNegative();
1402 else if (Known2.isNegative() && Known3.isNegative())
1403 Known.makeNegative();
1404 }
1405
1406 // (sub nsw non-negative, negative) --> non-negative
1407 // (sub nsw negative, non-negative) --> negative
1408 else if (Opcode == Instruction::Sub && LL == I) {
1409 if (Known2.isNonNegative() && Known3.isNegative())
1410 Known.makeNonNegative();
1411 else if (Known2.isNegative() && Known3.isNonNegative())
1412 Known.makeNegative();
1413 }
1414
1415 // (mul nsw non-negative, non-negative) --> non-negative
1416 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1417 Known3.isNonNegative())
1418 Known.makeNonNegative();
1419 }
1420
1421 break;
1422 }
1423 }
1424 }
1425
1426 // Unreachable blocks may have zero-operand PHI nodes.
1427 if (P->getNumIncomingValues() == 0)
1428 break;
1429
1430    // Otherwise intersect the known bit sets of the incoming values,
1431    // taking conservative care to avoid excessive recursion.
1432 if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1433      // Skip if every incoming value refers back to the PHI itself.
1434 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1435 break;
1436
1437 Known.Zero.setAllBits();
1438 Known.One.setAllBits();
1439 for (Value *IncValue : P->incoming_values()) {
1440 // Skip direct self references.
1441 if (IncValue == P) continue;
1442
1443 Known2 = KnownBits(BitWidth);
1444 // Recurse, but cap the recursion to one level, because we don't
1445 // want to waste time spinning around in loops.
1446 computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
1447 Known.Zero &= Known2.Zero;
1448 Known.One &= Known2.One;
1449 // If all bits have been ruled out, there's no need to check
1450 // more operands.
1451 if (!Known.Zero && !Known.One)
1452 break;
1453 }
1454 }
1455 break;
1456 }
1457 case Instruction::Call:
1458 case Instruction::Invoke:
1459 // If range metadata is attached to this call, set known bits from that,
1460 // and then intersect with known bits based on other properties of the
1461 // function.
1462 if (MDNode *MD =
1463 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1464 computeKnownBitsFromRangeMetadata(*MD, Known);
1465 if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1466 computeKnownBits(RV, Known2, Depth + 1, Q);
1467 Known.Zero |= Known2.Zero;
1468 Known.One |= Known2.One;
1469 }
1470 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1471 switch (II->getIntrinsicID()) {
1472 default: break;
1473 case Intrinsic::bitreverse:
1474 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1475 Known.Zero |= Known2.Zero.reverseBits();
1476 Known.One |= Known2.One.reverseBits();
1477 break;
1478 case Intrinsic::bswap:
1479 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1480 Known.Zero |= Known2.Zero.byteSwap();
1481 Known.One |= Known2.One.byteSwap();
1482 break;
1483 case Intrinsic::ctlz: {
1484 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1485 // If we have a known 1, its position is our upper bound.
1486 unsigned PossibleLZ = Known2.One.countLeadingZeros();
1487        // If this call is undefined for 0, the result is less than BitWidth.
1488 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1489 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1490 unsigned LowBits = Log2_32(PossibleLZ)+1;
1491 Known.Zero.setBitsFrom(LowBits);
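        // Worked example: if bit 28 of an i32 operand is known one, ctlz
        // returns at most 3; Log2_32(3) + 1 = 2, so every result bit from
        // bit 2 upward is known zero.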
1492 break;
1493 }
1494 case Intrinsic::cttz: {
1495 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1496 // If we have a known 1, its position is our upper bound.
1497 unsigned PossibleTZ = Known2.One.countTrailingZeros();
1499        // If this call is undefined for 0, the result is less than BitWidth.
1499 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1500 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1501 unsigned LowBits = Log2_32(PossibleTZ)+1;
1502 Known.Zero.setBitsFrom(LowBits);
1503 break;
1504 }
1505 case Intrinsic::ctpop: {
1506 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1507 // We can bound the space the count needs. Also, bits known to be zero
1508 // can't contribute to the population.
1509 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1510 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1511 Known.Zero.setBitsFrom(LowBits);
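        // Worked example: if at most 5 bits of the operand can be set, the
        // count is at most 5 and fits in Log2_32(5) + 1 = 3 bits, so bits 3
        // and above of the result are known zero.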
1512 // TODO: we could bound KnownOne using the lower bound on the number
1513 // of bits which might be set provided by popcnt KnownOne2.
1514 break;
1515 }
1516 case Intrinsic::fshr:
1517 case Intrinsic::fshl: {
1518 const APInt *SA;
1519 if (!match(I->getOperand(2), m_APInt(SA)))
1520 break;
1521
1522 // Normalize to funnel shift left.
1523 uint64_t ShiftAmt = SA->urem(BitWidth);
1524 if (II->getIntrinsicID() == Intrinsic::fshr)
1525 ShiftAmt = BitWidth - ShiftAmt;
1526
1527 KnownBits Known3(Known);
1528 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1529 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1530
1531 Known.Zero =
1532 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1533 Known.One =
1534 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
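        // Worked example: fshl(X, Y, 8) on i32 takes the top 24 result bits
        // from the low 24 bits of X and the bottom 8 bits from the high 8
        // bits of Y, which is exactly the merge of the two shifted Known sets
        // above.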
1535 break;
1536 }
1537 case Intrinsic::uadd_sat:
1538 case Intrinsic::usub_sat: {
1539 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1540 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1541 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1542
1543 // Add: Leading ones of either operand are preserved.
1544 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1545 // as leading zeros in the result.
1546 unsigned LeadingKnown;
1547 if (IsAdd)
1548 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1549 Known2.countMinLeadingOnes());
1550 else
1551 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1552 Known2.countMinLeadingOnes());
1553
1554 Known = KnownBits::computeForAddSub(
1555 IsAdd, /* NSW */ false, Known, Known2);
1556
1557 // We select between the operation result and all-ones/zero
1558 // respectively, so we can preserve known ones/zeros.
1559 if (IsAdd) {
1560 Known.One.setHighBits(LeadingKnown);
1561 Known.Zero.clearAllBits();
1562 } else {
1563 Known.Zero.setHighBits(LeadingKnown);
1564 Known.One.clearAllBits();
1565 }
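        // Worked example: if one operand of uadd_sat has three known leading
        // ones, so does the result -- the unsaturated sum is at least that
        // operand, and saturation only replaces it with all-ones.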
1566 break;
1567 }
1568 case Intrinsic::x86_sse42_crc32_64_64:
1569 Known.Zero.setBitsFrom(32);
1570 break;
1571 }
1572 }
1573 break;
1574 case Instruction::ExtractElement:
1575 // Look through extract element. At the moment we keep this simple and skip
1576 // tracking the specific element. But at least we might find information
1577 // valid for all elements of the vector (for example if vector is sign
1578 // extended, shifted, etc).
1579 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1580 break;
1581 case Instruction::ExtractValue:
1582 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1583 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1584 if (EVI->getNumIndices() != 1) break;
1585 if (EVI->getIndices()[0] == 0) {
1586 switch (II->getIntrinsicID()) {
1587 default: break;
1588 case Intrinsic::uadd_with_overflow:
1589 case Intrinsic::sadd_with_overflow:
1590 computeKnownBitsAddSub(true, II->getArgOperand(0),
1591 II->getArgOperand(1), false, Known, Known2,
1592 Depth, Q);
1593 break;
1594 case Intrinsic::usub_with_overflow:
1595 case Intrinsic::ssub_with_overflow:
1596 computeKnownBitsAddSub(false, II->getArgOperand(0),
1597 II->getArgOperand(1), false, Known, Known2,
1598 Depth, Q);
1599 break;
1600 case Intrinsic::umul_with_overflow:
1601 case Intrinsic::smul_with_overflow:
1602 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1603 Known, Known2, Depth, Q);
1604 break;
1605 }
1606 }
1607 }
1608 }
1609}
1610
1611/// Determine which bits of V are known to be either zero or one and return
1612/// them.
1613KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1614 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1615 computeKnownBits(V, Known, Depth, Q);
1616 return Known;
1617}
1618
1619/// Determine which bits of V are known to be either zero or one and return
1620/// them in the Known bit set.
1621///
1622/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1623/// we cannot optimize based on the assumption that it is zero without changing
1624/// it to be an explicit zero. If we don't change it to zero, other code could
1625 /// be optimized based on the contradictory assumption that it is non-zero.
1626/// Because instcombine aggressively folds operations with undef args anyway,
1627/// this won't lose us code quality.
1628///
1629/// This function is defined on values with integer type, values with pointer
1630/// type, and vectors of integers. In the case
1631 /// where V is a vector, the known zero and known one values are the
1632/// same width as the vector element, and the bit is set only if it is true
1633/// for all of the elements in the vector.
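/// For example, a <2 x i8> splat constant of 12 yields Known.One =
/// 0000'1100 and Known.Zero = 1111'0011, since every element agrees on
/// every bit.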
1634void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1635 const Query &Q) {
1636   assert(V && "No Value?");
1637   assert(Depth <= MaxDepth && "Limit Search Depth");
1638 unsigned BitWidth = Known.getBitWidth();
1639
1640   assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1641           V->getType()->isPtrOrPtrVectorTy()) &&
1642          "Not integer or pointer type!");
1643
1644 Type *ScalarTy = V->getType()->getScalarType();
1645 unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1646 Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1647   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1648 (void)BitWidth;
1649 (void)ExpectedWidth;
1650
1651 const APInt *C;
1652 if (match(V, m_APInt(C))) {
1653 // We know all of the bits for a scalar constant or a splat vector constant!
1654 Known.One = *C;
1655 Known.Zero = ~Known.One;
1656 return;
1657 }
1658 // Null and aggregate-zero are all-zeros.
1659 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1660 Known.setAllZero();
1661 return;
1662 }
1663 // Handle a constant vector by taking the intersection of the known bits of
1664 // each element.
1665 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1666 // We know that CDS must be a vector of integers. Take the intersection of
1667 // each element.
1668 Known.Zero.setAllBits(); Known.One.setAllBits();
1669 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1670 APInt Elt = CDS->getElementAsAPInt(i);
1671 Known.Zero &= ~Elt;
1672 Known.One &= Elt;
1673 }
1674 return;
1675 }
1676
1677 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1678 // We know that CV must be a vector of integers. Take the intersection of
1679 // each element.
1680 Known.Zero.setAllBits(); Known.One.setAllBits();
1681 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1682 Constant *Element = CV->getAggregateElement(i);
1683 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1684 if (!ElementCI) {
1685 Known.resetAll();
1686 return;
1687 }
1688 const APInt &Elt = ElementCI->getValue();
1689 Known.Zero &= ~Elt;
1690 Known.One &= Elt;
1691 }
1692 return;
1693 }
1694
1695 // Start out not knowing anything.
1696 Known.resetAll();
1697
1698 // We can't imply anything about undefs.
1699 if (isa<UndefValue>(V))
1700 return;
1701
1702 // There's no point in looking through other users of ConstantData for
1703 // assumptions. Confirm that we've handled them all.
1704   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1705
1706 // Limit search depth.
1707 // All recursive calls that increase depth must come after this.
1708 if (Depth == MaxDepth)
1709 return;
1710
1711 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1712 // the bits of its aliasee.
1713 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1714 if (!GA->isInterposable())
1715 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1716 return;
1717 }
1718
1719 if (const Operator *I = dyn_cast<Operator>(V))
1720 computeKnownBitsFromOperator(I, Known, Depth, Q);
1721
1722 // Aligned pointers have trailing zeros - refine Known.Zero set
1723 if (V->getType()->isPointerTy()) {
1724 const MaybeAlign Align = V->getPointerAlignment(Q.DL);
1725 if (Align)
1726 Known.Zero.setLowBits(countTrailingZeros(Align->value()));
1727 }
1728
1729 // computeKnownBitsFromAssume strictly refines Known.
1730 // Therefore, we run them after computeKnownBitsFromOperator.
1731
1732 // Check whether a nearby assume intrinsic can determine some known bits.
1733 computeKnownBitsFromAssume(V, Known, Depth, Q);
1734
1735   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1736}
1737
1738/// Return true if the given value is known to have exactly one
1739/// bit set when defined. For vectors return true if every element is known to
1740/// be a power of two when defined. Supports values with integer or pointer
1741/// types and vectors of integers.
1742bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1743 const Query &Q) {
1744   assert(Depth <= MaxDepth && "Limit Search Depth");
1745
1746 // Attempt to match against constants.
1747 if (OrZero && match(V, m_Power2OrZero()))
1748 return true;
1749 if (match(V, m_Power2()))
1750 return true;
1751
1752 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1753 // it is shifted off the end then the result is undefined.
1754 if (match(V, m_Shl(m_One(), m_Value())))
1755 return true;
1756
1757 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1758 // the bottom. If it is shifted off the bottom then the result is undefined.
1759 if (match(V, m_LShr(m_SignMask(), m_Value())))
1760 return true;
1761
1762 // The remaining tests are all recursive, so bail out if we hit the limit.
1763 if (Depth++ == MaxDepth)
1764 return false;
1765
1766 Value *X = nullptr, *Y = nullptr;
1767 // A shift left or a logical shift right of a power of two is a power of two
1768 // or zero.
1769 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1770 match(V, m_LShr(m_Value(X), m_Value()))))
1771 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1772
1773 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1774 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1775
1776 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1777 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1778 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1779
1780 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1781 // A power of two and'd with anything is a power of two or zero.
1782 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1783 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1784 return true;
1785 // X & (-X) is always a power of two or zero.
1786 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1787 return true;
1788 return false;
1789 }
1790
1791 // Adding a power-of-two or zero to the same power-of-two or zero yields
1792 // either the original power-of-two, a larger power-of-two or zero.
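  // For example, if Y is known to be 8 or 0 and X's set bits are a subset of
  // Y's, then X + Y can only be 16, 8, or 0.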
1793 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1794 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1795 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1796 Q.IIQ.hasNoSignedWrap(VOBO)) {
1797 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1798 match(X, m_And(m_Value(), m_Specific(Y))))
1799 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1800 return true;
1801 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1802 match(Y, m_And(m_Value(), m_Specific(X))))
1803 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1804 return true;
1805
1806 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1807 KnownBits LHSBits(BitWidth);
1808 computeKnownBits(X, LHSBits, Depth, Q);
1809
1810 KnownBits RHSBits(BitWidth);
1811 computeKnownBits(Y, RHSBits, Depth, Q);
1812 // If i8 V is a power of two or zero:
1813 // ZeroBits: 1 1 1 0 1 1 1 1
1814 // ~ZeroBits: 0 0 0 1 0 0 0 0
1815 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1816 // If OrZero isn't set, we cannot give back a zero result.
1817 // Make sure either the LHS or RHS has a bit set.
1818 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1819 return true;
1820 }
1821 }
1822
1823 // An exact divide or right shift can only shift off zero bits, so the result
1824 // is a power of two only if the first operand is a power of two and not
1825 // copying a sign bit (sdiv int_min, 2).
1826 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1827 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1828 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1829 Depth, Q);
1830 }
1831
1832 return false;
1833}
1834
1835/// Test whether a GEP's result is known to be non-null.
1836///
1837/// Uses properties inherent in a GEP to try to determine whether it is known
1838/// to be non-null.
1839///
1840/// Currently this routine does not support vector GEPs.
1841static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1842 const Query &Q) {
1843 const Function *F = nullptr;
1844 if (const Instruction *I = dyn_cast<Instruction>(GEP))
1845 F = I->getFunction();
1846
1847 if (!GEP->isInBounds() ||
1848 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
1849 return false;
1850
1851 // FIXME: Support vector-GEPs.
1852   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1853
1854 // If the base pointer is non-null, we cannot walk to a null address with an
1855 // inbounds GEP in address space zero.
1856 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1857 return true;
1858
1859 // Walk the GEP operands and see if any operand introduces a non-zero offset.
1860 // If so, then the GEP cannot produce a null pointer, as doing so would
1861 // inherently violate the inbounds contract within address space zero.
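  // For example, getelementptr inbounds i32, i32* %p, i64 1 in address space
  // zero adds a non-zero offset, so it cannot produce null without violating
  // the inbounds contract.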
1862 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1863 GTI != GTE; ++GTI) {
1864 // Struct types are easy -- they must always be indexed by a constant.
1865 if (StructType *STy = GTI.getStructTypeOrNull()) {
1866 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1867 unsigned ElementIdx = OpC->getZExtValue();
1868 const StructLayout *SL = Q.DL.getStructLayout(STy);
1869 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1870 if (ElementOffset > 0)
1871 return true;
1872 continue;
1873 }
1874
1875 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1876 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1877 continue;
1878
1879 // Fast path the constant operand case both for efficiency and so we don't
1880 // increment Depth when just zipping down an all-constant GEP.
1881 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1882 if (!OpC->isZero())
1883 return true;
1884 continue;
1885 }
1886
1887 // We post-increment Depth here because while isKnownNonZero increments it
1888 // as well, when we pop back up that increment won't persist. We don't want
1889 // to recurse 10k times just because we have 10k GEP operands. We don't
1890 // bail completely out because we want to handle constant GEPs regardless
1891 // of depth.
1892 if (Depth++ >= MaxDepth)
1893 continue;
1894
1895 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1896 return true;
1897 }
1898
1899 return false;
1900}
1901
1902static bool isKnownNonNullFromDominatingCondition(const Value *V,
1903 const Instruction *CtxI,
1904 const DominatorTree *DT) {
1905 if (isa<Constant>(V))
1906 return false;
1907
1908 if (!CtxI || !DT)
1909 return false;
1910
1911 unsigned NumUsesExplored = 0;
1912 for (auto *U : V->users()) {
1913 // Avoid massive lists
1914 if (NumUsesExplored >= DomConditionsMaxUses)
1915 break;
1916 NumUsesExplored++;
1917
1918 // If the value is used as an argument to a call or invoke, then argument
1919 // attributes may provide an answer about null-ness.
1920 if (auto CS = ImmutableCallSite(U))
1921 if (auto *CalledFunc = CS.getCalledFunction())
1922 for (const Argument &Arg : CalledFunc->args())
1923 if (CS.getArgOperand(Arg.getArgNo()) == V &&
1924 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1925 return true;
1926
1927 // Consider only compare instructions uniquely controlling a branch
1928 CmpInst::Predicate Pred;
1929 if (!match(const_cast<User *>(U),
1930 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1931 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1932 continue;
1933
1934 SmallVector<const User *, 4> WorkList;
1935 SmallPtrSet<const User *, 4> Visited;
1936 for (auto *CmpU : U->users()) {
1937       assert(WorkList.empty() && "Should be!");
1938 if (Visited.insert(CmpU).second)
1939 WorkList.push_back(CmpU);
1940
1941 while (!WorkList.empty()) {
1942 auto *Curr = WorkList.pop_back_val();
1943
1944 // If a user is an AND, add all its users to the work list. We only
1945         // propagate the "pred != null" condition through an AND because it
1946         // is only correct to assume that all of its conditions hold on the
             // true branch.
1947 // TODO: Support similar logic of OR and EQ predicate?
1948 if (Pred == ICmpInst::ICMP_NE)
1949 if (auto *BO = dyn_cast<BinaryOperator>(Curr))
1950 if (BO->getOpcode() == Instruction::And) {
1951 for (auto *BOU : BO->users())
1952 if (Visited.insert(BOU).second)
1953 WorkList.push_back(BOU);
1954 continue;
1955 }
1956
1957 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
1958           assert(BI->isConditional() && "uses a comparison!");
1959
1960 BasicBlock *NonNullSuccessor =
1961 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1962 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1963 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1964 return true;
1965 } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
1966 DT->dominates(cast<Instruction>(Curr), CtxI)) {
1967 return true;
1968 }
1969 }
1970 }
1971 }
1972
1973 return false;
1974}
1975
1976/// Does the 'Range' metadata (which must be a valid MD_range operand list)
1977 /// ensure that the value it's attached to is never equal to Value?
1978 /// 'RangeType' is the type of the value described by the range.
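/// For example, a range pair (1, 256) describes the half-open interval
/// [1, 256), which excludes zero, so a value carrying that metadata is
/// known non-zero.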
1979static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1980 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1981   assert(NumRanges >= 1);
1982 for (unsigned i = 0; i < NumRanges; ++i) {
1983 ConstantInt *Lower =
1984 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1985 ConstantInt *Upper =
1986 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1987 ConstantRange Range(Lower->getValue(), Upper->getValue());
1988 if (Range.contains(Value))
1989 return false;
1990 }
1991 return true;
1992}
1993
1994/// Return true if the given value is known to be non-zero when defined. For
1995/// vectors, return true if every element is known to be non-zero when
1996/// defined. For pointers, if the context instruction and dominator tree are
1997/// specified, perform context-sensitive analysis and return true if the
1998/// pointer couldn't possibly be null at the specified instruction.
1999/// Supports values with integer or pointer type and vectors of integers.
2000bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
2001 if (auto *C = dyn_cast<Constant>(V)) {
2002 if (C->isNullValue())
2003 return false;
2004 if (isa<ConstantInt>(C))
2005 // Must be non-zero due to null test above.
2006 return true;
2007
2008 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2009 // See the comment for IntToPtr/PtrToInt instructions below.
2010 if (CE->getOpcode() == Instruction::IntToPtr ||
2011 CE->getOpcode() == Instruction::PtrToInt)
2012 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
2013 Q.DL.getTypeSizeInBits(CE->getType()))
2014 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2015 }
2016
2017 // For constant vectors, check that all elements are undefined or known
2018 // non-zero to determine that the whole vector is known non-zero.
2019 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
2020 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2021 Constant *Elt = C->getAggregateElement(i);
2022 if (!Elt || Elt->isNullValue())
2023 return false;
2024 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2025 return false;
2026 }
2027 return true;
2028 }
2029
2030 // A global variable in address space 0 is non null unless extern weak
2031 // or an absolute symbol reference. Other address spaces may have null as a
2032 // valid address for a global, so we can't assume anything.
2033 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2034 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2035 GV->getType()->getAddressSpace() == 0)
2036 return true;
2037 } else
2038 return false;
2039 }
2040
2041 if (auto *I = dyn_cast<Instruction>(V)) {
2042 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2043 // If the possible ranges don't contain zero, then the value is
2044 // definitely non-zero.
2045 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2046 const APInt ZeroValue(Ty->getBitWidth(), 0);
2047 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2048 return true;
2049 }
2050 }
2051 }
2052
2053 // Some of the tests below are recursive, so bail out if we hit the limit.
2054 if (Depth++ >= MaxDepth)
2055 return false;
2056
2057 // Check for pointer simplifications.
2058 if (V->getType()->isPointerTy()) {
2059 // Alloca never returns null, malloc might.
2060 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2061 return true;
2062
2063 // A byval, inalloca, or nonnull argument is never null.
2064 if (const Argument *A = dyn_cast<Argument>(V))
2065 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
2066 return true;
2067
2068 // A Load tagged with nonnull metadata is never null.
2069 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2070 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2071 return true;
2072
2073 if (const auto *Call = dyn_cast<CallBase>(V)) {
2074 if (Call->isReturnNonNull())
2075 return true;
2076 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2077 return isKnownNonZero(RP, Depth, Q);
2078 }
2079 }
2080
2081 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2082 return true;
2083
2084 // Check for recursive pointer simplifications.
2085 if (V->getType()->isPointerTy()) {
2086 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2087 // do not alter the value, or at least not the nullness property of the
2088 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2089 //
2090 // Note that we have to take special care to avoid looking through
2091 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2092 // as casts that can alter the value, e.g., AddrSpaceCasts.
2093 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2094 if (isGEPKnownNonNull(GEP, Depth, Q))
2095 return true;
2096
2097 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2098 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2099
2100 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2101 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2102 Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2103 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2104 }
2105
2106 // Similar to int2ptr above, we can look through ptr2int here if the cast
2107 // is a no-op or an extend and not a truncate.
2108 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2109 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2110 Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2111 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2112
2113 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2114
2115 // X | Y != 0 if X != 0 or Y != 0.
2116 Value *X = nullptr, *Y = nullptr;
2117 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2118 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
2119
2120 // ext X != 0 if X != 0.
2121 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2122 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2123
2124 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2125 // if the lowest bit is shifted off the end.
2126 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2127 // shl nuw can't remove any non-zero bits.
2128 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2129 if (Q.IIQ.hasNoUnsignedWrap(BO))
2130 return isKnownNonZero(X, Depth, Q);
2131
2132 KnownBits Known(BitWidth);
2133 computeKnownBits(X, Known, Depth, Q);
2134 if (Known.One[0])
2135 return true;
2136 }
2137 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2138 // defined if the sign bit is shifted off the end.
2139 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2140 // shr exact can only shift out zero bits.
2141 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2142 if (BO->isExact())
2143 return isKnownNonZero(X, Depth, Q);
2144
2145 KnownBits Known = computeKnownBits(X, Depth, Q);
2146 if (Known.isNegative())
2147 return true;
2148
2149 // If the shifter operand is a constant, and all of the bits shifted
2150 // out are known to be zero, and X is known non-zero then at least one
2151 // non-zero bit must remain.
2152 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2153 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2154 // Is there a known one in the portion not shifted out?
2155 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2156 return true;
2157 // Are all the bits to be shifted out known zero?
2158 if (Known.countMinTrailingZeros() >= ShiftVal)
2159 return isKnownNonZero(X, Depth, Q);
2160 }
2161 }
2162 // div exact can only produce a zero if the dividend is zero.
2163 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2164 return isKnownNonZero(X, Depth, Q);
2165 }
2166 // X + Y.
2167 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2168 KnownBits XKnown = computeKnownBits(X, Depth, Q);
2169 KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2170
2171 // If X and Y are both non-negative (as signed values) then their sum is not
2172 // zero unless both X and Y are zero.
2173 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2174 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2175 return true;
2176
2177 // If X and Y are both negative (as signed values) then their sum is not
2178 // zero unless both X and Y equal INT_MIN.
2179 if (XKnown.isNegative() && YKnown.isNegative()) {
2180 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2181 // The sign bit of X is set. If some other bit is set then X is not equal
2182 // to INT_MIN.
2183 if (XKnown.One.intersects(Mask))
2184 return true;
2185 // The sign bit of Y is set. If some other bit is set then Y is not equal
2186 // to INT_MIN.
2187 if (YKnown.One.intersects(Mask))
2188 return true;
2189 }
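    // Worked example: for i8 operands, the only pair of negative values
    // summing to zero (mod 256) is -128 + -128, so any known-one bit besides
    // the sign bit in either operand rules out a zero sum.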
2190
2191 // The sum of a non-negative number and a power of two is not zero.
2192 if (XKnown.isNonNegative() &&
2193 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2194 return true;
2195 if (YKnown.isNonNegative() &&
2196 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2197 return true;
2198 }
2199 // X * Y.
2200 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2201 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2202 // If X and Y are non-zero then so is X * Y as long as the multiplication
2203 // does not overflow.
2204 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2205 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2206 return true;
2207 }
2208 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2209 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2210 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2211 isKnownNonZero(SI->getFalseValue(), Depth, Q))
2212 return true;
2213 }
2214 // PHI
2215 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2216 // Try and detect a recurrence that monotonically increases from a
2217 // starting value, as these are common as induction variables.
2218 if (PN->getNumIncomingValues() == 2) {
2219 Value *Start = PN->getIncomingValue(0);
2220 Value *Induction = PN->getIncomingValue(1);
2221 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2222 std::swap(Start, Induction);
2223 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2224 if (!C->isZero() && !C->isNegative()) {
2225 ConstantInt *X;
2226 if (Q.IIQ.UseInstrInfo &&
2227 (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2228 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2229 !X->isNegative())
2230 return true;
2231 }
2232 }
2233 }
2234 // Check if all incoming values are non-zero constant.
2235 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2236 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2237 });
2238 if (AllNonZeroConstants)
2239 return true;
2240 }
2241
2242 KnownBits Known(BitWidth);
2243 computeKnownBits(V, Known, Depth, Q);
2244 return Known.One != 0;
2245}
2246
2247/// Return true if V2 == V1 + X, where X is known non-zero.
2248static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2249 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2250 if (!BO || BO->getOpcode() != Instruction::Add)
2251 return false;
2252 Value *Op = nullptr;
2253 if (V2 == BO->getOperand(0))
2254 Op = BO->getOperand(1);
2255 else if (V2 == BO->getOperand(1))
2256 Op = BO->getOperand(0);
2257 else
2258 return false;
2259 return isKnownNonZero(Op, 0, Q);
2260}
2261
2262/// Return true if it is known that V1 != V2.
2263static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2264 if (V1 == V2)
2265 return false;
2266 if (V1->getType() != V2->getType())
2267 // We can't look through casts yet.
2268 return false;
2269 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2270 return true;
2271
2272 if (V1->getType()->isIntOrIntVectorTy()) {
2273 // Are any known bits in V1 contradictory to known bits in V2? If V1
2274 // has a known zero where V2 has a known one, they must not be equal.
2275 KnownBits Known1 = computeKnownBits(V1, 0, Q);
2276 KnownBits Known2 = computeKnownBits(V2, 0, Q);
2277
2278 if (Known1.Zero.intersects(Known2.One) ||
2279 Known2.Zero.intersects(Known1.One))
2280 return true;
2281 }
2282 return false;
2283}
2284
2285/// Return true if 'V & Mask' is known to be zero. We use this predicate to
2286/// simplify operations downstream. Mask is known to be zero for bits that V
2287/// cannot have.
2288///
2289/// This function is defined on values with integer type, values with pointer
2290/// type, and vectors of integers. In the case
2291/// where V is a vector, the mask, known zero, and known one values are the
2292/// same width as the vector element, and the bit is set only if it is true
2293/// for all of the elements in the vector.
2294bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2295 const Query &Q) {
2296 KnownBits Known(Mask.getBitWidth());
2297 computeKnownBits(V, Known, Depth, Q);
2298 return Mask.isSubsetOf(Known.Zero);
2299}
2300
2301// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2302// Returns the input and lower/upper bounds.
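// For example, smax(smin(X, 127), -128) matches with In = X, CLow = -128,
// and CHigh = 127.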
2303static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2304 const APInt *&CLow, const APInt *&CHigh) {
2305   assert(isa<Operator>(Select) &&
2306          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2307          "Input should be a Select!");
2308
2309 const Value *LHS = nullptr, *RHS = nullptr;
2310 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2311 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2312 return false;
2313
2314 if (!match(RHS, m_APInt(CLow)))
2315 return false;
2316
2317 const Value *LHS2 = nullptr, *RHS2 = nullptr;
2318 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2319 if (getInverseMinMaxFlavor(SPF) != SPF2)
2320 return false;
2321
2322 if (!match(RHS2, m_APInt(CHigh)))
2323 return false;
2324
2325 if (SPF == SPF_SMIN)
2326 std::swap(CLow, CHigh);
2327
2328 In = LHS2;
2329 return CLow->sle(*CHigh);
2330}
2331
2332/// For vector constants, loop over the elements and find the constant with the
2333/// minimum number of sign bits. Return 0 if the value is not a vector constant
2334/// or if any element was not analyzed; otherwise, return the count for the
2335/// element with the minimum number of sign bits.
2336static unsigned computeNumSignBitsVectorConstant(const Value *V,
2337 unsigned TyBits) {
2338 const auto *CV = dyn_cast<Constant>(V);
2339 if (!CV || !CV->getType()->isVectorTy())
2340 return 0;
2341
2342 unsigned MinSignBits = TyBits;
2343 unsigned NumElts = CV->getType()->getVectorNumElements();
2344 for (unsigned i = 0; i != NumElts; ++i) {
2345 // If we find a non-ConstantInt, bail out.
2346 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2347 if (!Elt)
2348 return 0;
2349
2350 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2351 }
2352
2353 return MinSignBits;
2354}
2355
2356static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2357 const Query &Q);
2358
2359static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2360 const Query &Q) {
2361 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2362   assert(Result > 0 && "At least one sign bit needs to be present!");
2363 return Result;
2364}
2365
2366/// Return the number of times the sign bit of the register is replicated into
2367/// the other bits. We know that at least 1 bit is always equal to the sign bit
2368/// (itself), but other cases can give us information. For example, immediately
2369/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2370/// other, so we return 3. For vectors, return the number of sign bits for the
2371/// vector element with the minimum number of known sign bits.
2372static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2373 const Query &Q) {
2374   assert(Depth <= MaxDepth && "Limit Search Depth");
2375
2376 // We return the minimum number of sign bits that are guaranteed to be present
2377 // in V, so for undef we have to conservatively return 1. We don't have the
2378 // same behavior for poison though -- that's a FIXME today.
2379
2380 Type *ScalarTy = V->getType()->getScalarType();
2381 unsigned TyBits = ScalarTy->isPointerTy() ?
2382 Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2383 Q.DL.getTypeSizeInBits(ScalarTy);
2384
2385 unsigned Tmp, Tmp2;
2386 unsigned FirstAnswer = 1;
2387
2388 // Note that ConstantInt is handled by the general computeKnownBits case
2389 // below.
2390
2391 if (Depth == MaxDepth)
2392 return 1; // Limit search depth.
2393
2394 if (auto *U = dyn_cast<Operator>(V)) {
2395 switch (Operator::getOpcode(V)) {
2396 default: break;
2397 case Instruction::SExt:
2398 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2399 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2400
2401 case Instruction::SDiv: {
2402 const APInt *Denominator;
2403 // sdiv X, C -> adds log(C) sign bits.
2404 if (match(U->getOperand(1), m_APInt(Denominator))) {
2405
2406 // Ignore non-positive denominator.
2407 if (!Denominator->isStrictlyPositive())
2408 break;
2409
2410 // Calculate the incoming numerator bits.
2411 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2412
2413 // Add floor(log(C)) bits to the numerator bits.
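        // Worked example: (sdiv i32 %X, 4) adds floor(log2(4)) = 2 sign bits,
        // since dividing by 4 shrinks the magnitude by a factor of four.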
2414 return std::min(TyBits, NumBits + Denominator->logBase2());
2415 }
2416 break;
2417 }
2418
2419 case Instruction::SRem: {
2420 const APInt *Denominator;
2421 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2422 // positive constant. This let us put a lower bound on the number of sign
2423 // bits.
2424 if (match(U->getOperand(1), m_APInt(Denominator))) {
2425
2426 // Ignore non-positive denominator.
2427 if (!Denominator->isStrictlyPositive())
2428 break;
2429
2430 // Calculate the incoming numerator bits. SRem by a positive constant
2431 // can't lower the number of sign bits.
2432 unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2433
2434 // Calculate the leading sign bit constraints by examining the
2435 // denominator. Given that the denominator is positive, there are two
2436 // cases:
2437 //
2438         //  1. the numerator is positive. Then the result range is [0,C), and
2439         //     every value in [0,C) is u< (1 << ceilLogBase2(C)).
2440 //
2441 // 2. the numerator is negative. Then the result range is (-C,0] and
2442 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2443 //
2444 // Thus a lower bound on the number of sign bits is `TyBits -
2445 // ceilLogBase2(C)`.
2446
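        // Worked example: (srem i32 %X, 5) has ceilLogBase2(5) = 3, so the
        // result lies in (-5, 5) and at least 32 - 3 = 29 sign bits are
        // guaranteed.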
2447 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2448 return std::max(NumrBits, ResBits);
2449 }
2450 break;
2451 }
2452
2453 case Instruction::AShr: {
2454 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2455 // ashr X, C -> adds C sign bits. Vectors too.
2456 const APInt *ShAmt;
2457 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2458 if (ShAmt->uge(TyBits))
2459 break; // Bad shift.
2460 unsigned ShAmtLimited = ShAmt->getZExtValue();
2461 Tmp += ShAmtLimited;
2462 if (Tmp > TyBits) Tmp = TyBits;
2463 }
2464 return Tmp;
2465 }
2466 case Instruction::Shl: {
2467 const APInt *ShAmt;
2468 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2469 // shl destroys sign bits.
2470 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2471 if (ShAmt->uge(TyBits) || // Bad shift.
2472 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2473 Tmp2 = ShAmt->getZExtValue();
2474 return Tmp - Tmp2;
2475 }
2476 break;
2477 }
2478 case Instruction::And:
2479 case Instruction::Or:
2480 case Instruction::Xor: // NOT is handled here.
2481 // Logical binary ops preserve the number of sign bits at the worst.
2482 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2483 if (Tmp != 1) {
2484 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2485 FirstAnswer = std::min(Tmp, Tmp2);
2486 // We computed what we know about the sign bits as our first
2487 // answer. Now proceed to the generic code that uses
2488 // computeKnownBits, and pick whichever answer is better.
2489 }
2490 break;
2491
2492 case Instruction::Select: {
2493 // If we have a clamp pattern, we know that the number of sign bits will
2494 // be the minimum of the clamp min/max range.
2495 const Value *X;
2496 const APInt *CLow, *CHigh;
2497 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2498 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2499
2500 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2501 if (Tmp == 1) break;
2502 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2503 return std::min(Tmp, Tmp2);
2504 }
2505
2506 case Instruction::Add:
2507 // Add can have at most one carry bit. Thus we know that the output
2508 // is, at worst, one more bit than the inputs.
2509 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2510 if (Tmp == 1) break;
2511
2512 // Special case decrementing a value (ADD X, -1):
2513 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2514 if (CRHS->isAllOnesValue()) {
2515 KnownBits Known(TyBits);
2516 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2517
2518 // If the input is known to be 0 or 1, the output is 0/-1, which is
2519 // all sign bits set.
2520 if ((Known.Zero | 1).isAllOnesValue())
2521 return TyBits;
2522
2523 // If we are subtracting one from a positive number, there is no carry
2524 // out of the result.
2525 if (Known.isNonNegative())
2526 return Tmp;
2527 }
2528
2529 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2530 if (Tmp2 == 1) break;
2531 return std::min(Tmp, Tmp2) - 1;
2532
2533 case Instruction::Sub:
2534 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2535 if (Tmp2 == 1) break;
2536
2537 // Handle NEG.
2538 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2539 if (CLHS->isNullValue()) {
2540 KnownBits Known(TyBits);
2541 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2542 // If the input is known to be 0 or 1, the output is 0/-1, which is
2543 // all sign bits set.
2544 if ((Known.Zero | 1).isAllOnesValue())
2545 return TyBits;
2546
2547 // If the input is known to be positive (the sign bit is known clear),
2548 // the output of the NEG has the same number of sign bits as the
2549 // input.
2550 if (Known.isNonNegative())
2551 return Tmp2;
2552
2553 // Otherwise, we treat this like a SUB.
2554 }
2555
2556 // Sub can have at most one carry bit. Thus we know that the output
2557 // is, at worst, one more bit than the inputs.
2558 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2559 if (Tmp == 1) break;
2560 return std::min(Tmp, Tmp2) - 1;
2561
2562 case Instruction::Mul: {
2563 // The output of the Mul can be at most twice the valid bits in the
2564 // inputs.
2565 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2566 if (SignBitsOp0 == 1) break;
2567 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2568 if (SignBitsOp1 == 1) break;
2569 unsigned OutValidBits =
2570 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
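      // Worked example: i32 operands with 24 sign bits each have 9 valid bits
      // apiece; the product needs at most 18 valid bits, leaving at least
      // 32 - 18 + 1 = 15 sign bits.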
2571 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2572 }
2573
2574 case Instruction::PHI: {
2575 const PHINode *PN = cast<PHINode>(U);
2576 unsigned NumIncomingValues = PN->getNumIncomingValues();
2577 // Don't analyze large in-degree PHIs.
2578 if (NumIncomingValues > 4) break;
2579 // Unreachable blocks may have zero-operand PHI nodes.
2580 if (NumIncomingValues == 0) break;
2581
2582 // Take the minimum of all incoming values. This can't infinitely loop
2583 // because of our depth threshold.
2584 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2585 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2586 if (Tmp == 1) return Tmp;
2587 Tmp = std::min(
2588 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2589 }
2590 return Tmp;
2591 }
2592
2593 case Instruction::Trunc:
2594 // FIXME: it's tricky to do anything useful for this, but it is an
2595 // important case for targets like X86.
2596 break;
2597
2598 case Instruction::ExtractElement:
2599 // Look through extract element. At the moment we keep this simple and
2600 // skip tracking the specific element. But at least we might find
2601 // information valid for all elements of the vector (for example, if the
2602 // vector is sign-extended, shifted, etc.).
2603 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2604
2605 case Instruction::ShuffleVector: {
2606 // TODO: This is copied almost directly from the SelectionDAG version of
2607 // ComputeNumSignBits. It would be better if we could share common
2608 // code. If not, make sure that changes are translated to the DAG.
2609
2610 // Collect the minimum number of sign bits that are shared by every vector
2611 // element referenced by the shuffle.
2612 auto *Shuf = cast<ShuffleVectorInst>(U);
2613 int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
2614 int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
2615 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2616 for (int i = 0; i != NumMaskElts; ++i) {
2617 int M = Shuf->getMaskValue(i);
2618 assert(M < NumElts * 2 && "Invalid shuffle mask constant");
2619 // For undef elements, we don't know anything about the common state of
2620 // the shuffle result.
2621 if (M == -1)
2622 return 1;
2623 if (M < NumElts)
2624 DemandedLHS.setBit(M % NumElts);
2625 else
2626 DemandedRHS.setBit(M % NumElts);
2627 }
2628 Tmp = std::numeric_limits<unsigned>::max();
2629 if (!!DemandedLHS)
2630 Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
2631 if (!!DemandedRHS) {
2632 Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
2633 Tmp = std::min(Tmp, Tmp2);
2634 }
2635 // If we don't know anything, early out and try computeKnownBits
2636 // fall-back.
2637 if (Tmp == 1)
2638 break;
2639 assert(Tmp <= V->getType()->getScalarSizeInBits() &&
2640 "Failed to determine minimum sign bits");
2641 return Tmp;
2642 }
2643 }
2644 }
2645
2646 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2647 // use this information.
2648
2649 // If we can examine all elements of a vector constant successfully, we're
2650 // done (we can't do any better than that). If not, keep trying.
2651 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2652 return VecSignBits;
2653
2654 KnownBits Known(TyBits);
2655 computeKnownBits(V, Known, Depth, Q);
2656
2657 // If we know that the sign bit is either zero or one, determine the number of
2658 // identical bits in the top of the input value.
2659 return std::max(FirstAnswer, Known.countMinSignBits());
2660}
2661
2662/// This function computes the integer multiple of Base that equals V.
2663/// If successful, it returns true and stores the multiple in
2664/// Multiple. If unsuccessful, it returns false. It looks
2665/// through SExt instructions only if LookThroughSExt is true.
2666bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2667 bool LookThroughSExt, unsigned Depth) {
2668 assert(V && "No Value?");
2669 assert(Depth <= MaxDepth && "Limit Search Depth");
2670 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2671
2672 Type *T = V->getType();
2673
2674 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2675
2676 if (Base == 0)
2677 return false;
2678
2679 if (Base == 1) {
2680 Multiple = V;
2681 return true;
2682 }
2683
2684 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2685 Constant *BaseVal = ConstantInt::get(T, Base);
2686 if (CO && CO == BaseVal) {
2687 // Multiple is 1.
2688 Multiple = ConstantInt::get(T, 1);
2689 return true;
2690 }
2691
2692 if (CI && CI->getZExtValue() % Base == 0) {
2693 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2694 return true;
2695 }
2696
2697 if (Depth == MaxDepth) return false; // Limit search depth.
2698
2699 Operator *I = dyn_cast<Operator>(V);
2700 if (!I) return false;
2701
2702 switch (I->getOpcode()) {
2703 default: break;
2704 case Instruction::SExt:
2705 if (!LookThroughSExt) return false;
2706 // otherwise fall through to ZExt
2707 LLVM_FALLTHROUGH;
2708 case Instruction::ZExt:
2709 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2710 LookThroughSExt, Depth+1);
2711 case Instruction::Shl:
2712 case Instruction::Mul: {
2713 Value *Op0 = I->getOperand(0);
2714 Value *Op1 = I->getOperand(1);
2715
2716 if (I->getOpcode() == Instruction::Shl) {
2717 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2718 if (!Op1CI) return false;
2719 // Turn Op0 << Op1 into Op0 * 2^Op1
2720 APInt Op1Int = Op1CI->getValue();
2721 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2722 APInt API(Op1Int.getBitWidth(), 0);
2723 API.setBit(BitToSet);
2724 Op1 = ConstantInt::get(V->getContext(), API);
2725 }
2726
2727 Value *Mul0 = nullptr;
2728 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2729 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2730 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2731 if (Op1C->getType()->getPrimitiveSizeInBits() <
2732 MulC->getType()->getPrimitiveSizeInBits())
2733 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2734 if (Op1C->getType()->getPrimitiveSizeInBits() >
2735 MulC->getType()->getPrimitiveSizeInBits())
2736 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2737
2738 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2739 Multiple = ConstantExpr::getMul(MulC, Op1C);
2740 return true;
2741 }
2742
2743 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2744 if (Mul0CI->getValue() == 1) {
2745 // V == Base * Op1, so return Op1
2746 Multiple = Op1;
2747 return true;
2748 }
2749 }
2750
2751 Value *Mul1 = nullptr;
2752 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2753 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2754 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2755 if (Op0C->getType()->getPrimitiveSizeInBits() <
2756 MulC->getType()->getPrimitiveSizeInBits())
2757 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2758 if (Op0C->getType()->getPrimitiveSizeInBits() >
2759 MulC->getType()->getPrimitiveSizeInBits())
2760 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2761
2762 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2763 Multiple = ConstantExpr::getMul(MulC, Op0C);
2764 return true;
2765 }
2766
2767 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2768 if (Mul1CI->getValue() == 1) {
2769 // V == Base * Op0, so return Op0
2770 Multiple = Op0;
2771 return true;
2772 }
2773 }
2774 }
2775 }
2776
2777 // We could not determine if V is a multiple of Base.
2778 return false;
2779}
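// A minimal usage sketch (the helper below is hypothetical and not part of
// this file; it only exercises the entry point defined above):
static Value *getMultipleOfFour(Value *V) {
  Value *Multiple = nullptr;
  // On success, V == 4 * Multiple.
  if (ComputeMultiple(V, /*Base=*/4, Multiple, /*LookThroughSExt=*/false,
                      /*Depth=*/0))
    return Multiple;
  return nullptr;
}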
2780
2781Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2782 const TargetLibraryInfo *TLI) {
2783 const Function *F = ICS.getCalledFunction();
2784 if (!F)
2785 return Intrinsic::not_intrinsic;
2786
2787 if (F->isIntrinsic())
2788 return F->getIntrinsicID();
2789
2790 if (!TLI)
2791 return Intrinsic::not_intrinsic;
2792
2793 LibFunc Func;
2794 // We're going to make assumptions about the semantics of the functions, so
2795 // check that the target knows the function is available in this environment
2796 // and that it does not have local linkage.
2797 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2798 return Intrinsic::not_intrinsic;
2799
2800 if (!ICS.onlyReadsMemory())
2801 return Intrinsic::not_intrinsic;
2802
2803 // Otherwise check if we have a call to a function that can be turned into a
2804 // vector intrinsic.
2805 switch (Func) {
2806 default:
2807 break;
2808 case LibFunc_sin:
2809 case LibFunc_sinf:
2810 case LibFunc_sinl:
2811 return Intrinsic::sin;
2812 case LibFunc_cos:
2813 case LibFunc_cosf:
2814 case LibFunc_cosl:
2815 return Intrinsic::cos;
2816 case LibFunc_exp:
2817 case LibFunc_expf:
2818 case LibFunc_expl:
2819 return Intrinsic::exp;
2820 case LibFunc_exp2:
2821 case LibFunc_exp2f:
2822 case LibFunc_exp2l:
2823 return Intrinsic::exp2;
2824 case LibFunc_log:
2825 case LibFunc_logf:
2826 case LibFunc_logl:
2827 return Intrinsic::log;
2828 case LibFunc_log10:
2829 case LibFunc_log10f:
2830 case LibFunc_log10l:
2831 return Intrinsic::log10;
2832 case LibFunc_log2:
2833 case LibFunc_log2f:
2834 case LibFunc_log2l:
2835 return Intrinsic::log2;
2836 case LibFunc_fabs:
2837 case LibFunc_fabsf:
2838 case LibFunc_fabsl:
2839 return Intrinsic::fabs;
2840 case LibFunc_fmin:
2841 case LibFunc_fminf:
2842 case LibFunc_fminl:
2843 return Intrinsic::minnum;
2844 case LibFunc_fmax:
2845 case LibFunc_fmaxf:
2846 case LibFunc_fmaxl:
2847 return Intrinsic::maxnum;
2848 case LibFunc_copysign:
2849 case LibFunc_copysignf:
2850 case LibFunc_copysignl:
2851 return Intrinsic::copysign;
2852 case LibFunc_floor:
2853 case LibFunc_floorf:
2854 case LibFunc_floorl:
2855 return Intrinsic::floor;
2856 case LibFunc_ceil:
2857 case LibFunc_ceilf:
2858 case LibFunc_ceill:
2859 return Intrinsic::ceil;
2860 case LibFunc_trunc:
2861 case LibFunc_truncf:
2862 case LibFunc_truncl:
2863 return Intrinsic::trunc;
2864 case LibFunc_rint:
2865 case LibFunc_rintf:
2866 case LibFunc_rintl:
2867 return Intrinsic::rint;
2868 case LibFunc_nearbyint:
2869 case LibFunc_nearbyintf:
2870 case LibFunc_nearbyintl:
2871 return Intrinsic::nearbyint;
2872 case LibFunc_round:
2873 case LibFunc_roundf:
2874 case LibFunc_roundl:
2875 return Intrinsic::round;
2876 case LibFunc_pow:
2877 case LibFunc_powf:
2878 case LibFunc_powl:
2879 return Intrinsic::pow;
2880 case LibFunc_sqrt:
2881 case LibFunc_sqrtf:
2882 case LibFunc_sqrtl:
2883 return Intrinsic::sqrt;
2884 }
2885
2886 return Intrinsic::not_intrinsic;
2887}
2888
2889/// Return true if we can prove that the specified FP value is never equal to
2890/// -0.0.
2891///
2892/// NOTE: this function will need to be revisited when we support non-default
2893/// rounding modes!
2894bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2895 unsigned Depth) {
2896 if (auto *CFP = dyn_cast<ConstantFP>(V))
2897 return !CFP->getValueAPF().isNegZero();
2898
2899 // Limit search depth.
2900 if (Depth == MaxDepth)
2901 return false;
2902
2903 auto *Op = dyn_cast<Operator>(V);
2904 if (!Op)
2905 return false;
2906
2907 // Check if the nsz fast-math flag is set.
2908 if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2909 if (FPO->hasNoSignedZeros())
2910 return true;
2911
2912 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2913 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2914 return true;
2915
2916 // sitofp and uitofp turn into +0.0 for zero.
2917 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2918 return true;
2919
2920 if (auto *Call = dyn_cast<CallInst>(Op)) {
2921 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2922 switch (IID) {
2923 default:
2924 break;
2925 // sqrt(-0.0) = -0.0, no other negative results are possible.
2926 case Intrinsic::sqrt:
2927 case Intrinsic::canonicalize:
2928 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2929 // fabs(x) != -0.0
2930 case Intrinsic::fabs:
2931 return true;
2932 }
2933 }
2934
2935 return false;
2936}
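// For example (illustrative): this returns true for 'uitofp i32 %x to float',
// since uitofp yields +0.0 for a zero input, but false for a plain
// 'fsub float 0.0, %y' without the nsz flag, because no case above matches it.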
2937
2938/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2939/// standard ordered compare. E.g., make -0.0 olt 0.0 be true because of the
2940/// sign bit, even though the two compare equal.
2941static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2942 const TargetLibraryInfo *TLI,
2943 bool SignBitOnly,
2944 unsigned Depth) {
2945 // TODO: This function does not do the right thing when SignBitOnly is true
2946 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2947 // which flips the sign bits of NaNs. See
2948 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2949
2950 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2951 return !CFP->getValueAPF().isNegative() ||
2952 (!SignBitOnly && CFP->getValueAPF().isZero());
2953 }
2954
2955 // Handle vector of constants.
2956 if (auto *CV = dyn_cast<Constant>(V)) {
2957 if (CV->getType()->isVectorTy()) {
2958 unsigned NumElts = CV->getType()->getVectorNumElements();
2959 for (unsigned i = 0; i != NumElts; ++i) {
2960 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2961 if (!CFP)
2962 return false;
2963 if (CFP->getValueAPF().isNegative() &&
2964 (SignBitOnly || !CFP->getValueAPF().isZero()))
2965 return false;
2966 }
2967
2968 // All non-negative ConstantFPs.
2969 return true;
2970 }
2971 }
2972
2973 if (Depth == MaxDepth)
2974 return false; // Limit search depth.
2975
2976 const Operator *I = dyn_cast<Operator>(V);
2977 if (!I)
2978 return false;
2979
2980 switch (I->getOpcode()) {
2981 default:
2982 break;
2983 // Unsigned integers are always nonnegative.
2984 case Instruction::UIToFP:
2985 return true;
2986 case Instruction::FMul:
2987 // x*x is always non-negative or a NaN.
2988 if (I->getOperand(0) == I->getOperand(1) &&
2989 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2990 return true;
2991
2992 LLVM_FALLTHROUGH;
2993 case Instruction::FAdd:
2994 case Instruction::FDiv:
2995 case Instruction::FRem:
2996 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2997 Depth + 1) &&
2998 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2999 Depth + 1);
3000 case Instruction::Select:
3001 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3002 Depth + 1) &&
3003 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3004 Depth + 1);
3005 case Instruction::FPExt:
3006 case Instruction::FPTrunc:
3007 // Widening/narrowing never change sign.
3008 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3009 Depth + 1);
3010 case Instruction::ExtractElement:
3011 // Look through extract element. At the moment we keep this simple and skip
3012 // tracking the specific element. But at least we might find information
3013 // valid for all elements of the vector.
3014 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3015 Depth + 1);
3016 case Instruction::Call:
3017 const auto *CI = cast<CallInst>(I);
3018 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
3019 switch (IID) {
3020 default:
3021 break;
3022 case Intrinsic::maxnum:
3023 return (isKnownNeverNaN(I->getOperand(0), TLI) &&
3024 cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
3025 SignBitOnly, Depth + 1)) ||
3026 (isKnownNeverNaN(I->getOperand(1), TLI) &&
3027 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3028 SignBitOnly, Depth + 1));
3029
3030 case Intrinsic::maximum:
3031 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3032 Depth + 1) ||
3033 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3034 Depth + 1);
3035 case Intrinsic::minnum:
3036 case Intrinsic::minimum:
3037 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3038 Depth + 1) &&
3039 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3040 Depth + 1);
3041 case Intrinsic::exp:
3042 case Intrinsic::exp2:
3043 case Intrinsic::fabs:
3044 return true;
3045
3046 case Intrinsic::sqrt:
3047 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3048 if (!SignBitOnly)
3049 return true;
3050 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3051 CannotBeNegativeZero(CI->getOperand(0), TLI));
3052
3053 case Intrinsic::powi:
3054 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3055 // powi(x,n) is non-negative if n is even.
3056 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3057 return true;
3058 }
3059 // TODO: This is not correct. Given that exp is an integer, here are the
3060 // ways that pow can return a negative value:
3061 //
3062 // pow(x, exp) --> negative if exp is odd and x is negative.
3063 // pow(-0, exp) --> -inf if exp is negative odd.
3064 // pow(-0, exp) --> -0 if exp is positive odd.
3065 // pow(-inf, exp) --> -0 if exp is negative odd.
3066 // pow(-inf, exp) --> -inf if exp is positive odd.
3067 //
3068 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3069 // but we must return false if x == -0. Unfortunately we do not currently
3070 // have a way of expressing this constraint. See details in
3071 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3072 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3073 Depth + 1);
3074
3075 case Intrinsic::fma:
3076 case Intrinsic::fmuladd:
3077 // x*x+y is non-negative if y is non-negative.
3078 return I->getOperand(0) == I->getOperand(1) &&
3079 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3080 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3081 Depth + 1);
3082 }
3083 break;
3084 }
3085 return false;
3086}
3087
3088bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3089 const TargetLibraryInfo *TLI) {
3090 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3091}
3092
3093bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3094 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3095}
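// For example (illustrative): CannotBeOrderedLessThanZero returns true for
// 'call float @llvm.fabs.f32(float %x)' and for 'fmul float %x, %x' (x*x is
// non-negative or NaN), while SignBitMustBeZero additionally requires the
// x*x case to carry the nnan flag, since a NaN result may have its sign bit
// set.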
3096
3097bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3098 unsigned Depth) {
3099 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3100
3101 // If we're told that NaNs won't happen, assume they won't.
3102 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3103 if (FPMathOp->hasNoNaNs())
3104 return true;
3105
3106 // Handle scalar constants.
3107 if (auto *CFP = dyn_cast<ConstantFP>(V))
3108 return !CFP->isNaN();
3109
3110 if (Depth == MaxDepth)
3111 return false;
3112
3113 if (auto *Inst = dyn_cast<Instruction>(V)) {
3114 switch (Inst->getOpcode()) {
3115 case Instruction::FAdd:
3116 case Instruction::FMul:
3117 case Instruction::FSub:
3118 case Instruction::FDiv:
3119 case Instruction::FRem: {
3120 // TODO: Need isKnownNeverInfinity
3121 return false;
3122 }
3123 case Instruction::Select: {
3124 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3125 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3126 }
3127 case Instruction::SIToFP:
3128 case Instruction::UIToFP:
3129 return true;
3130 case Instruction::FPTrunc:
3131 case Instruction::FPExt:
3132 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3133 default:
3134 break;
3135 }
3136 }
3137
3138 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3139 switch (II->getIntrinsicID()) {
3140 case Intrinsic::canonicalize:
3141 case Intrinsic::fabs:
3142 case Intrinsic::copysign:
3143 case Intrinsic::exp:
3144 case Intrinsic::exp2:
3145 case Intrinsic::floor:
3146 case Intrinsic::ceil:
3147 case Intrinsic::trunc:
3148 case Intrinsic::rint:
3149 case Intrinsic::nearbyint:
3150 case Intrinsic::round:
3151 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3152 case Intrinsic::sqrt:
3153 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3154 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3155 case Intrinsic::minnum:
3156 case Intrinsic::maxnum:
3157 // If either operand is not NaN, the result is not NaN.
3158 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3159 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3160 default:
3161 return false;
3162 }
3163 }
3164
3165 // Bail out for constant expressions, but try to handle vector constants.
3166 if (!V->getType()->isVectorTy() || !isa<Constant>(V))
3167 return false;
3168
3169 // For vectors, verify that each element is not NaN.
3170 unsigned NumElts = V->getType()->getVectorNumElements();
3171 for (unsigned i = 0; i != NumElts; ++i) {
3172 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3173 if (!Elt)
3174 return false;
3175 if (isa<UndefValue>(Elt))
3176 continue;
3177 auto *CElt = dyn_cast<ConstantFP>(Elt);
3178 if (!CElt || CElt->isNaN())
3179 return false;
3180 }
3181 // All elements were confirmed not-NaN or undefined.
3182 return true;
3183}
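// For example (illustrative): this returns true for 'sitofp i32 %x to double'
// (integer conversions never produce NaN) and for a select between two
// non-NaN constants, but false for a plain 'fadd double %a, %b' without the
// nnan flag, since inf + (-inf) yields NaN.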
3184
3185Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3186
3187 // All byte-wide stores are splatable, even of arbitrary variables.
3188 if (V->getType()->isIntegerTy(8))
3189 return V;
3190
3191 LLVMContext &Ctx = V->getContext();
3192
3193 // Undef is a don't-care value.
3194 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3195 if (isa<UndefValue>(V))
3196 return UndefInt8;
3197
3198 const uint64_t Size = DL.getTypeStoreSize(V->getType());
3199 if (!Size)
3200 return UndefInt8;
3201
3202 Constant *C = dyn_cast<Constant>(V);
3203 if (!C) {
3204 // Conceptually, we could handle things like:
3205 // %a = zext i8 %X to i16
3206 // %b = shl i16 %a, 8
3207 // %c = or i16 %a, %b
3208 // but until there is an example that actually needs this, it doesn't seem
3209 // worth worrying about.
3210 return nullptr;
3211 }
3212
3213 // Handle 'null' ConstantAggregateZero etc.
3214 if (C->isNullValue())
3215 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3216
3217 // Constant floating-point values can be handled as integer values if the
3218 // corresponding integer value is "byteable". An important case is 0.0.
3219 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3220 Type *Ty = nullptr;
3221 if (CFP->getType()->isHalfTy())
3222 Ty = Type::getInt16Ty(Ctx);
3223 else if (CFP->getType()->isFloatTy())
3224 Ty = Type::getInt32Ty(Ctx);
3225 else if (CFP->getType()->isDoubleTy())
3226 Ty = Type::getInt64Ty(Ctx);
3227 // Don't handle long double formats, which have strange constraints.
3228 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3229 : nullptr;
3230 }
3231
3232 // We can handle constant integers whose width is a multiple of 8 bits.
3233 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3234 if (CI->getBitWidth() % 8 == 0) {
3235 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3236 if (!CI->getValue().isSplat(8))
3237 return nullptr;
3238 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3239 }
3240 }
3241
3242 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3243 if (CE->getOpcode() == Instruction::IntToPtr) {
3244 auto PS = DL.getPointerSizeInBits(
3245 cast<PointerType>(CE->getType())->getAddressSpace());
3246 return isBytewiseValue(
3247 ConstantExpr::getIntegerCast(CE->getOperand(0),
3248 Type::getIntNTy(Ctx, PS), false),
3249 DL);
3250 }
3251 }
3252
3253 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3254 if (LHS == RHS)
3255 return LHS;
3256 if (!LHS || !RHS)
3257 return nullptr;
3258 if (LHS == UndefInt8)
3259 return RHS;
3260 if (RHS == UndefInt8)
3261 return LHS;
3262 return nullptr;
3263 };
3264
3265 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3266 Value *Val = UndefInt8;
3267 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3268 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3269 return nullptr;
3270 return Val;
3271 }
3272
3273 if (isa<ConstantAggregate>(C)) {
3274 Value *Val = UndefInt8;
3275 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3276 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3277 return nullptr;
3278 return Val;
3279 }
3280
3281 // Don't try to handle the handful of other constants.
3282 return nullptr;
3283}
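// A minimal sketch (hypothetical helper, not part of this file): an integer
// constant that is a splat of one byte folds to that byte.
static Value *byteSplatExample(LLVMContext &Ctx, const DataLayout &DL) {
  Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), 0x01010101);
  // Yields the i8 constant 1; a non-splat value such as 0x01020304 would
  // yield nullptr instead.
  return isBytewiseValue(C, DL);
}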
3284
3285// This is the recursive version of BuildSubAggregate. It takes a few different
3286// arguments. Idxs is the index within the nested struct From that we are
3287// looking at now (which is of type IndexedType). IdxSkip is the number of
3288// indices from Idxs that should be left out when inserting into the resulting
3289// struct. To is the result struct built so far, new insertvalue instructions
3290// build on that.
3291static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3292 SmallVectorImpl<unsigned> &Idxs,
3293 unsigned IdxSkip,
3294 Instruction *InsertBefore) {
3295 StructType *STy = dyn_cast<StructType>(IndexedType);
1: Assuming 'IndexedType' is a 'StructType'
3296   if (STy) {
1.1: 'STy' is non-null
2: Taking true branch
3297 // Save the original To argument so we can modify it
3298 Value *OrigTo = To;
3299 // General case, the type indexed by Idxs is a struct
3300 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3: Assuming 'i' is equal to 'e'
4: Loop condition is false. Execution continues on line 3319
3301 // Process each struct element recursively
3302 Idxs.push_back(i);
3303 Value *PrevTo = To;
3304 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3305 InsertBefore);
3306 Idxs.pop_back();
3307 if (!To) {
3308 // Couldn't find any inserted value for this index? Cleanup
3309 while (PrevTo != OrigTo) {
3310 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3311 PrevTo = Del->getAggregateOperand();
3312 Del->eraseFromParent();
3313 }
3314 // Stop processing elements
3315 break;
3316 }
3317 }
3318 // If we successfully found a value for each of our subaggregates
3319 if (To)
5: Assuming 'To' is null
6: Taking false branch
3320 return To;
3321 }
3322 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3323 // the struct's elements had a value that was inserted directly. In the latter
3324 // case, perhaps we can't determine each of the subelements individually, but
3325 // we might be able to find the complete struct somewhere.
3326
3327 // Find the value that is at that particular spot
3328 Value *V = FindInsertedValue(From, Idxs);
3329
3330 if (!V)
7: Assuming 'V' is non-null
8: Taking false branch
3331 return nullptr;
3332
3333 // Insert the value in the new (sub) aggregate
3334 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
9: Passing null pointer value via 1st parameter 'Agg'
10: Calling 'InsertValueInst::Create'
3335 "tmp", InsertBefore);
3336}
3337
3338// This helper takes a nested struct and extracts a part of it (which is again a
3339// struct) into a new value. For example, given the struct:
3340// { a, { b, { c, d }, e } }
3341// and the indices "1, 1" this returns
3342// { c, d }.
3343//
3344// It does this by inserting an insertvalue for each element in the resulting
3345// struct, as opposed to just inserting a single struct. This will only work if
3346// each of the elements of the substruct are known (ie, inserted into From by an
3347// insertvalue instruction somewhere).
3348//
3349// All inserted insertvalue instructions are inserted before InsertBefore
3350static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3351 Instruction *InsertBefore) {
3352 assert(InsertBefore && "Must have someplace to insert!");
3353 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3354 idx_range);
3355 Value *To = UndefValue::get(IndexedType);
3356 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3357 unsigned IdxSkip = Idxs.size();
3358
3359 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3360}
3361
3362/// Given an aggregate and a sequence of indices, see if the scalar value
3363/// indexed is already around as a register, for example if it was inserted
3364/// directly into the aggregate.
3365///
3366/// If InsertBefore is not null, this function will duplicate (modified)
3367/// insertvalues when a part of a nested struct is extracted.
3368Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3369 Instruction *InsertBefore) {
3370 // Nothing to index? Just return V then (this is useful at the end of our
3371 // recursion).
3372 if (idx_range.empty())
3373 return V;
3374 // We have indices, so V should have an indexable type.
3375 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3376 "Not looking at a struct or array?");
3377 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3378 "Invalid indices for type?");
3379
3380 if (Constant *C = dyn_cast<Constant>(V)) {
3381 C = C->getAggregateElement(idx_range[0]);
3382 if (!C) return nullptr;
3383 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3384 }
3385
3386 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3387 // Loop the indices for the insertvalue instruction in parallel with the
3388 // requested indices
3389 const unsigned *req_idx = idx_range.begin();
3390 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3391 i != e; ++i, ++req_idx) {
3392 if (req_idx == idx_range.end()) {
3393 // We can't handle this without inserting insertvalues
3394 if (!InsertBefore)
3395 return nullptr;
3396
3397 // The requested index identifies a part of a nested aggregate. Handle
3398 // this specially. For example,
3399 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3400 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3401 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3402 // This can be changed into
3403 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3404 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3405 // which allows the unused 0,0 element from the nested struct to be
3406 // removed.
3407 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3408 InsertBefore);
3409 }
3410
3411 // This insertvalue inserts something other than what we are looking for.
3412 // See if the (aggregate) value inserted into has the value we are
3413 // looking for, then.
3414 if (*req_idx != *i)
3415 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3416 InsertBefore);
3417 }
3418 // If we end up here, the indices of the insertvalue match with those
3419 // requested (though possibly only partially). Now we recursively look at
3420 // the inserted value, passing any remaining indices.
3421 return FindInsertedValue(I->getInsertedValueOperand(),
3422 makeArrayRef(req_idx, idx_range.end()),
3423 InsertBefore);
3424 }
3425
3426 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3427 // If we're extracting a value from an aggregate that was extracted from
3428 // something else, we can extract from that something else directly instead.
3429 // However, we will need to chain I's indices with the requested indices.
3430
3431 // Calculate the number of indices required
3432 unsigned size = I->getNumIndices() + idx_range.size();
3433 // Allocate some space to put the new indices in
3434 SmallVector<unsigned, 5> Idxs;
3435 Idxs.reserve(size);
3436 // Add indices from the extract value instruction
3437 Idxs.append(I->idx_begin(), I->idx_end());
3438
3439 // Add requested indices
3440 Idxs.append(idx_range.begin(), idx_range.end());
3441
3442 assert(Idxs.size() == size &&
3443 "Number of indices added not correct?");
3444
3445 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3446 }
3447 // Otherwise, we don't know (e.g., extracting from a function return value
3448 // or a load instruction).
3449 return nullptr;
3450}
3451
3452bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3453 unsigned CharSize) {
3454 // Make sure the GEP has exactly three arguments.
3455 if (GEP->getNumOperands() != 3)
3456 return false;
3457
3458 // Make sure the index-ee is a pointer to an array of \p CharSize
3459 // integers.
3460 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3461 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3462 return false;
3463
3464 // Check to make sure that the first operand of the GEP is an integer and
3465 // has value 0 so that we are sure we're indexing into the initializer.
3466 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3467 if (!FirstIdx || !FirstIdx->isZero())
3468 return false;
3469
3470 return true;
3471}
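// For example (illustrative): with CharSize == 8 this accepts
//   getelementptr inbounds [13 x i8], [13 x i8]* @str, i64 0, i64 %idx
// but rejects any GEP whose first index is nonzero or non-constant, or whose
// source element type is not an array of i8.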
3472
3473bool llvm::getConstantDataArrayInfo(const Value *V,
3474 ConstantDataArraySlice &Slice,
3475 unsigned ElementSize, uint64_t Offset) {
3476 assert(V);
3477
3478 // Look through bitcast instructions and geps.
3479 V = V->stripPointerCasts();
3480
3481 // If the value is a GEP instruction or constant expression, treat it as an
3482 // offset.
3483 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3484 // The GEP operator should be based on a pointer to string constant, and is
3485 // indexing into the string constant.
3486 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3487 return false;
3488
3489 // If the second index isn't a ConstantInt, then this is a variable index
3490 // into the array. If this occurs, we can't say anything meaningful about
3491 // the string.
3492 uint64_t StartIdx = 0;
3493 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3494 StartIdx = CI->getZExtValue();
3495 else
3496 return false;
3497 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3498 StartIdx + Offset);
3499 }
3500
3501 // The GEP, whether a constant expression or an instruction, must reference
3502 // a global variable that is constant and initialized. The referenced constant
3503 // initializer is the array that we'll use for optimization.
3504 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3505 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3506 return false;
3507
3508 const ConstantDataArray *Array;
3509 ArrayType *ArrayTy;
3510 if (GV->getInitializer()->isNullValue()) {
3511 Type *GVTy = GV->getValueType();
3512 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3513 // A zeroinitializer for the array; there is no ConstantDataArray.
3514 Array = nullptr;
3515 } else {
3516 const DataLayout &DL = GV->getParent()->getDataLayout();
3517 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3518 uint64_t Length = SizeInBytes / (ElementSize / 8);
3519 if (Length <= Offset)
3520 return false;
3521
3522 Slice.Array = nullptr;
3523 Slice.Offset = 0;
3524 Slice.Length = Length - Offset;
3525 return true;
3526 }
3527 } else {
3528 // This must be a ConstantDataArray.
3529 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3530 if (!Array)
3531 return false;
3532 ArrayTy = Array->getType();
3533 }
3534 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3535 return false;
3536
3537 uint64_t NumElts = ArrayTy->getArrayNumElements();
3538 if (Offset > NumElts)
3539 return false;
3540
3541 Slice.Array = Array;
3542 Slice.Offset = Offset;
3543 Slice.Length = NumElts - Offset;
3544 return true;
3545}
3546
3547/// This function extracts the contents of a null-terminated C string pointed
3548/// to by V. If successful, it returns true and stores the string in Str.
3549/// If unsuccessful, it returns false.
3550bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3551 uint64_t Offset, bool TrimAtNul) {
3552 ConstantDataArraySlice Slice;
3553 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3554 return false;
3555
3556 if (Slice.Array == nullptr) {
3557 if (TrimAtNul) {
3558 Str = StringRef();
3559 return true;
3560 }
3561 if (Slice.Length == 1) {
3562 Str = StringRef("", 1);
3563 return true;
3564 }
3565 // We cannot instantiate a StringRef as we do not have an appropriate string
3566 // of 0s at hand.
3567 return false;
3568 }
3569
3570 // Start out with the entire array in the StringRef.
3571 Str = Slice.Array->getAsString();
3572 // Skip over 'offset' bytes.
3573 Str = Str.substr(Slice.Offset);
3574
3575 if (TrimAtNul) {
3576 // Trim off the \0 and anything after it. If the array is not nul
3577 // terminated, we just return the rest of the string. The client may know
3578 // some other way that the string is length-bound.
3579 Str = Str.substr(0, Str.find('\0'));
3580 }
3581 return true;
3582}
3583
3584// These next two are very similar to the above, but also look through PHI
3585// nodes.
3586// TODO: See if we can integrate these two together.
3587
3588/// If we can compute the length of the string pointed to by
3589/// the specified pointer, return 'len+1'. If we can't, return 0.
3590static uint64_t GetStringLengthH(const Value *V,
3591 SmallPtrSetImpl<const PHINode*> &PHIs,
3592 unsigned CharSize) {
3593 // Look through noop bitcast instructions.
3594 V = V->stripPointerCasts();
3595
3596 // If this is a PHI node, there are two cases: either we have already seen it
3597 // or we haven't.
3598 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3599 if (!PHIs.insert(PN).second)
3600 return ~0ULL; // already in the set.
3601
3602 // If it was new, see if all the input strings are the same length.
3603 uint64_t LenSoFar = ~0ULL;
3604 for (Value *IncValue : PN->incoming_values()) {
3605 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3606 if (Len == 0) return 0; // Unknown length -> unknown.
3607
3608 if (Len == ~0ULL) continue;
3609
3610 if (Len != LenSoFar && LenSoFar != ~0ULL)
3611 return 0; // Disagree -> unknown.
3612 LenSoFar = Len;
3613 }
3614
3615 // Success, all agree.
3616 return LenSoFar;
3617 }
3618
3619 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3620 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3621 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3622 if (Len1 == 0) return 0;
3623 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3624 if (Len2 == 0) return 0;
3625 if (Len1 == ~0ULL) return Len2;
3626 if (Len2 == ~0ULL) return Len1;
3627 if (Len1 != Len2) return 0;
3628 return Len1;
3629 }
3630
3631 // Otherwise, see if we can read the string.
3632 ConstantDataArraySlice Slice;
3633 if (!getConstantDataArrayInfo(V, Slice, CharSize))
3634 return 0;
3635
3636 if (Slice.Array == nullptr)
3637 return 1;
3638
3639 // Search for nul characters
3640 unsigned NullIndex = 0;
3641 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3642 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3643 break;
3644 }
3645
3646 return NullIndex + 1;
3647}
3648
3649/// If we can compute the length of the string pointed to by
3650/// the specified pointer, return 'len+1'. If we can't, return 0.
3651uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3652 if (!V->getType()->isPointerTy())
3653 return 0;
3654
3655 SmallPtrSet<const PHINode*, 32> PHIs;
3656 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3657 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3658 // an empty string as a length.
3659 return Len == ~0ULL ? 1 : Len;
3660}
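// A minimal usage sketch (hypothetical helper, not part of this file): the
// returned value is strlen + 1, so callers must subtract one to recover the
// C string length.
static bool getCStringLen(const Value *Ptr, uint64_t &Len) {
  // GetStringLength returns 0 when the length cannot be determined.
  uint64_t LenPlusNul = GetStringLength(Ptr, /*CharSize=*/8);
  if (!LenPlusNul)
    return false;
  Len = LenPlusNul - 1;
  return true;
}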
3661
3662const Value *
3663llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
3664 bool MustPreserveNullness) {
3665 assert(Call &&
3666 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3667 if (const Value *RV = Call->getReturnedArgOperand())
3668 return RV;
3669 // This can be used only as an aliasing property.
3670 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3671 Call, MustPreserveNullness))
3672 return Call->getArgOperand(0);
3673 return nullptr;
3674}
3675
3676bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3677 const CallBase *Call, bool MustPreserveNullness) {
3678 return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3679 Call->getIntrinsicID() == Intrinsic::strip_invariant_group ||
3680 Call->getIntrinsicID() == Intrinsic::aarch64_irg ||
3681 Call->getIntrinsicID() == Intrinsic::aarch64_tagp ||
3682 (!MustPreserveNullness &&
3683 Call->getIntrinsicID() == Intrinsic::ptrmask);
3684}
3685
3686/// \p PN defines a loop-variant pointer to an object. Check if the
3687/// previous iteration of the loop was referring to the same object as \p PN.
3688static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3689 const LoopInfo *LI) {
3690 // Find the loop-defined value.
3691 Loop *L = LI->getLoopFor(PN->getParent());
3692 if (PN->getNumIncomingValues() != 2)
3693 return true;
3694
3695 // Find the value from the previous iteration.
3696 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3697 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3698 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3699 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3700 return true;
3701
3702 // If a new pointer is loaded in the loop, the pointer references a different
3703 // object in every iteration. E.g.:
3704 // for (i)
3705 // int *p = a[i];
3706 // ...
3707 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3708 if (!L->isLoopInvariant(Load->getPointerOperand()))
3709 return false;
3710 return true;
3711}
3712
3713Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3714 unsigned MaxLookup) {
3715 if (!V->getType()->isPointerTy())
3716 return V;
3717 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3718 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3719 V = GEP->getPointerOperand();
3720 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3721 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3722 V = cast<Operator>(V)->getOperand(0);
3723 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3724 if (GA->isInterposable())
3725 return V;
3726 V = GA->getAliasee();
3727 } else if (isa<AllocaInst>(V)) {
3728 // An alloca can't be further simplified.
3729 return V;
3730 } else {
3731 if (auto *Call = dyn_cast<CallBase>(V)) {
3732 // CaptureTracking knows about special capturing properties of some
3733 // intrinsics, like launder.invariant.group, that can't be expressed with
3734 // attributes but that have properties like returning an aliasing pointer.
3735 // Because some analyses may assume that a nocapture pointer is not
3736 // returned from some special intrinsic (because the function would have
3737 // to be marked with the returned attribute), it is crucial to use this
3738 // function so that we stay in sync with CaptureTracking. Not using it
3739 // may cause weird miscompilations where two aliasing pointers are
3740 // assumed to be noalias.
3741 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
3742 V = RP;
3743 continue;
3744 }
3745 }
3746
3747 // See if InstructionSimplify knows any relevant tricks.
3748 if (Instruction *I = dyn_cast<Instruction>(V))
3749 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3750 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3751 V = Simplified;
3752 continue;
3753 }
3754
3755 return V;
3756 }
3757 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3758 }
3759 return V;
3760}
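// For example (illustrative): given
//   %a = alloca [4 x i32]
//   %p = getelementptr [4 x i32], [4 x i32]* %a, i64 0, i64 2
//   %q = bitcast i32* %p to i8*
// GetUnderlyingObject(%q, DL) strips the bitcast and the GEP and returns the
// alloca %a.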
3761
3762void llvm::GetUnderlyingObjects(const Value *V,
3763 SmallVectorImpl<const Value *> &Objects,
3764 const DataLayout &DL, LoopInfo *LI,
3765 unsigned MaxLookup) {
3766 SmallPtrSet<const Value *, 4> Visited;
3767 SmallVector<const Value *, 4> Worklist;
3768 Worklist.push_back(V);
3769 do {
3770 const Value *P = Worklist.pop_back_val();
3771 P = GetUnderlyingObject(P, DL, MaxLookup);
3772
3773 if (!Visited.insert(P).second)
3774 continue;
3775
3776 if (auto *SI = dyn_cast<SelectInst>(P)) {
3777 Worklist.push_back(SI->getTrueValue());
3778 Worklist.push_back(SI->getFalseValue());
3779 continue;
3780 }
3781
3782 if (auto *PN = dyn_cast<PHINode>(P)) {
3783 // If this PHI changes the underlying object in every iteration of the
3784 // loop, don't look through it. Consider:
3785 // int **A;
3786 // for (i) {
3787 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3788 // Curr = A[i];
3789 // *Prev, *Curr;
3790 //
3791 // Prev is tracking Curr one iteration behind so they refer to different
3792 // underlying objects.
3793 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3794 isSameUnderlyingObjectInLoop(PN, LI))
3795 for (Value *IncValue : PN->incoming_values())
3796 Worklist.push_back(IncValue);
3797 continue;
3798 }
3799
3800 Objects.push_back(P);
3801 } while (!Worklist.empty());
3802}
3803
3804/// This is the function that does the work of looking through basic
3805/// ptrtoint+arithmetic+inttoptr sequences.
3806static const Value *getUnderlyingObjectFromInt(const Value *V) {
3807 do {
3808 if (const Operator *U = dyn_cast<Operator>(V)) {
3809 // If we find a ptrtoint, we can transfer control back to the
3810 // regular getUnderlyingObjectFromInt.
3811 if (U->getOpcode() == Instruction::PtrToInt)
3812 return U->getOperand(0);
3813 // If we find an add of a constant, a multiplied value, or a phi, it's
3814 // likely that the other operand will lead us to the base
3815 // object. We don't have to worry about the case where the
3816 // object address is somehow being computed by the multiply,
3817 // because our callers only care when the result is an
3818 // identifiable object.
3819 if (U->getOpcode() != Instruction::Add ||
3820 (!isa<ConstantInt>(U->getOperand(1)) &&
3821 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3822 !isa<PHINode>(U->getOperand(1))))
3823 return V;
3824 V = U->getOperand(0);
3825 } else {
3826 return V;
3827 }
3828 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3829 } while (true);
3830}
3831
3832/// This is a wrapper around GetUnderlyingObjects and adds support for basic
3833/// ptrtoint+arithmetic+inttoptr sequences.
3834 /// It returns false if an unidentified object is found by GetUnderlyingObjects.
3835bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3836 SmallVectorImpl<Value *> &Objects,
3837 const DataLayout &DL) {
3838 SmallPtrSet<const Value *, 16> Visited;
3839 SmallVector<const Value *, 4> Working(1, V);
3840 do {
3841 V = Working.pop_back_val();
3842
3843 SmallVector<const Value *, 4> Objs;
3844 GetUnderlyingObjects(V, Objs, DL);
3845
3846 for (const Value *V : Objs) {
3847 if (!Visited.insert(V).second)
3848 continue;
3849 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3850 const Value *O =
3851 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3852 if (O->getType()->isPointerTy()) {
3853 Working.push_back(O);
3854 continue;
3855 }
3856 }
3857 // If GetUnderlyingObjects fails to find an identifiable object,
3858 // getUnderlyingObjectsForCodeGen also fails for safety.
3859 if (!isIdentifiedObject(V)) {
3860 Objects.clear();
3861 return false;
3862 }
3863 Objects.push_back(const_cast<Value *>(V));
3864 }
3865 } while (!Working.empty());
3866 return true;
3867}
3868
3869/// Return true if the only users of this pointer are lifetime markers.
3870bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3871 for (const User *U : V->users()) {
3872 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3873 if (!II) return false;
3874
3875 if (!II->isLifetimeStartOrEnd())
3876 return false;
3877 }
3878 return true;
3879}
3880
3881bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
3882 if (!LI.isUnordered())
3883 return true;
3884 const Function &F = *LI.getFunction();
3885 // Speculative load may create a race that did not exist in the source.
3886 return F.hasFnAttribute(Attribute::SanitizeThread) ||
3887 // Speculative load may load data from dirty regions.
3888 F.hasFnAttribute(Attribute::SanitizeAddress) ||
3889 F.hasFnAttribute(Attribute::SanitizeHWAddress);
3890}
3891
3892
3893bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3894 const Instruction *CtxI,
3895 const DominatorTree *DT) {
3896 const Operator *Inst = dyn_cast<Operator>(V);
3897 if (!Inst)
3898 return false;
3899
3900 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3901 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3902 if (C->canTrap())
3903 return false;
3904
3905 switch (Inst->getOpcode()) {
3906 default:
3907 return true;
3908 case Instruction::UDiv:
3909 case Instruction::URem: {
3910 // x / y is undefined if y == 0.
3911 const APInt *V;
3912 if (match(Inst->getOperand(1), m_APInt(V)))
3913 return *V != 0;
3914 return false;
3915 }
3916 case Instruction::SDiv:
3917 case Instruction::SRem: {
3918 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3919 const APInt *Numerator, *Denominator;
3920 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3921 return false;
3922 // We cannot hoist this division if the denominator is 0.
3923 if (*Denominator == 0)
3924 return false;
3925 // It's safe to hoist if the denominator is not 0 or -1.
3926 if (*Denominator != -1)
3927 return true;
3928 // At this point we know that the denominator is -1. It is safe to hoist as
3929 // long as we know that the numerator is not INT_MIN.
3930 if (match(Inst->getOperand(0), m_APInt(Numerator)))
3931 return !Numerator->isMinSignedValue();
3932 // The numerator *might* be MinSignedValue.
3933 return false;
3934 }
3935 case Instruction::Load: {
3936 const LoadInst *LI = cast<LoadInst>(Inst);
3937 if (mustSuppressSpeculation(*LI))
3938 return false;
3939 const DataLayout &DL = LI->getModule()->getDataLayout();
3940 return isDereferenceableAndAlignedPointer(
3941 LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
3942 DL, CtxI, DT);
3943 }
3944 case Instruction::Call: {
3945 auto *CI = cast<const CallInst>(Inst);
3946 const Function *Callee = CI->getCalledFunction();
3947
3948 // The called function could have undefined behavior or side-effects, even
3949 // if marked readnone nounwind.
3950 return Callee && Callee->isSpeculatable();
3951 }
3952 case Instruction::VAArg:
3953 case Instruction::Alloca:
3954 case Instruction::Invoke:
3955 case Instruction::CallBr:
3956 case Instruction::PHI:
3957 case Instruction::Store:
3958 case Instruction::Ret:
3959 case Instruction::Br:
3960 case Instruction::IndirectBr:
3961 case Instruction::Switch:
3962 case Instruction::Unreachable:
3963 case Instruction::Fence:
3964 case Instruction::AtomicRMW:
3965 case Instruction::AtomicCmpXchg:
3966 case Instruction::LandingPad:
3967 case Instruction::Resume:
3968 case Instruction::CatchSwitch:
3969 case Instruction::CatchPad:
3970 case Instruction::CatchRet:
3971 case Instruction::CleanupPad:
3972 case Instruction::CleanupRet:
3973 return false; // Misc instructions which have effects
3974 }
3975}
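// For example (illustrative): 'udiv i32 %x, 7' is safe to speculate because
// the divisor is a known nonzero constant, 'udiv i32 %x, %y' is not, and
// 'sdiv i32 %x, -1' is speculatable only when the numerator is a constant
// known not to be INT_MIN.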
3976
3977bool llvm::mayBeMemoryDependent(const Instruction &I) {
3978 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3979}
3980
3981/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
3982static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
3983 switch (OR) {
3984 case ConstantRange::OverflowResult::MayOverflow:
3985 return OverflowResult::MayOverflow;
3986 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
3987 return OverflowResult::AlwaysOverflowsLow;
3988 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
3989 return OverflowResult::AlwaysOverflowsHigh;
3990 case ConstantRange::OverflowResult::NeverOverflows:
3991 return OverflowResult::NeverOverflows;
3992 }
3993 llvm_unreachable("Unknown OverflowResult");
3995
3996/// Combine constant ranges from computeConstantRange() and computeKnownBits().
3997static ConstantRange computeConstantRangeIncludingKnownBits(
3998 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
3999 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4000 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4001 KnownBits Known = computeKnownBits(
4002 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4003 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4004 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4005 ConstantRange::PreferredRangeType RangeType =
4006 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4007 return CR1.intersectWith(CR2, RangeType);
4008}
4009
4010OverflowResult llvm::computeOverflowForUnsignedMul(
4011 const Value *LHS, const Value *RHS, const DataLayout &DL,
4012 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4013 bool UseInstrInfo) {
4014 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4015 nullptr, UseInstrInfo);
4016 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4017 nullptr, UseInstrInfo);
4018 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4019 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4020 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4021}
4022
4023OverflowResult
4024llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4025 const DataLayout &DL, AssumptionCache *AC,
4026 const Instruction *CxtI,
4027 const DominatorTree *DT, bool UseInstrInfo) {
4028 // Multiplying n * m significant bits yields a result of n + m significant
4029 // bits. If the total number of significant bits does not exceed the
4030 // result bit width (minus 1), there is no overflow.
4031 // This means if we have enough leading sign bits in the operands
4032 // we can guarantee that the result does not overflow.
4033 // Ref: "Hacker's Delight" by Henry Warren
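// Worked example (editorial, not in the source): for a mul of two i16 values
// where ComputeNumSignBits returns 9 for each operand, SignBits = 18 > 16 + 1.
// Each operand then fits in 8 significant bits, the product magnitude is at
// most 2^7 * 2^7 = 2^14, and the i16 result cannot overflow.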
4034 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4035
4036 // Note that underestimating the number of sign bits gives a more
4037 // conservative answer.
4038 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4039 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4040
4041 // First handle the easy case: if we have enough sign bits there's
4042 // definitely no overflow.
4043 if (SignBits > BitWidth + 1)
4044 return OverflowResult::NeverOverflows;
4045
4046 // There are two ambiguous cases where there can be no overflow:
4047 // SignBits == BitWidth + 1 and
4048 // SignBits == BitWidth
4049   // The second case is difficult to check, so we only handle the
4050   // first case.
4051 if (SignBits == BitWidth + 1) {
4052 // It overflows only when both arguments are negative and the true
4053 // product is exactly the minimum negative number.
4054 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4055 // For simplicity we just check if at least one side is not negative.
4056 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4057 nullptr, UseInstrInfo);
4058 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4059 nullptr, UseInstrInfo);
4060 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4061 return OverflowResult::NeverOverflows;
4062 }
4063 return OverflowResult::MayOverflow;
4064}
4065
4066OverflowResult llvm::computeOverflowForUnsignedAdd(
4067 const Value *LHS, const Value *RHS, const DataLayout &DL,
4068 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4069 bool UseInstrInfo) {
4070 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4071 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4072 nullptr, UseInstrInfo);
4073 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4074 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4075 nullptr, UseInstrInfo);
4076 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4077}
4078
4079static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4080 const Value *RHS,
4081 const AddOperator *Add,
4082 const DataLayout &DL,
4083 AssumptionCache *AC,
4084 const Instruction *CxtI,
4085 const DominatorTree *DT) {
4086 if (Add && Add->hasNoSignedWrap()) {
4087 return OverflowResult::NeverOverflows;
4088 }
4089
4090 // If LHS and RHS each have at least two sign bits, the addition will look
4091 // like
4092 //
4093 // XX..... +
4094 // YY.....
4095 //
4096 // If the carry into the most significant position is 0, X and Y can't both
4097 // be 1 and therefore the carry out of the addition is also 0.
4098 //
4099 // If the carry into the most significant position is 1, X and Y can't both
4100 // be 0 and therefore the carry out of the addition is also 1.
4101 //
4102 // Since the carry into the most significant position is always equal to
4103 // the carry out of the addition, there is no signed overflow.
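// Worked example (editorial): in i4, 0b0011 + 0b0010 = 0b0101. Both operands
// have at least two sign bits (their top two bits agree), the carry into the
// sign position equals the carry out, and the 3 + 2 = 5 result is exact with
// no signed overflow.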
4104 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4105 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4106 return OverflowResult::NeverOverflows;
4107
4108 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4109 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4110 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4111 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4112 OverflowResult OR =
4113 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4114 if (OR != OverflowResult::MayOverflow)
4115 return OR;
4116
4117   // The remaining code needs Add to be available, so return early if it is not.
4118 if (!Add)
4119 return OverflowResult::MayOverflow;
4120
4121 // If the sign of Add is the same as at least one of the operands, this add
4122 // CANNOT overflow. If this can be determined from the known bits of the
4123 // operands the above signedAddMayOverflow() check will have already done so.
4124 // The only other way to improve on the known bits is from an assumption, so
4125 // call computeKnownBitsFromAssume() directly.
4126 bool LHSOrRHSKnownNonNegative =
4127 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4128 bool LHSOrRHSKnownNegative =
4129 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4130 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4131 KnownBits AddKnown(LHSRange.getBitWidth());
4132 computeKnownBitsFromAssume(
4133 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4134 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4135 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4136 return OverflowResult::NeverOverflows;
4137 }
4138
4139 return OverflowResult::MayOverflow;
4140}
4141
4142OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4143 const Value *RHS,
4144 const DataLayout &DL,
4145 AssumptionCache *AC,
4146 const Instruction *CxtI,
4147 const DominatorTree *DT) {
4148 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4149 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4150 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4151 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4152 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4153}
4154
4155OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4156 const Value *RHS,
4157 const DataLayout &DL,
4158 AssumptionCache *AC,
4159 const Instruction *CxtI,
4160 const DominatorTree *DT) {
4161 // If LHS and RHS each have at least two sign bits, the subtraction
4162 // cannot overflow.
4163 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4164 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4165 return OverflowResult::NeverOverflows;
4166
4167 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4168 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4169 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4170 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4171 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4172}
4173
4174bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4175 const DominatorTree &DT) {
4176 SmallVector<const BranchInst *, 2> GuardingBranches;
4177 SmallVector<const ExtractValueInst *, 2> Results;
4178
4179 for (const User *U : WO->users()) {
4180 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4181       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4182
4183 if (EVI->getIndices()[0] == 0)
4184 Results.push_back(EVI);
4185 else {
4186         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4187
4188 for (const auto *U : EVI->users())
4189 if (const auto *B = dyn_cast<BranchInst>(U)) {
4190             assert(B->isConditional() && "How else is it using an i1?");
4191 GuardingBranches.push_back(B);
4192 }
4193 }
4194 } else {
4195 // We are using the aggregate directly in a way we don't want to analyze
4196 // here (storing it to a global, say).
4197 return false;
4198 }
4199 }
4200
4201 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4202 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4203 if (!NoWrapEdge.isSingleEdge())
4204 return false;
4205
4206 // Check if all users of the add are provably no-wrap.
4207 for (const auto *Result : Results) {
4208       // If the extractvalue itself is not executed on overflow, then we don't
4209 // need to check each use separately, since domination is transitive.
4210 if (DT.dominates(NoWrapEdge, Result->getParent()))
4211 continue;
4212
4213 for (auto &RU : Result->uses())
4214 if (!DT.dominates(NoWrapEdge, RU))
4215 return false;
4216 }
4217
4218 return true;
4219 };
4220
4221 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4222}
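// Editorial illustration of the guarded pattern recognized above (a sketch,
// not taken from the source):
//   %s  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
//   %ov = extractvalue { i32, i1 } %s, 1
//   br i1 %ov, label %trap, label %cont
// cont:
//   %v  = extractvalue { i32, i1 } %s, 0   ; dominated by the no-wrap edge
// The false edge of the branch dominates every use of %v, so the arithmetic
// is known not to wrap.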
4223
4224
4225OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4226 const DataLayout &DL,
4227 AssumptionCache *AC,
4228 const Instruction *CxtI,
4229 const DominatorTree *DT) {
4230 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4231 Add, DL, AC, CxtI, DT);
4232}
4233
4234OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4235 const Value *RHS,
4236 const DataLayout &DL,
4237 AssumptionCache *AC,
4238 const Instruction *CxtI,
4239 const DominatorTree *DT) {
4240 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4241}
4242
4243bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4244 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
4245 // of time because it's possible for another thread to interfere with it for an
4246 // arbitrary length of time, but programs aren't allowed to rely on that.
4247
4248 // If there is no successor, then execution can't transfer to it.
4249 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4250 return !CRI->unwindsToCaller();
4251 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4252 return !CatchSwitch->unwindsToCaller();
4253 if (isa<ResumeInst>(I))
4254 return false;
4255 if (isa<ReturnInst>(I))
4256 return false;
4257 if (isa<UnreachableInst>(I))
4258 return false;
4259
4260 // Calls can throw, or contain an infinite loop, or kill the process.
4261 if (auto CS = ImmutableCallSite(I)) {
4262 // Call sites that throw have implicit non-local control flow.
4263 if (!CS.doesNotThrow())
4264 return false;
4265
4266     // A function that doesn't throw and has the "willreturn" attribute will
4267     // always return.
4268 if (CS.hasFnAttr(Attribute::WillReturn))
4269 return true;
4270
4271 // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4272 // etc. and thus not return. However, LLVM already assumes that
4273 //
4274 // - Thread exiting actions are modeled as writes to memory invisible to
4275 // the program.
4276 //
4277 // - Loops that don't have side effects (side effects are volatile/atomic
4278 // stores and IO) always terminate (see http://llvm.org/PR965).
4279 // Furthermore IO itself is also modeled as writes to memory invisible to
4280 // the program.
4281 //
4282 // We rely on those assumptions here, and use the memory effects of the call
4283 // target as a proxy for checking that it always returns.
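// Editorial example (a sketch under the assumptions above): a call such as
//   declare i32 @f(i32) nounwind readnone
//   %r = call i32 @f(i32 %x)
// neither throws nor writes observable memory, so onlyReadsMemory() holds and
// the call is treated as guaranteed to return.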
4284
4285 // FIXME: This isn't aggressive enough; a call which only writes to a global
4286 // is guaranteed to return.
4287 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory();
4288 }
4289
4290 // Other instructions return normally.
4291 return true;
4292}
4293
4294bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4295   // TODO: This is slightly conservative for invoke instructions, since exiting
4296   // via an exception *is* normal control flow for them.
4297 for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4298 if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4299 return false;
4300 return true;
4301}
4302
4303bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4304 const Loop *L) {
4305 // The loop header is guaranteed to be executed for every iteration.
4306 //
4307 // FIXME: Relax this constraint to cover all basic blocks that are
4308 // guaranteed to be executed at every iteration.
4309 if (I->getParent() != L->getHeader()) return false;
4310
4311 for (const Instruction &LI : *L->getHeader()) {
4312 if (&LI == I) return true;
4313 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4314 }
4315   llvm_unreachable("Instruction not contained in its own parent basic block.");
4316}
4317
4318bool llvm::propagatesFullPoison(const Instruction *I) {
4319 // TODO: This should include all instructions apart from phis, selects and
4320 // call-like instructions.
4321 switch (I->getOpcode()) {
4322 case Instruction::Add:
4323 case Instruction::Sub:
4324 case Instruction::Xor:
4325 case Instruction::Trunc:
4326 case Instruction::BitCast:
4327 case Instruction::AddrSpaceCast:
4328 case Instruction::Mul:
4329 case Instruction::Shl:
4330 case Instruction::GetElementPtr:
4331 // These operations all propagate poison unconditionally. Note that poison
4332 // is not any particular value, so xor or subtraction of poison with
4333 // itself still yields poison, not zero.
4334 return true;
4335
4336 case Instruction::AShr:
4337 case Instruction::SExt:
4338 // For these operations, one bit of the input is replicated across
4339 // multiple output bits. A replicated poison bit is still poison.
4340 return true;
4341
4342 case Instruction::ICmp:
4343 // Comparing poison with any value yields poison. This is why, for
4344 // instance, x s< (x +nsw 1) can be folded to true.
4345 return true;
4346
4347 default:
4348 return false;
4349 }
4350}
4351
4352const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4353 switch (I->getOpcode()) {
4354 case Instruction::Store:
4355 return cast<StoreInst>(I)->getPointerOperand();
4356
4357 case Instruction::Load:
4358 return cast<LoadInst>(I)->getPointerOperand();
4359
4360 case Instruction::AtomicCmpXchg:
4361 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4362
4363 case Instruction::AtomicRMW:
4364 return cast<AtomicRMWInst>(I)->getPointerOperand();
4365
4366 case Instruction::UDiv:
4367 case Instruction::SDiv:
4368 case Instruction::URem:
4369 case Instruction::SRem:
4370 return I->getOperand(1);
4371
4372 default:
4373 // Note: It's really tempting to think that a conditional branch or
4374 // switch should be listed here, but that's incorrect. It's not
4375 // branching off of poison which is UB, it is executing a side effecting
4376 // instruction which follows the branch.
4377 return nullptr;
4378 }
4379}
4380
4381bool llvm::mustTriggerUB(const Instruction *I,
4382 const SmallSet<const Value *, 16>& KnownPoison) {
4383 auto *NotPoison = getGuaranteedNonFullPoisonOp(I);
4384 return (NotPoison && KnownPoison.count(NotPoison));
4385}
4386
4387
4388bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4389 // We currently only look for uses of poison values within the same basic
4390 // block, as that makes it easier to guarantee that the uses will be
4391 // executed given that PoisonI is executed.
4392 //
4393 // FIXME: Expand this to consider uses beyond the same basic block. To do
4394 // this, look out for the distinction between post-dominance and strong
4395 // post-dominance.
4396 const BasicBlock *BB = PoisonI->getParent();
4397
4398 // Set of instructions that we have proved will yield poison if PoisonI
4399 // does.
4400 SmallSet<const Value *, 16> YieldsPoison;
4401 SmallSet<const BasicBlock *, 4> Visited;
4402 YieldsPoison.insert(PoisonI);
4403 Visited.insert(PoisonI->getParent());
4404
4405 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4406
4407 unsigned Iter = 0;
4408 while (Iter++ < MaxDepth) {
4409 for (auto &I : make_range(Begin, End)) {
4410 if (&I != PoisonI) {
4411 if (mustTriggerUB(&I, YieldsPoison))
4412 return true;
4413 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4414 return false;
4415 }
4416
4417 // Mark poison that propagates from I through uses of I.
4418 if (YieldsPoison.count(&I)) {
4419 for (const User *User : I.users()) {
4420 const Instruction *UserI = cast<Instruction>(User);
4421 if (propagatesFullPoison(UserI))
4422 YieldsPoison.insert(User);
4423 }
4424 }
4425 }
4426
4427 if (auto *NextBB = BB->getSingleSuccessor()) {
4428 if (Visited.insert(NextBB).second) {
4429 BB = NextBB;
4430 Begin = BB->getFirstNonPHI()->getIterator();
4431 End = BB->end();
4432 continue;
4433 }
4434 }
4435
4436 break;
4437 }
4438 return false;
4439}
4440
4441static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4442 if (FMF.noNaNs())
4443 return true;
4444
4445 if (auto *C = dyn_cast<ConstantFP>(V))
4446 return !C->isNaN();
4447
4448 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4449 if (!C->getElementType()->isFloatingPointTy())
4450 return false;
4451 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4452 if (C->getElementAsAPFloat(I).isNaN())
4453 return false;
4454 }
4455 return true;
4456 }
4457
4458 return false;
4459}
4460
4461static bool isKnownNonZero(const Value *V) {
4462 if (auto *C = dyn_cast<ConstantFP>(V))
4463 return !C->isZero();
4464
4465 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4466 if (!C->getElementType()->isFloatingPointTy())
4467 return false;
4468 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4469 if (C->getElementAsAPFloat(I).isZero())
4470 return false;
4471 }
4472 return true;
4473 }
4474
4475 return false;
4476}
4477
4478 /// Match a clamp pattern for float types, without caring about NaNs or signed
4479 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
4480 /// function recognizes whether it can be substituted by a "canonical" min/max
4481 /// pattern.
4482static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4483 Value *CmpLHS, Value *CmpRHS,
4484 Value *TrueVal, Value *FalseVal,
4485 Value *&LHS, Value *&RHS) {
4486 // Try to match
4487 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4488 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4489   // and return a description of the outer Max/Min.
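// Editorial example: X < 1.0 ? 1.0 : min(X, 5.0) fits the first form with
// C1 = 1.0 and C2 = 5.0; since C1 < C2 and both are finite, it is recognized
// as Max(1.0, min(X, 5.0)), i.e. SPF_FMAXNUM over the select operands.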
4490
4491 // First, check if select has inverse order:
4492 if (CmpRHS == FalseVal) {
4493 std::swap(TrueVal, FalseVal);
4494 Pred = CmpInst::getInversePredicate(Pred);
4495 }
4496
4497 // Assume success now. If there's no match, callers should not use these anyway.
4498 LHS = TrueVal;
4499 RHS = FalseVal;
4500
4501 const APFloat *FC1;
4502 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4503 return {SPF_UNKNOWN, SPNB_NA, false};
4504
4505 const APFloat *FC2;
4506 switch (Pred) {
4507 case CmpInst::FCMP_OLT:
4508 case CmpInst::FCMP_OLE:
4509 case CmpInst::FCMP_ULT:
4510 case CmpInst::FCMP_ULE:
4511 if (match(FalseVal,
4512 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4513 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4514 FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4515 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4516 break;
4517 case CmpInst::FCMP_OGT:
4518 case CmpInst::FCMP_OGE:
4519 case CmpInst::FCMP_UGT:
4520 case CmpInst::FCMP_UGE:
4521 if (match(FalseVal,
4522 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4523 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4524 FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4525 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4526 break;
4527 default:
4528 break;
4529 }
4530
4531 return {SPF_UNKNOWN, SPNB_NA, false};
4532}
4533
4534/// Recognize variations of:
4535/// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4536static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4537 Value *CmpLHS, Value *CmpRHS,
4538 Value *TrueVal, Value *FalseVal) {
4539 // Swap the select operands and predicate to match the patterns below.
4540 if (CmpRHS != TrueVal) {
4541 Pred = ICmpInst::getSwappedPredicate(Pred);
4542 std::swap(TrueVal, FalseVal);
4543 }
4544 const APInt *C1;
4545 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4546 const APInt *C2;
4547 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4548 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4549 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4550 return {SPF_SMAX, SPNB_NA, false};
4551
4552 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4553 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4554 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4555 return {SPF_SMIN, SPNB_NA, false};
4556
4557 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4558 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4559 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4560 return {SPF_UMAX, SPNB_NA, false};
4561
4562 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4563 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4564 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4565 return {SPF_UMIN, SPNB_NA, false};
4566 }
4567 return {SPF_UNKNOWN, SPNB_NA, false};
4568}
4569
4570/// Recognize variations of:
4571/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4572static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4573 Value *CmpLHS, Value *CmpRHS,
4574 Value *TVal, Value *FVal,
4575 unsigned Depth) {
4576 // TODO: Allow FP min/max with nnan/nsz.
4577   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4578
4579 Value *A = nullptr, *B = nullptr;
4580 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4581 if (!SelectPatternResult::isMinOrMax(L.Flavor))
4582 return {SPF_UNKNOWN, SPNB_NA, false};
4583
4584 Value *C = nullptr, *D = nullptr;
4585 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4586 if (L.Flavor != R.Flavor)
4587 return {SPF_UNKNOWN, SPNB_NA, false};
4588
4589 // We have something like: x Pred y ? min(a, b) : min(c, d).
4590 // Try to match the compare to the min/max operations of the select operands.
4591 // First, make sure we have the right compare predicate.
4592 switch (L.Flavor) {
4593 case SPF_SMIN:
4594 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4595 Pred = ICmpInst::getSwappedPredicate(Pred);
4596 std::swap(CmpLHS, CmpRHS);
4597 }
4598 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4599 break;
4600 return {SPF_UNKNOWN, SPNB_NA, false};
4601 case SPF_SMAX:
4602 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4603 Pred = ICmpInst::getSwappedPredicate(Pred);
4604 std::swap(CmpLHS, CmpRHS);
4605 }
4606 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4607 break;
4608 return {SPF_UNKNOWN, SPNB_NA, false};
4609 case SPF_UMIN:
4610 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4611 Pred = ICmpInst::getSwappedPredicate(Pred);
4612 std::swap(CmpLHS, CmpRHS);
4613 }
4614 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4615 break;
4616 return {SPF_UNKNOWN, SPNB_NA, false};
4617 case SPF_UMAX:
4618 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4619 Pred = ICmpInst::getSwappedPredicate(Pred);
4620 std::swap(CmpLHS, CmpRHS);
4621 }
4622 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4623 break;
4624 return {SPF_UNKNOWN, SPNB_NA, false};
4625 default:
4626 return {SPF_UNKNOWN, SPNB_NA, false};
4627 }
4628
4629 // If there is a common operand in the already matched min/max and the other
4630 // min/max operands match the compare operands (either directly or inverted),
4631 // then this is min/max of the same flavor.
4632
4633 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4634 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4635 if (D == B) {
4636 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4637 match(A, m_Not(m_Specific(CmpRHS)))))
4638 return {L.Flavor, SPNB_NA, false};
4639 }
4640 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4641 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4642 if (C == B) {
4643 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4644 match(A, m_Not(m_Specific(CmpRHS)))))
4645 return {L.Flavor, SPNB_NA, false};
4646 }
4647 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4648 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4649 if (D == A) {
4650 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4651 match(B, m_Not(m_Specific(CmpRHS)))))
4652 return {L.Flavor, SPNB_NA, false};
4653 }
4654 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4655 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4656 if (C == A) {
4657 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4658 match(B, m_Not(m_Specific(CmpRHS)))))
4659 return {L.Flavor, SPNB_NA, false};
4660 }
4661
4662 return {SPF_UNKNOWN, SPNB_NA, false};
4663}
4664
4665/// Match non-obvious integer minimum and maximum sequences.
4666static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4667 Value *CmpLHS, Value *CmpRHS,
4668 Value *TrueVal, Value *FalseVal,
4669 Value *&LHS, Value *&RHS,
4670 unsigned Depth) {
4671 // Assume success. If there's no match, callers should not use these anyway.
4672 LHS = TrueVal;
4673 RHS = FalseVal;
4674
4675 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4676 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4677 return SPR;
4678
4679 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4680 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4681 return SPR;
4682
4683 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4684 return {SPF_UNKNOWN, SPNB_NA, false};
4685
4686 // Z = X -nsw Y
4687 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4688 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4689 if (match(TrueVal, m_Zero()) &&
4690 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4691 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4692
4693 // Z = X -nsw Y
4694 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4695 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4696 if (match(FalseVal, m_Zero()) &&
4697 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4698 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4699
4700 const APInt *C1;
4701 if (!match(CmpRHS, m_APInt(C1)))
4702 return {SPF_UNKNOWN, SPNB_NA, false};
4703
4704 // An unsigned min/max can be written with a signed compare.
4705 const APInt *C2;
4706 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4707 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4708 // Is the sign bit set?
4709 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4710 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4711 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4712 C2->isMaxSignedValue())
4713 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4714
4715 // Is the sign bit clear?
4716 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4717 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4718 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4719 C2->isMinSignedValue())
4720 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4721 }
4722
4723 // Look through 'not' ops to find disguised signed min/max.
4724 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4725 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4726 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4727 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4728 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4729
4730 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4731 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4732 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4733 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4734 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4735
4736 return {SPF_UNKNOWN, SPNB_NA, false};
4737}
4738
4739bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
4740   assert(X && Y && "Invalid operand");
4741
4742 // X = sub (0, Y) || X = sub nsw (0, Y)
4743 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
4744 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
4745 return true;
4746
4747 // Y = sub (0, X) || Y = sub nsw (0, X)
4748 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
4749 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
4750 return true;
4751
4752 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
4753 Value *A, *B;
4754 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
4755 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
4756 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
4757 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
4758}
4759
4760static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4761 FastMathFlags FMF,
4762 Value *CmpLHS, Value *CmpRHS,
4763 Value *TrueVal, Value *FalseVal,
4764 Value *&LHS, Value *&RHS,
4765 unsigned Depth) {
4766 if (CmpInst::isFPPredicate(Pred)) {
4767 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
4768 // 0.0 operand, set the compare's 0.0 operands to that same value for the
4769 // purpose of identifying min/max. Disregard vector constants with undefined
4770   // elements because those cannot be back-propagated for analysis.
4771 Value *OutputZeroVal = nullptr;
4772 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
4773 !cast<Constant>(TrueVal)->containsUndefElement())
4774 OutputZeroVal = TrueVal;
4775 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
4776 !cast<Constant>(FalseVal)->containsUndefElement())
4777 OutputZeroVal = FalseVal;
4778
4779 if (OutputZeroVal) {
4780 if (match(CmpLHS, m_AnyZeroFP()))
4781 CmpLHS = OutputZeroVal;
4782 if (match(CmpRHS, m_AnyZeroFP()))
4783 CmpRHS = OutputZeroVal;
4784 }
4785 }
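// Editorial example of the zero substitution above: in
//   (fcmp ole float %x, 0.0) ? -0.0 : %x
// the select's -0.0 operand replaces the compare's 0.0, so the min/max
// matching below sees one consistent zero value.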
4786
4787 LHS = CmpLHS;
4788 RHS = CmpRHS;
4789
4790 // Signed zero may return inconsistent results between implementations.
4791 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4792 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4793 // Therefore, we behave conservatively and only proceed if at least one of the
4794 // operands is known to not be zero or if we don't care about signed zero.
4795 switch (Pred) {
4796 default: break;
4797 // FIXME: Include OGT/OLT/UGT/ULT.
4798 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4799 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4800 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4801 !isKnownNonZero(CmpRHS))
4802 return {SPF_UNKNOWN, SPNB_NA, false};
4803 }
4804
4805 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4806 bool Ordered = false;
4807
4808 // When given one NaN and one non-NaN input:
4809 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4810 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4811 // ordered comparison fails), which could be NaN or non-NaN.
4812 // so here we discover exactly what NaN behavior is required/accepted.
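// Editorial example: if %a may be NaN and %b is known non-NaN, the ordered
// compare (fcmp olt %a, %b) is false for NaN %a, so (%a < %b ? %a : %b)
// returns the non-NaN %b (SPNB_RETURNS_OTHER), while the swapped construction
// (%b < %a ? %b : %a) would return the NaN.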
4813 if (CmpInst::isFPPredicate(Pred)) {
4814 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4815 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4816
4817 if (LHSSafe && RHSSafe) {
4818 // Both operands are known non-NaN.
4819 NaNBehavior = SPNB_RETURNS_ANY;
4820 } else if (CmpInst::isOrdered(Pred)) {
4821 // An ordered comparison will return false when given a NaN, so it
4822 // returns the RHS.
4823 Ordered = true;
4824 if (LHSSafe)
4825 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4826 NaNBehavior = SPNB_RETURNS_NAN;
4827 else if (RHSSafe)
4828 NaNBehavior = SPNB_RETURNS_OTHER;
4829 else
4830 // Completely unsafe.
4831 return {SPF_UNKNOWN, SPNB_NA, false};
4832 } else {
4833 Ordered = false;
4834 // An unordered comparison will return true when given a NaN, so it
4835 // returns the LHS.
4836 if (LHSSafe)
4837 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4838 NaNBehavior = SPNB_RETURNS_OTHER;
4839 else if (RHSSafe)
4840 NaNBehavior = SPNB_RETURNS_NAN;
4841 else
4842 // Completely unsafe.
4843 return {SPF_UNKNOWN, SPNB_NA, false};
4844 }
4845 }
4846
4847 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4848 std::swap(CmpLHS, CmpRHS);
4849 Pred = CmpInst::getSwappedPredicate(Pred);
4850 if (NaNBehavior == SPNB_RETURNS_NAN)
4851 NaNBehavior = SPNB_RETURNS_OTHER;
4852 else if (NaNBehavior == SPNB_RETURNS_OTHER)
4853 NaNBehavior = SPNB_RETURNS_NAN;
4854 Ordered = !Ordered;
4855 }
4856
4857 // ([if]cmp X, Y) ? X : Y
4858 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4859 switch (Pred) {
4860 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4861 case ICmpInst::ICMP_UGT:
4862 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4863 case ICmpInst::ICMP_SGT:
4864 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4865 case ICmpInst::ICMP_ULT:
4866 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4867 case ICmpInst::ICMP_SLT:
4868 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4869 case FCmpInst::FCMP_UGT:
4870 case FCmpInst::FCMP_UGE:
4871 case FCmpInst::FCMP_OGT:
4872 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4873 case FCmpInst::FCMP_ULT:
4874 case FCmpInst::FCMP_ULE:
4875 case FCmpInst::FCMP_OLT:
4876 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4877 }
4878 }
4879
4880 if (isKnownNegation(TrueVal, FalseVal)) {
4881 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
4882 // match against either LHS or sext(LHS).
4883 auto MaybeSExtCmpLHS =
4884 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
4885 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
4886 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
4887 if (match(TrueVal, MaybeSExtCmpLHS)) {
4888 // Set the return values. If the compare uses the negated value (-X >s 0),
4889 // swap the return values because the negated value is always 'RHS'.
4890 LHS = TrueVal;
4891 RHS = FalseVal;
4892 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
4893 std::swap(LHS, RHS);
4894
4895 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
4896 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
4897 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4898 return {SPF_ABS, SPNB_NA, false};
4899
4900 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
4901 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
4902 return {SPF_ABS, SPNB_NA, false};
4903
4904 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
4905 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
4906 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4907 return {SPF_NABS, SPNB_NA, false};
4908 }
4909 else if (match(FalseVal, MaybeSExtCmpLHS)) {
4910 // Set the return values. If the compare uses the negated value (-X >s 0),
4911 // swap the return values because the negated value is always 'RHS'.
4912 LHS = FalseVal;
4913 RHS = TrueVal;
4914 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
4915 std::swap(LHS, RHS);
4916
4917 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
4918 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
4919 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4920 return {SPF_NABS, SPNB_NA, false};
4921
4922 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
4923 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
4924 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4925 return {SPF_ABS, SPNB_NA, false};
4926 }
4927 }
4928
4929 if (CmpInst::isIntPredicate(Pred))
4930 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4931
4932 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4933   // may return either -0.0 or 0.0, so the fcmp/select pair has stricter
4934   // semantics than minNum. Be conservative in such cases.
4935 if (NaNBehavior != SPNB_RETURNS_ANY ||
4936 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4937 !isKnownNonZero(CmpRHS)))
4938 return {SPF_UNKNOWN, SPNB_NA, false};
4939
4940 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4941}
4942
4943/// Helps to match a select pattern in case of a type mismatch.
4944///
4945 /// The function handles the case when the types of the true and false values of
4946 /// a select instruction differ from the type of the cmp instruction operands because
4947/// of a cast instruction. The function checks if it is legal to move the cast
4948/// operation after "select". If yes, it returns the new second value of
4949/// "select" (with the assumption that cast is moved):
4950/// 1. As operand of cast instruction when both values of "select" are same cast
4951/// instructions.
4952/// 2. As restored constant (by applying reverse cast operation) when the first
4953/// value of the "select" is a cast operation and the second value is a
4954/// constant.
4955 /// NOTE: We return only the new second value because the first value can be
4956 /// accessed as the operand of the cast instruction.
4957static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4958 Instruction::CastOps *CastOp) {
4959 auto *Cast1 = dyn_cast<CastInst>(V1);
4960 if (!Cast1)
4961 return nullptr;
4962
4963 *CastOp = Cast1->getOpcode();
4964 Type *SrcTy = Cast1->getSrcTy();
4965 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4966 // If V1 and V2 are both the same cast from the same type, look through V1.
4967 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4968 return Cast2->getOperand(0);
4969 return nullptr;
4970 }
4971
4972 auto *C = dyn_cast<Constant>(V2);
4973 if (!C)
4974 return nullptr;
4975
4976 Constant *CastedTo = nullptr;
4977 switch (*CastOp) {
4978 case Instruction::ZExt:
4979 if (CmpI->isUnsigned())
4980 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4981 break;
4982 case Instruction::SExt:
4983 if (CmpI->isSigned())
4984 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4985 break;
4986 case Instruction::Trunc:
4987 Constant *CmpConst;
4988 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4989 CmpConst->getType() == SrcTy) {
4990 // Here we have the following case:
4991 //
4992 // %cond = cmp iN %x, CmpConst
4993 // %tr = trunc iN %x to iK
4994     // %narrowsel = select i1 %cond, iK %tr, iK C
4995 //
4996 // We can always move trunc after select operation:
4997 //
4998 // %cond = cmp iN %x, CmpConst
4999 // %widesel = select i1 %cond, iN %x, iN CmpConst
5000 // %tr = trunc iN %widesel to iK
5001 //
5002 // Note that C could be extended in any way because we don't care about
5003 // upper bits after truncation. It can't be abs pattern, because it would
5004 // look like:
5005 //
5006 // select i1 %cond, x, -x.
5007 //
5008       // So only a min/max pattern could be matched. Such a match requires the
5009       // widened C == CmpConst. That is why we set the widened C = CmpConst; the
5010       // condition trunc(CmpConst) == C is checked below.
5011 CastedTo = CmpConst;
5012 } else {
5013 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5014 }
5015 break;
5016 case Instruction::FPTrunc:
5017 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5018 break;
5019 case Instruction::FPExt:
5020 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5021 break;
5022 case Instruction::FPToUI:
5023 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5024 break;
5025 case Instruction::FPToSI:
5026 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5027 break;
5028 case Instruction::UIToFP:
5029 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5030 break;
5031 case Instruction::SIToFP:
5032 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5033 break;
5034 default:
5035 break;
5036 }
5037
5038 if (!CastedTo)
5039 return nullptr;
5040
5041 // Make sure the cast doesn't lose any information.
5042 Constant *CastedBack =
5043 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5044 if (CastedBack != C)
5045 return nullptr;
5046
5047 return CastedTo;
5048}
5049
5050SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5051 Instruction::CastOps *CastOp,
5052 unsigned Depth) {
5053 if (Depth >= MaxDepth)
5054 return {SPF_UNKNOWN, SPNB_NA, false};
5055
5056 SelectInst *SI = dyn_cast<SelectInst>(V);
5057 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5058
5059 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5060 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5061
5062 Value *TrueVal = SI->getTrueValue();
5063 Value *FalseVal = SI->getFalseValue();
5064
5065 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5066 CastOp, Depth);
5067}
5068
5069SelectPatternResult llvm::matchDecomposedSelectPattern(
5070 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5071 Instruction::CastOps *CastOp, unsigned Depth) {
5072 CmpInst::Predicate Pred = CmpI->getPredicate();
5073 Value *CmpLHS = CmpI->getOperand(0);
5074 Value *CmpRHS = CmpI->getOperand(1);
5075 FastMathFlags FMF;
5076 if (isa<FPMathOperator>(CmpI))
5077 FMF = CmpI->getFastMathFlags();
5078
5079 // Bail out early.
5080 if (CmpI->isEquality())
5081 return {SPF_UNKNOWN, SPNB_NA, false};
5082
5083 // Deal with type mismatches.
5084 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5085 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5086 // If this is a potential fmin/fmax with a cast to integer, then ignore
5087 // -0.0 because there is no corresponding integer value.
5088 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5089 FMF.setNoSignedZeros();
5090 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5091 cast<CastInst>(TrueVal)->getOperand(0), C,
5092 LHS, RHS, Depth);
5093 }
5094 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5095 // If this is a potential fmin/fmax with a cast to integer, then ignore
5096 // -0.0 because there is no corresponding integer value.
5097 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5098 FMF.setNoSignedZeros();
5099 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5100 C, cast<CastInst>(FalseVal)->getOperand(0),
5101 LHS, RHS, Depth);
5102 }
5103 }
5104 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5105 LHS, RHS, Depth);
5106}
5107
5108CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5109 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5110 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5111 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5112 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5113 if (SPF == SPF_FMINNUM)
5114 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5115 if (SPF == SPF_FMAXNUM)
5116 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5117   llvm_unreachable("unhandled!");
5118}
5119
5120SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5121 if (SPF == SPF_SMIN) return SPF_SMAX;
5122 if (SPF == SPF_UMIN) return SPF_UMAX;
5123 if (SPF == SPF_SMAX) return SPF_SMIN;
5124 if (SPF == SPF_UMAX) return SPF_UMIN;
5125   llvm_unreachable("unhandled!");
5126}
5127
5128CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5129 return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5130}
5131
5132/// Return true if "icmp Pred LHS RHS" is always true.
5133static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5134 const Value *RHS, const DataLayout &DL,
5135 unsigned Depth) {
5136   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
5137 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
5138 return true;
5139
5140 switch (Pred) {
5141 default:
5142 return false;
5143
5144 case CmpInst::ICMP_SLE: {
5145 const APInt *C;
5146
5147 // LHS s<= LHS +_{nsw} C if C >= 0
5148 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
5149 return !C->isNegative();
5150 return false;
5151 }
5152
5153 case CmpInst::ICMP_ULE: {
5154 const APInt *C;
5155
5156 // LHS u<= LHS +_{nuw} C for any C
5157 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
5158 return true;
5159
5160 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
5161 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
5162 const Value *&X,
5163 const APInt *&CA, const APInt *&CB) {
5164 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
5165 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
5166 return true;
5167
5168 // If X & C == 0 then (X | C) == X +_{nuw} C
5169 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
5170 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
5171 KnownBits Known(CA->getBitWidth());
5172 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
5173 /*CxtI*/ nullptr, /*DT*/ nullptr);
5174 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
5175 return true;
5176 }
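    // Editorial check of the claim above: with X = 0b1000 and C = 0b0011,
    // X & C == 0, so X | C == 0b1011 == X + C, and the addition cannot carry
    // (it is nuw).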
5177
5178 return false;
5179 };
5180
5181 const Value *X;
5182 const APInt *CLHS, *CRHS;
5183 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
5184 return CLHS->ule(*CRHS);
5185
5186 return false;
5187 }
5188 }
5189}
5190
5191/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
5192/// ALHS ARHS" is true. Otherwise, return None.
5193static Optional<bool>
5194isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
5195 const Value *ARHS, const Value *BLHS, const Value *BRHS,
5196 const DataLayout &DL, unsigned Depth) {
5197 switch (Pred) {
5198 default:
5199 return None;
5200
5201 case CmpInst::ICMP_SLT:
5202 case CmpInst::ICMP_SLE:
5203 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
5204 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
5205 return true;
5206 return None;
5207
5208 case CmpInst::ICMP_ULT:
5209 case CmpInst::ICMP_ULE:
5210 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
5211 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
5212 return true;
5213 return None;
5214 }
5215}
5216
5217/// Return true if the operands of the two compares match. IsSwappedOps is true
5218/// when the operands match, but are swapped.
5219static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
5220 const Value *BLHS, const Value *BRHS,
5221 bool &IsSwappedOps) {
5222
5223 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
5224 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
5225 return IsMatchingOps || IsSwappedOps;
5226}
5227
5228/// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
5229/// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
5230/// Otherwise, return None if we can't infer anything.
5231static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
5232 CmpInst::Predicate BPred,
5233 bool AreSwappedOps) {
5234 // Canonicalize the predicate as if the operands were not commuted.
5235 if (AreSwappedOps)
5236 BPred = ICmpInst::getSwappedPredicate(BPred);
5237
5238 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
5239 return true;
5240 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
5241 return false;
5242
5243 return None;
5244}
5245
5246/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
5247/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
5248/// Otherwise, return None if we can't infer anything.
5249static Optional<bool>
5250isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
5251 const ConstantInt *C1,
5252 CmpInst::Predicate BPred,
5253 const ConstantInt *C2) {
5254 ConstantRange DomCR =
5255 ConstantRange::makeExactICmpRegion(APred, C1->getValue());
5256 ConstantRange CR =
5257 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
5258 ConstantRange Intersection = DomCR.intersectWith(CR);
5259 ConstantRange Difference = DomCR.difference(CR);
5260 if (Intersection.isEmptySet())
5261 return false;
5262 if (Difference.isEmptySet())
5263 return true;
5264 return None;
5265}
5266
5267/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
5268/// false. Otherwise, return None if we can't infer anything.
5269static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
5270 const ICmpInst *RHS,
5271 const DataLayout &DL, bool LHSIsTrue,
5272 unsigned Depth) {
5273 Value *ALHS = LHS->getOperand(0);
5274 Value *ARHS = LHS->getOperand(1);
5275 // The rest of the logic assumes the LHS condition is true. If that's not the
5276 // case, invert the predicate to make it so.
5277 ICmpInst::Predicate APred =
5278 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
5279
5280 Value *BLHS = RHS->getOperand(0);
5281 Value *BRHS = RHS->getOperand(1);
5282 ICmpInst::Predicate BPred = RHS->getPredicate();
5283
5284 // Can we infer anything when the two compares have matching operands?
5285 bool AreSwappedOps;
5286 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
5287 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
5288 APred, BPred, AreSwappedOps))
5289 return Implication;
5290 // No amount of additional analysis will infer the second condition, so
5291 // early exit.
5292 return None;
5293 }
5294
5295 // Can we infer anything when the LHS operands match and the RHS operands are
5296 // constants (not necessarily matching)?
5297 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
5298 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
5299 APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
5300 return Implication;
5301 // No amount of additional analysis will infer the second condition, so
5302 // early exit.
5303 return None;
5304 }
5305
5306 if (APred == BPred)
5307 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
5308 return None;
5309}
5310
5311/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
5312/// false. Otherwise, return None if we can't infer anything. We expect the
5313/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
5314static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
5315 const ICmpInst *RHS,
5316 const DataLayout &DL, bool LHSIsTrue,
5317 unsigned Depth) {
5318 // The LHS must be an 'or' or an 'and' instruction.
5319   assert((LHS->getOpcode() == Instruction::And ||
5320           LHS->getOpcode() == Instruction::Or) &&
5321          "Expected LHS to be 'and' or 'or'.");
5322
5323   assert(Depth <= MaxDepth && "Hit recursion limit");
5324
5325 // If the result of an 'or' is false, then we know both legs of the 'or' are
5326 // false. Similarly, if the result of an 'and' is true, then we know both
5327 // legs of the 'and' are true.
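  // Editorial example: if %c = or i1 %a, %b is known false, then %a and %b are
  // both false, so it suffices for either leg alone to decide the RHS icmp.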
5328 Value *ALHS, *ARHS;
5329 if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
5330 (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
5331     // FIXME: Make this non-recursive.
5332 if (Optional<bool> Implication =
5333 isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
5334 return Implication;
5335 if (Optional<bool> Implication =
5336 isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
5337 return Implication;
5338 return None;
5339 }
5340 return None;
5341}
5342
5343Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
5344 const DataLayout &DL, bool LHSIsTrue,
5345 unsigned Depth) {
5346 // Bail out when we hit the limit.
5347 if (Depth == MaxDepth)
5348 return None;
5349
5350 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
5351 // example.
5352 if (LHS->getType() != RHS->getType())
5353 return None;
5354
5355 Type *OpTy = LHS->getType();
5356   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
5357
5358 // LHS ==> RHS by definition
5359 if (LHS == RHS)
5360 return LHSIsTrue;
5361
5362 // FIXME: Extend the code below to handle vectors.
5363 if (OpTy->isVectorTy())
5364 return None;
5365
5366 assert(OpTy->isIntegerTy(1) && "implied by above");
5367
5368 // Both LHS and RHS are icmps.
5369 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
5370 const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
5371 if (LHSCmp && RHSCmp)
5372 return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
5373
5374 // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
5375 // an icmp. FIXME: Add support for and/or on the RHS.
5376 const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
5377 if (LHSBO && RHSCmp) {
5378 if ((LHSBO->getOpcode() == Instruction::And ||
5379 LHSBO->getOpcode() == Instruction::Or))
5380 return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
5381 }
5382 return None;
5383}
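// Minimal usage sketch (the helper name simplifyByImpliedCond and its inputs
// are assumptions for illustration, not an LLVM API): given a condition
// BrCond already known true, fold another boolean Cond that it implies.
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"

static llvm::Value *simplifyByImpliedCond(llvm::Value *Cond,
                                          llvm::Value *BrCond,
                                          const llvm::DataLayout &DL) {
  // If BrCond == true pins down Cond's value, materialize that constant;
  // otherwise report failure with nullptr.
  if (llvm::Optional<bool> Implied =
          llvm::isImpliedCondition(BrCond, Cond, DL, /*LHSIsTrue=*/true))
    return llvm::ConstantInt::getBool(Cond->getType(), *Implied);
  return nullptr;
}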
5384
5385Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
5386 const Instruction *ContextI,
5387 const DataLayout &DL) {
5388 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
5389 if (!ContextI || !ContextI->getParent())
5390 return None;
5391
5392 // TODO: This is a poor/cheap way to determine dominance. Should we use a
5393 // dominator tree (e.g., from a SimplifyQuery) instead?
5394 const BasicBlock *ContextBB = ContextI->getParent();
5395 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
5396 if (!PredBB)
5397 return None;
5398
5399 // We need a conditional branch in the predecessor.
5400 Value *PredCond;
5401 BasicBlock *TrueBB, *FalseBB;
5402 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
5403 return None;
5404
5405 // A degenerate branch (both successors equal) should get simplified; don't bother.
5406 if (TrueBB == FalseBB)
5407 return None;
5408
5409 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
5410 "Predecessor block does not point to successor?");
5411
5412 // Is this condition implied by the predecessor condition?
5413 bool CondIsTrue = TrueBB == ContextBB;
5414 return isImpliedCondition(PredCond, Cond, DL, CondIsTrue);
5415}
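// Worked example (assumed IR, for illustration only):
//   entry:
//     %c1 = icmp ult i32 %x, 10
//     br i1 %c1, label %then, label %else
//   then:
//     %c2 = icmp ult i32 %x, 20
// For a context instruction CtxI inside %then, isImpliedByDomCondition(%c2,
// CtxI, DL) returns true: %then has the single predecessor %entry, it is the
// true successor, and (%x u< 10) implies (%x u< 20).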
5416
5417static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
5418 APInt &Upper, const InstrInfoQuery &IIQ) {
5419 unsigned Width = Lower.getBitWidth();
5420 const APInt *C;
5421 switch (BO.getOpcode()) {
5422 case Instruction::Add:
5423 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5424 // FIXME: If we have both nuw and nsw, we should reduce the range further.
5425 if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5426 // 'add nuw x, C' produces [C, UINT_MAX].
5427 Lower = *C;
5428 } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5429 if (C->isNegative()) {
5430 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
5431 Lower = APInt::getSignedMinValue(Width);
5432 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5433 } else {
5434 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
5435 Lower = APInt::getSignedMinValue(Width) + *C;
5436 Upper = APInt::getSignedMaxValue(Width) + 1;
5437 }
5438 }
5439 }
5440 break;
5441
5442 case Instruction::And:
5443 if (match(BO.getOperand(1), m_APInt(C)))
5444 // 'and x, C' produces [0, C].
5445 Upper = *C + 1;
5446 break;
5447
5448 case Instruction::Or:
5449 if (match(BO.getOperand(1), m_APInt(C)))
5450 // 'or x, C' produces [C, UINT_MAX].
5451 Lower = *C;
5452 break;
5453
5454 case Instruction::AShr:
5455 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5456 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
5457 Lower = APInt::getSignedMinValue(Width).ashr(*C);
5458 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
5459 } else if (match(BO.getOperand(0), m_APInt(C))) {
5460 unsigned ShiftAmount = Width - 1;
5461 if (!C->isNullValue() && IIQ.isExact(&BO))
5462 ShiftAmount = C->countTrailingZeros();
5463 if (C->isNegative()) {
5464 // 'ashr C, x' produces [C, C >> (Width-1)]
5465 Lower = *C;
5466 Upper = C->ashr(ShiftAmount) + 1;
5467 } else {
5468 // 'ashr C, x' produces [C >> (Width-1), C]
5469 Lower = C->ashr(ShiftAmount);
5470 Upper = *C + 1;
5471 }
5472 }
5473 break;
5474
5475 case Instruction::LShr:
5476 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5477 // 'lshr x, C' produces [0, UINT_MAX >> C].
5478 Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
5479 } else if (match(BO.getOperand(0), m_APInt(C))) {
5480 // 'lshr C, x' produces [C >> (Width-1), C].
5481 unsigned ShiftAmount = Width - 1;
5482 if (!C->isNullValue() && IIQ.isExact(&BO))
5483 ShiftAmount = C->countTrailingZeros();
5484 Lower = C->lshr(ShiftAmount);
5485 Upper = *C + 1;
5486 }
5487 break;
5488
5489 case Instruction::Shl:
5490 if (match(BO.getOperand(0), m_APInt(C))) {
5491 if (IIQ.hasNoUnsignedWrap(&BO)) {
5492 // 'shl nuw C, x' produces [C, C << CLZ(C)]
5493 Lower = *C;
5494 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
5495 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
5496 if (C->isNegative()) {
5497 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
5498 unsigned ShiftAmount = C->countLeadingOnes() - 1;
5499 Lower = C->shl(ShiftAmount);
5500 Upper = *C + 1;
5501 } else {
5502 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
5503 unsigned ShiftAmount = C->countLeadingZeros() - 1;
5504 Lower = *C;
5505 Upper = C->shl(ShiftAmount) + 1;
5506 }
5507 }
5508 }
5509 break;
5510
5511 case Instruction::SDiv:
5512 if (match(BO.getOperand(1), m_APInt(C))) {
5513 APInt IntMin = APInt::getSignedMinValue(Width);
5514 APInt IntMax = APInt::getSignedMaxValue(Width);
5515 if (C->isAllOnesValue()) {
5516 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
5517 // (INT_MIN sdiv -1 overflows, so INT_MIN is excluded from the range.)
5518 Lower = IntMin + 1;
5519 Upper = IntMax + 1;
5520 } else if (C->countLeadingZeros() < Width - 1) {
5521 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
5522 // where C != -1 and C != 0 and C != 1
5523 Lower = IntMin.sdiv(*C);
5524 Upper = IntMax.sdiv(*C);
5525 if (Lower.sgt(Upper))
5526 std::swap(Lower, Upper);
5527 Upper = Upper + 1;
5528 assert(Upper != Lower && "Upper part of range has wrapped!");
5529 }
5530 } else if (match(BO.getOperand(0), m_APInt(C))) {
5531 if (C->isMinSignedValue()) {
5532 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
5533 Lower = *C;
5534 Upper = Lower.lshr(1) + 1;
5535 } else {
5536 // 'sdiv C, x' produces [-|C|, |C|].
5537 Upper = C->abs() + 1;
5538 Lower = (-Upper) + 1;
5539 }
5540 }
5541 break;
5542
5543 case Instruction::UDiv:
5544 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5545 // 'udiv x, C' produces [0, UINT_MAX / C].
5546 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
5547 } else if (match(BO.getOperand(0), m_APInt(C))) {
5548 // 'udiv C, x' produces [0, C].
5549 Upper = *C + 1;
5550 }
5551 break;
5552
5553 case Instruction::SRem:
5554 if (match(BO.getOperand(1), m_APInt(C))) {
5555 // 'srem x, C' produces (-|C|, |C|).
5556 Upper = C->abs();
5557 Lower = (-Upper) + 1;
5558 }
5559 break;
5560
5561 case Instruction::URem:
5562 if (match(BO.getOperand(1), m_APInt(C)))
5563 // 'urem x, C' produces [0, C).
5564 Upper = *C;
5565 break;
5566
5567 default:
5568 break;
5569 }
5570}
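// Worked examples (assumed i8 IR, for illustration): half-open [Lower, Upper)
// ranges this switch produces (a wrapped pair like [5, 0) means 5..255).
//   %r = add nuw i8 %x, 5   -> [5, 0)    i.e. unsigned [5, 255]
//   %r = and i8 %x, 15      -> [0, 16)   i.e. [0, 15]
//   %r = lshr i8 %x, 4      -> [0, 16)   i.e. [0, 15]
//   %r = urem i8 %x, 10     -> [0, 10)   i.e. [0, 9]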
5571
5572static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
5573 APInt &Upper) {
5574 unsigned Width = Lower.getBitWidth();
5575 const APInt *C;
5576 switch (II.getIntrinsicID()) {
5577 case Intrinsic::uadd_sat:
5578 // uadd.sat(x, C) produces [C, UINT_MAX].
5579 if (match(II.getOperand(0), m_APInt(C)) ||
5580 match(II.getOperand(1), m_APInt(C)))
5581 Lower = *C;
5582 break;
5583 case Intrinsic::sadd_sat:
5584 if (match(II.getOperand(0), m_APInt(C)) ||
5585 match(II.getOperand(1), m_APInt(C))) {
5586 if (C->isNegative()) {
5587 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
5588 Lower = APInt::getSignedMinValue(Width);
5589 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5590 } else {
5591 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
5592 Lower = APInt::getSignedMinValue(Width) + *C;
5593 Upper = APInt::getSignedMaxValue(Width) + 1;
5594 }
5595 }
5596 break;
5597 case Intrinsic::usub_sat:
5598 // usub.sat(C, x) produces [0, C].
5599 if (match(II.getOperand(0), m_APInt(C)))
5600 Upper = *C + 1;
5601 // usub.sat(x, C) produces [0, UINT_MAX - C].
5602 else if (match(II.getOperand(1), m_APInt(C)))
5603 Upper = APInt::getMaxValue(Width) - *C + 1;
5604 break;
5605 case Intrinsic::ssub_sat:
5606 if (match(II.getOperand(0), m_APInt(C))) {
5607 if (C->isNegative()) {
5608 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
5609 Lower = APInt::getSignedMinValue(Width);
5610 Upper = *C - APInt::getSignedMinValue(Width) + 1;
5611 } else {
5612 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
5613 Lower = *C - APInt::getSignedMaxValue(Width);
5614 Upper = APInt::getSignedMaxValue(Width) + 1;
5615 }
5616 } else if (match(II.getOperand(1), m_APInt(C))) {
5617 if (C->isNegative()) {
5618 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
5619 Lower = APInt::getSignedMinValue(Width) - *C;
5620 Upper = APInt::getSignedMaxValue(Width) + 1;
5621 } else {
5622 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
5623 Lower = APInt::getSignedMinValue(Width);
5624 Upper = APInt::getSignedMaxValue(Width) - *C + 1;
5625 }
5626 }
5627 break;
5628 default:
5629 break;
5630 }
5631}
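// Worked examples (assumed i8 IR, for illustration):
//   call i8 @llvm.uadd.sat.i8(i8 %x, i8 16) -> [16, 0)     i.e. [16, 255]
//   call i8 @llvm.usub.sat.i8(i8 20, i8 %x) -> [0, 21)     i.e. [0, 20]
//   call i8 @llvm.sadd.sat.i8(i8 %x, i8 -8) -> [-128, 120) i.e. [-128, 119]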
5632
5633static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
5634 APInt &Upper, const InstrInfoQuery &IIQ) {
5635 const Value *LHS = nullptr, *RHS = nullptr;
5636 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
5637 if (R.Flavor == SPF_UNKNOWN)
5638 return;
5639
5640 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
5641
5642 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
5643 // If the negation part of the abs (in RHS) has the NSW flag,
5644 // then the result of abs(X) is [0..SIGNED_MAX],
5645 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
5646 Lower = APInt::getNullValue(BitWidth);
5647 if (match(RHS, m_Neg(m_Specific(LHS))) &&
5648 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
5649 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
5650 else
5651 Upper = APInt::getSignedMinValue(BitWidth) + 1;
5652 return;
5653 }
5654
5655 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
5656 // The result of -abs(X) is <= 0.
5657 Lower = APInt::getSignedMinValue(BitWidth);
5658 Upper = APInt(BitWidth, 1);
5659 return;
5660 }
5661
5662 const APInt *C;
5663 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
5664 return;
5665
5666 switch (R.Flavor) {
5667 case SPF_UMIN:
5668 Upper = *C + 1;
5669 break;
5670 case SPF_UMAX:
5671 Lower = *C;
5672 break;
5673 case SPF_SMIN:
5674 Lower = APInt::getSignedMinValue(BitWidth);
5675 Upper = *C + 1;
5676 break;
5677 case SPF_SMAX:
5678 Lower = *C;
5679 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
5680 break;
5681 default:
5682 break;
5683 }
5684}
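// Worked examples (assumed i8 select patterns, for illustration):
//   umin(%x, 42) -> Upper = 43               i.e. [0, 42]
//   smax(%x, 3)  -> Lower = 3, Upper = 128   i.e. signed [3, 127]
//   nabs(%x)     -> Lower = -128, Upper = 1  i.e. signed [-128, 0]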
5685
5686ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo) {
5687 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
5688
5689 const APInt *C;
5690 if (match(V, m_APInt(C)))
5691 return ConstantRange(*C);
5692
5693 InstrInfoQuery IIQ(UseInstrInfo);
5694 unsigned BitWidth = V->getType()->getScalarSizeInBits();
5695 APInt Lower = APInt(BitWidth, 0);
5696 APInt Upper = APInt(BitWidth, 0);
5697 if (auto *BO = dyn_cast<BinaryOperator>(V))
5698 setLimitsForBinOp(*BO, Lower, Upper, IIQ);
5699 else if (auto *II = dyn_cast<IntrinsicInst>(V))
5700 setLimitsForIntrinsic(*II, Lower, Upper);
5701 else if (auto *SI = dyn_cast<SelectInst>(V))
5702 setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
5703
5704 ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
5705
5706 if (auto *I = dyn_cast<Instruction>(V))
5707 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
5708 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
5709
5710 return CR;
5711}
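// Minimal usage sketch (the helper name cannotBeNegative is an assumption for
// illustration): computeConstantRange returns a conservative
// over-approximation, so if even that range excludes negative values, V is
// provably non-negative.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Value.h"

static bool cannotBeNegative(const llvm::Value *V) {
  // Precondition (asserted by computeConstantRange): V has an integer or
  // integer-vector type.
  llvm::ConstantRange CR = llvm::computeConstantRange(V, /*UseInstrInfo=*/true);
  return CR.getSignedMin().isNonNegative();
}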
5712
5713static Optional<int64_t>
5714getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
5715 // Skip over the first indices.
5716 gep_type_iterator GTI = gep_type_begin(GEP);
5717 for (unsigned i = 1; i != Idx; ++i, ++GTI)
5718 /*skip along*/;
5719
5720 // Compute the offset implied by the rest of the indices.
5721 int64_t Offset = 0;
5722 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
5723 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
5724 if (!OpC)
5725 return None;
5726 if (OpC->isZero())
5727 continue; // No offset.
5728
5729 // Handle struct indices, which add their field offset to the pointer.
5730 if (StructType *STy = GTI.getStructTypeOrNull()) {
5731 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
5732 continue;
5733 }
5734
5735 // Otherwise, we have a sequential type like an array or vector. Multiply
5736 // the index by the ElementSize.
5737 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
5738 Offset += Size * OpC->getSExtValue();
5739 }
5740
5741 return Offset;
5742}
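// Worked example (assumed IR and data layout, for illustration): for
//   %q = getelementptr { i32, i32, [8 x i8] }, { i32, i32, [8 x i8] }* %p,
//        i64 0, i32 2, i64 3
// a call with Idx == 1 walks operands 1..3:
//   i64 0 over the struct type -> 0 * 16 bytes = 0
//   i32 2, a struct field      -> field offset 8
//   i64 3, an array index      -> 3 * 1 byte   = 3
// and returns Offset == 11. Any non-ConstantInt index yields None instead.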
5743
5744Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
5745 const DataLayout &DL) {
5746 Ptr1 = Ptr1->stripPointerCasts();
5747 Ptr2 = Ptr2->stripPointerCasts();
5748
5749 // Handle the trivial case first.
5750 if (Ptr1 == Ptr2) {
5751 return 0;
5752 }
5753
5754 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
5755 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
5756
5757 // If one pointer is a GEP see if the GEP is a constant offset from the base,
5758 // as in "P" and "gep P, 1".
5759 // Also do this iteratively to handle the following case:
5760 // Ptr_t1 = GEP Ptr1, c1
5761 // Ptr_t2 = GEP Ptr_t1, c2
5762 // Ptr2 = GEP Ptr_t2, c3
5763 // where we will return c1+c2+c3.
5764 // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
5765 // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
5766 // are the same, and return the difference between offsets.
5767 auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
5768 const Value *Ptr) -> Optional<int64_t> {
5769 const GEPOperator *GEP_T = GEP;
5770 int64_t OffsetVal = 0;
5771 bool HasSameBase = false;
5772 while (GEP_T) {
5773 auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
5774 if (!Offset)
5775 return None;
5776 OffsetVal += *Offset;
5777 auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
5778 if (Op0 == Ptr) {
5779 HasSameBase = true;
5780 break;
5781 }
5782 GEP_T = dyn_cast<GEPOperator>(Op0);
5783 }
5784 if (!HasSameBase)
5785 return None;
5786 return OffsetVal;
5787 };
5788
5789 if (GEP1) {
5790 auto Offset = getOffsetFromBase(GEP1, Ptr2);
5791 if (Offset)
5792 return -*Offset;
5793 }
5794 if (GEP2) {
5795 auto Offset = getOffsetFromBase(GEP2, Ptr1);
5796 if (Offset)
5797 return Offset;
5798 }
5799
5800 // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
5801 // base. After that base, they may have some number of common (and
5802 // potentially variable) indices. After those, each may add some constant
5803 // offset, and it is that constant offset which determines their distance
5804 // from each other. We handle no other case at this point.
5805 if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
5806 return None;
5807
5808 // Skip any common indices and track the GEP types.
5809 unsigned Idx = 1;
5810 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
5811 if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
5812 break;
5813
5814 auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
5815 auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
5816 if (!Offset1 || !Offset2)
5817 return None;
5818 return *Offset2 - *Offset1;
5819}
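// Minimal usage sketch (writesAreAdjacent, Dst1, Dst2 and Len1 are assumed
// names from a hypothetical store-merging client): two writes touch adjacent
// memory when the second pointer sits exactly Len1 bytes past the first.
#include <cstdint>
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/ValueTracking.h"

static bool writesAreAdjacent(const llvm::Value *Dst1, const llvm::Value *Dst2,
                              uint64_t Len1, const llvm::DataLayout &DL) {
  // isPointerOffset returns the byte distance from Dst1 to Dst2, or None when
  // no common base with constant offsets can be found.
  llvm::Optional<int64_t> Off = llvm::isPointerOffset(Dst1, Dst2, DL);
  return Off && *Off == static_cast<int64_t>(Len1);
}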

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/BasicBlock.h"
28#include "llvm/IR/CallingConv.h"
29#include "llvm/IR/Constant.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/InstrTypes.h"
33#include "llvm/IR/Instruction.h"
34#include "llvm/IR/OperandTraits.h"
35#include "llvm/IR/Type.h"
36#include "llvm/IR/Use.h"
37#include "llvm/IR/User.h"
38#include "llvm/IR/Value.h"
39#include "llvm/Support/AtomicOrdering.h"
40#include "llvm/Support/Casting.h"
41#include "llvm/Support/ErrorHandling.h"
42#include <cassert>
43#include <cstddef>
44#include <cstdint>
45#include <iterator>
46
47namespace llvm {
48
49class APInt;
50class ConstantInt;
51class DataLayout;
52class LLVMContext;
53
54//===----------------------------------------------------------------------===//
55// AllocaInst Class
56//===----------------------------------------------------------------------===//
57
58/// an instruction to allocate memory on the stack
59class AllocaInst : public UnaryInstruction {
60 Type *AllocatedType;
61
62protected:
63 // Note: Instruction needs to be a friend here to call cloneImpl.
64 friend class Instruction;