Bug Summary

File: include/llvm/Analysis/ValueTracking.h
Warning: line 585, column 5
Assigned value is garbage or undefined
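For orientation, the flagged pattern reduces to an out-parameter that a callee may leave unwritten. A minimal, hypothetical C++ sketch follows (names invented; the real code is the matchSelectPattern wrapper reached via the path notes further down):

  // Hypothetical reduction of the report: if the callee can return without
  // storing to its out-parameter, the later read copies an undefined value.
  static bool maybeFill(int &Out, bool DoFill) {
    if (!DoFill)
      return false;        // early return: Out is never written
    Out = 42;
    return true;
  }

  static int caller(bool DoFill) {
    int L;                 // declared without an initial value
    maybeFill(L, DoFill);  // may leave L untouched
    return L;              // <- "Assigned value is garbage or undefined"
  }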

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ValueTracking.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-9/lib/clang/9.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-9~svn361465/build-llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-9~svn361465/lib/Analysis -I /build/llvm-toolchain-snapshot-9~svn361465/build-llvm/include -I /build/llvm-toolchain-snapshot-9~svn361465/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/9.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-9/lib/clang/9.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-9~svn361465/build-llvm/lib/Analysis -fdebug-prefix-map=/build/llvm-toolchain-snapshot-9~svn361465=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2019-05-24-031927-21217-1 -x c++ /build/llvm-toolchain-snapshot-9~svn361465/lib/Analysis/ValueTracking.cpp -faddrsig

/build/llvm-toolchain-snapshot-9~svn361465/lib/Analysis/ValueTracking.cpp

1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains routines that help analyze properties that chains of
10// computations have.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/ValueTracking.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Analysis/AssumptionCache.h"
28#include "llvm/Analysis/GuardUtils.h"
29#include "llvm/Analysis/InstructionSimplify.h"
30#include "llvm/Analysis/Loads.h"
31#include "llvm/Analysis/LoopInfo.h"
32#include "llvm/Analysis/OptimizationRemarkEmitter.h"
33#include "llvm/Analysis/TargetLibraryInfo.h"
34#include "llvm/IR/Argument.h"
35#include "llvm/IR/Attributes.h"
36#include "llvm/IR/BasicBlock.h"
37#include "llvm/IR/CallSite.h"
38#include "llvm/IR/Constant.h"
39#include "llvm/IR/ConstantRange.h"
40#include "llvm/IR/Constants.h"
41#include "llvm/IR/DataLayout.h"
42#include "llvm/IR/DerivedTypes.h"
43#include "llvm/IR/DiagnosticInfo.h"
44#include "llvm/IR/Dominators.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/GetElementPtrTypeIterator.h"
47#include "llvm/IR/GlobalAlias.h"
48#include "llvm/IR/GlobalValue.h"
49#include "llvm/IR/GlobalVariable.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instruction.h"
52#include "llvm/IR/Instructions.h"
53#include "llvm/IR/IntrinsicInst.h"
54#include "llvm/IR/Intrinsics.h"
55#include "llvm/IR/LLVMContext.h"
56#include "llvm/IR/Metadata.h"
57#include "llvm/IR/Module.h"
58#include "llvm/IR/Operator.h"
59#include "llvm/IR/PatternMatch.h"
60#include "llvm/IR/Type.h"
61#include "llvm/IR/User.h"
62#include "llvm/IR/Value.h"
63#include "llvm/Support/Casting.h"
64#include "llvm/Support/CommandLine.h"
65#include "llvm/Support/Compiler.h"
66#include "llvm/Support/ErrorHandling.h"
67#include "llvm/Support/KnownBits.h"
68#include "llvm/Support/MathExtras.h"
69#include <algorithm>
70#include <array>
71#include <cassert>
72#include <cstdint>
73#include <iterator>
74#include <utility>
75
76using namespace llvm;
77using namespace llvm::PatternMatch;
78
79const unsigned MaxDepth = 6;
80
81// Controls the number of uses of the value searched for possible
82// dominating comparisons.
83static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
84 cl::Hidden, cl::init(20));
85
86/// Returns the bitwidth of the given scalar or pointer type. For vector types,
87/// returns the element type's bitwidth.
88static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
89 if (unsigned BitWidth = Ty->getScalarSizeInBits())
90 return BitWidth;
91
92 return DL.getIndexTypeSizeInBits(Ty);
93}
94
95namespace {
96
97// Simplifying using an assume can only be done in a particular control-flow
98// context (the context instruction provides that context). If an assume and
99// the context instruction are not in the same block then the DT helps in
100// figuring out if we can use it.
101struct Query {
102 const DataLayout &DL;
103 AssumptionCache *AC;
104 const Instruction *CxtI;
105 const DominatorTree *DT;
106
107 // Unlike the other analyses, this may be a nullptr because not all clients
108 // provide it currently.
109 OptimizationRemarkEmitter *ORE;
110
111 /// Set of assumptions that should be excluded from further queries.
112 /// This is because of the potential for mutual recursion to cause
113 /// computeKnownBits to repeatedly visit the same assume intrinsic. The
114 /// classic case of this is assume(x == y), which will attempt to determine
115 /// bits in x from bits in y, which will attempt to determine bits in y from
116 /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
117 /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
118 /// (all of which can call computeKnownBits), and so on.
119 std::array<const Value *, MaxDepth> Excluded;
120
121 /// If true, it is safe to use metadata during simplification.
122 InstrInfoQuery IIQ;
123
124 unsigned NumExcluded = 0;
125
126 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
127 const DominatorTree *DT, bool UseInstrInfo,
128 OptimizationRemarkEmitter *ORE = nullptr)
129 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
130
131 Query(const Query &Q, const Value *NewExcl)
132 : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
133 NumExcluded(Q.NumExcluded) {
134 Excluded = Q.Excluded;
135 Excluded[NumExcluded++] = NewExcl;
136 assert(NumExcluded <= Excluded.size());
137 }
138
139 bool isExcluded(const Value *Value) const {
140 if (NumExcluded == 0)
141 return false;
142 auto End = Excluded.begin() + NumExcluded;
143 return std::find(Excluded.begin(), End, Value) != End;
144 }
145};
146
147} // end anonymous namespace
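As a hedged illustration of the mutual recursion the Excluded array guards against (assumed IR, written in comment form as elsewhere in this file):

  // ; Given assume(%x == %y), computeKnownBits(%x) consults the assume and
  // ; recurses into computeKnownBits(%y), which would consult the very same
  // ; assume for %x again. Copying the Query with the assume added to
  // ; Excluded (the Query(Q, NewExcl) constructor above) breaks that cycle.
  // %cmp = icmp eq i32 %x, %y
  // call void @llvm.assume(i1 %cmp)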
148
149// Given the provided Value and, potentially, a context instruction, return
150// the preferred context instruction (if any).
151static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
152 // If we've been provided with a context instruction, then use that (provided
153 // it has been inserted).
154 if (CxtI && CxtI->getParent())
155 return CxtI;
156
157 // If the value is really an already-inserted instruction, then use that.
158 CxtI = dyn_cast<Instruction>(V);
159 if (CxtI && CxtI->getParent())
160 return CxtI;
161
162 return nullptr;
163}
164
165static void computeKnownBits(const Value *V, KnownBits &Known,
166 unsigned Depth, const Query &Q);
167
168void llvm::computeKnownBits(const Value *V, KnownBits &Known,
169 const DataLayout &DL, unsigned Depth,
170 AssumptionCache *AC, const Instruction *CxtI,
171 const DominatorTree *DT,
172 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
173 ::computeKnownBits(V, Known, Depth,
174 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
175}
176
177static KnownBits computeKnownBits(const Value *V, unsigned Depth,
178 const Query &Q);
179
180KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
181 unsigned Depth, AssumptionCache *AC,
182 const Instruction *CxtI,
183 const DominatorTree *DT,
184 OptimizationRemarkEmitter *ORE,
185 bool UseInstrInfo) {
186 return ::computeKnownBits(
187 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
188}
189
190bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
191 const DataLayout &DL, AssumptionCache *AC,
192 const Instruction *CxtI, const DominatorTree *DT,
193 bool UseInstrInfo) {
194 assert(LHS->getType() == RHS->getType() &&
195        "LHS and RHS should have the same type");
196 assert(LHS->getType()->isIntOrIntVectorTy() &&
197        "LHS and RHS should be integers");
198 // Look for an inverted mask: (X & ~M) op (Y & M).
199 Value *M;
200 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
201 match(RHS, m_c_And(m_Specific(M), m_Value())))
202 return true;
203 if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
204 match(LHS, m_c_And(m_Specific(M), m_Value())))
205 return true;
206 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
207 KnownBits LHSKnown(IT->getBitWidth());
208 KnownBits RHSKnown(IT->getBitWidth());
209 computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
210 computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
211 return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
212}
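A hedged example of the inverted-mask idiom recognized above (assumed values, not from this file): since (X & ~M) and (Y & M) can never share a set bit, an add between them carries nothing and behaves like a bitwise OR.

  // Sketch: haveNoCommonBitsSet lets a caller turn this '+' into '|'.
  static unsigned mergeBits(unsigned X, unsigned Y, unsigned M) {
    return (X & ~M) + (Y & M); // no common bits => same as (X & ~M) | (Y & M)
  }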
213
214bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
215 for (const User *U : CxtI->users()) {
216 if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
217 if (IC->isEquality())
218 if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
219 if (C->isNullValue())
220 continue;
221 return false;
222 }
223 return true;
224}
225
226static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
227 const Query &Q);
228
229bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
230 bool OrZero, unsigned Depth,
231 AssumptionCache *AC, const Instruction *CxtI,
232 const DominatorTree *DT, bool UseInstrInfo) {
233 return ::isKnownToBeAPowerOfTwo(
234 V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
235}
236
237static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
238
239bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
240 AssumptionCache *AC, const Instruction *CxtI,
241 const DominatorTree *DT, bool UseInstrInfo) {
242 return ::isKnownNonZero(V, Depth,
243 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
244}
245
246bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
247 unsigned Depth, AssumptionCache *AC,
248 const Instruction *CxtI, const DominatorTree *DT,
249 bool UseInstrInfo) {
250 KnownBits Known =
251 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
252 return Known.isNonNegative();
253}
254
255bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
256 AssumptionCache *AC, const Instruction *CxtI,
257 const DominatorTree *DT, bool UseInstrInfo) {
258 if (auto *CI = dyn_cast<ConstantInt>(V))
259 return CI->getValue().isStrictlyPositive();
260
261 // TODO: We're doing two recursive queries here. We should factor this such
262 // that only a single query is needed.
263 return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
264 isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
265}
266
267bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
268 AssumptionCache *AC, const Instruction *CxtI,
269 const DominatorTree *DT, bool UseInstrInfo) {
270 KnownBits Known =
271 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
272 return Known.isNegative();
273}
274
275static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
276
277bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
278 const DataLayout &DL, AssumptionCache *AC,
279 const Instruction *CxtI, const DominatorTree *DT,
280 bool UseInstrInfo) {
281 return ::isKnownNonEqual(V1, V2,
282 Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
283 UseInstrInfo, /*ORE=*/nullptr));
284}
285
286static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
287 const Query &Q);
288
289bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
290 const DataLayout &DL, unsigned Depth,
291 AssumptionCache *AC, const Instruction *CxtI,
292 const DominatorTree *DT, bool UseInstrInfo) {
293 return ::MaskedValueIsZero(
294 V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
295}
296
297static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
298 const Query &Q);
299
300unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
301 unsigned Depth, AssumptionCache *AC,
302 const Instruction *CxtI,
303 const DominatorTree *DT, bool UseInstrInfo) {
304 return ::ComputeNumSignBits(
305 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
306}
307
308static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
309 bool NSW,
310 KnownBits &KnownOut, KnownBits &Known2,
311 unsigned Depth, const Query &Q) {
312 unsigned BitWidth = KnownOut.getBitWidth();
313
314 // If an initial sequence of bits in the result is not needed, the
315 // corresponding bits in the operands are not needed.
316 KnownBits LHSKnown(BitWidth);
317 computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
318 computeKnownBits(Op1, Known2, Depth + 1, Q);
319
320 KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
321}
322
323static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
324 KnownBits &Known, KnownBits &Known2,
325 unsigned Depth, const Query &Q) {
326 unsigned BitWidth = Known.getBitWidth();
327 computeKnownBits(Op1, Known, Depth + 1, Q);
328 computeKnownBits(Op0, Known2, Depth + 1, Q);
329
330 bool isKnownNegative = false;
331 bool isKnownNonNegative = false;
332 // If the multiplication is known not to overflow, compute the sign bit.
333 if (NSW) {
334 if (Op0 == Op1) {
335 // The product of a number with itself is non-negative.
336 isKnownNonNegative = true;
337 } else {
338 bool isKnownNonNegativeOp1 = Known.isNonNegative();
339 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
340 bool isKnownNegativeOp1 = Known.isNegative();
341 bool isKnownNegativeOp0 = Known2.isNegative();
342 // The product of two numbers with the same sign is non-negative.
343 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
344 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
345 // The product of a negative number and a non-negative number is either
346 // negative or zero.
347 if (!isKnownNonNegative)
348 isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
349 isKnownNonZero(Op0, Depth, Q)) ||
350 (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
351 isKnownNonZero(Op1, Depth, Q));
352 }
353 }
354
355 assert(!Known.hasConflict() && !Known2.hasConflict());
356 // Compute a conservative estimate for high known-0 bits.
357 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
358 Known2.countMinLeadingZeros(),
359 BitWidth) - BitWidth;
360 LeadZ = std::min(LeadZ, BitWidth);
361
362 // The result of the bottom bits of an integer multiply can be
363 // inferred by looking at the bottom bits of both operands and
364 // multiplying them together.
365 // We can infer at least the minimum number of known trailing bits
366 // of both operands. Depending on number of trailing zeros, we can
367 // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
368 // a and b are divisible by m and n respectively.
369 // We then calculate how many of those bits are inferrable and set
370 // the output. For example, the i8 mul:
371 // a = XXXX1100 (12)
372 // b = XXXX1110 (14)
373 // We know the bottom 3 bits are zero since the first can be divided by
374 // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
375 // Applying the multiplication to the trimmed arguments gets:
376 // XX11 (3)
377 // X111 (7)
378 // -------
379 // XX11
380 // XX11
381 // XX11
382 // XX11
383 // -------
384 // XXXXX01
385 // Which allows us to infer the 2 LSBs. Since we're multiplying the result
386 // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
387 // The proof for this can be described as:
388 // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
389 // (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
390 // umin(countTrailingZeros(C2), C6) +
391 // umin(C5 - umin(countTrailingZeros(C1), C5),
392 // C6 - umin(countTrailingZeros(C2), C6)))) - 1)
393 // %aa = shl i8 %a, C5
394 // %bb = shl i8 %b, C6
395 // %aaa = or i8 %aa, C1
396 // %bbb = or i8 %bb, C2
397 // %mul = mul i8 %aaa, %bbb
398 // %mask = and i8 %mul, C7
399 // =>
400 // %mask = i8 ((C1*C2)&C7)
401 // Where C5, C6 describe the known bits of %a, %b
402 // C1, C2 describe the known bottom bits of %a, %b.
403 // C7 describes the mask of the known bits of the result.
404 APInt Bottom0 = Known.One;
405 APInt Bottom1 = Known2.One;
406
407 // How many times we'd be able to divide each argument by 2 (shr by 1).
408 // This gives us the number of trailing zeros on the multiplication result.
409 unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
410 unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
411 unsigned TrailZero0 = Known.countMinTrailingZeros();
412 unsigned TrailZero1 = Known2.countMinTrailingZeros();
413 unsigned TrailZ = TrailZero0 + TrailZero1;
414
415 // Figure out the fewest known-bits operand.
416 unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
417 TrailBitsKnown1 - TrailZero1);
418 unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
419
420 APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
421 Bottom1.getLoBits(TrailBitsKnown1);
422
423 Known.resetAll();
424 Known.Zero.setHighBits(LeadZ);
425 Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
426 Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
427
428 // Only make use of no-wrap flags if we failed to compute the sign bit
429 // directly. This matters if the multiplication always overflows, in
430 // which case we prefer to follow the result of the direct computation,
431 // though as the program is invoking undefined behaviour we can choose
432 // whatever we like here.
433 if (isKnownNonNegative && !Known.isNegative())
434 Known.makeNonNegative();
435 else if (isKnownNegative && !Known.isNonNegative())
436 Known.makeNegative();
437}
438
439void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
440 KnownBits &Known) {
441 unsigned BitWidth = Known.getBitWidth();
442 unsigned NumRanges = Ranges.getNumOperands() / 2;
443 assert(NumRanges >= 1);
444
445 Known.Zero.setAllBits();
446 Known.One.setAllBits();
447
448 for (unsigned i = 0; i < NumRanges; ++i) {
449 ConstantInt *Lower =
450 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
451 ConstantInt *Upper =
452 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
453 ConstantRange Range(Lower->getValue(), Upper->getValue());
454
455 // The first CommonPrefixBits of all values in Range are equal.
456 unsigned CommonPrefixBits =
457 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
458
459 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
460 Known.One &= Range.getUnsignedMax() & Mask;
461 Known.Zero &= ~Range.getUnsignedMax() & Mask;
462 }
463}
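A hedged worked example of the common-prefix computation above (assumed metadata, not from this file): for an i8 load annotated !range [0, 32), getUnsignedMax() is 31, so max ^ min = 31 = 0b00011111 and countLeadingZeros() = 3; the top three bits of the loaded value therefore become known zero.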
464
465static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
466 SmallVector<const Value *, 16> WorkSet(1, I);
467 SmallPtrSet<const Value *, 32> Visited;
468 SmallPtrSet<const Value *, 16> EphValues;
469
470 // The instruction defining an assumption's condition itself is always
471 // considered ephemeral to that assumption (even if it has other
472 // non-ephemeral users). See r246696's test case for an example.
473 if (is_contained(I->operands(), E))
474 return true;
475
476 while (!WorkSet.empty()) {
477 const Value *V = WorkSet.pop_back_val();
478 if (!Visited.insert(V).second)
479 continue;
480
481 // If all uses of this value are ephemeral, then so is this value.
482 if (llvm::all_of(V->users(), [&](const User *U) {
483 return EphValues.count(U);
484 })) {
485 if (V == E)
486 return true;
487
488 if (V == I || isSafeToSpeculativelyExecute(V)) {
489 EphValues.insert(V);
490 if (const User *U = dyn_cast<User>(V))
491 for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
492 J != JE; ++J)
493 WorkSet.push_back(*J);
494 }
495 }
496 }
497
498 return false;
499}
500
501// Is this an intrinsic that cannot be speculated but also cannot trap?
502bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
503 if (const CallInst *CI = dyn_cast<CallInst>(I))
504 if (Function *F = CI->getCalledFunction())
505 switch (F->getIntrinsicID()) {
506 default: break;
507 // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
508 case Intrinsic::assume:
509 case Intrinsic::sideeffect:
510 case Intrinsic::dbg_declare:
511 case Intrinsic::dbg_value:
512 case Intrinsic::dbg_label:
513 case Intrinsic::invariant_start:
514 case Intrinsic::invariant_end:
515 case Intrinsic::lifetime_start:
516 case Intrinsic::lifetime_end:
517 case Intrinsic::objectsize:
518 case Intrinsic::ptr_annotation:
519 case Intrinsic::var_annotation:
520 return true;
521 }
522
523 return false;
524}
525
526bool llvm::isValidAssumeForContext(const Instruction *Inv,
527 const Instruction *CxtI,
528 const DominatorTree *DT) {
529 // There are two restrictions on the use of an assume:
530 // 1. The assume must dominate the context (or the control flow must
531 // reach the assume whenever it reaches the context).
532 // 2. The context must not be in the assume's set of ephemeral values
533 // (otherwise we will use the assume to prove that the condition
534 // feeding the assume is trivially true, thus causing the removal of
535 // the assume).
536
537 if (DT) {
538 if (DT->dominates(Inv, CxtI))
539 return true;
540 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
541 // We don't have a DT, but this trivially dominates.
542 return true;
543 }
544
545 // With or without a DT, the only remaining case we will check is if the
546 // instructions are in the same BB. Give up if that is not the case.
547 if (Inv->getParent() != CxtI->getParent())
548 return false;
549
550 // If we have a dom tree, then we now know that the assume doesn't dominate
551 // the other instruction. If we don't have a dom tree then we can check if
552 // the assume is first in the BB.
553 if (!DT) {
554 // Search forward from the assume until we reach the context (or the end
555 // of the block); the common case is that the assume will come first.
556 for (auto I = std::next(BasicBlock::const_iterator(Inv)),
557 IE = Inv->getParent()->end(); I != IE; ++I)
558 if (&*I == CxtI)
559 return true;
560 }
561
562 // The context comes first, but they're both in the same block. Make sure
563 // there is nothing in between that might interrupt the control flow.
564 for (BasicBlock::const_iterator I =
565 std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
566 I != IE; ++I)
567 if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
568 return false;
569
570 return !isEphemeralValueOf(Inv, CxtI);
571}
572
573static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
574 unsigned Depth, const Query &Q) {
575 // Use of assumptions is context-sensitive. If we don't have a context, we
576 // cannot use them!
577 if (!Q.AC || !Q.CxtI)
578 return;
579
580 unsigned BitWidth = Known.getBitWidth();
581
582 // Note that the patterns below need to be kept in sync with the code
583 // in AssumptionCache::updateAffectedValues.
584
585 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
586 if (!AssumeVH)
587 continue;
588 CallInst *I = cast<CallInst>(AssumeVH);
589 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
590        "Got assumption for the wrong function!");
591 if (Q.isExcluded(I))
592 continue;
593
594 // Warning: This loop can end up being somewhat performance sensitive.
595 // We're running this loop once for each value queried, resulting in a
596 // runtime of ~O(#assumes * #values).
597
598 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
599        "must be an assume intrinsic");
600
601 Value *Arg = I->getArgOperand(0);
602
603 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
604 assert(BitWidth == 1 && "assume operand is not i1?");
605 Known.setAllOnes();
606 return;
607 }
608 if (match(Arg, m_Not(m_Specific(V))) &&
609 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
610 assert(BitWidth == 1 && "assume operand is not i1?");
611 Known.setAllZero();
612 return;
613 }
614
615 // The remaining tests are all recursive, so bail out if we hit the limit.
616 if (Depth == MaxDepth)
617 continue;
618
619 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
620 if (!Cmp)
621 continue;
622
623 Value *A, *B;
624 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
625
626 CmpInst::Predicate Pred;
627 uint64_t C;
628 switch (Cmp->getPredicate()) {
629 default:
630 break;
631 case ICmpInst::ICMP_EQ:
632 // assume(v = a)
633 if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
634 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
635 KnownBits RHSKnown(BitWidth);
636 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
637 Known.Zero |= RHSKnown.Zero;
638 Known.One |= RHSKnown.One;
639 // assume(v & b = a)
640 } else if (match(Cmp,
641 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
642 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
643 KnownBits RHSKnown(BitWidth);
644 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
645 KnownBits MaskKnown(BitWidth);
646 computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
647
648 // For those bits in the mask that are known to be one, we can propagate
649 // known bits from the RHS to V.
650 Known.Zero |= RHSKnown.Zero & MaskKnown.One;
651 Known.One |= RHSKnown.One & MaskKnown.One;
652 // assume(~(v & b) = a)
653 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
654 m_Value(A))) &&
655 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
656 KnownBits RHSKnown(BitWidth);
657 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
658 KnownBits MaskKnown(BitWidth);
659 computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
660
661 // For those bits in the mask that are known to be one, we can propagate
662 // inverted known bits from the RHS to V.
663 Known.Zero |= RHSKnown.One & MaskKnown.One;
664 Known.One |= RHSKnown.Zero & MaskKnown.One;
665 // assume(v | b = a)
666 } else if (match(Cmp,
667 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
668 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
669 KnownBits RHSKnown(BitWidth);
670 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
671 KnownBits BKnown(BitWidth);
672 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
673
674 // For those bits in B that are known to be zero, we can propagate known
675 // bits from the RHS to V.
676 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
677 Known.One |= RHSKnown.One & BKnown.Zero;
678 // assume(~(v | b) = a)
679 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
680 m_Value(A))) &&
681 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
682 KnownBits RHSKnown(BitWidth);
683 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
684 KnownBits BKnown(BitWidth);
685 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
686
687 // For those bits in B that are known to be zero, we can propagate
688 // inverted known bits from the RHS to V.
689 Known.Zero |= RHSKnown.One & BKnown.Zero;
690 Known.One |= RHSKnown.Zero & BKnown.Zero;
691 // assume(v ^ b = a)
692 } else if (match(Cmp,
693 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
694 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
695 KnownBits RHSKnown(BitWidth);
696 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
697 KnownBits BKnown(BitWidth);
698 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
699
700 // For those bits in B that are known to be zero, we can propagate known
701 // bits from the RHS to V. For those bits in B that are known to be one,
702 // we can propagate inverted known bits from the RHS to V.
703 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
704 Known.One |= RHSKnown.One & BKnown.Zero;
705 Known.Zero |= RHSKnown.One & BKnown.One;
706 Known.One |= RHSKnown.Zero & BKnown.One;
707 // assume(~(v ^ b) = a)
708 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
709 m_Value(A))) &&
710 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
711 KnownBits RHSKnown(BitWidth);
712 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
713 KnownBits BKnown(BitWidth);
714 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
715
716 // For those bits in B that are known to be zero, we can propagate
717 // inverted known bits from the RHS to V. For those bits in B that are
718 // known to be one, we can propagate known bits from the RHS to V.
719 Known.Zero |= RHSKnown.One & BKnown.Zero;
720 Known.One |= RHSKnown.Zero & BKnown.Zero;
721 Known.Zero |= RHSKnown.Zero & BKnown.One;
722 Known.One |= RHSKnown.One & BKnown.One;
723 // assume(v << c = a)
724 } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
725 m_Value(A))) &&
726 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
727 KnownBits RHSKnown(BitWidth);
728 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
729 // For those bits in RHS that are known, we can propagate them to known
730 // bits in V shifted to the right by C.
731 RHSKnown.Zero.lshrInPlace(C);
732 Known.Zero |= RHSKnown.Zero;
733 RHSKnown.One.lshrInPlace(C);
734 Known.One |= RHSKnown.One;
735 // assume(~(v << c) = a)
736 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
737 m_Value(A))) &&
738 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
739 KnownBits RHSKnown(BitWidth);
740 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
741 // For those bits in RHS that are known, we can propagate them inverted
742 // to known bits in V shifted to the right by C.
743 RHSKnown.One.lshrInPlace(C);
744 Known.Zero |= RHSKnown.One;
745 RHSKnown.Zero.lshrInPlace(C);
746 Known.One |= RHSKnown.Zero;
747 // assume(v >> c = a)
748 } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
749 m_Value(A))) &&
750 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
751 KnownBits RHSKnown(BitWidth);
752 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
753 // For those bits in RHS that are known, we can propagate them to known
754 // bits in V by shifting them left by C (undoing the right shift).
755 Known.Zero |= RHSKnown.Zero << C;
756 Known.One |= RHSKnown.One << C;
757 // assume(~(v >> c) = a)
758 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
759 m_Value(A))) &&
760 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
761 KnownBits RHSKnown(BitWidth);
762 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
763 // For those bits in RHS that are known, we can propagate them inverted
764 // to known bits in V by shifting them left by C (undoing the right shift).
765 Known.Zero |= RHSKnown.One << C;
766 Known.One |= RHSKnown.Zero << C;
767 }
768 break;
769 case ICmpInst::ICMP_SGE:
770 // assume(v >=_s c) where c is non-negative
771 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
772 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
773 KnownBits RHSKnown(BitWidth);
774 computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
775
776 if (RHSKnown.isNonNegative()) {
777 // We know that the sign bit is zero.
778 Known.makeNonNegative();
779 }
780 }
781 break;
782 case ICmpInst::ICMP_SGT:
783 // assume(v >_s c) where c is at least -1.
784 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
785 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
786 KnownBits RHSKnown(BitWidth);
787 computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
788
789 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
790 // We know that the sign bit is zero.
791 Known.makeNonNegative();
792 }
793 }
794 break;
795 case ICmpInst::ICMP_SLE:
796 // assume(v <=_s c) where c is negative
797 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
798 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
799 KnownBits RHSKnown(BitWidth);
800 computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
801
802 if (RHSKnown.isNegative()) {
803 // We know that the sign bit is one.
804 Known.makeNegative();
805 }
806 }
807 break;
808 case ICmpInst::ICMP_SLT:
809 // assume(v <_s c) where c is non-positive
810 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
811 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
812 KnownBits RHSKnown(BitWidth);
813 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
814
815 if (RHSKnown.isZero() || RHSKnown.isNegative()) {
816 // We know that the sign bit is one.
817 Known.makeNegative();
818 }
819 }
820 break;
821 case ICmpInst::ICMP_ULE:
822 // assume(v <=_u c)
823 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
824 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
825 KnownBits RHSKnown(BitWidth);
826 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
827
828 // Whatever high bits in c are zero are known to be zero.
829 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
830 }
831 break;
832 case ICmpInst::ICMP_ULT:
833 // assume(v <_u c)
834 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
835 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
836 KnownBits RHSKnown(BitWidth);
837 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
838
839 // If the RHS is known zero, then this assumption must be wrong (nothing
840 // is unsigned less than zero). Signal a conflict and get out of here.
841 if (RHSKnown.isZero()) {
842 Known.Zero.setAllBits();
843 Known.One.setAllBits();
844 break;
845 }
846
847 // Whatever high bits in c are zero are known to be zero (if c is a power
848 // of 2, then one more).
849 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
850 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
851 else
852 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
853 }
854 break;
855 }
856 }
857
858 // If assumptions conflict with each other or previous known bits, then we
859 // have a logical fallacy. It's possible that the assumption is not reachable,
860 // so this isn't a real bug. On the other hand, the program may have undefined
861 // behavior, or we might have a bug in the compiler. We can't assert/crash, so
862 // clear out the known bits, try to warn the user, and hope for the best.
863 if (Known.Zero.intersects(Known.One)) {
864 Known.resetAll();
865
866 if (Q.ORE)
867 Q.ORE->emit([&]() {
868 auto *CxtI = const_cast<Instruction *>(Q.CxtI);
869 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
870 CxtI)
871 << "Detected conflicting code assumptions. Program may "
872 "have undefined behavior, or compiler may have "
873 "internal error.";
874 });
875 }
876}
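A hedged illustration of the conflict handling above (assumed IR): two contradictory assumptions such as

  // call void @llvm.assume(i1 %x_eq_1)   ; would make bit 0 known one
  // call void @llvm.assume(i1 %x_eq_2)   ; would make bit 0 known zero

leave Known.Zero and Known.One intersecting, so the code resets Known and emits the "BadAssumption" remark rather than asserting.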
877
878/// Compute known bits from a shift operator, including those with a
879/// non-constant shift amount. Known is the output of this function. Known2 is a
880/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
881/// operator-specific functions that, given the known-zero or known-one bits
882/// respectively, and a shift amount, compute the implied known-zero or
883/// known-one bits of the shift operator's result respectively for that shift
884/// amount. The results from calling KZF and KOF are conservatively combined for
885/// all permitted shift amounts.
886static void computeKnownBitsFromShiftOperator(
887 const Operator *I, KnownBits &Known, KnownBits &Known2,
888 unsigned Depth, const Query &Q,
889 function_ref<APInt(const APInt &, unsigned)> KZF,
890 function_ref<APInt(const APInt &, unsigned)> KOF) {
891 unsigned BitWidth = Known.getBitWidth();
892
893 if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
894 unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
895
896 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
897 Known.Zero = KZF(Known.Zero, ShiftAmt);
898 Known.One = KOF(Known.One, ShiftAmt);
899 // If the known bits conflict, this must be an overflowing left shift, so
900 // the shift result is poison. We can return anything we want. Choose 0 for
901 // the best folding opportunity.
902 if (Known.hasConflict())
903 Known.setAllZero();
904
905 return;
906 }
907
908 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
909
910 // If the shift amount could be greater than or equal to the bit-width of the
911 // LHS, the value could be poison, but bail out because the check below is
912 // expensive. TODO: Should we just carry on?
913 if ((~Known.Zero).uge(BitWidth)) {
914 Known.resetAll();
915 return;
916 }
917
918 // Note: We cannot use Known.Zero.getLimitedValue() here, because if
919 // BitWidth > 64 and any upper bits are known, we'll end up returning the
920 // limit value (which implies all bits are known).
921 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
922 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
923
924 // It would be more clearly correct to use the two temporaries for this
925 // calculation. Reusing the APInts here to prevent unnecessary allocations.
926 Known.resetAll();
927
928 // If we know the shifter operand is nonzero, we can sometimes infer more
929 // known bits. However this is expensive to compute, so be lazy about it and
930 // only compute it when absolutely necessary.
931 Optional<bool> ShifterOperandIsNonZero;
932
933 // Early exit if we can't constrain any well-defined shift amount.
934 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
935 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
936 ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
937 if (!*ShifterOperandIsNonZero)
938 return;
939 }
940
941 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
942
943 Known.Zero.setAllBits();
944 Known.One.setAllBits();
945 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
946 // Combine the shifted known input bits only for those shift amounts
947 // compatible with its known constraints.
948 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
949 continue;
950 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
951 continue;
952 // If we know the shifter is nonzero, we may be able to infer more known
953 // bits. This check is sunk down as far as possible to avoid the expensive
954 // call to isKnownNonZero if the cheaper checks above fail.
955 if (ShiftAmt == 0) {
956 if (!ShifterOperandIsNonZero.hasValue())
957 ShifterOperandIsNonZero =
958 isKnownNonZero(I->getOperand(1), Depth + 1, Q);
959 if (*ShifterOperandIsNonZero)
960 continue;
961 }
962
963 Known.Zero &= KZF(Known2.Zero, ShiftAmt);
964 Known.One &= KOF(Known2.One, ShiftAmt);
965 }
966
967 // If the known bits conflict, the result is poison. Return a 0 and hope the
968 // caller can further optimize that.
969 if (Known.hasConflict())
970 Known.setAllZero();
971}
972
973static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
974 unsigned Depth, const Query &Q) {
975 unsigned BitWidth = Known.getBitWidth();
976
977 KnownBits Known2(Known);
978 switch (I->getOpcode()) {
1. Control jumps to 'case Select:' at line 1052
979 default: break;
980 case Instruction::Load:
981 if (MDNode *MD =
982 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
983 computeKnownBitsFromRangeMetadata(*MD, Known);
984 break;
985 case Instruction::And: {
986 // If either the LHS or the RHS is zero, the result is zero.
987 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
988 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
989
990 // Output known-1 bits are only known if set in both the LHS & RHS.
991 Known.One &= Known2.One;
992 // Output known-0 bits are known to be clear if zero in either the LHS or RHS.
993 Known.Zero |= Known2.Zero;
994
995 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
996 // here we handle the more general case of adding any odd number by
997 // matching the form and(x, add(x, y)) where y is odd.
998 // TODO: This could be generalized to clearing any bit set in y where the
999 // following bit is known to be unset in y.
1000 Value *X = nullptr, *Y = nullptr;
1001 if (!Known.Zero[0] && !Known.One[0] &&
1002 match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1003 Known2.resetAll();
1004 computeKnownBits(Y, Known2, Depth + 1, Q);
1005 if (Known2.countMinTrailingOnes() > 0)
1006 Known.Zero.setBit(0);
1007 }
1008 break;
1009 }
1010 case Instruction::Or:
1011 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1012 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1013
1014 // Output known-0 bits are only known if clear in both the LHS & RHS.
1015 Known.Zero &= Known2.Zero;
1016 // Output known-1 bits are known to be set if set in either the LHS or RHS.
1017 Known.One |= Known2.One;
1018 break;
1019 case Instruction::Xor: {
1020 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1021 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1022
1023 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1024 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
1025 // Output known-1 bits are known to be set if set in exactly one of the LHS, RHS.
1026 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
1027 Known.Zero = std::move(KnownZeroOut);
1028 break;
1029 }
1030 case Instruction::Mul: {
1031 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1032 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
1033 Known2, Depth, Q);
1034 break;
1035 }
1036 case Instruction::UDiv: {
1037 // For the purposes of computing leading zeros we can conservatively
1038 // treat a udiv as a logical right shift by the power of 2 known to
1039 // be less than the denominator.
1040 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1041 unsigned LeadZ = Known2.countMinLeadingZeros();
1042
1043 Known2.resetAll();
1044 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1045 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1046 if (RHSMaxLeadingZeros != BitWidth)
1047 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1048
1049 Known.Zero.setHighBits(LeadZ);
1050 break;
1051 }
1052 case Instruction::Select: {
1053 const Value *LHS, *RHS;
2. 'LHS' declared without an initial value
1054 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
3. Passing value via 2nd parameter 'LHS'
4. Calling 'matchSelectPattern'
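The notes above trace the report into the header: 'LHS' is declared at line 1053 without an initial value and passed by reference into matchSelectPattern. The flagged line, ValueTracking.h:585, sits in the const wrapper, which (paraphrased sketch, not verbatim) does roughly:

  // Rough paraphrase of the ValueTracking.h wrapper the analyzer inlines:
  //   Value *L, *R;                                 // locals, uninitialized
  //   auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  //   LHS = L;   // line 585: if L was never stored to on some path,
  //   RHS = R;   //           this copies an undefined value
  //   return Result;

Whether such a path exists in practice depends on the non-const matchSelectPattern always writing its out-parameters; the analyzer could not prove that it does.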
1055 if (SelectPatternResult::isMinOrMax(SPF)) {
1056 computeKnownBits(RHS, Known, Depth + 1, Q);
1057 computeKnownBits(LHS, Known2, Depth + 1, Q);
1058 } else {
1059 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1060 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1061 }
1062
1063 unsigned MaxHighOnes = 0;
1064 unsigned MaxHighZeros = 0;
1065 if (SPF == SPF_SMAX) {
1066 // If both sides are negative, the result is negative.
1067 if (Known.isNegative() && Known2.isNegative())
1068 // We can derive a lower bound on the result by taking the max of the
1069 // leading one bits.
1070 MaxHighOnes =
1071 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1072 // If either side is non-negative, the result is non-negative.
1073 else if (Known.isNonNegative() || Known2.isNonNegative())
1074 MaxHighZeros = 1;
1075 } else if (SPF == SPF_SMIN) {
1076 // If both sides are non-negative, the result is non-negative.
1077 if (Known.isNonNegative() && Known2.isNonNegative())
1078 // We can derive an upper bound on the result by taking the max of the
1079 // leading zero bits.
1080 MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1081 Known2.countMinLeadingZeros());
1082 // If either side is negative, the result is negative.
1083 else if (Known.isNegative() || Known2.isNegative())
1084 MaxHighOnes = 1;
1085 } else if (SPF == SPF_UMAX) {
1086 // We can derive a lower bound on the result by taking the max of the
1087 // leading one bits.
1088 MaxHighOnes =
1089 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1090 } else if (SPF == SPF_UMIN) {
1091 // We can derive an upper bound on the result by taking the max of the
1092 // leading zero bits.
1093 MaxHighZeros =
1094 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1095 } else if (SPF == SPF_ABS) {
1096 // RHS from matchSelectPattern is the negation part of the abs pattern.
1097 // If the negate has an NSW flag we can assume the sign bit of the result
1098 // will be 0 because that makes abs(INT_MIN) undefined.
1099 if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1100 MaxHighZeros = 1;
1101 }
1102
1103 // Only known if known in both the LHS and RHS.
1104 Known.One &= Known2.One;
1105 Known.Zero &= Known2.Zero;
1106 if (MaxHighOnes > 0)
1107 Known.One.setHighBits(MaxHighOnes);
1108 if (MaxHighZeros > 0)
1109 Known.Zero.setHighBits(MaxHighZeros);
1110 break;
1111 }
1112 case Instruction::FPTrunc:
1113 case Instruction::FPExt:
1114 case Instruction::FPToUI:
1115 case Instruction::FPToSI:
1116 case Instruction::SIToFP:
1117 case Instruction::UIToFP:
1118 break; // Can't work with floating point.
1119 case Instruction::PtrToInt:
1120 case Instruction::IntToPtr:
1121 // Fall through and handle them the same as zext/trunc.
1122 LLVM_FALLTHROUGH;
1123 case Instruction::ZExt:
1124 case Instruction::Trunc: {
1125 Type *SrcTy = I->getOperand(0)->getType();
1126
1127 unsigned SrcBitWidth;
1128 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1129 // which fall through here.
1130 Type *ScalarTy = SrcTy->getScalarType();
1131 SrcBitWidth = ScalarTy->isPointerTy() ?
1132 Q.DL.getIndexTypeSizeInBits(ScalarTy) :
1133 Q.DL.getTypeSizeInBits(ScalarTy);
1134
1135 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1136 Known = Known.zextOrTrunc(SrcBitWidth, false);
1137 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1138 Known = Known.zextOrTrunc(BitWidth, true /* ExtendedBitsAreKnownZero */);
1139 break;
1140 }
1141 case Instruction::BitCast: {
1142 Type *SrcTy = I->getOperand(0)->getType();
1143 if (SrcTy->isIntOrPtrTy() &&
1144 // TODO: For now, not handling conversions like:
1145 // (bitcast i64 %x to <2 x i32>)
1146 !I->getType()->isVectorTy()) {
1147 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1148 break;
1149 }
1150 break;
1151 }
1152 case Instruction::SExt: {
1153 // Compute the bits in the result that are not present in the input.
1154 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1155
1156 Known = Known.trunc(SrcBitWidth);
1157 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1158 // If the sign bit of the input is known set or clear, then we know the
1159 // top bits of the result.
1160 Known = Known.sext(BitWidth);
1161 break;
1162 }
1163 case Instruction::Shl: {
1164 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
1165 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1166 auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1167 APInt KZResult = KnownZero << ShiftAmt;
1168 KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1169 // If this shift has "nsw" keyword, then the result is either a poison
1170 // value or has the same sign bit as the first operand.
1171 if (NSW && KnownZero.isSignBitSet())
1172 KZResult.setSignBit();
1173 return KZResult;
1174 };
1175
1176 auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1177 APInt KOResult = KnownOne << ShiftAmt;
1178 if (NSW && KnownOne.isSignBitSet())
1179 KOResult.setSignBit();
1180 return KOResult;
1181 };
1182
1183 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1184 break;
1185 }
1186 case Instruction::LShr: {
1187 // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1188 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1189 APInt KZResult = KnownZero.lshr(ShiftAmt);
1190 // High bits known zero.
1191 KZResult.setHighBits(ShiftAmt);
1192 return KZResult;
1193 };
1194
1195 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1196 return KnownOne.lshr(ShiftAmt);
1197 };
1198
1199 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1200 break;
1201 }
1202 case Instruction::AShr: {
1203 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1204 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1205 return KnownZero.ashr(ShiftAmt);
1206 };
1207
1208 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1209 return KnownOne.ashr(ShiftAmt);
1210 };
1211
1212 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1213 break;
1214 }
1215 case Instruction::Sub: {
1216 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1217 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1218 Known, Known2, Depth, Q);
1219 break;
1220 }
1221 case Instruction::Add: {
1222 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1223 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1224 Known, Known2, Depth, Q);
1225 break;
1226 }
1227 case Instruction::SRem:
1228 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1229 APInt RA = Rem->getValue().abs();
1230 if (RA.isPowerOf2()) {
1231 APInt LowBits = RA - 1;
1232 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1233
1234 // The low bits of the first operand are unchanged by the srem.
1235 Known.Zero = Known2.Zero & LowBits;
1236 Known.One = Known2.One & LowBits;
1237
1238 // If the first operand is non-negative or has all low bits zero, then
1239 // the upper bits are all zero.
1240 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1241 Known.Zero |= ~LowBits;
1242
1243 // If the first operand is negative and not all low bits are zero, then
1244 // the upper bits are all one.
1245 if (Known2.isNegative() && LowBits.intersects(Known2.One))
1246 Known.One |= ~LowBits;
1247
1248 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1249 break;
1250 }
1251 }
1252
1253 // The sign bit is the LHS's sign bit, except when the result of the
1254 // remainder is zero.
1255 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1256 // If it's known zero, our sign bit is also zero.
1257 if (Known2.isNonNegative())
1258 Known.makeNonNegative();
1259
1260 break;
1261 case Instruction::URem: {
1262 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1263 const APInt &RA = Rem->getValue();
1264 if (RA.isPowerOf2()) {
1265 APInt LowBits = (RA - 1);
1266 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1267 Known.Zero |= ~LowBits;
1268 Known.One &= LowBits;
1269 break;
1270 }
1271 }
1272
1273 // Since the result is less than or equal to either operand, any leading
1274 // zero bits in either operand must also exist in the result.
1275 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1276 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1277
1278 unsigned Leaders =
1279 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1280 Known.resetAll();
1281 Known.Zero.setHighBits(Leaders);
1282 break;
1283 }
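
// Editorial sketch (not part of the analyzed file): urem by a constant
// power of two only keeps the LowBits mask computed above, e.g.
//   uint32_t uremByEight(uint32_t X) { return X % 8; } // == (X & 7)
// so bits 3 and up of the result are known zero.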
1284
1285 case Instruction::Alloca: {
1286 const AllocaInst *AI = cast<AllocaInst>(I);
1287 unsigned Align = AI->getAlignment();
1288 if (Align == 0)
1289 Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1290
1291 if (Align > 0)
1292 Known.Zero.setLowBits(countTrailingZeros(Align));
1293 break;
1294 }
1295 case Instruction::GetElementPtr: {
1296 // Analyze all of the subscripts of this getelementptr instruction
1297 // to determine if we can prove known low zero bits.
1298 KnownBits LocalKnown(BitWidth);
1299 computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1300 unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1301
1302 gep_type_iterator GTI = gep_type_begin(I);
1303 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1304 Value *Index = I->getOperand(i);
1305 if (StructType *STy = GTI.getStructTypeOrNull()) {
1306 // Handle struct member offset arithmetic.
1307
1308      // Handle the case where the index is a vector zeroinitializer.
1309 Constant *CIndex = cast<Constant>(Index);
1310 if (CIndex->isZeroValue())
1311 continue;
1312
1313 if (CIndex->getType()->isVectorTy())
1314 Index = CIndex->getSplatValue();
1315
1316 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1317 const StructLayout *SL = Q.DL.getStructLayout(STy);
1318 uint64_t Offset = SL->getElementOffset(Idx);
1319 TrailZ = std::min<unsigned>(TrailZ,
1320 countTrailingZeros(Offset));
1321 } else {
1322 // Handle array index arithmetic.
1323 Type *IndexedTy = GTI.getIndexedType();
1324 if (!IndexedTy->isSized()) {
1325 TrailZ = 0;
1326 break;
1327 }
1328 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1329 uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1330 LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1331 computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1332 TrailZ = std::min(TrailZ,
1333 unsigned(countTrailingZeros(TypeSize) +
1334 LocalKnown.countMinTrailingZeros()));
1335 }
1336 }
1337
1338 Known.Zero.setLowBits(TrailZ);
1339 break;
1340 }
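
// Editorial sketch (not part of the analyzed file): the fold above mirrors
// ordinary pointer arithmetic. For "int A[N]", &A[I] is Base + 4 * I, so
// the result keeps
//   std::min(ctz(Base), ctz(4) + ctz(I))
// trailing zero bits -- exactly the per-index std::min applied to TrailZ.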
1341 case Instruction::PHI: {
1342 const PHINode *P = cast<PHINode>(I);
1343 // Handle the case of a simple two-predecessor recurrence PHI.
1344 // There's a lot more that could theoretically be done here, but
1345 // this is sufficient to catch some interesting cases.
1346 if (P->getNumIncomingValues() == 2) {
1347 for (unsigned i = 0; i != 2; ++i) {
1348 Value *L = P->getIncomingValue(i);
1349 Value *R = P->getIncomingValue(!i);
1350 Operator *LU = dyn_cast<Operator>(L);
1351 if (!LU)
1352 continue;
1353 unsigned Opcode = LU->getOpcode();
1354 // Check for operations that have the property that if
1355 // both their operands have low zero bits, the result
1356 // will have low zero bits.
1357 if (Opcode == Instruction::Add ||
1358 Opcode == Instruction::Sub ||
1359 Opcode == Instruction::And ||
1360 Opcode == Instruction::Or ||
1361 Opcode == Instruction::Mul) {
1362 Value *LL = LU->getOperand(0);
1363 Value *LR = LU->getOperand(1);
1364 // Find a recurrence.
1365 if (LL == I)
1366 L = LR;
1367 else if (LR == I)
1368 L = LL;
1369 else
1370 break;
1371 // Ok, we have a PHI of the form L op= R. Check for low
1372 // zero bits.
1373 computeKnownBits(R, Known2, Depth + 1, Q);
1374
1375          // We need to take the minimum number of known bits from both values.
1376 KnownBits Known3(Known);
1377 computeKnownBits(L, Known3, Depth + 1, Q);
1378
1379 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1380 Known3.countMinTrailingZeros()));
1381
1382 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1383 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1384 // If initial value of recurrence is nonnegative, and we are adding
1385 // a nonnegative number with nsw, the result can only be nonnegative
1386 // or poison value regardless of the number of times we execute the
1387 // add in phi recurrence. If initial value is negative and we are
1388 // adding a negative number with nsw, the result can only be
1389 // negative or poison value. Similar arguments apply to sub and mul.
1390 //
1391 // (add non-negative, non-negative) --> non-negative
1392 // (add negative, negative) --> negative
1393 if (Opcode == Instruction::Add) {
1394 if (Known2.isNonNegative() && Known3.isNonNegative())
1395 Known.makeNonNegative();
1396 else if (Known2.isNegative() && Known3.isNegative())
1397 Known.makeNegative();
1398 }
1399
1400 // (sub nsw non-negative, negative) --> non-negative
1401 // (sub nsw negative, non-negative) --> negative
1402 else if (Opcode == Instruction::Sub && LL == I) {
1403 if (Known2.isNonNegative() && Known3.isNegative())
1404 Known.makeNonNegative();
1405 else if (Known2.isNegative() && Known3.isNonNegative())
1406 Known.makeNegative();
1407 }
1408
1409 // (mul nsw non-negative, non-negative) --> non-negative
1410 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1411 Known3.isNonNegative())
1412 Known.makeNonNegative();
1413 }
1414
1415 break;
1416 }
1417 }
1418 }
1419
1420 // Unreachable blocks may have zero-operand PHI nodes.
1421 if (P->getNumIncomingValues() == 0)
1422 break;
1423
1424 // Otherwise take the unions of the known bit sets of the operands,
1425 // taking conservative care to avoid excessive recursion.
1426 if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1427      // Skip if every incoming value refers back to the PHI itself.
1428 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1429 break;
1430
1431 Known.Zero.setAllBits();
1432 Known.One.setAllBits();
1433 for (Value *IncValue : P->incoming_values()) {
1434 // Skip direct self references.
1435 if (IncValue == P) continue;
1436
1437 Known2 = KnownBits(BitWidth);
1438 // Recurse, but cap the recursion to one level, because we don't
1439 // want to waste time spinning around in loops.
1440 computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
1441 Known.Zero &= Known2.Zero;
1442 Known.One &= Known2.One;
1443 // If all bits have been ruled out, there's no need to check
1444 // more operands.
1445 if (!Known.Zero && !Known.One)
1446 break;
1447 }
1448 }
1449 break;
1450 }
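
// Editorial sketch (not part of the analyzed file) of the recurrence rule:
//   for (unsigned I = 0; I != 4 * N; I += 4)
//     Sum += A[I];
// Both the start (0) and the step (4) have at least two trailing zero bits,
// so the PHI for I keeps its low two bits clear on every iteration.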
1451 case Instruction::Call:
1452 case Instruction::Invoke:
1453 // If range metadata is attached to this call, set known bits from that,
1454 // and then intersect with known bits based on other properties of the
1455 // function.
1456 if (MDNode *MD =
1457 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1458 computeKnownBitsFromRangeMetadata(*MD, Known);
1459 if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1460 computeKnownBits(RV, Known2, Depth + 1, Q);
1461 Known.Zero |= Known2.Zero;
1462 Known.One |= Known2.One;
1463 }
1464 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1465 switch (II->getIntrinsicID()) {
1466 default: break;
1467 case Intrinsic::bitreverse:
1468 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1469 Known.Zero |= Known2.Zero.reverseBits();
1470 Known.One |= Known2.One.reverseBits();
1471 break;
1472 case Intrinsic::bswap:
1473 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1474 Known.Zero |= Known2.Zero.byteSwap();
1475 Known.One |= Known2.One.byteSwap();
1476 break;
1477 case Intrinsic::ctlz: {
1478 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1479 // If we have a known 1, its position is our upper bound.
1480 unsigned PossibleLZ = Known2.One.countLeadingZeros();
1481 // If this call is undefined for 0, the result will be less than 2^n.
1482 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1483 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1484 unsigned LowBits = Log2_32(PossibleLZ)+1;
1485 Known.Zero.setBitsFrom(LowBits);
1486 break;
1487 }
1488 case Intrinsic::cttz: {
1489 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1490 // If we have a known 1, its position is our upper bound.
1491 unsigned PossibleTZ = Known2.One.countTrailingZeros();
1492 // If this call is undefined for 0, the result will be less than 2^n.
1493 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1494 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1495 unsigned LowBits = Log2_32(PossibleTZ)+1;
1496 Known.Zero.setBitsFrom(LowBits);
1497 break;
1498 }
1499 case Intrinsic::ctpop: {
1500 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1501 // We can bound the space the count needs. Also, bits known to be zero
1502 // can't contribute to the population.
1503 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1504 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1505 Known.Zero.setBitsFrom(LowBits);
1506 // TODO: we could bound KnownOne using the lower bound on the number
1507 // of bits which might be set provided by popcnt KnownOne2.
1508 break;
1509 }
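
// Editorial sketch (not part of the analyzed file): an i32 population count
// is at most 32, so it fits in Log2_32(32) + 1 == 6 bits, i.e.
//   static_assert(32u < (1u << 6), "ctpop of an i32 fits in 6 bits");
// and Known.Zero has bits 6 and up set.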
1510 case Intrinsic::fshr:
1511 case Intrinsic::fshl: {
1512 const APInt *SA;
1513 if (!match(I->getOperand(2), m_APInt(SA)))
1514 break;
1515
1516 // Normalize to funnel shift left.
1517 uint64_t ShiftAmt = SA->urem(BitWidth);
1518 if (II->getIntrinsicID() == Intrinsic::fshr)
1519 ShiftAmt = BitWidth - ShiftAmt;
1520
1521 KnownBits Known3(Known);
1522 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1523 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1524
1525 Known.Zero =
1526 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1527 Known.One =
1528 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1529 break;
1530 }
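
// Editorial sketch (not part of the analyzed file) of the normalization:
// for 0 < S < BitWidth, fshr(X, Y, S) selects the same bit window as
// fshl(X, Y, BitWidth - S):
//   uint32_t fshl32(uint32_t X, uint32_t Y, unsigned S) {
//     return (X << S) | (Y >> (32 - S)); // Assumes 0 < S < 32.
//   }
//   uint32_t fshr32(uint32_t X, uint32_t Y, unsigned S) {
//     return fshl32(X, Y, 32 - S); // Same window, left-normalized.
//   }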
1531 case Intrinsic::uadd_sat:
1532 case Intrinsic::usub_sat: {
1533 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1534 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1535 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1536
1537 // Add: Leading ones of either operand are preserved.
1538 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1539 // as leading zeros in the result.
1540 unsigned LeadingKnown;
1541 if (IsAdd)
1542 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1543 Known2.countMinLeadingOnes());
1544 else
1545 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1546 Known2.countMinLeadingOnes());
1547
1548 Known = KnownBits::computeForAddSub(
1549 IsAdd, /* NSW */ false, Known, Known2);
1550
1551 // We select between the operation result and all-ones/zero
1552 // respectively, so we can preserve known ones/zeros.
1553 if (IsAdd) {
1554 Known.One.setHighBits(LeadingKnown);
1555 Known.Zero.clearAllBits();
1556 } else {
1557 Known.Zero.setHighBits(LeadingKnown);
1558 Known.One.clearAllBits();
1559 }
1560 break;
1561 }
1562 case Intrinsic::x86_sse42_crc32_64_64:
1563 Known.Zero.setBitsFrom(32);
1564 break;
1565 }
1566 }
1567 break;
1568 case Instruction::ExtractElement:
1569 // Look through extract element. At the moment we keep this simple and skip
1570 // tracking the specific element. But at least we might find information
1571 // valid for all elements of the vector (for example if vector is sign
1572 // extended, shifted, etc).
1573 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1574 break;
1575 case Instruction::ExtractValue:
1576 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1577 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1578 if (EVI->getNumIndices() != 1) break;
1579 if (EVI->getIndices()[0] == 0) {
1580 switch (II->getIntrinsicID()) {
1581 default: break;
1582 case Intrinsic::uadd_with_overflow:
1583 case Intrinsic::sadd_with_overflow:
1584 computeKnownBitsAddSub(true, II->getArgOperand(0),
1585 II->getArgOperand(1), false, Known, Known2,
1586 Depth, Q);
1587 break;
1588 case Intrinsic::usub_with_overflow:
1589 case Intrinsic::ssub_with_overflow:
1590 computeKnownBitsAddSub(false, II->getArgOperand(0),
1591 II->getArgOperand(1), false, Known, Known2,
1592 Depth, Q);
1593 break;
1594 case Intrinsic::umul_with_overflow:
1595 case Intrinsic::smul_with_overflow:
1596 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1597 Known, Known2, Depth, Q);
1598 break;
1599 }
1600 }
1601 }
1602 }
1603}
1604
1605/// Determine which bits of V are known to be either zero or one and return
1606/// them.
1607KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1608 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1609 computeKnownBits(V, Known, Depth, Q);
1610 return Known;
1611}
1612
1613/// Determine which bits of V are known to be either zero or one and return
1614/// them in the Known bit set.
1615///
1616/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1617/// we cannot optimize based on the assumption that it is zero without changing
1618/// it to be an explicit zero. If we don't change it to zero, other code could
1619/// be optimized based on the contradictory assumption that it is non-zero.
1620/// Because instcombine aggressively folds operations with undef args anyway,
1621/// this won't lose us code quality.
1622///
1623/// This function is defined on values with integer type, values with pointer
1624/// type, and vectors of integers. In the case
1625/// where V is a vector, the known zero and known one values are the
1626/// same width as the vector element, and the bit is set only if it is true
1627/// for all of the elements in the vector.
1628void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1629 const Query &Q) {
1630  assert(V && "No Value?");
1631  assert(Depth <= MaxDepth && "Limit Search Depth");
1632 unsigned BitWidth = Known.getBitWidth();
1633
1634  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1635          V->getType()->isPtrOrPtrVectorTy()) &&
1636         "Not integer or pointer type!");
1637
1638 Type *ScalarTy = V->getType()->getScalarType();
1639 unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1640 Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1641  assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1642 (void)BitWidth;
1643 (void)ExpectedWidth;
1644
1645 const APInt *C;
1646 if (match(V, m_APInt(C))) {
1647 // We know all of the bits for a scalar constant or a splat vector constant!
1648 Known.One = *C;
1649 Known.Zero = ~Known.One;
1650 return;
1651 }
1652 // Null and aggregate-zero are all-zeros.
1653 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1654 Known.setAllZero();
1655 return;
1656 }
1657 // Handle a constant vector by taking the intersection of the known bits of
1658 // each element.
1659 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1660 // We know that CDS must be a vector of integers. Take the intersection of
1661 // each element.
1662 Known.Zero.setAllBits(); Known.One.setAllBits();
1663 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1664 APInt Elt = CDS->getElementAsAPInt(i);
1665 Known.Zero &= ~Elt;
1666 Known.One &= Elt;
1667 }
1668 return;
1669 }
1670
1671 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1672 // We know that CV must be a vector of integers. Take the intersection of
1673 // each element.
1674 Known.Zero.setAllBits(); Known.One.setAllBits();
1675 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1676 Constant *Element = CV->getAggregateElement(i);
1677 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1678 if (!ElementCI) {
1679 Known.resetAll();
1680 return;
1681 }
1682 const APInt &Elt = ElementCI->getValue();
1683 Known.Zero &= ~Elt;
1684 Known.One &= Elt;
1685 }
1686 return;
1687 }
1688
1689 // Start out not knowing anything.
1690 Known.resetAll();
1691
1692 // We can't imply anything about undefs.
1693 if (isa<UndefValue>(V))
1694 return;
1695
1696 // There's no point in looking through other users of ConstantData for
1697 // assumptions. Confirm that we've handled them all.
1698  assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1699
1700 // Limit search depth.
1701 // All recursive calls that increase depth must come after this.
1702 if (Depth == MaxDepth)
1703 return;
1704
1705 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1706 // the bits of its aliasee.
1707 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1708 if (!GA->isInterposable())
1709 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1710 return;
1711 }
1712
1713 if (const Operator *I = dyn_cast<Operator>(V))
1714 computeKnownBitsFromOperator(I, Known, Depth, Q);
1715
1716 // Aligned pointers have trailing zeros - refine Known.Zero set
1717 if (V->getType()->isPointerTy()) {
1718 unsigned Align = V->getPointerAlignment(Q.DL);
1719 if (Align)
1720 Known.Zero.setLowBits(countTrailingZeros(Align));
1721 }
1722
1723 // computeKnownBitsFromAssume strictly refines Known.
1724 // Therefore, we run them after computeKnownBitsFromOperator.
1725
1726 // Check whether a nearby assume intrinsic can determine some known bits.
1727 computeKnownBitsFromAssume(V, Known, Depth, Q);
1728
1729  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1730}
1731
1732/// Return true if the given value is known to have exactly one
1733/// bit set when defined. For vectors return true if every element is known to
1734/// be a power of two when defined. Supports values with integer or pointer
1735/// types and vectors of integers.
1736bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1737 const Query &Q) {
1738  assert(Depth <= MaxDepth && "Limit Search Depth");
1739
1740 // Attempt to match against constants.
1741 if (OrZero && match(V, m_Power2OrZero()))
1742 return true;
1743 if (match(V, m_Power2()))
1744 return true;
1745
1746 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1747 // it is shifted off the end then the result is undefined.
1748 if (match(V, m_Shl(m_One(), m_Value())))
1749 return true;
1750
1751 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1752 // the bottom. If it is shifted off the bottom then the result is undefined.
1753 if (match(V, m_LShr(m_SignMask(), m_Value())))
1754 return true;
1755
1756 // The remaining tests are all recursive, so bail out if we hit the limit.
1757 if (Depth++ == MaxDepth)
1758 return false;
1759
1760 Value *X = nullptr, *Y = nullptr;
1761 // A shift left or a logical shift right of a power of two is a power of two
1762 // or zero.
1763 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1764 match(V, m_LShr(m_Value(X), m_Value()))))
1765 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1766
1767 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1768 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1769
1770 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1771 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1772 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1773
1774 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1775 // A power of two and'd with anything is a power of two or zero.
1776 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1777 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1778 return true;
1779 // X & (-X) is always a power of two or zero.
1780 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1781 return true;
1782 return false;
1783 }
1784
1785 // Adding a power-of-two or zero to the same power-of-two or zero yields
1786 // either the original power-of-two, a larger power-of-two or zero.
1787 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1788 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1789 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1790 Q.IIQ.hasNoSignedWrap(VOBO)) {
1791 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1792 match(X, m_And(m_Value(), m_Specific(Y))))
1793 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1794 return true;
1795 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1796 match(Y, m_And(m_Value(), m_Specific(X))))
1797 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1798 return true;
1799
1800 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1801 KnownBits LHSBits(BitWidth);
1802 computeKnownBits(X, LHSBits, Depth, Q);
1803
1804 KnownBits RHSBits(BitWidth);
1805 computeKnownBits(Y, RHSBits, Depth, Q);
1806 // If i8 V is a power of two or zero:
1807 // ZeroBits: 1 1 1 0 1 1 1 1
1808 // ~ZeroBits: 0 0 0 1 0 0 0 0
1809 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1810 // If OrZero isn't set, we cannot give back a zero result.
1811 // Make sure either the LHS or RHS has a bit set.
1812 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1813 return true;
1814 }
1815 }
1816
1817 // An exact divide or right shift can only shift off zero bits, so the result
1818 // is a power of two only if the first operand is a power of two and not
1819 // copying a sign bit (sdiv int_min, 2).
1820 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1821 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1822 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1823 Depth, Q);
1824 }
1825
1826 return false;
1827}
1828
1829/// Test whether a GEP's result is known to be non-null.
1830///
1831/// Uses properties inherent in a GEP to try to determine whether it is known
1832/// to be non-null.
1833///
1834/// Currently this routine does not support vector GEPs.
1835static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1836 const Query &Q) {
1837 const Function *F = nullptr;
1838 if (const Instruction *I = dyn_cast<Instruction>(GEP))
1839 F = I->getFunction();
1840
1841 if (!GEP->isInBounds() ||
1842 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
1843 return false;
1844
1845 // FIXME: Support vector-GEPs.
1846  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1847
1848 // If the base pointer is non-null, we cannot walk to a null address with an
1849 // inbounds GEP in address space zero.
1850 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1851 return true;
1852
1853 // Walk the GEP operands and see if any operand introduces a non-zero offset.
1854 // If so, then the GEP cannot produce a null pointer, as doing so would
1855 // inherently violate the inbounds contract within address space zero.
1856 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1857 GTI != GTE; ++GTI) {
1858 // Struct types are easy -- they must always be indexed by a constant.
1859 if (StructType *STy = GTI.getStructTypeOrNull()) {
1860 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1861 unsigned ElementIdx = OpC->getZExtValue();
1862 const StructLayout *SL = Q.DL.getStructLayout(STy);
1863 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1864 if (ElementOffset > 0)
1865 return true;
1866 continue;
1867 }
1868
1869 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1870 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1871 continue;
1872
1873 // Fast path the constant operand case both for efficiency and so we don't
1874 // increment Depth when just zipping down an all-constant GEP.
1875 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1876 if (!OpC->isZero())
1877 return true;
1878 continue;
1879 }
1880
1881 // We post-increment Depth here because while isKnownNonZero increments it
1882 // as well, when we pop back up that increment won't persist. We don't want
1883 // to recurse 10k times just because we have 10k GEP operands. We don't
1884 // bail completely out because we want to handle constant GEPs regardless
1885 // of depth.
1886 if (Depth++ >= MaxDepth)
1887 continue;
1888
1889 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1890 return true;
1891 }
1892
1893 return false;
1894}
1895
1896static bool isKnownNonNullFromDominatingCondition(const Value *V,
1897 const Instruction *CtxI,
1898 const DominatorTree *DT) {
1899  assert(V->getType()->isPointerTy() && "V must be pointer type");
1900  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1901
1902 if (!CtxI || !DT)
1903 return false;
1904
1905 unsigned NumUsesExplored = 0;
1906 for (auto *U : V->users()) {
1907 // Avoid massive lists
1908 if (NumUsesExplored >= DomConditionsMaxUses)
1909 break;
1910 NumUsesExplored++;
1911
1912 // If the value is used as an argument to a call or invoke, then argument
1913 // attributes may provide an answer about null-ness.
1914 if (auto CS = ImmutableCallSite(U))
1915 if (auto *CalledFunc = CS.getCalledFunction())
1916 for (const Argument &Arg : CalledFunc->args())
1917 if (CS.getArgOperand(Arg.getArgNo()) == V &&
1918 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1919 return true;
1920
1921 // Consider only compare instructions uniquely controlling a branch
1922 CmpInst::Predicate Pred;
1923 if (!match(const_cast<User *>(U),
1924 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1925 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1926 continue;
1927
1928 SmallVector<const User *, 4> WorkList;
1929 SmallPtrSet<const User *, 4> Visited;
1930 for (auto *CmpU : U->users()) {
1931      assert(WorkList.empty() && "Should be!");
1932 if (Visited.insert(CmpU).second)
1933 WorkList.push_back(CmpU);
1934
1935 while (!WorkList.empty()) {
1936 auto *Curr = WorkList.pop_back_val();
1937
1938 // If a user is an AND, add all its users to the work list. We only
1939 // propagate "pred != null" condition through AND because it is only
1940 // correct to assume that all conditions of AND are met in true branch.
1941 // TODO: Support similar logic of OR and EQ predicate?
1942 if (Pred == ICmpInst::ICMP_NE)
1943 if (auto *BO = dyn_cast<BinaryOperator>(Curr))
1944 if (BO->getOpcode() == Instruction::And) {
1945 for (auto *BOU : BO->users())
1946 if (Visited.insert(BOU).second)
1947 WorkList.push_back(BOU);
1948 continue;
1949 }
1950
1951 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
1952          assert(BI->isConditional() && "uses a comparison!");
1953
1954 BasicBlock *NonNullSuccessor =
1955 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1956 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1957 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1958 return true;
1959 } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
1960 DT->dominates(cast<Instruction>(Curr), CtxI)) {
1961 return true;
1962 }
1963 }
1964 }
1965 }
1966
1967 return false;
1968}
1969
1970/// Does the 'Range' metadata (which must be a valid MD_range operand list)
1971/// ensure that the value it's attached to is never equal to 'Value'?
1972/// 'RangeType' is the type of the value described by the range.
1973static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1974 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1975  assert(NumRanges >= 1);
1976 for (unsigned i = 0; i < NumRanges; ++i) {
1977 ConstantInt *Lower =
1978 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1979 ConstantInt *Upper =
1980 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1981 ConstantRange Range(Lower->getValue(), Upper->getValue());
1982 if (Range.contains(Value))
1983 return false;
1984 }
1985 return true;
1986}
1987
1988/// Return true if the given value is known to be non-zero when defined. For
1989/// vectors, return true if every element is known to be non-zero when
1990/// defined. For pointers, if the context instruction and dominator tree are
1991/// specified, perform context-sensitive analysis and return true if the
1992/// pointer couldn't possibly be null at the specified instruction.
1993/// Supports values with integer or pointer type and vectors of integers.
1994bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1995 if (auto *C = dyn_cast<Constant>(V)) {
1996 if (C->isNullValue())
1997 return false;
1998 if (isa<ConstantInt>(C))
1999 // Must be non-zero due to null test above.
2000 return true;
2001
2002 // For constant vectors, check that all elements are undefined or known
2003 // non-zero to determine that the whole vector is known non-zero.
2004 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
2005 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2006 Constant *Elt = C->getAggregateElement(i);
2007 if (!Elt || Elt->isNullValue())
2008 return false;
2009 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2010 return false;
2011 }
2012 return true;
2013 }
2014
2015 // A global variable in address space 0 is non null unless extern weak
2016 // or an absolute symbol reference. Other address spaces may have null as a
2017 // valid address for a global, so we can't assume anything.
2018 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2019 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2020 GV->getType()->getAddressSpace() == 0)
2021 return true;
2022 } else
2023 return false;
2024 }
2025
2026 if (auto *I = dyn_cast<Instruction>(V)) {
2027 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2028 // If the possible ranges don't contain zero, then the value is
2029 // definitely non-zero.
2030 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2031 const APInt ZeroValue(Ty->getBitWidth(), 0);
2032 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2033 return true;
2034 }
2035 }
2036 }
2037
2038 // Some of the tests below are recursive, so bail out if we hit the limit.
2039 if (Depth++ >= MaxDepth)
2040 return false;
2041
2042 // Check for pointer simplifications.
2043 if (V->getType()->isPointerTy()) {
2044 // Alloca never returns null, malloc might.
2045 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2046 return true;
2047
2048 // A byval, inalloca, or nonnull argument is never null.
2049 if (const Argument *A = dyn_cast<Argument>(V))
2050 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
2051 return true;
2052
2053 // A Load tagged with nonnull metadata is never null.
2054 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2055 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2056 return true;
2057
2058 if (const auto *Call = dyn_cast<CallBase>(V)) {
2059 if (Call->isReturnNonNull())
2060 return true;
2061 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call))
2062 return isKnownNonZero(RP, Depth, Q);
2063 }
2064 }
2065
2066
2067 // Check for recursive pointer simplifications.
2068 if (V->getType()->isPointerTy()) {
2069 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2070 return true;
2071
2072 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2073 // do not alter the value, or at least not the nullness property of the
2074 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2075 //
2076 // Note that we have to take special care to avoid looking through
2077 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2078 // as casts that can alter the value, e.g., AddrSpaceCasts.
2079 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2080 if (isGEPKnownNonNull(GEP, Depth, Q))
2081 return true;
2082
2083 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2084 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2085
2086 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2087 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2088 Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2089 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2090 }
2091
2092 // Similar to int2ptr above, we can look through ptr2int here if the cast
2093 // is a no-op or an extend and not a truncate.
2094 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2095 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2096 Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2097 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2098
2099 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2100
2101 // X | Y != 0 if X != 0 or Y != 0.
2102 Value *X = nullptr, *Y = nullptr;
2103 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2104 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
2105
2106 // ext X != 0 if X != 0.
2107 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2108 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2109
2110 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2111 // if the lowest bit is shifted off the end.
2112 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2113 // shl nuw can't remove any non-zero bits.
2114 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2115 if (Q.IIQ.hasNoUnsignedWrap(BO))
2116 return isKnownNonZero(X, Depth, Q);
2117
2118 KnownBits Known(BitWidth);
2119 computeKnownBits(X, Known, Depth, Q);
2120 if (Known.One[0])
2121 return true;
2122 }
2123 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2124 // defined if the sign bit is shifted off the end.
2125 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2126 // shr exact can only shift out zero bits.
2127 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2128 if (BO->isExact())
2129 return isKnownNonZero(X, Depth, Q);
2130
2131 KnownBits Known = computeKnownBits(X, Depth, Q);
2132 if (Known.isNegative())
2133 return true;
2134
2135 // If the shifter operand is a constant, and all of the bits shifted
2136 // out are known to be zero, and X is known non-zero then at least one
2137 // non-zero bit must remain.
2138 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2139 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2140 // Is there a known one in the portion not shifted out?
2141 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2142 return true;
2143 // Are all the bits to be shifted out known zero?
2144 if (Known.countMinTrailingZeros() >= ShiftVal)
2145 return isKnownNonZero(X, Depth, Q);
2146 }
2147 }
2148 // div exact can only produce a zero if the dividend is zero.
2149 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2150 return isKnownNonZero(X, Depth, Q);
2151 }
2152 // X + Y.
2153 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2154 KnownBits XKnown = computeKnownBits(X, Depth, Q);
2155 KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2156
2157 // If X and Y are both non-negative (as signed values) then their sum is not
2158 // zero unless both X and Y are zero.
2159 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2160 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2161 return true;
2162
2163 // If X and Y are both negative (as signed values) then their sum is not
2164 // zero unless both X and Y equal INT_MIN.
2165 if (XKnown.isNegative() && YKnown.isNegative()) {
2166 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2167 // The sign bit of X is set. If some other bit is set then X is not equal
2168 // to INT_MIN.
2169 if (XKnown.One.intersects(Mask))
2170 return true;
2171 // The sign bit of Y is set. If some other bit is set then Y is not equal
2172 // to INT_MIN.
2173 if (YKnown.One.intersects(Mask))
2174 return true;
2175 }
2176
2177 // The sum of a non-negative number and a power of two is not zero.
2178 if (XKnown.isNonNegative() &&
2179 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2180 return true;
2181 if (YKnown.isNonNegative() &&
2182 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2183 return true;
2184 }
2185 // X * Y.
2186 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2187 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2188 // If X and Y are non-zero then so is X * Y as long as the multiplication
2189 // does not overflow.
2190 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2191 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2192 return true;
2193 }
2194 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2195 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2196 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2197 isKnownNonZero(SI->getFalseValue(), Depth, Q))
2198 return true;
2199 }
2200 // PHI
2201 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2202    // Try to detect a recurrence that monotonically increases from a
2203 // starting value, as these are common as induction variables.
2204 if (PN->getNumIncomingValues() == 2) {
2205 Value *Start = PN->getIncomingValue(0);
2206 Value *Induction = PN->getIncomingValue(1);
2207 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2208 std::swap(Start, Induction);
2209 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2210 if (!C->isZero() && !C->isNegative()) {
2211 ConstantInt *X;
2212 if (Q.IIQ.UseInstrInfo &&
2213 (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2214 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2215 !X->isNegative())
2216 return true;
2217 }
2218 }
2219 }
2220 // Check if all incoming values are non-zero constant.
2221 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2222 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2223 });
2224 if (AllNonZeroConstants)
2225 return true;
2226 }
2227
2228 KnownBits Known(BitWidth);
2229 computeKnownBits(V, Known, Depth, Q);
2230 return Known.One != 0;
2231}
2232
2233/// Return true if V2 == V1 + X, where X is known non-zero.
2234static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2235 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2236 if (!BO || BO->getOpcode() != Instruction::Add)
2237 return false;
2238 Value *Op = nullptr;
2239 if (V2 == BO->getOperand(0))
2240 Op = BO->getOperand(1);
2241 else if (V2 == BO->getOperand(1))
2242 Op = BO->getOperand(0);
2243 else
2244 return false;
2245 return isKnownNonZero(Op, 0, Q);
2246}
2247
2248/// Return true if it is known that V1 != V2.
2249static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2250 if (V1 == V2)
2251 return false;
2252 if (V1->getType() != V2->getType())
2253 // We can't look through casts yet.
2254 return false;
2255 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2256 return true;
2257
2258 if (V1->getType()->isIntOrIntVectorTy()) {
2259 // Are any known bits in V1 contradictory to known bits in V2? If V1
2260 // has a known zero where V2 has a known one, they must not be equal.
2261 KnownBits Known1 = computeKnownBits(V1, 0, Q);
2262 KnownBits Known2 = computeKnownBits(V2, 0, Q);
2263
2264 if (Known1.Zero.intersects(Known2.One) ||
2265 Known2.Zero.intersects(Known1.One))
2266 return true;
2267 }
2268 return false;
2269}
2270
2271/// Return true if 'V & Mask' is known to be zero. We use this predicate to
2272/// simplify operations downstream. Mask is known to be zero for bits that V
2273/// cannot have.
2274///
2275/// This function is defined on values with integer type, values with pointer
2276/// type, and vectors of integers. In the case
2277/// where V is a vector, the mask, known zero, and known one values are the
2278/// same width as the vector element, and the bit is set only if it is true
2279/// for all of the elements in the vector.
2280bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2281 const Query &Q) {
2282 KnownBits Known(Mask.getBitWidth());
2283 computeKnownBits(V, Known, Depth, Q);
2284 return Mask.isSubsetOf(Known.Zero);
2285}
2286
2287// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2288// Returns the input and lower/upper bounds.
2289static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2290 const APInt *&CLow, const APInt *&CHigh) {
2291  assert(isa<Operator>(Select) &&
2292         cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2293         "Input should be a Select!");
2294
2295 const Value *LHS, *RHS, *LHS2, *RHS2;
2296 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2297 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2298 return false;
2299
2300 if (!match(RHS, m_APInt(CLow)))
2301 return false;
2302
2303 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2304 if (getInverseMinMaxFlavor(SPF) != SPF2)
2305 return false;
2306
2307 if (!match(RHS2, m_APInt(CHigh)))
2308 return false;
2309
2310 if (SPF == SPF_SMIN)
2311 std::swap(CLow, CHigh);
2312
2313 In = LHS2;
2314 return CLow->sle(*CHigh);
2315}
2316
2317/// For vector constants, loop over the elements and find the constant with the
2318/// minimum number of sign bits. Return 0 if the value is not a vector constant
2319/// or if any element was not analyzed; otherwise, return the count for the
2320/// element with the minimum number of sign bits.
2321static unsigned computeNumSignBitsVectorConstant(const Value *V,
2322 unsigned TyBits) {
2323 const auto *CV = dyn_cast<Constant>(V);
2324 if (!CV || !CV->getType()->isVectorTy())
2325 return 0;
2326
2327 unsigned MinSignBits = TyBits;
2328 unsigned NumElts = CV->getType()->getVectorNumElements();
2329 for (unsigned i = 0; i != NumElts; ++i) {
2330 // If we find a non-ConstantInt, bail out.
2331 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2332 if (!Elt)
2333 return 0;
2334
2335 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2336 }
2337
2338 return MinSignBits;
2339}
2340
2341static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2342 const Query &Q);
2343
2344static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2345 const Query &Q) {
2346 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2347  assert(Result > 0 && "At least one sign bit needs to be present!");
2348 return Result;
2349}
2350
2351/// Return the number of times the sign bit of the register is replicated into
2352/// the other bits. We know that at least 1 bit is always equal to the sign bit
2353/// (itself), but other cases can give us information. For example, immediately
2354/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2355/// other, so we return 3. For vectors, return the number of sign bits for the
2356/// vector element with the minimum number of known sign bits.
2357static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2358 const Query &Q) {
2359  assert(Depth <= MaxDepth && "Limit Search Depth");
2360
2361 // We return the minimum number of sign bits that are guaranteed to be present
2362 // in V, so for undef we have to conservatively return 1. We don't have the
2363 // same behavior for poison though -- that's a FIXME today.
2364
2365 Type *ScalarTy = V->getType()->getScalarType();
2366 unsigned TyBits = ScalarTy->isPointerTy() ?
2367 Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2368 Q.DL.getTypeSizeInBits(ScalarTy);
2369
2370 unsigned Tmp, Tmp2;
2371 unsigned FirstAnswer = 1;
2372
2373 // Note that ConstantInt is handled by the general computeKnownBits case
2374 // below.
2375
2376 if (Depth == MaxDepth)
2377 return 1; // Limit search depth.
2378
2379 const Operator *U = dyn_cast<Operator>(V);
2380 switch (Operator::getOpcode(V)) {
2381 default: break;
2382 case Instruction::SExt:
2383 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2384 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2385
2386 case Instruction::SDiv: {
2387 const APInt *Denominator;
2388 // sdiv X, C -> adds log(C) sign bits.
2389 if (match(U->getOperand(1), m_APInt(Denominator))) {
2390
2391 // Ignore non-positive denominator.
2392 if (!Denominator->isStrictlyPositive())
2393 break;
2394
2395 // Calculate the incoming numerator bits.
2396 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2397
2398 // Add floor(log(C)) bits to the numerator bits.
2399 return std::min(TyBits, NumBits + Denominator->logBase2());
2400 }
2401 break;
2402 }
2403
2404 case Instruction::SRem: {
2405 const APInt *Denominator;
2406 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2407 // positive constant. This let us put a lower bound on the number of sign
2408 // bits.
2409 if (match(U->getOperand(1), m_APInt(Denominator))) {
2410
2411 // Ignore non-positive denominator.
2412 if (!Denominator->isStrictlyPositive())
2413 break;
2414
2415 // Calculate the incoming numerator bits. SRem by a positive constant
2416 // can't lower the number of sign bits.
2417 unsigned NumrBits =
2418 ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2419
2420 // Calculate the leading sign bit constraints by examining the
2421 // denominator. Given that the denominator is positive, there are two
2422 // cases:
2423 //
2424 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2425 // (1 << ceilLogBase2(C)).
2426 //
2427 // 2. the numerator is negative. Then the result range is (-C,0] and
2428 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2429 //
2430 // Thus a lower bound on the number of sign bits is `TyBits -
2431 // ceilLogBase2(C)`.
2432
2433 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2434 return std::max(NumrBits, ResBits);
2435 }
2436 break;
2437 }
2438
2439 case Instruction::AShr: {
2440 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2441 // ashr X, C -> adds C sign bits. Vectors too.
2442 const APInt *ShAmt;
2443 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2444 if (ShAmt->uge(TyBits))
2445 break; // Bad shift.
2446 unsigned ShAmtLimited = ShAmt->getZExtValue();
2447 Tmp += ShAmtLimited;
2448 if (Tmp > TyBits) Tmp = TyBits;
2449 }
2450 return Tmp;
2451 }
2452 case Instruction::Shl: {
2453 const APInt *ShAmt;
2454 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2455 // shl destroys sign bits.
2456 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2457 if (ShAmt->uge(TyBits) || // Bad shift.
2458 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2459 Tmp2 = ShAmt->getZExtValue();
2460 return Tmp - Tmp2;
2461 }
2462 break;
2463 }
2464 case Instruction::And:
2465 case Instruction::Or:
2466 case Instruction::Xor: // NOT is handled here.
2467 // Logical binary ops preserve the number of sign bits at the worst.
2468 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2469 if (Tmp != 1) {
2470 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2471 FirstAnswer = std::min(Tmp, Tmp2);
2472 // We computed what we know about the sign bits as our first
2473 // answer. Now proceed to the generic code that uses
2474 // computeKnownBits, and pick whichever answer is better.
2475 }
2476 break;
2477
2478 case Instruction::Select: {
2479 // If we have a clamp pattern, we know that the number of sign bits will be
2480 // the minimum of the clamp min/max range.
2481 const Value *X;
2482 const APInt *CLow, *CHigh;
2483 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2484 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2485
2486 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2487 if (Tmp == 1) break;
2488 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2489 return std::min(Tmp, Tmp2);
2490 }
2491
2492 case Instruction::Add:
2493 // Add can have at most one carry bit. Thus we know that the output
2494 // is, at worst, one more bit than the inputs.
2495 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2496 if (Tmp == 1) break;
2497
2498 // Special case decrementing a value (ADD X, -1):
2499 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2500 if (CRHS->isAllOnesValue()) {
2501 KnownBits Known(TyBits);
2502 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2503
2504 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2505 // sign bits set.
2506 if ((Known.Zero | 1).isAllOnesValue())
2507 return TyBits;
2508
2509 // If we are subtracting one from a positive number, there is no carry
2510 // out of the result.
2511 if (Known.isNonNegative())
2512 return Tmp;
2513 }
2514
2515 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2516 if (Tmp2 == 1) break;
2517 return std::min(Tmp, Tmp2)-1;
2518
2519 case Instruction::Sub:
2520 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2521 if (Tmp2 == 1) break;
2522
2523 // Handle NEG.
2524 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2525 if (CLHS->isNullValue()) {
2526 KnownBits Known(TyBits);
2527 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2528 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2529 // sign bits set.
2530 if ((Known.Zero | 1).isAllOnesValue())
2531 return TyBits;
2532
2533 // If the input is known to be positive (the sign bit is known clear),
2534 // the output of the NEG has the same number of sign bits as the input.
2535 if (Known.isNonNegative())
2536 return Tmp2;
2537
2538 // Otherwise, we treat this like a SUB.
2539 }
2540
2541 // Sub can have at most one carry bit. Thus we know that the output
2542 // is, at worst, one more bit than the inputs.
2543 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2544 if (Tmp == 1) break;
2545 return std::min(Tmp, Tmp2)-1;
2546
2547 case Instruction::Mul: {
2548 // The output of the Mul can be at most twice the valid bits in the inputs.
2549 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2550 if (SignBitsOp0 == 1) break;
2551 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2552 if (SignBitsOp1 == 1) break;
2553 unsigned OutValidBits =
2554 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2555 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2556 }
2557
2558 case Instruction::PHI: {
2559 const PHINode *PN = cast<PHINode>(U);
2560 unsigned NumIncomingValues = PN->getNumIncomingValues();
2561 // Don't analyze large in-degree PHIs.
2562 if (NumIncomingValues > 4) break;
2563 // Unreachable blocks may have zero-operand PHI nodes.
2564 if (NumIncomingValues == 0) break;
2565
2566 // Take the minimum of all incoming values. This can't infinitely loop
2567 // because of our depth threshold.
2568 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2569 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2570 if (Tmp == 1) return Tmp;
2571 Tmp = std::min(
2572 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2573 }
2574 return Tmp;
2575 }
2576
2577 case Instruction::Trunc:
2578 // FIXME: it's tricky to do anything useful for this, but it is an important
2579 // case for targets like X86.
2580 break;
2581
2582 case Instruction::ExtractElement:
2583 // Look through extract element. At the moment we keep this simple and skip
2584 // tracking the specific element. But at least we might find information
2585 // valid for all elements of the vector (for example if vector is sign
2586 // extended, shifted, etc).
2587 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2588
2589 case Instruction::ShuffleVector: {
2590 // TODO: This is copied almost directly from the SelectionDAG version of
2591 // ComputeNumSignBits. It would be better if we could share common
2592 // code. If not, make sure that changes are translated to the DAG.
2593
2594 // Collect the minimum number of sign bits that are shared by every vector
2595 // element referenced by the shuffle.
2596 auto *Shuf = cast<ShuffleVectorInst>(U);
2597 int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
2598 int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
2599 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2600 for (int i = 0; i != NumMaskElts; ++i) {
2601 int M = Shuf->getMaskValue(i);
2602      assert(M < NumElts * 2 && "Invalid shuffle mask constant");
2603 // For undef elements, we don't know anything about the common state of
2604 // the shuffle result.
2605 if (M == -1)
2606 return 1;
2607 if (M < NumElts)
2608 DemandedLHS.setBit(M % NumElts);
2609 else
2610 DemandedRHS.setBit(M % NumElts);
2611 }
2612 Tmp = std::numeric_limits<unsigned>::max();
2613 if (!!DemandedLHS)
2614 Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
2615 if (!!DemandedRHS) {
2616 Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
2617 Tmp = std::min(Tmp, Tmp2);
2618 }
2619 // If we don't know anything, early out and try computeKnownBits fall-back.
2620 if (Tmp == 1)
2621 break;
2622    assert(Tmp <= V->getType()->getScalarSizeInBits() &&
2623           "Failed to determine minimum sign bits");
2624 return Tmp;
2625 }
2626 }
2627
2628 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2629 // use this information.
2630
2631 // If we can examine all elements of a vector constant successfully, we're
2632 // done (we can't do any better than that). If not, keep trying.
2633 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2634 return VecSignBits;
2635
2636 KnownBits Known(TyBits);
2637 computeKnownBits(V, Known, Depth, Q);
2638
2639 // If we know that the sign bit is either zero or one, determine the number of
2640 // identical bits in the top of the input value.
2641 return std::max(FirstAnswer, Known.countMinSignBits());
2642}
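
The Add and Sub cases above both rely on the same bound: addition and subtraction produce at most one carry, so the result keeps at least min(Tmp, Tmp2) - 1 redundant sign bits. A minimal standalone sketch of that bound in plain C++ (illustrative only, not part of this file):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Count the redundant sign bits of v, i.e. how many of the top bits are
// copies of the sign bit (the sign bit itself counts as 1).
static unsigned countSignBits(int32_t v) {
  uint32_t u = static_cast<uint32_t>(v);
  unsigned sign = u >> 31, n = 1;
  while (n < 32 && ((u >> (31 - n)) & 1) == sign)
    ++n;
  return n;
}

int main() {
  int32_t X = 100, Y = -7;
  unsigned Lo = std::min(countSignBits(X), countSignBits(Y));
  assert(countSignBits(X - Y) >= Lo - 1); // the min(Tmp, Tmp2) - 1 bound
  return 0;
}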
2643
2644/// This function computes the integer multiple of Base that equals V.
2645/// If successful, it returns true and stores the multiple in Multiple.
2646/// If unsuccessful, it returns false. It looks through SExt instructions
2647/// only if LookThroughSExt is true.
2648bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2649 bool LookThroughSExt, unsigned Depth) {
2650 const unsigned MaxDepth = 6;
2651
2652  assert(V && "No Value?");
2653  assert(Depth <= MaxDepth && "Limit Search Depth");
2654  assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2655
2656 Type *T = V->getType();
2657
2658 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2659
2660 if (Base == 0)
2661 return false;
2662
2663 if (Base == 1) {
2664 Multiple = V;
2665 return true;
2666 }
2667
2668 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2669 Constant *BaseVal = ConstantInt::get(T, Base);
2670 if (CO && CO == BaseVal) {
2671 // Multiple is 1.
2672 Multiple = ConstantInt::get(T, 1);
2673 return true;
2674 }
2675
2676 if (CI && CI->getZExtValue() % Base == 0) {
2677 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2678 return true;
2679 }
2680
2681 if (Depth == MaxDepth) return false; // Limit search depth.
2682
2683 Operator *I = dyn_cast<Operator>(V);
2684 if (!I) return false;
2685
2686 switch (I->getOpcode()) {
2687 default: break;
2688 case Instruction::SExt:
2689 if (!LookThroughSExt) return false;
2690 // otherwise fall through to ZExt
2691    LLVM_FALLTHROUGH;
2692 case Instruction::ZExt:
2693 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2694 LookThroughSExt, Depth+1);
2695 case Instruction::Shl:
2696 case Instruction::Mul: {
2697 Value *Op0 = I->getOperand(0);
2698 Value *Op1 = I->getOperand(1);
2699
2700 if (I->getOpcode() == Instruction::Shl) {
2701 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2702 if (!Op1CI) return false;
2703 // Turn Op0 << Op1 into Op0 * 2^Op1
2704 APInt Op1Int = Op1CI->getValue();
2705 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2706 APInt API(Op1Int.getBitWidth(), 0);
2707 API.setBit(BitToSet);
2708 Op1 = ConstantInt::get(V->getContext(), API);
2709 }
2710
2711 Value *Mul0 = nullptr;
2712 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2713 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2714 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2715 if (Op1C->getType()->getPrimitiveSizeInBits() <
2716 MulC->getType()->getPrimitiveSizeInBits())
2717 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2718 if (Op1C->getType()->getPrimitiveSizeInBits() >
2719 MulC->getType()->getPrimitiveSizeInBits())
2720 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2721
2722 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2723 Multiple = ConstantExpr::getMul(MulC, Op1C);
2724 return true;
2725 }
2726
2727 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2728 if (Mul0CI->getValue() == 1) {
2729 // V == Base * Op1, so return Op1
2730 Multiple = Op1;
2731 return true;
2732 }
2733 }
2734
2735 Value *Mul1 = nullptr;
2736 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2737 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2738 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2739 if (Op0C->getType()->getPrimitiveSizeInBits() <
2740 MulC->getType()->getPrimitiveSizeInBits())
2741 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2742 if (Op0C->getType()->getPrimitiveSizeInBits() >
2743 MulC->getType()->getPrimitiveSizeInBits())
2744 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2745
2746 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2747 Multiple = ConstantExpr::getMul(MulC, Op0C);
2748 return true;
2749 }
2750
2751 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2752 if (Mul1CI->getValue() == 1) {
2753 // V == Base * Op0, so return Op0
2754 Multiple = Op0;
2755 return true;
2756 }
2757 }
2758 }
2759 }
2760
2761 // We could not determine if V is a multiple of Base.
2762 return false;
2763}
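
The Shl case above rewrites Op0 << Op1 as Op0 * 2^Op1 before falling into the shared Mul handling. A small plain-C++ sketch of that rewrite and of the Base/Multiple contract, with illustrative values:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t X = 5, ShAmt = 3;
  // Turn X << ShAmt into X * 2^ShAmt, exactly as the code above builds API.
  uint64_t PowerOfTwo = uint64_t(1) << ShAmt;
  assert((X << ShAmt) == X * PowerOfTwo);
  // With Base == 4: X*8 == 4 * (X*2), so the returned Multiple would be X*2.
  uint64_t Base = 4, Multiple = (X * PowerOfTwo) / Base;
  assert(Base * Multiple == X * PowerOfTwo && Multiple == X * 2);
  return 0;
}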
2764
2765Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2766 const TargetLibraryInfo *TLI) {
2767 const Function *F = ICS.getCalledFunction();
2768 if (!F)
2769 return Intrinsic::not_intrinsic;
2770
2771 if (F->isIntrinsic())
2772 return F->getIntrinsicID();
2773
2774 if (!TLI)
2775 return Intrinsic::not_intrinsic;
2776
2777 LibFunc Func;
2778  // We're going to make assumptions about the semantics of the function, so
2779  // check that the target knows it's available in this environment and that
2780  // it does not have local linkage.
2781 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2782 return Intrinsic::not_intrinsic;
2783
2784 if (!ICS.onlyReadsMemory())
2785 return Intrinsic::not_intrinsic;
2786
2787 // Otherwise check if we have a call to a function that can be turned into a
2788 // vector intrinsic.
2789 switch (Func) {
2790 default:
2791 break;
2792 case LibFunc_sin:
2793 case LibFunc_sinf:
2794 case LibFunc_sinl:
2795 return Intrinsic::sin;
2796 case LibFunc_cos:
2797 case LibFunc_cosf:
2798 case LibFunc_cosl:
2799 return Intrinsic::cos;
2800 case LibFunc_exp:
2801 case LibFunc_expf:
2802 case LibFunc_expl:
2803 return Intrinsic::exp;
2804 case LibFunc_exp2:
2805 case LibFunc_exp2f:
2806 case LibFunc_exp2l:
2807 return Intrinsic::exp2;
2808 case LibFunc_log:
2809 case LibFunc_logf:
2810 case LibFunc_logl:
2811 return Intrinsic::log;
2812 case LibFunc_log10:
2813 case LibFunc_log10f:
2814 case LibFunc_log10l:
2815 return Intrinsic::log10;
2816 case LibFunc_log2:
2817 case LibFunc_log2f:
2818 case LibFunc_log2l:
2819 return Intrinsic::log2;
2820 case LibFunc_fabs:
2821 case LibFunc_fabsf:
2822 case LibFunc_fabsl:
2823 return Intrinsic::fabs;
2824 case LibFunc_fmin:
2825 case LibFunc_fminf:
2826 case LibFunc_fminl:
2827 return Intrinsic::minnum;
2828 case LibFunc_fmax:
2829 case LibFunc_fmaxf:
2830 case LibFunc_fmaxl:
2831 return Intrinsic::maxnum;
2832 case LibFunc_copysign:
2833 case LibFunc_copysignf:
2834 case LibFunc_copysignl:
2835 return Intrinsic::copysign;
2836 case LibFunc_floor:
2837 case LibFunc_floorf:
2838 case LibFunc_floorl:
2839 return Intrinsic::floor;
2840 case LibFunc_ceil:
2841 case LibFunc_ceilf:
2842 case LibFunc_ceill:
2843 return Intrinsic::ceil;
2844 case LibFunc_trunc:
2845 case LibFunc_truncf:
2846 case LibFunc_truncl:
2847 return Intrinsic::trunc;
2848 case LibFunc_rint:
2849 case LibFunc_rintf:
2850 case LibFunc_rintl:
2851 return Intrinsic::rint;
2852 case LibFunc_nearbyint:
2853 case LibFunc_nearbyintf:
2854 case LibFunc_nearbyintl:
2855 return Intrinsic::nearbyint;
2856 case LibFunc_round:
2857 case LibFunc_roundf:
2858 case LibFunc_roundl:
2859 return Intrinsic::round;
2860 case LibFunc_pow:
2861 case LibFunc_powf:
2862 case LibFunc_powl:
2863 return Intrinsic::pow;
2864 case LibFunc_sqrt:
2865 case LibFunc_sqrtf:
2866 case LibFunc_sqrtl:
2867 return Intrinsic::sqrt;
2868 }
2869
2870 return Intrinsic::not_intrinsic;
2871}
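
A rough usage sketch for the mapping above (a hypothetical helper, assuming an LLVM build with these headers; callsLibSin and the Triple argument are illustrative). The TargetLibraryInfo argument is what lets sin/sinf/sinl calls be recognized:

#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
using namespace llvm;

// Return true if F contains a libm sine call that can be treated as the
// llvm.sin intrinsic.
static bool callsLibSin(Function &F, const Triple &T) {
  TargetLibraryInfoImpl TLII(T); // describes what the environment provides
  TargetLibraryInfo TLI(TLII);
  for (Instruction &I : instructions(F))
    if (auto *CI = dyn_cast<CallInst>(&I))
      if (getIntrinsicForCallSite(CI, &TLI) == Intrinsic::sin)
        return true;
  return false;
}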
2872
2873/// Return true if we can prove that the specified FP value is never equal to
2874/// -0.0.
2875///
2876/// NOTE: this function will need to be revisited when we support non-default
2877/// rounding modes!
2878bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2879 unsigned Depth) {
2880 if (auto *CFP = dyn_cast<ConstantFP>(V))
2881 return !CFP->getValueAPF().isNegZero();
2882
2883 // Limit search depth.
2884 if (Depth == MaxDepth)
2885 return false;
2886
2887 auto *Op = dyn_cast<Operator>(V);
2888 if (!Op)
2889 return false;
2890
2891 // Check if the nsz fast-math flag is set.
2892 if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2893 if (FPO->hasNoSignedZeros())
2894 return true;
2895
2896 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2897 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2898 return true;
2899
2900 // sitofp and uitofp turn into +0.0 for zero.
2901 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2902 return true;
2903
2904 if (auto *Call = dyn_cast<CallInst>(Op)) {
2905 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2906 switch (IID) {
2907 default:
2908 break;
2909 // sqrt(-0.0) = -0.0, no other negative results are possible.
2910 case Intrinsic::sqrt:
2911 case Intrinsic::canonicalize:
2912 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2913 // fabs(x) != -0.0
2914 case Intrinsic::fabs:
2915 return true;
2916 }
2917 }
2918
2919 return false;
2920}
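
Two of the facts used above are easy to check in standalone C++ under the default round-to-nearest mode: (fadd x, +0.0) never produces -0.0, and fabs never returns -0.0. A minimal sketch:

#include <cassert>
#include <cmath>

int main() {
  double NegZero = -0.0;
  assert(std::signbit(NegZero));
  assert(!std::signbit(NegZero + 0.0));      // (fadd x, +0.0) cannot be -0.0
  assert(!std::signbit(std::fabs(NegZero))); // fabs(x) != -0.0
  return 0;
}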
2921
2922/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2923/// standard ordered compare. e.g. treat -0.0 as olt 0.0 because of its sign
2924/// bit, even though the two compare equal.
2925static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2926 const TargetLibraryInfo *TLI,
2927 bool SignBitOnly,
2928 unsigned Depth) {
2929 // TODO: This function does not do the right thing when SignBitOnly is true
2930 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2931 // which flips the sign bits of NaNs. See
2932 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2933
2934 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2935 return !CFP->getValueAPF().isNegative() ||
2936 (!SignBitOnly && CFP->getValueAPF().isZero());
2937 }
2938
2939 // Handle vector of constants.
2940 if (auto *CV = dyn_cast<Constant>(V)) {
2941 if (CV->getType()->isVectorTy()) {
2942 unsigned NumElts = CV->getType()->getVectorNumElements();
2943 for (unsigned i = 0; i != NumElts; ++i) {
2944 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2945 if (!CFP)
2946 return false;
2947 if (CFP->getValueAPF().isNegative() &&
2948 (SignBitOnly || !CFP->getValueAPF().isZero()))
2949 return false;
2950 }
2951
2952 // All non-negative ConstantFPs.
2953 return true;
2954 }
2955 }
2956
2957 if (Depth == MaxDepth)
2958 return false; // Limit search depth.
2959
2960 const Operator *I = dyn_cast<Operator>(V);
2961 if (!I)
2962 return false;
2963
2964 switch (I->getOpcode()) {
2965 default:
2966 break;
2967 // Unsigned integers are always nonnegative.
2968 case Instruction::UIToFP:
2969 return true;
2970 case Instruction::FMul:
2971 // x*x is always non-negative or a NaN.
2972 if (I->getOperand(0) == I->getOperand(1) &&
2973 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2974 return true;
2975
2976    LLVM_FALLTHROUGH;
2977 case Instruction::FAdd:
2978 case Instruction::FDiv:
2979 case Instruction::FRem:
2980 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2981 Depth + 1) &&
2982 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2983 Depth + 1);
2984 case Instruction::Select:
2985 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2986 Depth + 1) &&
2987 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2988 Depth + 1);
2989 case Instruction::FPExt:
2990 case Instruction::FPTrunc:
2991 // Widening/narrowing never change sign.
2992 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2993 Depth + 1);
2994 case Instruction::ExtractElement:
2995 // Look through extract element. At the moment we keep this simple and skip
2996 // tracking the specific element. But at least we might find information
2997 // valid for all elements of the vector.
2998 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2999 Depth + 1);
3000 case Instruction::Call:
3001 const auto *CI = cast<CallInst>(I);
3002 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
3003 switch (IID) {
3004 default:
3005 break;
3006 case Intrinsic::maxnum:
3007 return (isKnownNeverNaN(I->getOperand(0), TLI) &&
3008 cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
3009 SignBitOnly, Depth + 1)) ||
3010 (isKnownNeverNaN(I->getOperand(1), TLI) &&
3011 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3012 SignBitOnly, Depth + 1));
3013
3014 case Intrinsic::maximum:
3015 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3016 Depth + 1) ||
3017 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3018 Depth + 1);
3019 case Intrinsic::minnum:
3020 case Intrinsic::minimum:
3021 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3022 Depth + 1) &&
3023 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3024 Depth + 1);
3025 case Intrinsic::exp:
3026 case Intrinsic::exp2:
3027 case Intrinsic::fabs:
3028 return true;
3029
3030 case Intrinsic::sqrt:
3031 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3032 if (!SignBitOnly)
3033 return true;
3034 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3035 CannotBeNegativeZero(CI->getOperand(0), TLI));
3036
3037 case Intrinsic::powi:
3038 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3039 // powi(x,n) is non-negative if n is even.
3040 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3041 return true;
3042 }
3043 // TODO: This is not correct. Given that exp is an integer, here are the
3044 // ways that pow can return a negative value:
3045 //
3046 // pow(x, exp) --> negative if exp is odd and x is negative.
3047 // pow(-0, exp) --> -inf if exp is negative odd.
3048 // pow(-0, exp) --> -0 if exp is positive odd.
3049 // pow(-inf, exp) --> -0 if exp is negative odd.
3050 // pow(-inf, exp) --> -inf if exp is positive odd.
3051 //
3052 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3053 // but we must return false if x == -0. Unfortunately we do not currently
3054 // have a way of expressing this constraint. See details in
3055 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3056 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3057 Depth + 1);
3058
3059 case Intrinsic::fma:
3060 case Intrinsic::fmuladd:
3061 // x*x+y is non-negative if y is non-negative.
3062 return I->getOperand(0) == I->getOperand(1) &&
3063 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3064 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3065 Depth + 1);
3066 }
3067 break;
3068 }
3069 return false;
3070}
3071
3072bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3073 const TargetLibraryInfo *TLI) {
3074 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3075}
3076
3077bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3078 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3079}
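
The SignBitOnly distinction between these two wrappers is exactly the -0.0 case: it is not ordered less than zero, yet its sign bit is set. A standalone illustration in plain C++:

#include <cassert>
#include <cmath>

int main() {
  double NZ = -0.0;
  assert(!(NZ < 0.0));      // the CannotBeOrderedLessThanZero view accepts it
  assert(std::signbit(NZ)); // the SignBitMustBeZero view must reject it
  return 0;
}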
3080
3081bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3082 unsigned Depth) {
3083  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3084
3085 // If we're told that NaNs won't happen, assume they won't.
3086 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3087 if (FPMathOp->hasNoNaNs())
3088 return true;
3089
3090 // Handle scalar constants.
3091 if (auto *CFP = dyn_cast<ConstantFP>(V))
3092 return !CFP->isNaN();
3093
3094 if (Depth == MaxDepth)
3095 return false;
3096
3097 if (auto *Inst = dyn_cast<Instruction>(V)) {
3098 switch (Inst->getOpcode()) {
3099 case Instruction::FAdd:
3100 case Instruction::FMul:
3101 case Instruction::FSub:
3102 case Instruction::FDiv:
3103 case Instruction::FRem: {
3104 // TODO: Need isKnownNeverInfinity
3105 return false;
3106 }
3107 case Instruction::Select: {
3108 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3109 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3110 }
3111 case Instruction::SIToFP:
3112 case Instruction::UIToFP:
3113 return true;
3114 case Instruction::FPTrunc:
3115 case Instruction::FPExt:
3116 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3117 default:
3118 break;
3119 }
3120 }
3121
3122 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3123 switch (II->getIntrinsicID()) {
3124 case Intrinsic::canonicalize:
3125 case Intrinsic::fabs:
3126 case Intrinsic::copysign:
3127 case Intrinsic::exp:
3128 case Intrinsic::exp2:
3129 case Intrinsic::floor:
3130 case Intrinsic::ceil:
3131 case Intrinsic::trunc:
3132 case Intrinsic::rint:
3133 case Intrinsic::nearbyint:
3134 case Intrinsic::round:
3135 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3136 case Intrinsic::sqrt:
3137 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3138 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3139 case Intrinsic::minnum:
3140 case Intrinsic::maxnum:
3141 // If either operand is not NaN, the result is not NaN.
3142 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3143 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3144 default:
3145 return false;
3146 }
3147 }
3148
3149 // Bail out for constant expressions, but try to handle vector constants.
3150 if (!V->getType()->isVectorTy() || !isa<Constant>(V))
3151 return false;
3152
3153 // For vectors, verify that each element is not NaN.
3154 unsigned NumElts = V->getType()->getVectorNumElements();
3155 for (unsigned i = 0; i != NumElts; ++i) {
3156 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3157 if (!Elt)
3158 return false;
3159 if (isa<UndefValue>(Elt))
3160 continue;
3161 auto *CElt = dyn_cast<ConstantFP>(Elt);
3162 if (!CElt || CElt->isNaN())
3163 return false;
3164 }
3165 // All elements were confirmed not-NaN or undefined.
3166 return true;
3167}
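
The minnum/maxnum rule above matches the C library fmin/fmax semantics: the non-NaN operand wins, so the result is NaN only when both inputs are. A plain-C++ check:

#include <cassert>
#include <cmath>

int main() {
  double QNaN = std::nan("");
  assert(std::fmin(QNaN, 1.0) == 1.0);      // non-NaN operand is returned
  assert(std::fmax(2.0, QNaN) == 2.0);
  assert(std::isnan(std::fmin(QNaN, QNaN))); // only both-NaN yields NaN
  return 0;
}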
3168
3169Value *llvm::isBytewiseValue(Value *V) {
3170
3171 // All byte-wide stores are splatable, even of arbitrary variables.
3172 if (V->getType()->isIntegerTy(8))
3173 return V;
3174
3175 LLVMContext &Ctx = V->getContext();
3176
3177  // Undef values are "don't care" bytes.
3178 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3179 if (isa<UndefValue>(V))
3180 return UndefInt8;
3181
3182 Constant *C = dyn_cast<Constant>(V);
3183 if (!C) {
3184 // Conceptually, we could handle things like:
3185 // %a = zext i8 %X to i16
3186 // %b = shl i16 %a, 8
3187 // %c = or i16 %a, %b
3188 // but until there is an example that actually needs this, it doesn't seem
3189 // worth worrying about.
3190 return nullptr;
3191 }
3192
3193  // Handle 'null' ConstantAggregateZero etc.
3194 if (C->isNullValue())
3195 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3196
3197 // Constant floating-point values can be handled as integer values if the
3198 // corresponding integer value is "byteable". An important case is 0.0.
3199 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3200 Type *Ty = nullptr;
3201 if (CFP->getType()->isHalfTy())
3202 Ty = Type::getInt16Ty(Ctx);
3203 else if (CFP->getType()->isFloatTy())
3204 Ty = Type::getInt32Ty(Ctx);
3205 else if (CFP->getType()->isDoubleTy())
3206 Ty = Type::getInt64Ty(Ctx);
3207 // Don't handle long double formats, which have strange constraints.
3208 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty)) : nullptr;
3209 }
3210
3211  // We can handle constant integers whose width is a multiple of 8 bits.
3212 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3213 if (CI->getBitWidth() % 8 == 0) {
3214      assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3215 if (!CI->getValue().isSplat(8))
3216 return nullptr;
3217 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3218 }
3219 }
3220
3221 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3222 if (LHS == RHS)
3223 return LHS;
3224 if (!LHS || !RHS)
3225 return nullptr;
3226 if (LHS == UndefInt8)
3227 return RHS;
3228 if (RHS == UndefInt8)
3229 return LHS;
3230 return nullptr;
3231 };
3232
3233 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3234 Value *Val = UndefInt8;
3235 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3236 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I)))))
3237 return nullptr;
3238 return Val;
3239 }
3240
3241 if (isa<ConstantVector>(C)) {
3242 Constant *Splat = cast<ConstantVector>(C)->getSplatValue();
3243 return Splat ? isBytewiseValue(Splat) : nullptr;
3244 }
3245
3246 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
3247 Value *Val = UndefInt8;
3248 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3249 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I)))))
3250 return nullptr;
3251 return Val;
3252 }
3253
3254 // Don't try to handle the handful of other constants.
3255 return nullptr;
3256}
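
For wide integers the code above accepts only values whose bytes are all identical (the isSplat(8) check). A standalone sketch of the same test on a 32-bit value:

#include <cassert>
#include <cstdint>

// An i32 constant is bytewise iff all four of its bytes are identical.
static bool isSplat8(uint32_t V) {
  uint8_t B = V & 0xff;
  return V == 0x01010101u * B; // replicate the low byte and compare
}

int main() {
  assert(isSplat8(0xAAAAAAAAu));  // memset-able with byte 0xAA
  assert(!isSplat8(0x12345678u)); // bytes differ, no single splat byte
  return 0;
}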
3257
3258// This is the recursive version of BuildSubAggregate. It takes a few different
3259// arguments. Idxs is the index within the nested struct From that we are
3260// looking at now (which is of type IndexedType). IdxSkip is the number of
3261// indices from Idxs that should be left out when inserting into the resulting
3262// struct. To is the result struct built so far, new insertvalue instructions
3263// build on that.
3264static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3265 SmallVectorImpl<unsigned> &Idxs,
3266 unsigned IdxSkip,
3267 Instruction *InsertBefore) {
3268 StructType *STy = dyn_cast<StructType>(IndexedType);
3269 if (STy) {
3270 // Save the original To argument so we can modify it
3271 Value *OrigTo = To;
3272 // General case, the type indexed by Idxs is a struct
3273 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3274 // Process each struct element recursively
3275 Idxs.push_back(i);
3276 Value *PrevTo = To;
3277 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3278 InsertBefore);
3279 Idxs.pop_back();
3280 if (!To) {
3281 // Couldn't find any inserted value for this index? Cleanup
3282 while (PrevTo != OrigTo) {
3283 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3284 PrevTo = Del->getAggregateOperand();
3285 Del->eraseFromParent();
3286 }
3287 // Stop processing elements
3288 break;
3289 }
3290 }
3291 // If we successfully found a value for each of our subaggregates
3292 if (To)
3293 return To;
3294 }
3295 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3296 // the struct's elements had a value that was inserted directly. In the latter
3297 // case, perhaps we can't determine each of the subelements individually, but
3298 // we might be able to find the complete struct somewhere.
3299
3300 // Find the value that is at that particular spot
3301 Value *V = FindInsertedValue(From, Idxs);
3302
3303 if (!V)
3304 return nullptr;
3305
3306 // Insert the value in the new (sub) aggregate
3307 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3308 "tmp", InsertBefore);
3309}
3310
3311// This helper takes a nested struct and extracts a part of it (which is again a
3312// struct) into a new value. For example, given the struct:
3313// { a, { b, { c, d }, e } }
3314// and the indices "1, 1" this returns
3315// { c, d }.
3316//
3317// It does this by inserting an insertvalue for each element in the resulting
3318// struct, as opposed to just inserting a single struct. This will only work if
3319// each of the elements of the substruct are known (ie, inserted into From by an
3320// insertvalue instruction somewhere).
3321//
3322// All inserted insertvalue instructions are inserted before InsertBefore
3323static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3324 Instruction *InsertBefore) {
3325  assert(InsertBefore && "Must have someplace to insert!");
3326 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3327 idx_range);
3328 Value *To = UndefValue::get(IndexedType);
3329 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3330 unsigned IdxSkip = Idxs.size();
3331
3332 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3333}
3334
3335/// Given an aggregate and a sequence of indices, see if the scalar value
3336/// indexed is already around as a register, for example if it was inserted
3337/// directly into the aggregate.
3338///
3339/// If InsertBefore is not null, this function will duplicate (modified)
3340/// insertvalues when a part of a nested struct is extracted.
3341Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3342 Instruction *InsertBefore) {
3343 // Nothing to index? Just return V then (this is useful at the end of our
3344 // recursion).
3345 if (idx_range.empty())
3346 return V;
3347 // We have indices, so V should have an indexable type.
3348  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3349         "Not looking at a struct or array?");
3350  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3351         "Invalid indices for type?");
3352
3353 if (Constant *C = dyn_cast<Constant>(V)) {
3354 C = C->getAggregateElement(idx_range[0]);
3355 if (!C) return nullptr;
3356 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3357 }
3358
3359 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3360 // Loop the indices for the insertvalue instruction in parallel with the
3361 // requested indices
3362 const unsigned *req_idx = idx_range.begin();
3363 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3364 i != e; ++i, ++req_idx) {
3365 if (req_idx == idx_range.end()) {
3366 // We can't handle this without inserting insertvalues
3367 if (!InsertBefore)
3368 return nullptr;
3369
3370 // The requested index identifies a part of a nested aggregate. Handle
3371 // this specially. For example,
3372 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3373 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3374 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3375 // This can be changed into
3376 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3377 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3378 // which allows the unused 0,0 element from the nested struct to be
3379 // removed.
3380 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3381 InsertBefore);
3382 }
3383
3384      // This insertvalue inserts something other than what we are looking
3385      // for. See if the (aggregate) value it inserts into has the value we
3386      // are looking for instead.
3387 if (*req_idx != *i)
3388 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3389 InsertBefore);
3390 }
3391 // If we end up here, the indices of the insertvalue match with those
3392 // requested (though possibly only partially). Now we recursively look at
3393 // the inserted value, passing any remaining indices.
3394 return FindInsertedValue(I->getInsertedValueOperand(),
3395 makeArrayRef(req_idx, idx_range.end()),
3396 InsertBefore);
3397 }
3398
3399 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3400 // If we're extracting a value from an aggregate that was extracted from
3401 // something else, we can extract from that something else directly instead.
3402 // However, we will need to chain I's indices with the requested indices.
3403
3404 // Calculate the number of indices required
3405 unsigned size = I->getNumIndices() + idx_range.size();
3406 // Allocate some space to put the new indices in
3407 SmallVector<unsigned, 5> Idxs;
3408 Idxs.reserve(size);
3409 // Add indices from the extract value instruction
3410 Idxs.append(I->idx_begin(), I->idx_end());
3411
3412 // Add requested indices
3413 Idxs.append(idx_range.begin(), idx_range.end());
3414
3415    assert(Idxs.size() == size &&
3416           "Number of indices added not correct?");
3417
3418 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3419 }
3420 // Otherwise, we don't know (such as, extracting from a function return value
3421 // or load instruction)
3422 return nullptr;
3423}
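
A hedged usage sketch of FindInsertedValue (assumes an LLVM build; recoverField, Ten, and Eleven are illustrative names, and the builder is assumed to have an insertion point). It mirrors the %A/%B example in the comments above: after both fields of the nested pair are inserted, index {1, 1} is recoverable without an extractvalue. If Ten and Eleven are constants the builder may fold the inserts into a ConstantStruct, which the constant path above handles as well.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Build { i32, { i32, i32 } } piecewise and recover the {1, 1} field.
static Value *recoverField(IRBuilder<> &B, Value *Ten, Value *Eleven) {
  LLVMContext &Ctx = B.getContext();
  Type *I32 = Type::getInt32Ty(Ctx);
  StructType *Inner = StructType::get(I32, I32);
  StructType *Outer = StructType::get(I32, Inner);
  Value *A = B.CreateInsertValue(UndefValue::get(Outer), Ten, {1, 0});
  Value *Bv = B.CreateInsertValue(A, Eleven, {1, 1});
  return FindInsertedValue(Bv, {1, 1}); // yields Eleven
}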
3424
3425/// Analyze the specified pointer to see if it can be expressed as a base
3426/// pointer plus a constant offset. Return the base and offset to the caller.
3427Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3428 const DataLayout &DL) {
3429 unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3430 APInt ByteOffset(BitWidth, 0);
3431
3432 // We walk up the defs but use a visited set to handle unreachable code. In
3433 // that case, we stop after accumulating the cycle once (not that it
3434 // matters).
3435 SmallPtrSet<Value *, 16> Visited;
3436 while (Visited.insert(Ptr).second) {
3437 if (Ptr->getType()->isVectorTy())
3438 break;
3439
3440 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3441 // If one of the values we have visited is an addrspacecast, then
3442 // the pointer type of this GEP may be different from the type
3443 // of the Ptr parameter which was passed to this function. This
3444 // means when we construct GEPOffset, we need to use the size
3445 // of GEP's pointer type rather than the size of the original
3446 // pointer type.
3447 APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3448 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3449 break;
3450
3451 APInt OrigByteOffset(ByteOffset);
3452 ByteOffset += GEPOffset.sextOrTrunc(ByteOffset.getBitWidth());
3453 if (ByteOffset.getMinSignedBits() > 64) {
3454 // Stop traversal if the pointer offset wouldn't fit into int64_t
3455 // (this should be removed if Offset is updated to an APInt)
3456 ByteOffset = OrigByteOffset;
3457 break;
3458 }
3459
3460 Ptr = GEP->getPointerOperand();
3461 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3462 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3463 Ptr = cast<Operator>(Ptr)->getOperand(0);
3464 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3465 if (GA->isInterposable())
3466 break;
3467 Ptr = GA->getAliasee();
3468 } else {
3469 break;
3470 }
3471 }
3472 Offset = ByteOffset.getSExtValue();
3473 return Ptr;
3474}
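
A plain-C++ analogue of the walk above: stepping a pointer back to its base while accumulating a constant byte offset, the way a constant-index GEP is folded into ByteOffset:

#include <cassert>
#include <cstdint>

int main() {
  int32_t Buf[8];
  int32_t *Base = Buf;
  int32_t *Ptr = Base + 3;                     // gep i32, i32* %base, i64 3
  int64_t Offset = (char *)Ptr - (char *)Base; // constant byte offset
  assert(Offset == 12);                        // 3 * sizeof(i32)
  return 0;
}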
3475
3476bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3477 unsigned CharSize) {
3478 // Make sure the GEP has exactly three arguments.
3479 if (GEP->getNumOperands() != 3)
3480 return false;
3481
3482  // Make sure the index-ee is a pointer to an array of \p CharSize
3483  // integers.
3484 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3485 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3486 return false;
3487
3488 // Check to make sure that the first operand of the GEP is an integer and
3489 // has value 0 so that we are sure we're indexing into the initializer.
3490 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3491 if (!FirstIdx || !FirstIdx->isZero())
3492 return false;
3493
3494 return true;
3495}
3496
3497bool llvm::getConstantDataArrayInfo(const Value *V,
3498 ConstantDataArraySlice &Slice,
3499 unsigned ElementSize, uint64_t Offset) {
3500  assert(V);
3501
3502 // Look through bitcast instructions and geps.
3503 V = V->stripPointerCasts();
3504
3505 // If the value is a GEP instruction or constant expression, treat it as an
3506 // offset.
3507 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3508 // The GEP operator should be based on a pointer to string constant, and is
3509 // indexing into the string constant.
3510 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3511 return false;
3512
3513 // If the second index isn't a ConstantInt, then this is a variable index
3514 // into the array. If this occurs, we can't say anything meaningful about
3515 // the string.
3516 uint64_t StartIdx = 0;
3517 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3518 StartIdx = CI->getZExtValue();
3519 else
3520 return false;
3521 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3522 StartIdx + Offset);
3523 }
3524
3525  // The GEP, whether a constant expression or an instruction, must reference
3526  // a global variable that is a constant and is initialized. The referenced
3527  // constant initializer is the array that we'll use for optimization.
3528 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3529 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3530 return false;
3531
3532 const ConstantDataArray *Array;
3533 ArrayType *ArrayTy;
3534 if (GV->getInitializer()->isNullValue()) {
3535 Type *GVTy = GV->getValueType();
3536 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3537 // A zeroinitializer for the array; there is no ConstantDataArray.
3538 Array = nullptr;
3539 } else {
3540 const DataLayout &DL = GV->getParent()->getDataLayout();
3541 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3542 uint64_t Length = SizeInBytes / (ElementSize / 8);
3543 if (Length <= Offset)
3544 return false;
3545
3546 Slice.Array = nullptr;
3547 Slice.Offset = 0;
3548 Slice.Length = Length - Offset;
3549 return true;
3550 }
3551 } else {
3552 // This must be a ConstantDataArray.
3553 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3554 if (!Array)
3555 return false;
3556 ArrayTy = Array->getType();
3557 }
3558 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3559 return false;
3560
3561 uint64_t NumElts = ArrayTy->getArrayNumElements();
3562 if (Offset > NumElts)
3563 return false;
3564
3565 Slice.Array = Array;
3566 Slice.Offset = Offset;
3567 Slice.Length = NumElts - Offset;
3568 return true;
3569}
3570
3571/// This function computes the length of a null-terminated C string pointed to
3572/// by V. If successful, it returns true and stores the string in Str.
3573/// If unsuccessful, it returns false.
3574bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3575 uint64_t Offset, bool TrimAtNul) {
3576 ConstantDataArraySlice Slice;
3577 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3578 return false;
3579
3580 if (Slice.Array == nullptr) {
3581 if (TrimAtNul) {
3582 Str = StringRef();
3583 return true;
3584 }
3585 if (Slice.Length == 1) {
3586 Str = StringRef("", 1);
3587 return true;
3588 }
3589 // We cannot instantiate a StringRef as we do not have an appropriate string
3590 // of 0s at hand.
3591 return false;
3592 }
3593
3594 // Start out with the entire array in the StringRef.
3595 Str = Slice.Array->getAsString();
3596 // Skip over 'offset' bytes.
3597 Str = Str.substr(Slice.Offset);
3598
3599 if (TrimAtNul) {
3600    // Trim off the \0 and anything after it. If the array is not nul
3601    // terminated, we just return the rest of the string. The client may know
3602    // some other way that the string is length-bound.
3603 Str = Str.substr(0, Str.find('\0'));
3604 }
3605 return true;
3606}
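
A hedged usage sketch (assumes an LLVM module M that defines a constant global such as @.str = private constant [6 x i8] c"hello\00"; globalIsHello and the name .str are illustrative). With the default TrimAtNul, the returned StringRef stops at the first NUL:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Module.h"
using namespace llvm;

static bool globalIsHello(Module &M) {
  if (GlobalVariable *GV = M.getNamedGlobal(".str")) {
    StringRef S;
    if (getConstantStringInfo(GV, S)) // TrimAtNul defaults to true
      return S == "hello";
  }
  return false;
}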
3607
3608// These next two are very similar to the above, but also look through PHI
3609// nodes.
3610// TODO: See if we can integrate these two together.
3611
3612/// If we can compute the length of the string pointed to by
3613/// the specified pointer, return 'len+1'. If we can't, return 0.
3614static uint64_t GetStringLengthH(const Value *V,
3615 SmallPtrSetImpl<const PHINode*> &PHIs,
3616 unsigned CharSize) {
3617 // Look through noop bitcast instructions.
3618 V = V->stripPointerCasts();
3619
3620 // If this is a PHI node, there are two cases: either we have already seen it
3621 // or we haven't.
3622 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3623 if (!PHIs.insert(PN).second)
3624 return ~0ULL; // already in the set.
3625
3626 // If it was new, see if all the input strings are the same length.
3627 uint64_t LenSoFar = ~0ULL;
3628 for (Value *IncValue : PN->incoming_values()) {
3629 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3630 if (Len == 0) return 0; // Unknown length -> unknown.
3631
3632 if (Len == ~0ULL) continue;
3633
3634 if (Len != LenSoFar && LenSoFar != ~0ULL)
3635 return 0; // Disagree -> unknown.
3636 LenSoFar = Len;
3637 }
3638
3639 // Success, all agree.
3640 return LenSoFar;
3641 }
3642
3643 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3644 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3645 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3646 if (Len1 == 0) return 0;
3647 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3648 if (Len2 == 0) return 0;
3649 if (Len1 == ~0ULL) return Len2;
3650 if (Len2 == ~0ULL) return Len1;
3651 if (Len1 != Len2) return 0;
3652 return Len1;
3653 }
3654
3655 // Otherwise, see if we can read the string.
3656 ConstantDataArraySlice Slice;
3657 if (!getConstantDataArrayInfo(V, Slice, CharSize))
3658 return 0;
3659
3660 if (Slice.Array == nullptr)
3661 return 1;
3662
3663 // Search for nul characters
3664 unsigned NullIndex = 0;
3665 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3666 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3667 break;
3668 }
3669
3670 return NullIndex + 1;
3671}
3672
3673/// If we can compute the length of the string pointed to by
3674/// the specified pointer, return 'len+1'. If we can't, return 0.
3675uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3676 if (!V->getType()->isPointerTy())
3677 return 0;
3678
3679 SmallPtrSet<const PHINode*, 32> PHIs;
3680 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3681 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3682 // an empty string as a length.
3683 return Len == ~0ULL ? 1 : Len;
3684}
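
Note the convention: on success these helpers return strlen(s) + 1 (that is, NullIndex + 1), and 0 means the length is unknown. A standalone restatement in plain C++:

#include <cassert>
#include <cstring>

int main() {
  const char S[] = "abc";
  unsigned NullIndex = 0;
  while (S[NullIndex] != '\0') // scan for the terminator, as above
    ++NullIndex;
  assert(NullIndex + 1 == std::strlen(S) + 1); // == 4 for "abc"
  return 0;
}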
3685
3686const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
3687  assert(Call &&
3688         "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3689 if (const Value *RV = Call->getReturnedArgOperand())
3690 return RV;
3691  // This can be used only as an aliasing property.
3692 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
3693 return Call->getArgOperand(0);
3694 return nullptr;
3695}
3696
3697bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3698 const CallBase *Call) {
3699 return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3700 Call->getIntrinsicID() == Intrinsic::strip_invariant_group;
3701}
3702
3703/// \p PN defines a loop-variant pointer to an object. Check if the
3704/// previous iteration of the loop was referring to the same object as \p PN.
3705static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3706 const LoopInfo *LI) {
3707 // Find the loop-defined value.
3708 Loop *L = LI->getLoopFor(PN->getParent());
3709 if (PN->getNumIncomingValues() != 2)
3710 return true;
3711
3712 // Find the value from previous iteration.
3713 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3714 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3715 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3716 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3717 return true;
3718
3719 // If a new pointer is loaded in the loop, the pointer references a different
3720 // object in every iteration. E.g.:
3721 // for (i)
3722 // int *p = a[i];
3723 // ...
3724 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3725 if (!L->isLoopInvariant(Load->getPointerOperand()))
3726 return false;
3727 return true;
3728}
3729
3730Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3731 unsigned MaxLookup) {
3732 if (!V->getType()->isPointerTy())
3733 return V;
3734 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3735 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3736 V = GEP->getPointerOperand();
3737 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3738 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3739 V = cast<Operator>(V)->getOperand(0);
3740 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3741 if (GA->isInterposable())
3742 return V;
3743 V = GA->getAliasee();
3744 } else if (isa<AllocaInst>(V)) {
3745 // An alloca can't be further simplified.
3746 return V;
3747 } else {
3748 if (auto *Call = dyn_cast<CallBase>(V)) {
3749        // CaptureTracking knows about special capturing properties of some
3750        // intrinsics, such as launder.invariant.group, that can't be expressed
3751        // with attributes but still return an aliasing pointer. Because some
3752        // analyses may assume that a nocapture pointer is never returned from
3753        // such a special intrinsic (the function would otherwise have to be
3754        // marked with the returned attribute), it is crucial to use this helper
3755        // so that we stay in sync with CaptureTracking. Skipping it may cause
3756        // subtle miscompilations where two aliasing pointers are assumed not
3757        // to alias.
3758 if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
3759 V = RP;
3760 continue;
3761 }
3762 }
3763
3764 // See if InstructionSimplify knows any relevant tricks.
3765 if (Instruction *I = dyn_cast<Instruction>(V))
3766 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3767 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3768 V = Simplified;
3769 continue;
3770 }
3771
3772 return V;
3773 }
3774    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3775 }
3776 return V;
3777}
3778
3779void llvm::GetUnderlyingObjects(const Value *V,
3780 SmallVectorImpl<const Value *> &Objects,
3781 const DataLayout &DL, LoopInfo *LI,
3782 unsigned MaxLookup) {
3783 SmallPtrSet<const Value *, 4> Visited;
3784 SmallVector<const Value *, 4> Worklist;
3785 Worklist.push_back(V);
3786 do {
3787 const Value *P = Worklist.pop_back_val();
3788 P = GetUnderlyingObject(P, DL, MaxLookup);
3789
3790 if (!Visited.insert(P).second)
3791 continue;
3792
3793 if (auto *SI = dyn_cast<SelectInst>(P)) {
3794 Worklist.push_back(SI->getTrueValue());
3795 Worklist.push_back(SI->getFalseValue());
3796 continue;
3797 }
3798
3799 if (auto *PN = dyn_cast<PHINode>(P)) {
3800 // If this PHI changes the underlying object in every iteration of the
3801 // loop, don't look through it. Consider:
3802 // int **A;
3803 // for (i) {
3804 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3805 // Curr = A[i];
3806 // *Prev, *Curr;
3807      //   }
3808 // Prev is tracking Curr one iteration behind so they refer to different
3809 // underlying objects.
3810 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3811 isSameUnderlyingObjectInLoop(PN, LI))
3812 for (Value *IncValue : PN->incoming_values())
3813 Worklist.push_back(IncValue);
3814 continue;
3815 }
3816
3817 Objects.push_back(P);
3818 } while (!Worklist.empty());
3819}
3820
3821/// This is the function that does the work of looking through basic
3822/// ptrtoint+arithmetic+inttoptr sequences.
3823static const Value *getUnderlyingObjectFromInt(const Value *V) {
3824 do {
3825 if (const Operator *U = dyn_cast<Operator>(V)) {
3826 // If we find a ptrtoint, we can transfer control back to the
3827 // regular getUnderlyingObjectFromInt.
3828 if (U->getOpcode() == Instruction::PtrToInt)
3829 return U->getOperand(0);
3830 // If we find an add of a constant, a multiplied value, or a phi, it's
3831 // likely that the other operand will lead us to the base
3832 // object. We don't have to worry about the case where the
3833 // object address is somehow being computed by the multiply,
3834 // because our callers only care when the result is an
3835 // identifiable object.
3836 if (U->getOpcode() != Instruction::Add ||
3837 (!isa<ConstantInt>(U->getOperand(1)) &&
3838 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3839 !isa<PHINode>(U->getOperand(1))))
3840 return V;
3841 V = U->getOperand(0);
3842 } else {
3843 return V;
3844 }
3845    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3846 } while (true);
3847}
3848
3849/// This is a wrapper around GetUnderlyingObjects and adds support for basic
3850/// ptrtoint+arithmetic+inttoptr sequences.
3851/// It returns false if GetUnderlyingObjects finds an unidentified object.
3852bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3853 SmallVectorImpl<Value *> &Objects,
3854 const DataLayout &DL) {
3855 SmallPtrSet<const Value *, 16> Visited;
3856 SmallVector<const Value *, 4> Working(1, V);
3857 do {
3858 V = Working.pop_back_val();
3859
3860 SmallVector<const Value *, 4> Objs;
3861 GetUnderlyingObjects(V, Objs, DL);
3862
3863 for (const Value *V : Objs) {
3864 if (!Visited.insert(V).second)
3865 continue;
3866 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3867 const Value *O =
3868 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3869 if (O->getType()->isPointerTy()) {
3870 Working.push_back(O);
3871 continue;
3872 }
3873 }
3874 // If GetUnderlyingObjects fails to find an identifiable object,
3875 // getUnderlyingObjectsForCodeGen also fails for safety.
3876 if (!isIdentifiedObject(V)) {
3877 Objects.clear();
3878 return false;
3879 }
3880 Objects.push_back(const_cast<Value *>(V));
3881 }
3882 } while (!Working.empty());
3883 return true;
3884}
3885
3886/// Return true if the only users of this pointer are lifetime markers.
3887bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3888 for (const User *U : V->users()) {
3889 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3890 if (!II) return false;
3891
3892 if (!II->isLifetimeStartOrEnd())
3893 return false;
3894 }
3895 return true;
3896}
3897
3898bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3899 const Instruction *CtxI,
3900 const DominatorTree *DT) {
3901 const Operator *Inst = dyn_cast<Operator>(V);
3902 if (!Inst)
3903 return false;
3904
3905 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3906 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3907 if (C->canTrap())
3908 return false;
3909
3910 switch (Inst->getOpcode()) {
3911 default:
3912 return true;
3913 case Instruction::UDiv:
3914 case Instruction::URem: {
3915 // x / y is undefined if y == 0.
3916 const APInt *V;
3917 if (match(Inst->getOperand(1), m_APInt(V)))
3918 return *V != 0;
3919 return false;
3920 }
3921 case Instruction::SDiv:
3922 case Instruction::SRem: {
3923    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
3924 const APInt *Numerator, *Denominator;
3925 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3926 return false;
3927 // We cannot hoist this division if the denominator is 0.
3928 if (*Denominator == 0)
3929 return false;
3930    // It's safe to hoist if the denominator is neither 0 nor -1.
3931 if (*Denominator != -1)
3932 return true;
3933 // At this point we know that the denominator is -1. It is safe to hoist as
3934 // long we know that the numerator is not INT_MIN.
3935 if (match(Inst->getOperand(0), m_APInt(Numerator)))
3936 return !Numerator->isMinSignedValue();
3937 // The numerator *might* be MinSignedValue.
3938 return false;
3939 }
3940 case Instruction::Load: {
3941 const LoadInst *LI = cast<LoadInst>(Inst);
3942 if (!LI->isUnordered() ||
3943 // Speculative load may create a race that did not exist in the source.
3944 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3945 // Speculative load may load data from dirty regions.
3946 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3947 LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3948 return false;
3949 const DataLayout &DL = LI->getModule()->getDataLayout();
3950 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3951 LI->getAlignment(), DL, CtxI, DT);
3952 }
3953 case Instruction::Call: {
3954 auto *CI = cast<const CallInst>(Inst);
3955 const Function *Callee = CI->getCalledFunction();
3956
3957 // The called function could have undefined behavior or side-effects, even
3958 // if marked readnone nounwind.
3959 return Callee && Callee->isSpeculatable();
3960 }
3961 case Instruction::VAArg:
3962 case Instruction::Alloca:
3963 case Instruction::Invoke:
3964 case Instruction::CallBr:
3965 case Instruction::PHI:
3966 case Instruction::Store:
3967 case Instruction::Ret:
3968 case Instruction::Br:
3969 case Instruction::IndirectBr:
3970 case Instruction::Switch:
3971 case Instruction::Unreachable:
3972 case Instruction::Fence:
3973 case Instruction::AtomicRMW:
3974 case Instruction::AtomicCmpXchg:
3975 case Instruction::LandingPad:
3976 case Instruction::Resume:
3977 case Instruction::CatchSwitch:
3978 case Instruction::CatchPad:
3979 case Instruction::CatchRet:
3980 case Instruction::CleanupPad:
3981 case Instruction::CleanupRet:
3982 return false; // Misc instructions which have effects
3983 }
3984}
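
The SDiv/SRem case above is conservative for a reason: both division by zero and INT_MIN / -1 are undefined, so a division can be hoisted only when the denominator is provably neither. A plain-C++ sketch of that guard, condensing the m_APInt numerator check into a single flag:

#include <cassert>

static bool safeToHoistSDiv(int Denom, bool NumeratorMayBeIntMin) {
  if (Denom == 0)
    return false; // x / 0 is undefined
  if (Denom == -1 && NumeratorMayBeIntMin)
    return false; // INT_MIN / -1 overflows
  return true;
}

int main() {
  assert(!safeToHoistSDiv(0, false));
  assert(!safeToHoistSDiv(-1, true));
  assert(safeToHoistSDiv(7, true)); // any other denominator is safe
  return 0;
}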
3985
3986bool llvm::mayBeMemoryDependent(const Instruction &I) {
3987 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3988}
3989
3990OverflowResult llvm::computeOverflowForUnsignedMul(
3991 const Value *LHS, const Value *RHS, const DataLayout &DL,
3992 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3993 bool UseInstrInfo) {
3994 // Multiplying n * m significant bits yields a result of n + m significant
3995 // bits. If the total number of significant bits does not exceed the
3996 // result bit width (minus 1), there is no overflow.
3997 // This means if we have enough leading zero bits in the operands
3998 // we can guarantee that the result does not overflow.
3999 // Ref: "Hacker's Delight" by Henry Warren
4000 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4001 KnownBits LHSKnown(BitWidth);
4002 KnownBits RHSKnown(BitWidth);
4003 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
4004 UseInstrInfo);
4005 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
4006 UseInstrInfo);
4007 // Note that underestimating the number of zero bits gives a more
4008 // conservative answer.
4009 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
4010 RHSKnown.countMinLeadingZeros();
4011 // First handle the easy case: if we have enough zero bits there's
4012 // definitely no overflow.
4013 if (ZeroBits >= BitWidth)
4014 return OverflowResult::NeverOverflows;
4015
4016 // Get the largest possible values for each operand.
4017 APInt LHSMax = ~LHSKnown.Zero;
4018 APInt RHSMax = ~RHSKnown.Zero;
4019
4020 // We know the multiply operation doesn't overflow if the maximum values for
4021 // each operand will not overflow after we multiply them together.
4022 bool MaxOverflow;
4023 (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
4024 if (!MaxOverflow)
4025 return OverflowResult::NeverOverflows;
4026
4027 // We know it always overflows if multiplying the smallest possible values for
4028 // the operands also results in overflow.
4029 bool MinOverflow;
4030 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
4031 if (MinOverflow)
4032 return OverflowResult::AlwaysOverflows;
4033
4034 return OverflowResult::MayOverflow;
4035}
4036
4037OverflowResult
4038llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4039 const DataLayout &DL, AssumptionCache *AC,
4040 const Instruction *CxtI,
4041 const DominatorTree *DT, bool UseInstrInfo) {
4042 // Multiplying n * m significant bits yields a result of n + m significant
4043 // bits. If the total number of significant bits does not exceed the
4044 // result bit width (minus 1), there is no overflow.
4045 // This means if we have enough leading sign bits in the operands
4046 // we can guarantee that the result does not overflow.
4047 // Ref: "Hacker's Delight" by Henry Warren
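// For example, with i8 operands: if each operand has at least 5 sign bits,
// each value lies in [-8, 7], so the product lies in [-56, 64], well inside
// the i8 range [-128, 127].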
4048 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4049
4050 // Note that underestimating the number of sign bits gives a more
4051 // conservative answer.
4052 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4053 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4054
4055 // First handle the easy case: if we have enough sign bits there's
4056 // definitely no overflow.
4057 if (SignBits > BitWidth + 1)
4058 return OverflowResult::NeverOverflows;
4059
4060 // There are two ambiguous cases where there can be no overflow:
4061 // SignBits == BitWidth + 1 and
4062 // SignBits == BitWidth
4063 // The second case is difficult to check, therefore we only handle the
4064 // first case.
4065 if (SignBits == BitWidth + 1) {
4066 // It overflows only when both arguments are negative and the true
4067 // product is exactly the minimum negative number.
4068 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4069 // For simplicity we just check if at least one side is not negative.
4070 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4071 nullptr, UseInstrInfo);
4072 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4073 nullptr, UseInstrInfo);
4074 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4075 return OverflowResult::NeverOverflows;
4076 }
4077 return OverflowResult::MayOverflow;
4078}
4079
4080/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4081static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4082 switch (OR) {
4083 case ConstantRange::OverflowResult::MayOverflow:
4084 return OverflowResult::MayOverflow;
4085 case ConstantRange::OverflowResult::AlwaysOverflows:
4086 return OverflowResult::AlwaysOverflows;
4087 case ConstantRange::OverflowResult::NeverOverflows:
4088 return OverflowResult::NeverOverflows;
4089 }
4090 llvm_unreachable("Unknown OverflowResult")::llvm::llvm_unreachable_internal("Unknown OverflowResult", "/build/llvm-toolchain-snapshot-9~svn361465/lib/Analysis/ValueTracking.cpp"
, 4090)
;
4091}
4092
4093/// Combine constant ranges from computeConstantRange() and computeKnownBits().
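/// The two sources are complementary: known bits can capture facts such as
/// "the low bit is clear", while computeConstantRange() can capture interval
/// bounds; intersecting the two yields a range at least as tight as either.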
4094static ConstantRange computeConstantRangeIncludingKnownBits(
4095 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4096 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4097 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4098 KnownBits Known = computeKnownBits(
4099 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4100 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4101 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4102 ConstantRange::PreferredRangeType RangeType =
4103 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4104 return CR1.intersectWith(CR2, RangeType);
4105}
4106
4107OverflowResult llvm::computeOverflowForUnsignedAdd(
4108 const Value *LHS, const Value *RHS, const DataLayout &DL,
4109 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4110 bool UseInstrInfo) {
4111 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4112 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4113 nullptr, UseInstrInfo);
4114 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4115 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4116 nullptr, UseInstrInfo);
4117 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4118}
4119
4120static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4121 const Value *RHS,
4122 const AddOperator *Add,
4123 const DataLayout &DL,
4124 AssumptionCache *AC,
4125 const Instruction *CxtI,
4126 const DominatorTree *DT) {
4127 if (Add && Add->hasNoSignedWrap()) {
4128 return OverflowResult::NeverOverflows;
4129 }
4130
4131 // If LHS and RHS each have at least two sign bits, the addition will look
4132 // like
4133 //
4134 // XX..... +
4135 // YY.....
4136 //
4137 // If the carry into the most significant position is 0, X and Y can't both
4138 // be 1 and therefore the carry out of the addition is also 0.
4139 //
4140 // If the carry into the most significant position is 1, X and Y can't both
4141 // be 0 and therefore the carry out of the addition is also 1.
4142 //
4143 // Since the carry into the most significant position is always equal to
4144 // the carry out of the addition, there is no signed overflow.
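// For example, in i8: values with at least two sign bits lie in [-64, 63],
// so their sum lies in [-128, 126] and never leaves the i8 range.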
4145 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4146 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4147 return OverflowResult::NeverOverflows;
4148
4149 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4150 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4151 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4152 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4153 OverflowResult OR =
4154 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4155 if (OR != OverflowResult::MayOverflow)
4156 return OR;
4157
4158 // The remaining code needs Add to be available. Return early if it is not.
4159 if (!Add)
4160 return OverflowResult::MayOverflow;
4161
4162 // If the sign of Add is the same as at least one of the operands, this add
4163 // CANNOT overflow. If this can be determined from the known bits of the
4164 // operands the above signedAddMayOverflow() check will have already done so.
4165 // The only other way to improve on the known bits is from an assumption, so
4166 // call computeKnownBitsFromAssume() directly.
4167 bool LHSOrRHSKnownNonNegative =
4168 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4169 bool LHSOrRHSKnownNegative =
4170 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4171 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4172 KnownBits AddKnown(LHSRange.getBitWidth());
4173 computeKnownBitsFromAssume(
4174 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4175 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4176 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4177 return OverflowResult::NeverOverflows;
4178 }
4179
4180 return OverflowResult::MayOverflow;
4181}
4182
4183OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4184 const Value *RHS,
4185 const DataLayout &DL,
4186 AssumptionCache *AC,
4187 const Instruction *CxtI,
4188 const DominatorTree *DT) {
4189 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4190 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4191 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4192 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4193 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4194}
4195
4196OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4197 const Value *RHS,
4198 const DataLayout &DL,
4199 AssumptionCache *AC,
4200 const Instruction *CxtI,
4201 const DominatorTree *DT) {
4202 // If LHS and RHS each have at least two sign bits, the subtraction
4203 // cannot overflow.
4204 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4205 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4206 return OverflowResult::NeverOverflows;
4207
4208 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4209 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4210 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4211 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4212 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4213}
4214
4215bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4216 const DominatorTree &DT) {
4217 SmallVector<const BranchInst *, 2> GuardingBranches;
4218 SmallVector<const ExtractValueInst *, 2> Results;
4219
4220 for (const User *U : WO->users()) {
4221 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4222 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4223
4224 if (EVI->getIndices()[0] == 0)
4225 Results.push_back(EVI);
4226 else {
4227 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4228
4229 for (const auto *U : EVI->users())
4230 if (const auto *B = dyn_cast<BranchInst>(U)) {
4231 assert(B->isConditional() && "How else is it using an i1?");
4232 GuardingBranches.push_back(B);
4233 }
4234 }
4235 } else {
4236 // We are using the aggregate directly in a way we don't want to analyze
4237 // here (storing it to a global, say).
4238 return false;
4239 }
4240 }
4241
4242 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4243 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4244 if (!NoWrapEdge.isSingleEdge())
4245 return false;
4246
4247 // Check if all users of the add are provably no-wrap.
4248 for (const auto *Result : Results) {
4249 // If the extractvalue itself is not executed on overflow, then we don't
4250 // need to check each use separately, since domination is transitive.
4251 if (DT.dominates(NoWrapEdge, Result->getParent()))
4252 continue;
4253
4254 for (auto &RU : Result->uses())
4255 if (!DT.dominates(NoWrapEdge, RU))
4256 return false;
4257 }
4258
4259 return true;
4260 };
4261
4262 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4263}
4264
4265
4266OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4267 const DataLayout &DL,
4268 AssumptionCache *AC,
4269 const Instruction *CxtI,
4270 const DominatorTree *DT) {
4271 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4272 Add, DL, AC, CxtI, DT);
4273}
4274
4275OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4276 const Value *RHS,
4277 const DataLayout &DL,
4278 AssumptionCache *AC,
4279 const Instruction *CxtI,
4280 const DominatorTree *DT) {
4281 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4282}
4283
4284bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4285 // A memory operation returns normally if it isn't volatile. A volatile
4286 // operation is allowed to trap.
4287 //
4288 // An atomic operation isn't guaranteed to return in a reasonable amount of
4289 // time because it's possible for another thread to interfere with it for an
4290 // arbitrary length of time, but programs aren't allowed to rely on that.
4291 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4292 return !LI->isVolatile();
4293 if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4294 return !SI->isVolatile();
4295 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4296 return !CXI->isVolatile();
4297 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4298 return !RMWI->isVolatile();
4299 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4300 return !MII->isVolatile();
4301
4302 // If there is no successor, then execution can't transfer to it.
4303 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4304 return !CRI->unwindsToCaller();
4305 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4306 return !CatchSwitch->unwindsToCaller();
4307 if (isa<ResumeInst>(I))
4308 return false;
4309 if (isa<ReturnInst>(I))
4310 return false;
4311 if (isa<UnreachableInst>(I))
4312 return false;
4313
4314 // Calls can throw, or contain an infinite loop, or kill the process.
4315 if (auto CS = ImmutableCallSite(I)) {
4316 // Call sites that throw have implicit non-local control flow.
4317 if (!CS.doesNotThrow())
4318 return false;
4319
4320 // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4321 // etc. and thus not return. However, LLVM already assumes that
4322 //
4323 // - Thread exiting actions are modeled as writes to memory invisible to
4324 // the program.
4325 //
4326 // - Loops that don't have side effects (side effects are volatile/atomic
4327 // stores and IO) always terminate (see http://llvm.org/PR965).
4328 // Furthermore IO itself is also modeled as writes to memory invisible to
4329 // the program.
4330 //
4331 // We rely on those assumptions here, and use the memory effects of the call
4332 // target as a proxy for checking that it always returns.
4333
4334 // FIXME: This isn't aggressive enough; a call which only writes to a global
4335 // is guaranteed to return.
4336 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
4337 match(I, m_Intrinsic<Intrinsic::assume>()) ||
4338 match(I, m_Intrinsic<Intrinsic::sideeffect>()) ||
4339 match(I, m_Intrinsic<Intrinsic::experimental_widenable_condition>());
4340 }
4341
4342 // Other instructions return normally.
4343 return true;
4344}
4345
4346bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4347 // TODO: This is slightly conservative for invoke instructions since exiting
4348 // via an exception *is* normal control flow for them.
4349 for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4350 if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4351 return false;
4352 return true;
4353}
4354
4355bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4356 const Loop *L) {
4357 // The loop header is guaranteed to be executed for every iteration.
4358 //
4359 // FIXME: Relax this constraint to cover all basic blocks that are
4360 // guaranteed to be executed at every iteration.
4361 if (I->getParent() != L->getHeader()) return false;
4362
4363 for (const Instruction &LI : *L->getHeader()) {
4364 if (&LI == I) return true;
4365 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4366 }
4367 llvm_unreachable("Instruction not contained in its own parent basic block.")::llvm::llvm_unreachable_internal("Instruction not contained in its own parent basic block."
, "/build/llvm-toolchain-snapshot-9~svn361465/lib/Analysis/ValueTracking.cpp"
, 4367)
;
4368}
4369
4370bool llvm::propagatesFullPoison(const Instruction *I) {
4371 switch (I->getOpcode()) {
4372 case Instruction::Add:
4373 case Instruction::Sub:
4374 case Instruction::Xor:
4375 case Instruction::Trunc:
4376 case Instruction::BitCast:
4377 case Instruction::AddrSpaceCast:
4378 case Instruction::Mul:
4379 case Instruction::Shl:
4380 case Instruction::GetElementPtr:
4381 // These operations all propagate poison unconditionally. Note that poison
4382 // is not any particular value, so xor or subtraction of poison with
4383 // itself still yields poison, not zero.
4384 return true;
4385
4386 case Instruction::AShr:
4387 case Instruction::SExt:
4388 // For these operations, one bit of the input is replicated across
4389 // multiple output bits. A replicated poison bit is still poison.
4390 return true;
4391
4392 case Instruction::ICmp:
4393 // Comparing poison with any value yields poison. This is why, for
4394 // instance, x s< (x +nsw 1) can be folded to true.
4395 return true;
4396
4397 default:
4398 return false;
4399 }
4400}
4401
4402const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4403 switch (I->getOpcode()) {
4404 case Instruction::Store:
4405 return cast<StoreInst>(I)->getPointerOperand();
4406
4407 case Instruction::Load:
4408 return cast<LoadInst>(I)->getPointerOperand();
4409
4410 case Instruction::AtomicCmpXchg:
4411 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4412
4413 case Instruction::AtomicRMW:
4414 return cast<AtomicRMWInst>(I)->getPointerOperand();
4415
4416 case Instruction::UDiv:
4417 case Instruction::SDiv:
4418 case Instruction::URem:
4419 case Instruction::SRem:
4420 return I->getOperand(1);
4421
4422 default:
4423 return nullptr;
4424 }
4425}
4426
4427bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4428 // We currently only look for uses of poison values within the same basic
4429 // block, as that makes it easier to guarantee that the uses will be
4430 // executed given that PoisonI is executed.
4431 //
4432 // FIXME: Expand this to consider uses beyond the same basic block. To do
4433 // this, look out for the distinction between post-dominance and strong
4434 // post-dominance.
4435 const BasicBlock *BB = PoisonI->getParent();
4436
4437 // Set of instructions that we have proved will yield poison if PoisonI
4438 // does.
4439 SmallSet<const Value *, 16> YieldsPoison;
4440 SmallSet<const BasicBlock *, 4> Visited;
4441 YieldsPoison.insert(PoisonI);
4442 Visited.insert(PoisonI->getParent());
4443
4444 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4445
4446 unsigned Iter = 0;
4447 while (Iter++ < MaxDepth) {
4448 for (auto &I : make_range(Begin, End)) {
4449 if (&I != PoisonI) {
4450 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4451 if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4452 return true;
4453 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4454 return false;
4455 }
4456
4457 // Mark poison that propagates from I through uses of I.
4458 if (YieldsPoison.count(&I)) {
4459 for (const User *User : I.users()) {
4460 const Instruction *UserI = cast<Instruction>(User);
4461 if (propagatesFullPoison(UserI))
4462 YieldsPoison.insert(User);
4463 }
4464 }
4465 }
4466
4467 if (auto *NextBB = BB->getSingleSuccessor()) {
4468 if (Visited.insert(NextBB).second) {
4469 BB = NextBB;
4470 Begin = BB->getFirstNonPHI()->getIterator();
4471 End = BB->end();
4472 continue;
4473 }
4474 }
4475
4476 break;
4477 }
4478 return false;
4479}
4480
4481static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4482 if (FMF.noNaNs())
4483 return true;
4484
4485 if (auto *C = dyn_cast<ConstantFP>(V))
4486 return !C->isNaN();
4487
4488 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4489 if (!C->getElementType()->isFloatingPointTy())
4490 return false;
4491 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4492 if (C->getElementAsAPFloat(I).isNaN())
4493 return false;
4494 }
4495 return true;
4496 }
4497
4498 return false;
4499}
4500
4501static bool isKnownNonZero(const Value *V) {
4502 if (auto *C = dyn_cast<ConstantFP>(V))
4503 return !C->isZero();
4504
4505 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4506 if (!C->getElementType()->isFloatingPointTy())
4507 return false;
4508 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4509 if (C->getElementAsAPFloat(I).isZero())
4510 return false;
4511 }
4512 return true;
4513 }
4514
4515 return false;
4516}
4517
4518/// Match clamp pattern for float types without caring about NaNs or signed zeros.
4519/// Given a non-min/max outer cmp/select from the clamp pattern, this
4520/// function recognizes if it can be substituted by a "canonical" min/max
4521/// pattern.
4522static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4523 Value *CmpLHS, Value *CmpRHS,
4524 Value *TrueVal, Value *FalseVal,
4525 Value *&LHS, Value *&RHS) {
4526 // Try to match
4527 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4528 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4529 // and return a description of the outer Max/Min.
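// For example, (X < 1.0) ? 1.0 : min(X, 2.0) becomes max(1.0, min(X, 2.0)),
// which clamps X to the range [1.0, 2.0].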
4530
4531 // First, check if select has inverse order:
4532 if (CmpRHS == FalseVal) {
4533 std::swap(TrueVal, FalseVal);
4534 Pred = CmpInst::getInversePredicate(Pred);
4535 }
4536
4537 // Assume success now. If there's no match, callers should not use these anyway.
4538 LHS = TrueVal;
4539 RHS = FalseVal;
4540
4541 const APFloat *FC1;
4542 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4543 return {SPF_UNKNOWN, SPNB_NA, false};
4544
4545 const APFloat *FC2;
4546 switch (Pred) {
4547 case CmpInst::FCMP_OLT:
4548 case CmpInst::FCMP_OLE:
4549 case CmpInst::FCMP_ULT:
4550 case CmpInst::FCMP_ULE:
4551 if (match(FalseVal,
4552 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4553 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4554 FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4555 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4556 break;
4557 case CmpInst::FCMP_OGT:
4558 case CmpInst::FCMP_OGE:
4559 case CmpInst::FCMP_UGT:
4560 case CmpInst::FCMP_UGE:
4561 if (match(FalseVal,
4562 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4563 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4564 FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4565 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4566 break;
4567 default:
4568 break;
4569 }
4570
4571 return {SPF_UNKNOWN, SPNB_NA, false};
4572}
4573
4574/// Recognize variations of:
4575/// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4576static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4577 Value *CmpLHS, Value *CmpRHS,
4578 Value *TrueVal, Value *FalseVal) {
4579 // Swap the select operands and predicate to match the patterns below.
4580 if (CmpRHS != TrueVal) {
4581 Pred = ICmpInst::getSwappedPredicate(Pred);
4582 std::swap(TrueVal, FalseVal);
4583 }
4584 const APInt *C1;
4585 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4586 const APInt *C2;
4587 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4588 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4589 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4590 return {SPF_SMAX, SPNB_NA, false};
4591
4592 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4593 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4594 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4595 return {SPF_SMIN, SPNB_NA, false};
4596
4597 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4598 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4599 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4600 return {SPF_UMAX, SPNB_NA, false};
4601
4602 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4603 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4604 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4605 return {SPF_UMIN, SPNB_NA, false};
4606 }
4607 return {SPF_UNKNOWN, SPNB_NA, false};
4608}
4609
4610/// Recognize variations of:
4611/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4612static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4613 Value *CmpLHS, Value *CmpRHS,
4614 Value *TVal, Value *FVal,
4615 unsigned Depth) {
4616 // TODO: Allow FP min/max with nnan/nsz.
4617 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4618
4619 Value *A, *B;
4620 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4621 if (!SelectPatternResult::isMinOrMax(L.Flavor))
4622 return {SPF_UNKNOWN, SPNB_NA, false};
4623
4624 Value *C, *D;
4625 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4626 if (L.Flavor != R.Flavor)
4627 return {SPF_UNKNOWN, SPNB_NA, false};
4628
4629 // We have something like: x Pred y ? min(a, b) : min(c, d).
4630 // Try to match the compare to the min/max operations of the select operands.
4631 // First, make sure we have the right compare predicate.
4632 switch (L.Flavor) {
4633 case SPF_SMIN:
4634 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4635 Pred = ICmpInst::getSwappedPredicate(Pred);
4636 std::swap(CmpLHS, CmpRHS);
4637 }
4638 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4639 break;
4640 return {SPF_UNKNOWN, SPNB_NA, false};
4641 case SPF_SMAX:
4642 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4643 Pred = ICmpInst::getSwappedPredicate(Pred);
4644 std::swap(CmpLHS, CmpRHS);
4645 }
4646 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4647 break;
4648 return {SPF_UNKNOWN, SPNB_NA, false};
4649 case SPF_UMIN:
4650 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4651 Pred = ICmpInst::getSwappedPredicate(Pred);
4652 std::swap(CmpLHS, CmpRHS);
4653 }
4654 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4655 break;
4656 return {SPF_UNKNOWN, SPNB_NA, false};
4657 case SPF_UMAX:
4658 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4659 Pred = ICmpInst::getSwappedPredicate(Pred);
4660 std::swap(CmpLHS, CmpRHS);
4661 }
4662 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4663 break;
4664 return {SPF_UNKNOWN, SPNB_NA, false};
4665 default:
4666 return {SPF_UNKNOWN, SPNB_NA, false};
4667 }
4668
4669 // If there is a common operand in the already matched min/max and the other
4670 // min/max operands match the compare operands (either directly or inverted),
4671 // then this is min/max of the same flavor.
4672
4673 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4674 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4675 if (D == B) {
4676 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4677 match(A, m_Not(m_Specific(CmpRHS)))))
4678 return {L.Flavor, SPNB_NA, false};
4679 }
4680 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4681 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4682 if (C == B) {
4683 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4684 match(A, m_Not(m_Specific(CmpRHS)))))
4685 return {L.Flavor, SPNB_NA, false};
4686 }
4687 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4688 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4689 if (D == A) {
4690 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4691 match(B, m_Not(m_Specific(CmpRHS)))))
4692 return {L.Flavor, SPNB_NA, false};
4693 }
4694 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4695 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4696 if (C == A) {
4697 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4698 match(B, m_Not(m_Specific(CmpRHS)))))
4699 return {L.Flavor, SPNB_NA, false};
4700 }
4701
4702 return {SPF_UNKNOWN, SPNB_NA, false};
4703}
4704
4705/// Match non-obvious integer minimum and maximum sequences.
4706static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4707 Value *CmpLHS, Value *CmpRHS,
4708 Value *TrueVal, Value *FalseVal,
4709 Value *&LHS, Value *&RHS,
4710 unsigned Depth) {
4711 // Assume success. If there's no match, callers should not use these anyway.
4712 LHS = TrueVal;
4713 RHS = FalseVal;
4714
4715 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4716 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4717 return SPR;
4718
4719 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4720 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4721 return SPR;
4722
4723 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4724 return {SPF_UNKNOWN, SPNB_NA, false};
4725
4726 // Z = X -nsw Y
4727 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4728 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4729 if (match(TrueVal, m_Zero()) &&
4730 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4731 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4732
4733 // Z = X -nsw Y
4734 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4735 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4736 if (match(FalseVal, m_Zero()) &&
4737 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4738 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4739
4740 const APInt *C1;
4741 if (!match(CmpRHS, m_APInt(C1)))
4742 return {SPF_UNKNOWN, SPNB_NA, false};
4743
4744 // An unsigned min/max can be written with a signed compare.
4745 const APInt *C2;
4746 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4747 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4748 // Is the sign bit set?
4749 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4750 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4751 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4752 C2->isMaxSignedValue())
4753 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4754
4755 // Is the sign bit clear?
4756 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4757 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4758 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4759 C2->isMinSignedValue())
4760 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4761 }
4762
4763 // Look through 'not' ops to find disguised signed min/max.
4764 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4765 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4766 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4767 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4768 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4769
4770 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4771 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4772 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4773 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4774 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4775
4776 return {SPF_UNKNOWN, SPNB_NA, false};
4777}
4778
4779bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
4780 assert(X && Y && "Invalid operand")((X && Y && "Invalid operand") ? static_cast<
void> (0) : __assert_fail ("X && Y && \"Invalid operand\""
, "/build/llvm-toolchain-snapshot-9~svn361465/lib/Analysis/ValueTracking.cpp"
, 4780, __PRETTY_FUNCTION__))
;
4781
4782 // X = sub (0, Y) || X = sub nsw (0, Y)
4783 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
4784 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
4785 return true;
4786
4787 // Y = sub (0, X) || Y = sub nsw (0, X)
4788 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
4789 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
4790 return true;
4791
4792 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
4793 Value *A, *B;
4794 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
4795 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
4796 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
4797 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
4798}
4799
4800static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4801 FastMathFlags FMF,
4802 Value *CmpLHS, Value *CmpRHS,
4803 Value *TrueVal, Value *FalseVal,
4804 Value *&LHS, Value *&RHS,
4805 unsigned Depth) {
4806 if (CmpInst::isFPPredicate(Pred)) {
4807 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
4808 // 0.0 operand, set the compare's 0.0 operands to that same value for the
4809 // purpose of identifying min/max. Disregard vector constants with undefined
4810 // elements because those cannot be back-propagated for analysis.
4811 Value *OutputZeroVal = nullptr;
4812 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
4813 !cast<Constant>(TrueVal)->containsUndefElement())
4814 OutputZeroVal = TrueVal;
4815 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
4816 !cast<Constant>(FalseVal)->containsUndefElement())
4817 OutputZeroVal = FalseVal;
4818
4819 if (OutputZeroVal) {
4820 if (match(CmpLHS, m_AnyZeroFP()))
4821 CmpLHS = OutputZeroVal;
4822 if (match(CmpRHS, m_AnyZeroFP()))
4823 CmpRHS = OutputZeroVal;
4824 }
4825 }
4826
4827 LHS = CmpLHS;
4828 RHS = CmpRHS;
4829
4830 // Operations on signed zero may return inconsistent results between implementations.
4831 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4832 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4833 // Therefore, we behave conservatively and only proceed if at least one of the
4834 // operands is known to not be zero or if we don't care about signed zero.
4835 switch (Pred) {
4836 default: break;
4837 // FIXME: Include OGT/OLT/UGT/ULT.
4838 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4839 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4840 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4841 !isKnownNonZero(CmpRHS))
4842 return {SPF_UNKNOWN, SPNB_NA, false};
4843 }
4844
4845 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4846 bool Ordered = false;
4847
4848 // When given one NaN and one non-NaN input:
4849 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4850 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4851 // ordered comparison fails), which could be NaN or non-NaN.
4852 // So here we discover exactly what NaN behavior is required/accepted.
4853 if (CmpInst::isFPPredicate(Pred)) {
4854 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4855 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4856
4857 if (LHSSafe && RHSSafe) {
4858 // Both operands are known non-NaN.
4859 NaNBehavior = SPNB_RETURNS_ANY;
4860 } else if (CmpInst::isOrdered(Pred)) {
4861 // An ordered comparison will return false when given a NaN, so it
4862 // returns the RHS.
4863 Ordered = true;
4864 if (LHSSafe)
4865 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4866 NaNBehavior = SPNB_RETURNS_NAN;
4867 else if (RHSSafe)
4868 NaNBehavior = SPNB_RETURNS_OTHER;
4869 else
4870 // Completely unsafe.
4871 return {SPF_UNKNOWN, SPNB_NA, false};
4872 } else {
4873 Ordered = false;
4874 // An unordered comparison will return true when given a NaN, so it
4875 // returns the LHS.
4876 if (LHSSafe)
4877 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4878 NaNBehavior = SPNB_RETURNS_OTHER;
4879 else if (RHSSafe)
4880 NaNBehavior = SPNB_RETURNS_NAN;
4881 else
4882 // Completely unsafe.
4883 return {SPF_UNKNOWN, SPNB_NA, false};
4884 }
4885 }
4886
4887 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4888 std::swap(CmpLHS, CmpRHS);
4889 Pred = CmpInst::getSwappedPredicate(Pred);
4890 if (NaNBehavior == SPNB_RETURNS_NAN)
4891 NaNBehavior = SPNB_RETURNS_OTHER;
4892 else if (NaNBehavior == SPNB_RETURNS_OTHER)
4893 NaNBehavior = SPNB_RETURNS_NAN;
4894 Ordered = !Ordered;
4895 }
4896
4897 // ([if]cmp X, Y) ? X : Y
4898 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4899 switch (Pred) {
4900 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4901 case ICmpInst::ICMP_UGT:
4902 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4903 case ICmpInst::ICMP_SGT:
4904 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4905 case ICmpInst::ICMP_ULT:
4906 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4907 case ICmpInst::ICMP_SLT:
4908 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4909 case FCmpInst::FCMP_UGT:
4910 case FCmpInst::FCMP_UGE:
4911 case FCmpInst::FCMP_OGT:
4912 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4913 case FCmpInst::FCMP_ULT:
4914 case FCmpInst::FCMP_ULE:
4915 case FCmpInst::FCMP_OLT:
4916 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4917 }
4918 }
4919
4920 if (isKnownNegation(TrueVal, FalseVal)) {
4921 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
4922 // match against either LHS or sext(LHS).
4923 auto MaybeSExtCmpLHS =
4924 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
4925 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
4926 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
4927 if (match(TrueVal, MaybeSExtCmpLHS)) {
4928 // Set the return values. If the compare uses the negated value (-X >s 0),
4929 // swap the return values because the negated value is always 'RHS'.
4930 LHS = TrueVal;
4931 RHS = FalseVal;
4932 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
4933 std::swap(LHS, RHS);
4934
4935 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
4936 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
4937 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4938 return {SPF_ABS, SPNB_NA, false};
4939
4940 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
4941 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
4942 return {SPF_ABS, SPNB_NA, false};
4943
4944 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
4945 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
4946 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4947 return {SPF_NABS, SPNB_NA, false};
4948 }
4949 else if (match(FalseVal, MaybeSExtCmpLHS)) {
4950 // Set the return values. If the compare uses the negated value (-X >s 0),
4951 // swap the return values because the negated value is always 'RHS'.
4952 LHS = FalseVal;
4953 RHS = TrueVal;
4954 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
4955 std::swap(LHS, RHS);
4956
4957 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
4958 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
4959 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4960 return {SPF_NABS, SPNB_NA, false};
4961
4962 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
4963 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
4964 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4965 return {SPF_ABS, SPNB_NA, false};
4966 }
4967 }
4968
4969 if (CmpInst::isIntPredicate(Pred))
4970 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4971
4972 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4973 // may return either -0.0 or 0.0, so fcmp/select pair has stricter
4974 // semantics than minNum. Be conservative in such case.
4975 if (NaNBehavior != SPNB_RETURNS_ANY ||
4976 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4977 !isKnownNonZero(CmpRHS)))
4978 return {SPF_UNKNOWN, SPNB_NA, false};
4979
4980 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4981}
4982
4983/// Helps to match a select pattern in case of a type mismatch.
4984///
4985/// The function processes the case when the type of the true and false values
4986/// of a select instruction differs from the type of the cmp instruction
4987/// operands because of a cast instruction. The function checks if it is legal
4988/// to move the cast operation after the "select". If yes, it returns the new
4989/// second value of the "select" (with the assumption that the cast is moved):
4990/// 1. As the operand of the cast instruction when both values of the "select"
4991/// are the same cast instruction.
4992/// 2. As a restored constant (by applying the reverse cast operation) when the
4993/// first value of the "select" is a cast operation and the second value is
4994/// a constant.
4995/// NOTE: We return only the new second value because the first value can be
4996/// accessed as the operand of the cast instruction.
4997static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4998 Instruction::CastOps *CastOp) {
4999 auto *Cast1 = dyn_cast<CastInst>(V1);
5000 if (!Cast1)
5001 return nullptr;
5002
5003 *CastOp = Cast1->getOpcode();
5004 Type *SrcTy = Cast1->getSrcTy();
5005 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5006 // If V1 and V2 are both the same cast from the same type, look through V1.
5007 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5008 return Cast2->getOperand(0);
5009 return nullptr;
5010 }
5011
5012 auto *C = dyn_cast<Constant>(V2);
5013 if (!C)
5014 return nullptr;
5015
5016 Constant *CastedTo = nullptr;
5017 switch (*CastOp) {
5018 case Instruction::ZExt:
5019 if (CmpI->isUnsigned())
5020 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5021 break;
5022 case Instruction::SExt:
5023 if (CmpI->isSigned())
5024 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5025 break;
5026 case Instruction::Trunc:
5027 Constant *CmpConst;
5028 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5029 CmpConst->getType() == SrcTy) {
5030 // Here we have the following case:
5031 //
5032 // %cond = cmp iN %x, CmpConst
5033 // %tr = trunc iN %x to iK
5034 // %narrowsel = select i1 %cond, iK %tr, iK C
5035 //
5036 // We can always move trunc after select operation:
5037 //
5038 // %cond = cmp iN %x, CmpConst
5039 // %widesel = select i1 %cond, iN %x, iN CmpConst
5040 // %tr = trunc iN %widesel to iK
5041 //
5042 // Note that C could be extended in any way because we don't care about
5043 // upper bits after truncation. It can't be abs pattern, because it would
5044 // look like:
5045 //
5046 // select i1 %cond, x, -x.
5047 //
5048 // So only a min/max pattern could be matched. Such a match requires the
5049 // widened C == CmpConst. That is why we set the widened C = CmpConst; the
5050 // condition trunc(CmpConst) == C is checked below.
5051 CastedTo = CmpConst;
5052 } else {
5053 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5054 }
5055 break;
5056 case Instruction::FPTrunc:
5057 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5058 break;
5059 case Instruction::FPExt:
5060 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5061 break;
5062 case Instruction::FPToUI:
5063 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5064 break;
5065 case Instruction::FPToSI:
5066 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5067 break;
5068 case Instruction::UIToFP:
5069 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5070 break;
5071 case Instruction::SIToFP:
5072 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5073 break;
5074 default:
5075 break;
5076 }
5077
5078 if (!CastedTo)
5079 return nullptr;
5080
5081 // Make sure the cast doesn't lose any information.
5082 Constant *CastedBack =
5083 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5084 if (CastedBack != C)
5085 return nullptr;
5086
5087 return CastedTo;
5088}
5089
5090SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5091 Instruction::CastOps *CastOp,
5092 unsigned Depth) {
5093 if (Depth >= MaxDepth)
5094 return {SPF_UNKNOWN, SPNB_NA, false};
5095
5096 SelectInst *SI = dyn_cast<SelectInst>(V);
5097 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5098
5099 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5100 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5101
5102 CmpInst::Predicate Pred = CmpI->getPredicate();
5103 Value *CmpLHS = CmpI->getOperand(0);
5104 Value *CmpRHS = CmpI->getOperand(1);
5105 Value *TrueVal = SI->getTrueValue();
5106 Value *FalseVal = SI->getFalseValue();
5107 FastMathFlags FMF;
5108 if (isa<FPMathOperator>(CmpI))
5109 FMF = CmpI->getFastMathFlags();
5110
5111 // Bail out early.
5112 if (CmpI->isEquality())
5113 return {SPF_UNKNOWN, SPNB_NA, false};
5114
5115 // Deal with type mismatches.
5116 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5117 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5118 // If this is a potential fmin/fmax with a cast to integer, then ignore
5119 // -0.0 because there is no corresponding integer value.
5120 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5121 FMF.setNoSignedZeros();
5122 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5123 cast<CastInst>(TrueVal)->getOperand(0), C,
5124 LHS, RHS, Depth);
5125 }
5126 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5127 // If this is a potential fmin/fmax with a cast to integer, then ignore
5128 // -0.0 because there is no corresponding integer value.
5129 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5130 FMF.setNoSignedZeros();
5131 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5132 C, cast<CastInst>(FalseVal)->getOperand(0),
5133 LHS, RHS, Depth);
5134 }
5135 }
5136 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5137 LHS, RHS, Depth);
5138}
5139
5140CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5141 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5142 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5143 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5144 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5145 if (SPF == SPF_FMINNUM)
5146 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5147 if (SPF == SPF_FMAXNUM)
5148 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5149 llvm_unreachable("unhandled!")::llvm::llvm_unreachable_internal("unhandled!", "/build/llvm-toolchain-snapshot-9~svn361465/lib/Analysis/ValueTracking.cpp"
, 5149)
;
5150}
5151
5152SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5153 if (SPF == SPF_SMIN) return SPF_SMAX;
5154 if (SPF == SPF_UMIN) return SPF_UMAX;
5155 if (SPF == SPF_SMAX) return SPF_SMIN;
5156 if (SPF == SPF_UMAX) return SPF_UMIN;
5157 llvm_unreachable("unhandled!")::llvm::llvm_unreachable_internal("unhandled!", "/build/llvm-toolchain-snapshot-9~svn361465/lib/Analysis/ValueTracking.cpp"
, 5157)
;
5158}
5159
5160CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5161 return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5162}
5163
5164/// Return true if "icmp Pred LHS RHS" is always true.
5165static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5166 const Value *RHS, const DataLayout &DL,
5167 unsigned Depth) {
5168 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
5169 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
5170 return true;
5171
5172 switch (Pred) {
5173 default:
5174 return false;
5175
5176 case CmpInst::ICMP_SLE: {
5177 const APInt *C;
5178
5179 // LHS s<= LHS +_{nsw} C if C >= 0
5180 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
5181 return !C->isNegative();
5182 return false;
5183 }
5184
5185 case CmpInst::ICMP_ULE: {
5186 const APInt *C;
5187
5188 // LHS u<= LHS +_{nuw} C for any C
5189 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
5190 return true;
5191
5192 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
5193 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
5194 const Value *&X,
5195 const APInt *&CA, const APInt *&CB) {
5196 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
5197 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
5198 return true;
5199
5200 // If X & C == 0 then (X | C) == X +_{nuw} C
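// For example, if the two low bits of X are known zero, then X | 1 equals
// X +_{nuw} 1 and X | 3 equals X +_{nuw} 3.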
5201 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
5202 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
5203 KnownBits Known(CA->getBitWidth());
5204 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
5205 /*CxtI*/ nullptr, /*DT*/ nullptr);
5206 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
5207 return true;
5208 }
5209
5210 return false;
5211 };
5212
5213 const Value *X;
5214 const APInt *CLHS, *CRHS;
5215 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
5216 return CLHS->ule(*CRHS);
5217
5218 return false;
5219 }
5220 }
5221}
5222
5223/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
5224/// ALHS ARHS" is true. Otherwise, return None.
5225static Optional<bool>
5226isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
5227 const Value *ARHS, const Value *BLHS, const Value *BRHS,
5228 const DataLayout &DL, unsigned Depth) {
5229 switch (Pred) {
5230 default:
5231 return None;
5232
5233 case CmpInst::ICMP_SLT:
5234 case CmpInst::ICMP_SLE:
5235 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
5236 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
5237 return true;
5238 return None;
5239
5240 case CmpInst::ICMP_ULT:
5241 case CmpInst::ICMP_ULE:
5242 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
5243 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
5244 return true;
5245 return None;
5246 }
5247}
5248
5249/// Return true if the operands of the two compares match. IsSwappedOps is true
5250/// when the operands match, but are swapped.
5251static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
5252 const Value *BLHS, const Value *BRHS,
5253 bool &IsSwappedOps) {
5254
5255 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
5256 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
5257 return IsMatchingOps || IsSwappedOps;
5258}
5259
5260/// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
5261/// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
5262/// Otherwise, return None if we can't infer anything.
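/// For example, "icmp ult X, Y" implies "icmp ule X, Y" is true, and it
/// implies "icmp ugt X, Y" is false.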
5263static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
5264 CmpInst::Predicate BPred,
5265 bool AreSwappedOps) {
5266 // Canonicalize the predicate as if the operands were not commuted.
5267 if (AreSwappedOps)
5268 BPred = ICmpInst::getSwappedPredicate(BPred);
5269
5270 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
5271 return true;
5272 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
5273 return false;
5274
5275 return None;
5276}
5277
5278/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
5279/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
5280/// Otherwise, return None if we can't infer anything.
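/// For example, "icmp ult X, 5" implies "icmp ult X, 10" is true: the exact
/// region [0, 5) lies entirely inside the allowed region [0, 10), so the
/// difference computed below is empty.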
5281static Optional<bool>
5282isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
5283 const ConstantInt *C1,
5284 CmpInst::Predicate BPred,
5285 const ConstantInt *C2) {
5286 ConstantRange DomCR =
5287 ConstantRange::makeExactICmpRegion(APred, C1->getValue());
5288 ConstantRange CR =
5289 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
5290 ConstantRange Intersection = DomCR.intersectWith(CR);
5291 ConstantRange Difference = DomCR.difference(CR);
5292 if (Intersection.isEmptySet())
5293 return false;
5294 if (Difference.isEmptySet())
5295 return true;
5296 return None;
5297}
5298
5299/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
5300/// false. Otherwise, return None if we can't infer anything.
5301static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
5302 const ICmpInst *RHS,
5303 const DataLayout &DL, bool LHSIsTrue,
5304 unsigned Depth) {
5305 Value *ALHS = LHS->getOperand(0);
5306 Value *ARHS = LHS->getOperand(1);
5307 // The rest of the logic assumes the LHS condition is true. If that's not the
5308 // case, invert the predicate to make it so.
5309 ICmpInst::Predicate APred =
5310 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
5311
5312 Value *BLHS = RHS->getOperand(0);
5313 Value *BRHS = RHS->getOperand(1);
5314 ICmpInst::Predicate BPred = RHS->getPredicate();
5315
5316 // Can we infer anything when the two compares have matching operands?
5317 bool AreSwappedOps;
5318 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
5319 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
5320 APred, BPred, AreSwappedOps))
5321 return Implication;
5322 // No amount of additional analysis will infer the second condition, so
5323 // early exit.
5324 return None;
5325 }
5326
5327 // Can we infer anything when the LHS operands match and the RHS operands are
5328 // constants (not necessarily matching)?
5329 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
5330 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
5331 APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
5332 return Implication;
5333 // No amount of additional analysis will infer the second condition, so
5334 // early exit.
5335 return None;
5336 }
5337
5338 if (APred == BPred)
5339 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
5340 return None;
5341}
5342
5343/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
5344/// false. Otherwise, return None if we can't infer anything. We expect the
5345/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
5346static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
5347 const ICmpInst *RHS,
5348 const DataLayout &DL, bool LHSIsTrue,
5349 unsigned Depth) {
5350 // The LHS must be an 'or' or an 'and' instruction.
5351 assert((LHS->getOpcode() == Instruction::And ||
5352 LHS->getOpcode() == Instruction::Or) &&
5353 "Expected LHS to be 'and' or 'or'.");
5354
5355 assert(Depth <= MaxDepth && "Hit recursion limit");
5356
5357 // If the result of an 'or' is false, then we know both legs of the 'or' are
5358 // false. Similarly, if the result of an 'and' is true, then we know both
5359 // legs of the 'and' are true.
5360 Value *ALHS, *ARHS;
5361 if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
5362 (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
5363 // FIXME: Make this non-recursive.
5364 if (Optional<bool> Implication =
5365 isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
5366 return Implication;
5367 if (Optional<bool> Implication =
5368 isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
5369 return Implication;
5370 return None;
5371 }
5372 return None;
5373}
5374
5375Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
5376 const DataLayout &DL, bool LHSIsTrue,
5377 unsigned Depth) {
5378 // Bail out when we hit the limit.
5379 if (Depth == MaxDepth)
5380 return None;
5381
5382 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
5383 // example.
5384 if (LHS->getType() != RHS->getType())
5385 return None;
5386
5387 Type *OpTy = LHS->getType();
5388 assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
5389
5390 // LHS ==> RHS by definition
5391 if (LHS == RHS)
5392 return LHSIsTrue;
5393
5394  // FIXME: Extend the code below to handle vectors.
5395 if (OpTy->isVectorTy())
5396 return None;
5397
5398  assert(OpTy->isIntegerTy(1) && "implied by above");
5399
5400 // Both LHS and RHS are icmps.
5401 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
5402 const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
5403 if (LHSCmp && RHSCmp)
5404 return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
5405
5406 // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
5407 // an icmp. FIXME: Add support for and/or on the RHS.
5408 const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
5409 if (LHSBO && RHSCmp) {
5410 if ((LHSBO->getOpcode() == Instruction::And ||
5411 LHSBO->getOpcode() == Instruction::Or))
5412 return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
5413 }
5414 return None;
5415}
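
A hypothetical caller sketch (foldIfImplied, Guard, and Cmp are illustrative names, not from this file): given a condition Guard known true on the current path, ask whether a later compare Cmp is already decided:

#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

static void foldIfImplied(const Value *Guard, const Value *Cmp,
                          const DataLayout &DL) {
  if (Optional<bool> Imp =
          isImpliedCondition(Guard, Cmp, DL, /*LHSIsTrue=*/true)) {
    // *Imp is the value Cmp must take whenever Guard holds; the caller
    // could fold Cmp to i1 true/false accordingly.
    (void)*Imp;
  }
}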
5416
5417Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
5418 const Instruction *ContextI,
5419 const DataLayout &DL) {
5420  assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
5421 if (!ContextI || !ContextI->getParent())
5422 return None;
5423
5424 // TODO: This is a poor/cheap way to determine dominance. Should we use a
5425  // dominator tree (e.g., from a SimplifyQuery) instead?
5426 const BasicBlock *ContextBB = ContextI->getParent();
5427 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
5428 if (!PredBB)
5429 return None;
5430
5431 // We need a conditional branch in the predecessor.
5432 Value *PredCond;
5433 BasicBlock *TrueBB, *FalseBB;
5434 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
5435 return None;
5436
5437 // The branch should get simplified. Don't bother simplifying this condition.
5438 if (TrueBB == FalseBB)
5439 return None;
5440
5441  assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
5442         "Predecessor block does not point to successor?");
5443
5444 // Is this condition implied by the predecessor condition?
5445 bool CondIsTrue = TrueBB == ContextBB;
5446 return isImpliedCondition(PredCond, Cond, DL, CondIsTrue);
5447}
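
The shape this routine matches, sketched with hypothetical block and value names (the IR in the comments is illustrative): the context block must have a single predecessor ending in a conditional branch, whose condition is then handed to isImpliedCondition with CondIsTrue reflecting which edge was taken:

#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Shape matched (hypothetical IR):
//   pred:
//     %c = icmp ult i32 %x, 8
//     br i1 %c, label %ctx, label %other
//   ctx:            ; single predecessor 'pred'; on this edge %c is true
static void useDomCondition(const Value *Cond, const Instruction *ContextI,
                            const DataLayout &DL) {
  if (Optional<bool> Known = isImpliedByDomCondition(Cond, ContextI, DL)) {
    // *Known is the value Cond must take at ContextI on every path.
    (void)*Known;
  }
}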
5448
5449static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
5450 APInt &Upper, const InstrInfoQuery &IIQ) {
5451 unsigned Width = Lower.getBitWidth();
5452 const APInt *C;
5453 switch (BO.getOpcode()) {
5454 case Instruction::Add:
5455 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5456 // FIXME: If we have both nuw and nsw, we should reduce the range further.
5457 if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5458 // 'add nuw x, C' produces [C, UINT_MAX].
5459 Lower = *C;
5460 } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5461 if (C->isNegative()) {
5462 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
5463 Lower = APInt::getSignedMinValue(Width);
5464 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5465 } else {
5466 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
5467 Lower = APInt::getSignedMinValue(Width) + *C;
5468 Upper = APInt::getSignedMaxValue(Width) + 1;
5469 }
5470 }
5471 }
5472 break;
5473
5474 case Instruction::And:
5475 if (match(BO.getOperand(1), m_APInt(C)))
5476 // 'and x, C' produces [0, C].
5477 Upper = *C + 1;
5478 break;
5479
5480 case Instruction::Or:
5481 if (match(BO.getOperand(1), m_APInt(C)))
5482 // 'or x, C' produces [C, UINT_MAX].
5483 Lower = *C;
5484 break;
5485
5486 case Instruction::AShr:
5487 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5488 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
5489 Lower = APInt::getSignedMinValue(Width).ashr(*C);
5490 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
5491 } else if (match(BO.getOperand(0), m_APInt(C))) {
5492 unsigned ShiftAmount = Width - 1;
5493 if (!C->isNullValue() && IIQ.isExact(&BO))
5494 ShiftAmount = C->countTrailingZeros();
5495 if (C->isNegative()) {
5496 // 'ashr C, x' produces [C, C >> (Width-1)]
5497 Lower = *C;
5498 Upper = C->ashr(ShiftAmount) + 1;
5499 } else {
5500 // 'ashr C, x' produces [C >> (Width-1), C]
5501 Lower = C->ashr(ShiftAmount);
5502 Upper = *C + 1;
5503 }
5504 }
5505 break;
5506
5507 case Instruction::LShr:
5508 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5509 // 'lshr x, C' produces [0, UINT_MAX >> C].
5510 Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
5511 } else if (match(BO.getOperand(0), m_APInt(C))) {
5512 // 'lshr C, x' produces [C >> (Width-1), C].
5513 unsigned ShiftAmount = Width - 1;
5514 if (!C->isNullValue() && IIQ.isExact(&BO))
5515 ShiftAmount = C->countTrailingZeros();
5516 Lower = C->lshr(ShiftAmount);
5517 Upper = *C + 1;
5518 }
5519 break;
5520
5521 case Instruction::Shl:
5522 if (match(BO.getOperand(0), m_APInt(C))) {
5523 if (IIQ.hasNoUnsignedWrap(&BO)) {
5524 // 'shl nuw C, x' produces [C, C << CLZ(C)]
5525 Lower = *C;
5526 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
5527 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
5528 if (C->isNegative()) {
5529 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
5530 unsigned ShiftAmount = C->countLeadingOnes() - 1;
5531 Lower = C->shl(ShiftAmount);
5532 Upper = *C + 1;
5533 } else {
5534 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
5535 unsigned ShiftAmount = C->countLeadingZeros() - 1;
5536 Lower = *C;
5537 Upper = C->shl(ShiftAmount) + 1;
5538 }
5539 }
5540 }
5541 break;
5542
5543 case Instruction::SDiv:
5544 if (match(BO.getOperand(1), m_APInt(C))) {
5545 APInt IntMin = APInt::getSignedMinValue(Width);
5546 APInt IntMax = APInt::getSignedMaxValue(Width);
5547 if (C->isAllOnesValue()) {
5548 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
5549        //    (sdiv INT_MIN, -1 is UB, so the result INT_MIN is excluded)
5550 Lower = IntMin + 1;
5551 Upper = IntMax + 1;
5552 } else if (C->countLeadingZeros() < Width - 1) {
5553 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
5554 // where C != -1 and C != 0 and C != 1
5555 Lower = IntMin.sdiv(*C);
5556 Upper = IntMax.sdiv(*C);
5557 if (Lower.sgt(Upper))
5558 std::swap(Lower, Upper);
5559 Upper = Upper + 1;
5560        assert(Upper != Lower && "Upper part of range has wrapped!");
5561 }
5562 } else if (match(BO.getOperand(0), m_APInt(C))) {
5563 if (C->isMinSignedValue()) {
5564 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
5565 Lower = *C;
5566 Upper = Lower.lshr(1) + 1;
5567 } else {
5568 // 'sdiv C, x' produces [-|C|, |C|].
5569 Upper = C->abs() + 1;
5570 Lower = (-Upper) + 1;
5571 }
5572 }
5573 break;
5574
5575 case Instruction::UDiv:
5576 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5577 // 'udiv x, C' produces [0, UINT_MAX / C].
5578 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
5579 } else if (match(BO.getOperand(0), m_APInt(C))) {
5580 // 'udiv C, x' produces [0, C].
5581 Upper = *C + 1;
5582 }
5583 break;
5584
5585 case Instruction::SRem:
5586 if (match(BO.getOperand(1), m_APInt(C))) {
5587 // 'srem x, C' produces (-|C|, |C|).
5588 Upper = C->abs();
5589 Lower = (-Upper) + 1;
5590 }
5591 break;
5592
5593 case Instruction::URem:
5594 if (match(BO.getOperand(1), m_APInt(C)))
5595 // 'urem x, C' produces [0, C).
5596 Upper = *C;
5597 break;
5598
5599 default:
5600 break;
5601 }
5602}
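
A worked instance of the 'add nsw x, +C' arm above, as a sketch using LLVM's APInt (i8 and C = 5 are illustrative choices; the bounds are half-open, matching how Lower/Upper are consumed by computeConstantRange below):

#include "llvm/ADT/APInt.h"
#include <cassert>
using namespace llvm;

static void checkAddNswRange() {
  APInt C(8, 5);
  APInt Lower = APInt::getSignedMinValue(8) + C;  // SINT_MIN + 5 == -123
  APInt Upper = APInt::getSignedMaxValue(8) + 1;  // 127 + 1 wraps to -128
  assert(Lower.getSExtValue() == -123);
  assert(Upper == APInt::getSignedMinValue(8));   // half-open upper bound
}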
5603
5604static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
5605 APInt &Upper) {
5606 unsigned Width = Lower.getBitWidth();
5607 const APInt *C;
5608 switch (II.getIntrinsicID()) {
5609 case Intrinsic::uadd_sat:
5610 // uadd.sat(x, C) produces [C, UINT_MAX].
5611 if (match(II.getOperand(0), m_APInt(C)) ||
5612 match(II.getOperand(1), m_APInt(C)))
5613 Lower = *C;
5614 break;
5615 case Intrinsic::sadd_sat:
5616 if (match(II.getOperand(0), m_APInt(C)) ||
5617 match(II.getOperand(1), m_APInt(C))) {
5618 if (C->isNegative()) {
5619 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
5620 Lower = APInt::getSignedMinValue(Width);
5621 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5622 } else {
5623 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
5624 Lower = APInt::getSignedMinValue(Width) + *C;
5625 Upper = APInt::getSignedMaxValue(Width) + 1;
5626 }
5627 }
5628 break;
5629 case Intrinsic::usub_sat:
5630 // usub.sat(C, x) produces [0, C].
5631 if (match(II.getOperand(0), m_APInt(C)))
5632 Upper = *C + 1;
5633 // usub.sat(x, C) produces [0, UINT_MAX - C].
5634 else if (match(II.getOperand(1), m_APInt(C)))
5635 Upper = APInt::getMaxValue(Width) - *C + 1;
5636 break;
5637 case Intrinsic::ssub_sat:
5638 if (match(II.getOperand(0), m_APInt(C))) {
5639 if (C->isNegative()) {
5640 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
5641 Lower = APInt::getSignedMinValue(Width);
5642 Upper = *C - APInt::getSignedMinValue(Width) + 1;
5643 } else {
5644 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
5645 Lower = *C - APInt::getSignedMaxValue(Width);
5646 Upper = APInt::getSignedMaxValue(Width) + 1;
5647 }
5648 } else if (match(II.getOperand(1), m_APInt(C))) {
5649 if (C->isNegative()) {
5650 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
5651 Lower = APInt::getSignedMinValue(Width) - *C;
5652 Upper = APInt::getSignedMaxValue(Width) + 1;
5653 } else {
5654 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
5655 Lower = APInt::getSignedMinValue(Width);
5656 Upper = APInt::getSignedMaxValue(Width) - *C + 1;
5657 }
5658 }
5659 break;
5660 default:
5661 break;
5662 }
5663}
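
A worked instance of the uadd.sat arm above (a sketch assuming LLVM headers; i8 and C = 200 are illustrative): a saturating unsigned add can never produce less than C, so Lower becomes 200 while Upper keeps its default of 0, and the half-open pair denotes the set {200, ..., 255}:

#include "llvm/IR/ConstantRange.h"
using namespace llvm;

static ConstantRange uaddSatRangeExample() {
  APInt Lower(8, 200);                             // uadd.sat(x, 200) >= 200
  APInt Upper(8, 0);                               // untouched default
  return ConstantRange::getNonEmpty(Lower, Upper); // the set {200..255}
}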
5664
5665static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
5666 APInt &Upper) {
5667 const Value *LHS, *RHS;
5668 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
5669 if (R.Flavor == SPF_UNKNOWN)
5670 return;
5671
5672 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
5673
5674 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
5675 // If the negation part of the abs (in RHS) has the NSW flag,
5676 // then the result of abs(X) is [0..SIGNED_MAX],
5677 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
5678 Lower = APInt::getNullValue(BitWidth);
5679 if (cast<Instruction>(RHS)->hasNoSignedWrap())
5680 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
5681 else
5682 Upper = APInt::getSignedMinValue(BitWidth) + 1;
5683 return;
5684 }
5685
5686 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
5687 // The result of -abs(X) is <= 0.
5688 Lower = APInt::getSignedMinValue(BitWidth);
5689 Upper = APInt(BitWidth, 1);
5690 return;
5691 }
5692
5693 const APInt *C;
5694 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
5695 return;
5696
5697 switch (R.Flavor) {
5698 case SPF_UMIN:
5699 Upper = *C + 1;
5700 break;
5701 case SPF_UMAX:
5702 Lower = *C;
5703 break;
5704 case SPF_SMIN:
5705 Lower = APInt::getSignedMinValue(BitWidth);
5706 Upper = *C + 1;
5707 break;
5708 case SPF_SMAX:
5709 Lower = *C;
5710 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
5711 break;
5712 default:
5713 break;
5714 }
5715}
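
Two worked instances of the arms above (a sketch; %x and the constants are hypothetical). For 'select (icmp ult %x, 10), %x, 10', matchSelectPattern reports SPF_UMIN with constant 10; the SMAX case is symmetric:

#include "llvm/ADT/APInt.h"
using namespace llvm;

static void selectPatternRangeExamples() {
  unsigned BitWidth = 8;
  APInt C(BitWidth, 10);
  APInt UMinUpper = C + 1;                                  // umin: [0, 11)
  APInt SMaxLower = C;                                      // smax: [10, ...
  APInt SMaxUpper = APInt::getSignedMaxValue(BitWidth) + 1; // ... SINT_MAX]
  (void)UMinUpper; (void)SMaxLower; (void)SMaxUpper;
}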
5716
5717ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo) {
5718  assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
5719
5720 const APInt *C;
5721 if (match(V, m_APInt(C)))
5722 return ConstantRange(*C);
5723
5724 InstrInfoQuery IIQ(UseInstrInfo);
5725 unsigned BitWidth = V->getType()->getScalarSizeInBits();
5726 APInt Lower = APInt(BitWidth, 0);
5727 APInt Upper = APInt(BitWidth, 0);
5728 if (auto *BO = dyn_cast<BinaryOperator>(V))
5729 setLimitsForBinOp(*BO, Lower, Upper, IIQ);
5730 else if (auto *II = dyn_cast<IntrinsicInst>(V))
5731 setLimitsForIntrinsic(*II, Lower, Upper);
5732 else if (auto *SI = dyn_cast<SelectInst>(V))
5733 setLimitsForSelectPattern(*SI, Lower, Upper);
5734
5735 ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
5736
5737 if (auto *I = dyn_cast<Instruction>(V))
5738 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
5739 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
5740
5741 return CR;
5742}
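
A hypothetical caller sketch: use the computed range to prove a bound on a value (fitsInByte is an illustrative helper, not an LLVM API):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

static bool fitsInByte(const Value *V) {
  ConstantRange CR = computeConstantRange(V, /*UseInstrInfo=*/true);
  return CR.getUnsignedMax().ule(255);  // every possible value is <= 255
}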

/build/llvm-toolchain-snapshot-9~svn361465/include/llvm/Analysis/ValueTracking.h

1//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains routines that help analyze properties that chains of
10// computations have.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ANALYSIS_VALUETRACKING_H
15#define LLVM_ANALYSIS_VALUETRACKING_H
16
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/Optional.h"
19#include "llvm/IR/CallSite.h"
20#include "llvm/IR/Constants.h"
21#include "llvm/IR/Instruction.h"
22#include "llvm/IR/Intrinsics.h"
23#include <cassert>
24#include <cstdint>
25
26namespace llvm {
27
28class AddOperator;
29class APInt;
30class AssumptionCache;
31class DataLayout;
32class DominatorTree;
33class GEPOperator;
34class IntrinsicInst;
35class WithOverflowInst;
36struct KnownBits;
37class Loop;
38class LoopInfo;
39class MDNode;
40class OptimizationRemarkEmitter;
41class StringRef;
42class TargetLibraryInfo;
43class Value;
44
45 /// Determine which bits of V are known to be either zero or one and return
46 /// them in the KnownZero/KnownOne bit sets.
47 ///
48 /// This function is defined on values with integer type, values with pointer
49 /// type, and vectors of integers. In the case
50 /// where V is a vector, the known zero and known one values are the
51 /// same width as the vector element, and the bit is set only if it is true
52 /// for all of the elements in the vector.
53 void computeKnownBits(const Value *V, KnownBits &Known,
54 const DataLayout &DL, unsigned Depth = 0,
55 AssumptionCache *AC = nullptr,
56 const Instruction *CxtI = nullptr,
57 const DominatorTree *DT = nullptr,
58 OptimizationRemarkEmitter *ORE = nullptr,
59 bool UseInstrInfo = true);
60
61 /// Returns the known bits rather than passing by reference.
62 KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
63 unsigned Depth = 0, AssumptionCache *AC = nullptr,
64 const Instruction *CxtI = nullptr,
65 const DominatorTree *DT = nullptr,
66 OptimizationRemarkEmitter *ORE = nullptr,
67 bool UseInstrInfo = true);
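
A hypothetical usage sketch of the value-returning overload declared above (signBitIsClear is an illustrative helper):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool signBitIsClear(const Value *V, const DataLayout &DL) {
  KnownBits Known = computeKnownBits(V, DL);
  return Known.isNonNegative();  // sign bit known to be zero
}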
68
69 /// Compute known bits from the range metadata.
70 /// \p Ranges the 'range' metadata to examine
71 /// \p Known the KnownBits structure in which the result is returned
72 void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
73 KnownBits &Known);
74
75 /// Return true if LHS and RHS have no common bits set.
76 bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
77 const DataLayout &DL,
78 AssumptionCache *AC = nullptr,
79 const Instruction *CxtI = nullptr,
80 const DominatorTree *DT = nullptr,
81 bool UseInstrInfo = true);
82
83 /// Return true if the given value is known to have exactly one bit set when
84 /// defined. For vectors return true if every element is known to be a power
85 /// of two when defined. Supports values with integer or pointer type and
86 /// vectors of integers. If 'OrZero' is set, then return true if the given
87 /// value is either a power of two or zero.
88 bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
89 bool OrZero = false, unsigned Depth = 0,
90 AssumptionCache *AC = nullptr,
91 const Instruction *CxtI = nullptr,
92 const DominatorTree *DT = nullptr,
93 bool UseInstrInfo = true);
94
95 bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
96
97 /// Return true if the given value is known to be non-zero when defined. For
98 /// vectors, return true if every element is known to be non-zero when
99 /// defined. For pointers, if the context instruction and dominator tree are
100 /// specified, perform context-sensitive analysis and return true if the
101 /// pointer couldn't possibly be null at the specified instruction.
102 /// Supports values with integer or pointer type and vectors of integers.
103 bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
104 AssumptionCache *AC = nullptr,
105 const Instruction *CxtI = nullptr,
106 const DominatorTree *DT = nullptr,
107 bool UseInstrInfo = true);
108
109 /// Return true if the two given values are negations of each other.
110 /// Currently recognizes the following Value pairs:
111 /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
112 /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
113 bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);
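
A sketch of pattern 1 above, built with IRBuilder (demoNegation and the values are hypothetical; if X is a constant the sub may fold away):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static bool demoNegation(IRBuilder<> &Builder, Value *X) {
  Value *Zero = Constant::getNullValue(X->getType());
  Value *NegX = Builder.CreateSub(Zero, X);  // NegX = sub 0, X
  return isKnownNegation(X, NegX);           // matches pattern 1
}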
114
115 /// Returns true if the given value is known to be non-negative.
116 bool isKnownNonNegative(const Value *V, const DataLayout &DL,
117 unsigned Depth = 0,
118 AssumptionCache *AC = nullptr,
119 const Instruction *CxtI = nullptr,
120 const DominatorTree *DT = nullptr,
121 bool UseInstrInfo = true);
122
123 /// Returns true if the given value is known to be positive (i.e. non-negative
124 /// and non-zero).
125 bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
126 AssumptionCache *AC = nullptr,
127 const Instruction *CxtI = nullptr,
128 const DominatorTree *DT = nullptr,
129 bool UseInstrInfo = true);
130
131 /// Returns true if the given value is known to be negative (i.e. non-positive
132 /// and non-zero).
133 bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
134 AssumptionCache *AC = nullptr,
135 const Instruction *CxtI = nullptr,
136 const DominatorTree *DT = nullptr,
137 bool UseInstrInfo = true);
138
139 /// Return true if the given values are known to be non-equal when defined.
140 /// Supports scalar integer types only.
141 bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
142 AssumptionCache *AC = nullptr,
143 const Instruction *CxtI = nullptr,
144 const DominatorTree *DT = nullptr,
145 bool UseInstrInfo = true);
146
147 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
148 /// simplify operations downstream. Mask is known to be zero for bits that V
149 /// cannot have.
150 ///
151 /// This function is defined on values with integer type, values with pointer
152 /// type, and vectors of integers. In the case
153 /// where V is a vector, the mask, known zero, and known one values are the