Bug Summary

File: lib/Analysis/ValueTracking.cpp
Warning: line 523, column 5
Assigned value is garbage or undefined
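
The class of defect behind this warning is an out-parameter that is declared without an initial value and then read on a path where the callee never wrote to it. A minimal, self-contained C++ sketch of that pattern (hypothetical names, not the actual matchSelectPattern call chain):

    #include <cstdio>

    // Writes Out only on the success path; the failure path leaves it untouched.
    static bool classify(int X, int &Out) {
      if (X > 0) {
        Out = X;
        return true;
      }
      return false;
    }

    int main() {
      int Result;             // declared without an initial value
      classify(-1, Result);   // return value ignored; Result may still be uninitialized
      int Copy = Result;      // analyzer: "Assigned value is garbage or undefined"
      std::printf("%d\n", Copy);
    }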

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ValueTracking.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-7~svn329677/lib/Analysis -I /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn329677/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn329677/build-llvm/lib/Analysis -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-04-11-031539-24776-1 -x c++ /build/llvm-toolchain-snapshot-7~svn329677/lib/Analysis/ValueTracking.cpp

/build/llvm-toolchain-snapshot-7~svn329677/lib/Analysis/ValueTracking.cpp

1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains routines that help analyze properties that chains of
11// computations have.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Analysis/ValueTracking.h"
16#include "llvm/ADT/APFloat.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/Analysis/AssumptionCache.h"
29#include "llvm/Analysis/InstructionSimplify.h"
30#include "llvm/Analysis/Loads.h"
31#include "llvm/Analysis/LoopInfo.h"
32#include "llvm/Analysis/OptimizationRemarkEmitter.h"
33#include "llvm/Analysis/TargetLibraryInfo.h"
34#include "llvm/IR/Argument.h"
35#include "llvm/IR/Attributes.h"
36#include "llvm/IR/BasicBlock.h"
37#include "llvm/IR/CallSite.h"
38#include "llvm/IR/Constant.h"
39#include "llvm/IR/ConstantRange.h"
40#include "llvm/IR/Constants.h"
41#include "llvm/IR/DataLayout.h"
42#include "llvm/IR/DerivedTypes.h"
43#include "llvm/IR/DiagnosticInfo.h"
44#include "llvm/IR/Dominators.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/GetElementPtrTypeIterator.h"
47#include "llvm/IR/GlobalAlias.h"
48#include "llvm/IR/GlobalValue.h"
49#include "llvm/IR/GlobalVariable.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instruction.h"
52#include "llvm/IR/Instructions.h"
53#include "llvm/IR/IntrinsicInst.h"
54#include "llvm/IR/Intrinsics.h"
55#include "llvm/IR/LLVMContext.h"
56#include "llvm/IR/Metadata.h"
57#include "llvm/IR/Module.h"
58#include "llvm/IR/Operator.h"
59#include "llvm/IR/PatternMatch.h"
60#include "llvm/IR/Type.h"
61#include "llvm/IR/User.h"
62#include "llvm/IR/Value.h"
63#include "llvm/Support/Casting.h"
64#include "llvm/Support/CommandLine.h"
65#include "llvm/Support/Compiler.h"
66#include "llvm/Support/ErrorHandling.h"
67#include "llvm/Support/KnownBits.h"
68#include "llvm/Support/MathExtras.h"
69#include <algorithm>
70#include <array>
71#include <cassert>
72#include <cstdint>
73#include <iterator>
74#include <utility>
75
76using namespace llvm;
77using namespace llvm::PatternMatch;
78
79const unsigned MaxDepth = 6;
80
81// Controls the number of uses of the value searched for possible
82// dominating comparisons.
83static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
84 cl::Hidden, cl::init(20));
85
86/// Returns the bitwidth of the given scalar or pointer type. For vector types,
87/// returns the element type's bitwidth.
88static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
89 if (unsigned BitWidth = Ty->getScalarSizeInBits())
90 return BitWidth;
91
92 return DL.getIndexTypeSizeInBits(Ty);
93}
94
95namespace {
96
97// Simplifying using an assume can only be done in a particular control-flow
98// context (the context instruction provides that context). If an assume and
99// the context instruction are not in the same block then the DT helps in
100// figuring out if we can use it.
101struct Query {
102 const DataLayout &DL;
103 AssumptionCache *AC;
104 const Instruction *CxtI;
105 const DominatorTree *DT;
106
107 // Unlike the other analyses, this may be a nullptr because not all clients
108 // provide it currently.
109 OptimizationRemarkEmitter *ORE;
110
111 /// Set of assumptions that should be excluded from further queries.
112 /// This is because of the potential for mutual recursion to cause
113 /// computeKnownBits to repeatedly visit the same assume intrinsic. The
114 /// classic case of this is assume(x = y), which will attempt to determine
115 /// bits in x from bits in y, which will attempt to determine bits in y from
116 /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
117 /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
118 /// (all of which can call computeKnownBits), and so on.
119 std::array<const Value *, MaxDepth> Excluded;
120
121 unsigned NumExcluded = 0;
122
123 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
124 const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
125 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}
126
127 Query(const Query &Q, const Value *NewExcl)
128 : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
129 NumExcluded(Q.NumExcluded) {
130 Excluded = Q.Excluded;
131 Excluded[NumExcluded++] = NewExcl;
132 assert(NumExcluded <= Excluded.size());
133 }
134
135 bool isExcluded(const Value *Value) const {
136 if (NumExcluded == 0)
137 return false;
138 auto End = Excluded.begin() + NumExcluded;
139 return std::find(Excluded.begin(), End, Value) != End;
140 }
141};
142
143} // end anonymous namespace
144
145// Given the provided Value and, potentially, a context instruction, return
146// the preferred context instruction (if any).
147static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
148 // If we've been provided with a context instruction, then use that (provided
149 // it has been inserted).
150 if (CxtI && CxtI->getParent())
151 return CxtI;
152
153 // If the value is really an already-inserted instruction, then use that.
154 CxtI = dyn_cast<Instruction>(V);
155 if (CxtI && CxtI->getParent())
156 return CxtI;
157
158 return nullptr;
159}
160
161static void computeKnownBits(const Value *V, KnownBits &Known,
162 unsigned Depth, const Query &Q);
163
164void llvm::computeKnownBits(const Value *V, KnownBits &Known,
165 const DataLayout &DL, unsigned Depth,
166 AssumptionCache *AC, const Instruction *CxtI,
167 const DominatorTree *DT,
168 OptimizationRemarkEmitter *ORE) {
169 ::computeKnownBits(V, Known, Depth,
170 Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
171}
172
173static KnownBits computeKnownBits(const Value *V, unsigned Depth,
174 const Query &Q);
175
176KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
177 unsigned Depth, AssumptionCache *AC,
178 const Instruction *CxtI,
179 const DominatorTree *DT,
180 OptimizationRemarkEmitter *ORE) {
181 return ::computeKnownBits(V, Depth,
182 Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
183}
184
185bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
186 const DataLayout &DL,
187 AssumptionCache *AC, const Instruction *CxtI,
188 const DominatorTree *DT) {
189 assert(LHS->getType() == RHS->getType() &&
190        "LHS and RHS should have the same type");
191 assert(LHS->getType()->isIntOrIntVectorTy() &&
192        "LHS and RHS should be integers");
193 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
194 KnownBits LHSKnown(IT->getBitWidth());
195 KnownBits RHSKnown(IT->getBitWidth());
196 computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
197 computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
198 return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
199}
200
201bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
202 for (const User *U : CxtI->users()) {
203 if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
204 if (IC->isEquality())
205 if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
206 if (C->isNullValue())
207 continue;
208 return false;
209 }
210 return true;
211}
212
213static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
214 const Query &Q);
215
216bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
217 bool OrZero,
218 unsigned Depth, AssumptionCache *AC,
219 const Instruction *CxtI,
220 const DominatorTree *DT) {
221 return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
222 Query(DL, AC, safeCxtI(V, CxtI), DT));
223}
224
225static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
226
227bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
228 AssumptionCache *AC, const Instruction *CxtI,
229 const DominatorTree *DT) {
230 return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
231}
232
233bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
234 unsigned Depth,
235 AssumptionCache *AC, const Instruction *CxtI,
236 const DominatorTree *DT) {
237 KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
238 return Known.isNonNegative();
239}
240
241bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
242 AssumptionCache *AC, const Instruction *CxtI,
243 const DominatorTree *DT) {
244 if (auto *CI = dyn_cast<ConstantInt>(V))
245 return CI->getValue().isStrictlyPositive();
246
247 // TODO: We're doing two recursive queries here. We should factor this such
248 // that only a single query is needed.
249 return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
250 isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
251}
252
253bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
254 AssumptionCache *AC, const Instruction *CxtI,
255 const DominatorTree *DT) {
256 KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
257 return Known.isNegative();
258}
259
260static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
261
262bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
263 const DataLayout &DL,
264 AssumptionCache *AC, const Instruction *CxtI,
265 const DominatorTree *DT) {
266 return ::isKnownNonEqual(V1, V2, Query(DL, AC,
267 safeCxtI(V1, safeCxtI(V2, CxtI)),
268 DT));
269}
270
271static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
272 const Query &Q);
273
274bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
275 const DataLayout &DL,
276 unsigned Depth, AssumptionCache *AC,
277 const Instruction *CxtI, const DominatorTree *DT) {
278 return ::MaskedValueIsZero(V, Mask, Depth,
279 Query(DL, AC, safeCxtI(V, CxtI), DT));
280}
281
282static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
283 const Query &Q);
284
285unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
286 unsigned Depth, AssumptionCache *AC,
287 const Instruction *CxtI,
288 const DominatorTree *DT) {
289 return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
290}
291
292static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
293 bool NSW,
294 KnownBits &KnownOut, KnownBits &Known2,
295 unsigned Depth, const Query &Q) {
296 unsigned BitWidth = KnownOut.getBitWidth();
297
298 // If an initial sequence of bits in the result is not needed, the
299 // corresponding bits in the operands are not needed.
300 KnownBits LHSKnown(BitWidth);
301 computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
302 computeKnownBits(Op1, Known2, Depth + 1, Q);
303
304 KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
305}
306
307static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
308 KnownBits &Known, KnownBits &Known2,
309 unsigned Depth, const Query &Q) {
310 unsigned BitWidth = Known.getBitWidth();
311 computeKnownBits(Op1, Known, Depth + 1, Q);
312 computeKnownBits(Op0, Known2, Depth + 1, Q);
313
314 bool isKnownNegative = false;
315 bool isKnownNonNegative = false;
316 // If the multiplication is known not to overflow, compute the sign bit.
317 if (NSW) {
318 if (Op0 == Op1) {
319 // The product of a number with itself is non-negative.
320 isKnownNonNegative = true;
321 } else {
322 bool isKnownNonNegativeOp1 = Known.isNonNegative();
323 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
324 bool isKnownNegativeOp1 = Known.isNegative();
325 bool isKnownNegativeOp0 = Known2.isNegative();
326 // The product of two numbers with the same sign is non-negative.
327 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
328 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
329 // The product of a negative number and a non-negative number is either
330 // negative or zero.
331 if (!isKnownNonNegative)
332 isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
333 isKnownNonZero(Op0, Depth, Q)) ||
334 (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
335 isKnownNonZero(Op1, Depth, Q));
336 }
337 }
338
339 assert(!Known.hasConflict() && !Known2.hasConflict());
340 // Compute a conservative estimate for high known-0 bits.
341 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
342 Known2.countMinLeadingZeros(),
343 BitWidth) - BitWidth;
344 LeadZ = std::min(LeadZ, BitWidth);
345
346 // The result of the bottom bits of an integer multiply can be
347 // inferred by looking at the bottom bits of both operands and
348 // multiplying them together.
349 // We can infer at least the minimum number of known trailing bits
350 // of both operands. Depending on number of trailing zeros, we can
351 // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
352 // a and b are divisible by m and n respectively.
353 // We then calculate how many of those bits are inferrable and set
354 // the output. For example, the i8 mul:
355 // a = XXXX1100 (12)
356 // b = XXXX1110 (14)
357 // We know the bottom 3 bits are zero since the first can be divided by
358 // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
359 // Applying the multiplication to the trimmed arguments gets:
360 // XX11 (3)
361 // X111 (7)
362 // -------
363 // XX11
364 // XX11
365 // XX11
366 // XX11
367 // -------
368 // XXXXX01
369 // Which allows us to infer the 2 LSBs. Since we're multiplying the result
370 // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
371 // The proof for this can be described as:
372 // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
373 // (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
374 // umin(countTrailingZeros(C2), C6) +
375 // umin(C5 - umin(countTrailingZeros(C1), C5),
376 // C6 - umin(countTrailingZeros(C2), C6)))) - 1)
377 // %aa = shl i8 %a, C5
378 // %bb = shl i8 %b, C6
379 // %aaa = or i8 %aa, C1
380 // %bbb = or i8 %bb, C2
381 // %mul = mul i8 %aaa, %bbb
382 // %mask = and i8 %mul, C7
383 // =>
384 // %mask = i8 ((C1*C2)&C7)
385 // Where C5, C6 describe the known bits of %a, %b
386 // C1, C2 describe the known bottom bits of %a, %b.
387 // C7 describes the mask of the known bits of the result.
388 APInt Bottom0 = Known.One;
389 APInt Bottom1 = Known2.One;
390
391 // How many times we'd be able to divide each argument by 2 (shr by 1).
392 // This gives us the number of trailing zeros on the multiplication result.
393 unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
394 unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
395 unsigned TrailZero0 = Known.countMinTrailingZeros();
396 unsigned TrailZero1 = Known2.countMinTrailingZeros();
397 unsigned TrailZ = TrailZero0 + TrailZero1;
398
399 // Figure out the fewest known-bits operand.
400 unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
401 TrailBitsKnown1 - TrailZero1);
402 unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
403
404 APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
405 Bottom1.getLoBits(TrailBitsKnown1);
406
407 Known.resetAll();
408 Known.Zero.setHighBits(LeadZ);
409 Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
410 Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
411
412 // Only make use of no-wrap flags if we failed to compute the sign bit
413 // directly. This matters if the multiplication always overflows, in
414 // which case we prefer to follow the result of the direct computation,
415 // though as the program is invoking undefined behaviour we can choose
416 // whatever we like here.
417 if (isKnownNonNegative && !Known.isNegative())
418 Known.makeNonNegative();
419 else if (isKnownNegative && !Known.isNonNegative())
420 Known.makeNegative();
421}
422
423void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
424 KnownBits &Known) {
425 unsigned BitWidth = Known.getBitWidth();
426 unsigned NumRanges = Ranges.getNumOperands() / 2;
427 assert(NumRanges >= 1);
428
429 Known.Zero.setAllBits();
430 Known.One.setAllBits();
431
432 for (unsigned i = 0; i < NumRanges; ++i) {
433 ConstantInt *Lower =
434 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
435 ConstantInt *Upper =
436 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
437 ConstantRange Range(Lower->getValue(), Upper->getValue());
438
439 // The first CommonPrefixBits of all values in Range are equal.
440 unsigned CommonPrefixBits =
441 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
442
443 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
444 Known.One &= Range.getUnsignedMax() & Mask;
445 Known.Zero &= ~Range.getUnsignedMax() & Mask;
446 }
447}
448
449static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
450 SmallVector<const Value *, 16> WorkSet(1, I);
451 SmallPtrSet<const Value *, 32> Visited;
452 SmallPtrSet<const Value *, 16> EphValues;
453
454 // The instruction defining an assumption's condition itself is always
455 // considered ephemeral to that assumption (even if it has other
456 // non-ephemeral users). See r246696's test case for an example.
457 if (is_contained(I->operands(), E))
458 return true;
459
460 while (!WorkSet.empty()) {
461 const Value *V = WorkSet.pop_back_val();
462 if (!Visited.insert(V).second)
463 continue;
464
465 // If all uses of this value are ephemeral, then so is this value.
466 if (llvm::all_of(V->users(), [&](const User *U) {
467 return EphValues.count(U);
468 })) {
469 if (V == E)
470 return true;
471
472 if (V == I || isSafeToSpeculativelyExecute(V)) {
473 EphValues.insert(V);
474 if (const User *U = dyn_cast<User>(V))
475 for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
476 J != JE; ++J)
477 WorkSet.push_back(*J);
478 }
479 }
480 }
481
482 return false;
483}
484
485// Is this an intrinsic that cannot be speculated but also cannot trap?
486bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
487 if (const CallInst *CI = dyn_cast<CallInst>(I))
488 if (Function *F = CI->getCalledFunction())
489 switch (F->getIntrinsicID()) {
490 default: break;
491 // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
492 case Intrinsic::assume:
493 case Intrinsic::sideeffect:
494 case Intrinsic::dbg_declare:
495 case Intrinsic::dbg_value:
496 case Intrinsic::invariant_start:
497 case Intrinsic::invariant_end:
498 case Intrinsic::lifetime_start:
499 case Intrinsic::lifetime_end:
500 case Intrinsic::objectsize:
501 case Intrinsic::ptr_annotation:
502 case Intrinsic::var_annotation:
503 return true;
504 }
505
506 return false;
507}
508
509bool llvm::isValidAssumeForContext(const Instruction *Inv,
510 const Instruction *CxtI,
511 const DominatorTree *DT) {
512 // There are two restrictions on the use of an assume:
513 // 1. The assume must dominate the context (or the control flow must
514 // reach the assume whenever it reaches the context).
515 // 2. The context must not be in the assume's set of ephemeral values
516 // (otherwise we will use the assume to prove that the condition
517 // feeding the assume is trivially true, thus causing the removal of
518 // the assume).
519
520 if (DT) {
521 if (DT->dominates(Inv, CxtI))
522 return true;
523 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
524 // We don't have a DT, but this trivially dominates.
525 return true;
526 }
527
528 // With or without a DT, the only remaining case we will check is if the
529 // instructions are in the same BB. Give up if that is not the case.
530 if (Inv->getParent() != CxtI->getParent())
531 return false;
532
533 // If we have a dom tree, then we now know that the assume doesn't dominate
534 // the other instruction. If we don't have a dom tree then we can check if
535 // the assume is first in the BB.
536 if (!DT) {
537 // Search forward from the assume until we reach the context (or the end
538 // of the block); the common case is that the assume will come first.
539 for (auto I = std::next(BasicBlock::const_iterator(Inv)),
540 IE = Inv->getParent()->end(); I != IE; ++I)
541 if (&*I == CxtI)
542 return true;
543 }
544
545 // The context comes first, but they're both in the same block. Make sure
546 // there is nothing in between that might interrupt the control flow.
547 for (BasicBlock::const_iterator I =
548 std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
549 I != IE; ++I)
550 if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
551 return false;
552
553 return !isEphemeralValueOf(Inv, CxtI);
554}
555
556static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
557 unsigned Depth, const Query &Q) {
558 // Use of assumptions is context-sensitive. If we don't have a context, we
559 // cannot use them!
560 if (!Q.AC || !Q.CxtI)
561 return;
562
563 unsigned BitWidth = Known.getBitWidth();
564
565 // Note that the patterns below need to be kept in sync with the code
566 // in AssumptionCache::updateAffectedValues.
567
568 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
569 if (!AssumeVH)
570 continue;
571 CallInst *I = cast<CallInst>(AssumeVH);
572 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
573        "Got assumption for the wrong function!");
574 if (Q.isExcluded(I))
575 continue;
576
577 // Warning: This loop can end up being somewhat performance sensitive.
578 // We're running this loop once for each value queried, resulting in a
579 // runtime of ~O(#assumes * #values).
580
581 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
582        "must be an assume intrinsic");
583
584 Value *Arg = I->getArgOperand(0);
585
586 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
587 assert(BitWidth == 1 && "assume operand is not i1?");
588 Known.setAllOnes();
589 return;
590 }
591 if (match(Arg, m_Not(m_Specific(V))) &&
592 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
593 assert(BitWidth == 1 && "assume operand is not i1?");
594 Known.setAllZero();
595 return;
596 }
597
598 // The remaining tests are all recursive, so bail out if we hit the limit.
599 if (Depth == MaxDepth)
600 continue;
601
602 Value *A, *B;
603 auto m_V = m_CombineOr(m_Specific(V),
604 m_CombineOr(m_PtrToInt(m_Specific(V)),
605 m_BitCast(m_Specific(V))));
606
607 CmpInst::Predicate Pred;
608 uint64_t C;
609 // assume(v = a)
610 if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
611 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
612 KnownBits RHSKnown(BitWidth);
613 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
614 Known.Zero |= RHSKnown.Zero;
615 Known.One |= RHSKnown.One;
616 // assume(v & b = a)
617 } else if (match(Arg,
618 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
619 Pred == ICmpInst::ICMP_EQ &&
620 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
621 KnownBits RHSKnown(BitWidth);
622 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
623 KnownBits MaskKnown(BitWidth);
624 computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
625
626 // For those bits in the mask that are known to be one, we can propagate
627 // known bits from the RHS to V.
628 Known.Zero |= RHSKnown.Zero & MaskKnown.One;
629 Known.One |= RHSKnown.One & MaskKnown.One;
630 // assume(~(v & b) = a)
631 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
632 m_Value(A))) &&
633 Pred == ICmpInst::ICMP_EQ &&
634 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
635 KnownBits RHSKnown(BitWidth);
636 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
637 KnownBits MaskKnown(BitWidth);
638 computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
639
640 // For those bits in the mask that are known to be one, we can propagate
641 // inverted known bits from the RHS to V.
642 Known.Zero |= RHSKnown.One & MaskKnown.One;
643 Known.One |= RHSKnown.Zero & MaskKnown.One;
644 // assume(v | b = a)
645 } else if (match(Arg,
646 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
647 Pred == ICmpInst::ICMP_EQ &&
648 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
649 KnownBits RHSKnown(BitWidth);
650 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
651 KnownBits BKnown(BitWidth);
652 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
653
654 // For those bits in B that are known to be zero, we can propagate known
655 // bits from the RHS to V.
656 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
657 Known.One |= RHSKnown.One & BKnown.Zero;
658 // assume(~(v | b) = a)
659 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
660 m_Value(A))) &&
661 Pred == ICmpInst::ICMP_EQ &&
662 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
663 KnownBits RHSKnown(BitWidth);
664 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
665 KnownBits BKnown(BitWidth);
666 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
667
668 // For those bits in B that are known to be zero, we can propagate
669 // inverted known bits from the RHS to V.
670 Known.Zero |= RHSKnown.One & BKnown.Zero;
671 Known.One |= RHSKnown.Zero & BKnown.Zero;
672 // assume(v ^ b = a)
673 } else if (match(Arg,
674 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
675 Pred == ICmpInst::ICMP_EQ &&
676 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
677 KnownBits RHSKnown(BitWidth);
678 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
679 KnownBits BKnown(BitWidth);
680 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
681
682 // For those bits in B that are known to be zero, we can propagate known
683 // bits from the RHS to V. For those bits in B that are known to be one,
684 // we can propagate inverted known bits from the RHS to V.
685 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
686 Known.One |= RHSKnown.One & BKnown.Zero;
687 Known.Zero |= RHSKnown.One & BKnown.One;
688 Known.One |= RHSKnown.Zero & BKnown.One;
689 // assume(~(v ^ b) = a)
690 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
691 m_Value(A))) &&
692 Pred == ICmpInst::ICMP_EQ &&
693 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
694 KnownBits RHSKnown(BitWidth);
695 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
696 KnownBits BKnown(BitWidth);
697 computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
698
699 // For those bits in B that are known to be zero, we can propagate
700 // inverted known bits from the RHS to V. For those bits in B that are
701 // known to be one, we can propagate known bits from the RHS to V.
702 Known.Zero |= RHSKnown.One & BKnown.Zero;
703 Known.One |= RHSKnown.Zero & BKnown.Zero;
704 Known.Zero |= RHSKnown.Zero & BKnown.One;
705 Known.One |= RHSKnown.One & BKnown.One;
706 // assume(v << c = a)
707 } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
708 m_Value(A))) &&
709 Pred == ICmpInst::ICMP_EQ &&
710 isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
711 C < BitWidth) {
712 KnownBits RHSKnown(BitWidth);
713 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
714 // For those bits in RHS that are known, we can propagate them to known
715 // bits in V shifted to the right by C.
716 RHSKnown.Zero.lshrInPlace(C);
717 Known.Zero |= RHSKnown.Zero;
718 RHSKnown.One.lshrInPlace(C);
719 Known.One |= RHSKnown.One;
720 // assume(~(v << c) = a)
721 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
722 m_Value(A))) &&
723 Pred == ICmpInst::ICMP_EQ &&
724 isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
725 C < BitWidth) {
726 KnownBits RHSKnown(BitWidth);
727 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
728 // For those bits in RHS that are known, we can propagate them inverted
729 // to known bits in V shifted to the right by C.
730 RHSKnown.One.lshrInPlace(C);
731 Known.Zero |= RHSKnown.One;
732 RHSKnown.Zero.lshrInPlace(C);
733 Known.One |= RHSKnown.Zero;
734 // assume(v >> c = a)
735 } else if (match(Arg,
736 m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
737 m_Value(A))) &&
738 Pred == ICmpInst::ICMP_EQ &&
739 isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
740 C < BitWidth) {
741 KnownBits RHSKnown(BitWidth);
742 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
743 // For those bits in RHS that are known, we can propagate them to known
744 // bits in V shifted to the right by C.
745 Known.Zero |= RHSKnown.Zero << C;
746 Known.One |= RHSKnown.One << C;
747 // assume(~(v >> c) = a)
748 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
749 m_Value(A))) &&
750 Pred == ICmpInst::ICMP_EQ &&
751 isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
752 C < BitWidth) {
753 KnownBits RHSKnown(BitWidth);
754 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
755 // For those bits in RHS that are known, we can propagate them inverted
756 // to known bits in V shifted to the right by C.
757 Known.Zero |= RHSKnown.One << C;
758 Known.One |= RHSKnown.Zero << C;
759 // assume(v >=_s c) where c is non-negative
760 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
761 Pred == ICmpInst::ICMP_SGE &&
762 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
763 KnownBits RHSKnown(BitWidth);
764 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
765
766 if (RHSKnown.isNonNegative()) {
767 // We know that the sign bit is zero.
768 Known.makeNonNegative();
769 }
770 // assume(v >_s c) where c is at least -1.
771 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
772 Pred == ICmpInst::ICMP_SGT &&
773 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
774 KnownBits RHSKnown(BitWidth);
775 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
776
777 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
778 // We know that the sign bit is zero.
779 Known.makeNonNegative();
780 }
781 // assume(v <=_s c) where c is negative
782 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
783 Pred == ICmpInst::ICMP_SLE &&
784 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
785 KnownBits RHSKnown(BitWidth);
786 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
787
788 if (RHSKnown.isNegative()) {
789 // We know that the sign bit is one.
790 Known.makeNegative();
791 }
792 // assume(v <_s c) where c is non-positive
793 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
794 Pred == ICmpInst::ICMP_SLT &&
795 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
796 KnownBits RHSKnown(BitWidth);
797 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
798
799 if (RHSKnown.isZero() || RHSKnown.isNegative()) {
800 // We know that the sign bit is one.
801 Known.makeNegative();
802 }
803 // assume(v <=_u c)
804 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
805 Pred == ICmpInst::ICMP_ULE &&
806 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
807 KnownBits RHSKnown(BitWidth);
808 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
809
810 // Whatever high bits in c are zero are known to be zero.
811 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
812 // assume(v <_u c)
813 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
814 Pred == ICmpInst::ICMP_ULT &&
815 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
816 KnownBits RHSKnown(BitWidth);
817 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
818
819 // If the RHS is known zero, then this assumption must be wrong (nothing
820 // is unsigned less than zero). Signal a conflict and get out of here.
821 if (RHSKnown.isZero()) {
822 Known.Zero.setAllBits();
823 Known.One.setAllBits();
824 break;
825 }
826
827 // Whatever high bits in c are zero are known to be zero (if c is a power
828 // of 2, then one more).
829 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
830 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
831 else
832 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
833 }
834 }
835
836 // If assumptions conflict with each other or previous known bits, then we
837 // have a logical fallacy. It's possible that the assumption is not reachable,
838 // so this isn't a real bug. On the other hand, the program may have undefined
839 // behavior, or we might have a bug in the compiler. We can't assert/crash, so
840 // clear out the known bits, try to warn the user, and hope for the best.
841 if (Known.Zero.intersects(Known.One)) {
842 Known.resetAll();
843
844 if (Q.ORE)
845 Q.ORE->emit([&]() {
846 auto *CxtI = const_cast<Instruction *>(Q.CxtI);
847 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
848 CxtI)
849 << "Detected conflicting code assumptions. Program may "
850 "have undefined behavior, or compiler may have "
851 "internal error.";
852 });
853 }
854}
855
856/// Compute known bits from a shift operator, including those with a
857/// non-constant shift amount. Known is the output of this function. Known2 is a
858/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
859/// operator-specific functions that, given the known-zero or known-one bits
860/// respectively, and a shift amount, compute the implied known-zero or
861/// known-one bits of the shift operator's result respectively for that shift
862/// amount. The results from calling KZF and KOF are conservatively combined for
863/// all permitted shift amounts.
864static void computeKnownBitsFromShiftOperator(
865 const Operator *I, KnownBits &Known, KnownBits &Known2,
866 unsigned Depth, const Query &Q,
867 function_ref<APInt(const APInt &, unsigned)> KZF,
868 function_ref<APInt(const APInt &, unsigned)> KOF) {
869 unsigned BitWidth = Known.getBitWidth();
870
871 if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
872 unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
873
874 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
875 Known.Zero = KZF(Known.Zero, ShiftAmt);
876 Known.One = KOF(Known.One, ShiftAmt);
877 // If the known bits conflict, this must be an overflowing left shift, so
878 // the shift result is poison. We can return anything we want. Choose 0 for
879 // the best folding opportunity.
880 if (Known.hasConflict())
881 Known.setAllZero();
882
883 return;
884 }
885
886 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
887
888 // If the shift amount could be greater than or equal to the bit-width of the
889 // LHS, the value could be poison, but bail out because the check below is
890 // expensive. TODO: Should we just carry on?
891 if ((~Known.Zero).uge(BitWidth)) {
892 Known.resetAll();
893 return;
894 }
895
896 // Note: We cannot use Known.Zero.getLimitedValue() here, because if
897 // BitWidth > 64 and any upper bits are known, we'll end up returning the
898 // limit value (which implies all bits are known).
899 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
900 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
901
902 // It would be more clearly correct to use the two temporaries for this
903 // calculation. Reusing the APInts here to prevent unnecessary allocations.
904 Known.resetAll();
905
906 // If we know the shifter operand is nonzero, we can sometimes infer more
907 // known bits. However this is expensive to compute, so be lazy about it and
908 // only compute it when absolutely necessary.
909 Optional<bool> ShifterOperandIsNonZero;
910
911 // Early exit if we can't constrain any well-defined shift amount.
912 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
913 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
914 ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
915 if (!*ShifterOperandIsNonZero)
916 return;
917 }
918
919 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
920
921 Known.Zero.setAllBits();
922 Known.One.setAllBits();
923 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
924 // Combine the shifted known input bits only for those shift amounts
925 // compatible with its known constraints.
926 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
927 continue;
928 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
929 continue;
930 // If we know the shifter is nonzero, we may be able to infer more known
931 // bits. This check is sunk down as far as possible to avoid the expensive
932 // call to isKnownNonZero if the cheaper checks above fail.
933 if (ShiftAmt == 0) {
934 if (!ShifterOperandIsNonZero.hasValue())
935 ShifterOperandIsNonZero =
936 isKnownNonZero(I->getOperand(1), Depth + 1, Q);
937 if (*ShifterOperandIsNonZero)
938 continue;
939 }
940
941 Known.Zero &= KZF(Known2.Zero, ShiftAmt);
942 Known.One &= KOF(Known2.One, ShiftAmt);
943 }
944
945 // If the known bits conflict, the result is poison. Return a 0 and hope the
946 // caller can further optimize that.
947 if (Known.hasConflict())
948 Known.setAllZero();
949}
950
951static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
952 unsigned Depth, const Query &Q) {
953 unsigned BitWidth = Known.getBitWidth();
954
955 KnownBits Known2(Known);
956 switch (I->getOpcode()) {
1. Control jumps to 'case Select:' at line 1032
957 default: break;
958 case Instruction::Load:
959 if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
960 computeKnownBitsFromRangeMetadata(*MD, Known);
961 break;
962 case Instruction::And: {
963 // If either the LHS or the RHS are Zero, the result is zero.
964 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
965 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
966
967 // Output known-1 bits are only known if set in both the LHS & RHS.
968 Known.One &= Known2.One;
969 // Output known-0 are known to be clear if zero in either the LHS | RHS.
970 Known.Zero |= Known2.Zero;
971
972 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
973 // here we handle the more general case of adding any odd number by
974 // matching the form add(x, add(x, y)) where y is odd.
975 // TODO: This could be generalized to clearing any bit set in y where the
976 // following bit is known to be unset in y.
977 Value *Y = nullptr;
978 if (!Known.Zero[0] && !Known.One[0] &&
979 (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
980 m_Value(Y))) ||
981 match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
982 m_Value(Y))))) {
983 Known2.resetAll();
984 computeKnownBits(Y, Known2, Depth + 1, Q);
985 if (Known2.countMinTrailingOnes() > 0)
986 Known.Zero.setBit(0);
987 }
988 break;
989 }
990 case Instruction::Or:
991 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
992 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
993
994 // Output known-0 bits are only known if clear in both the LHS & RHS.
995 Known.Zero &= Known2.Zero;
996 // Output known-1 are known to be set if set in either the LHS | RHS.
997 Known.One |= Known2.One;
998 break;
999 case Instruction::Xor: {
1000 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1001 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1002
1003 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1004 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
1005 // Output known-1 are known to be set if set in only one of the LHS, RHS.
1006 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
1007 Known.Zero = std::move(KnownZeroOut);
1008 break;
1009 }
1010 case Instruction::Mul: {
1011 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1012 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
1013 Known2, Depth, Q);
1014 break;
1015 }
1016 case Instruction::UDiv: {
1017 // For the purposes of computing leading zeros we can conservatively
1018 // treat a udiv as a logical right shift by the power of 2 known to
1019 // be less than the denominator.
1020 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1021 unsigned LeadZ = Known2.countMinLeadingZeros();
1022
1023 Known2.resetAll();
1024 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1025 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1026 if (RHSMaxLeadingZeros != BitWidth)
1027 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1028
1029 Known.Zero.setHighBits(LeadZ);
1030 break;
1031 }
1032 case Instruction::Select: {
1033 const Value *LHS, *RHS;
2. 'LHS' declared without an initial value
1034 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
3. Passing value via 2nd parameter 'LHS'
4. Calling 'matchSelectPattern'
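Note: the path continues into 'matchSelectPattern' (defined later in this file, outside this excerpt). On the diagnosed path that call chain presumably returns without storing to 'LHS', so the assignment reported in the Bug Summary (line 523, column 5) reads the still-uninitialized value.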
1035 if (SelectPatternResult::isMinOrMax(SPF)) {
1036 computeKnownBits(RHS, Known, Depth + 1, Q);
1037 computeKnownBits(LHS, Known2, Depth + 1, Q);
1038 } else {
1039 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1040 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1041 }
1042
1043 unsigned MaxHighOnes = 0;
1044 unsigned MaxHighZeros = 0;
1045 if (SPF == SPF_SMAX) {
1046 // If both sides are negative, the result is negative.
1047 if (Known.isNegative() && Known2.isNegative())
1048 // We can derive a lower bound on the result by taking the max of the
1049 // leading one bits.
1050 MaxHighOnes =
1051 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1052 // If either side is non-negative, the result is non-negative.
1053 else if (Known.isNonNegative() || Known2.isNonNegative())
1054 MaxHighZeros = 1;
1055 } else if (SPF == SPF_SMIN) {
1056 // If both sides are non-negative, the result is non-negative.
1057 if (Known.isNonNegative() && Known2.isNonNegative())
1058 // We can derive an upper bound on the result by taking the max of the
1059 // leading zero bits.
1060 MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1061 Known2.countMinLeadingZeros());
1062 // If either side is negative, the result is negative.
1063 else if (Known.isNegative() || Known2.isNegative())
1064 MaxHighOnes = 1;
1065 } else if (SPF == SPF_UMAX) {
1066 // We can derive a lower bound on the result by taking the max of the
1067 // leading one bits.
1068 MaxHighOnes =
1069 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1070 } else if (SPF == SPF_UMIN) {
1071 // We can derive an upper bound on the result by taking the max of the
1072 // leading zero bits.
1073 MaxHighZeros =
1074 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1075 }
1076
1077 // Only known if known in both the LHS and RHS.
1078 Known.One &= Known2.One;
1079 Known.Zero &= Known2.Zero;
1080 if (MaxHighOnes > 0)
1081 Known.One.setHighBits(MaxHighOnes);
1082 if (MaxHighZeros > 0)
1083 Known.Zero.setHighBits(MaxHighZeros);
1084 break;
1085 }
1086 case Instruction::FPTrunc:
1087 case Instruction::FPExt:
1088 case Instruction::FPToUI:
1089 case Instruction::FPToSI:
1090 case Instruction::SIToFP:
1091 case Instruction::UIToFP:
1092 break; // Can't work with floating point.
1093 case Instruction::PtrToInt:
1094 case Instruction::IntToPtr:
1095 // Fall through and handle them the same as zext/trunc.
1096 LLVM_FALLTHROUGH;
1097 case Instruction::ZExt:
1098 case Instruction::Trunc: {
1099 Type *SrcTy = I->getOperand(0)->getType();
1100
1101 unsigned SrcBitWidth;
1102 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1103 // which fall through here.
1104 Type *ScalarTy = SrcTy->getScalarType();
1105 SrcBitWidth = ScalarTy->isPointerTy() ?
1106 Q.DL.getIndexTypeSizeInBits(ScalarTy) :
1107 Q.DL.getTypeSizeInBits(ScalarTy);
1108
1109 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1110 Known = Known.zextOrTrunc(SrcBitWidth);
1111 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1112 Known = Known.zextOrTrunc(BitWidth);
1113 // Any top bits are known to be zero.
1114 if (BitWidth > SrcBitWidth)
1115 Known.Zero.setBitsFrom(SrcBitWidth);
1116 break;
1117 }
1118 case Instruction::BitCast: {
1119 Type *SrcTy = I->getOperand(0)->getType();
1120 if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
1121 // TODO: For now, not handling conversions like:
1122 // (bitcast i64 %x to <2 x i32>)
1123 !I->getType()->isVectorTy()) {
1124 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1125 break;
1126 }
1127 break;
1128 }
1129 case Instruction::SExt: {
1130 // Compute the bits in the result that are not present in the input.
1131 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1132
1133 Known = Known.trunc(SrcBitWidth);
1134 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1135 // If the sign bit of the input is known set or clear, then we know the
1136 // top bits of the result.
1137 Known = Known.sext(BitWidth);
1138 break;
1139 }
1140 case Instruction::Shl: {
1141 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
1142 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1143 auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1144 APInt KZResult = KnownZero << ShiftAmt;
1145 KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1146 // If this shift has "nsw" keyword, then the result is either a poison
1147 // value or has the same sign bit as the first operand.
1148 if (NSW && KnownZero.isSignBitSet())
1149 KZResult.setSignBit();
1150 return KZResult;
1151 };
1152
1153 auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1154 APInt KOResult = KnownOne << ShiftAmt;
1155 if (NSW && KnownOne.isSignBitSet())
1156 KOResult.setSignBit();
1157 return KOResult;
1158 };
1159
1160 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1161 break;
1162 }
1163 case Instruction::LShr: {
1164 // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1165 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1166 APInt KZResult = KnownZero.lshr(ShiftAmt);
1167 // High bits known zero.
1168 KZResult.setHighBits(ShiftAmt);
1169 return KZResult;
1170 };
1171
1172 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1173 return KnownOne.lshr(ShiftAmt);
1174 };
1175
1176 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1177 break;
1178 }
1179 case Instruction::AShr: {
1180 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1181 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1182 return KnownZero.ashr(ShiftAmt);
1183 };
1184
1185 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1186 return KnownOne.ashr(ShiftAmt);
1187 };
1188
1189 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1190 break;
1191 }
1192 case Instruction::Sub: {
1193 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1194 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1195 Known, Known2, Depth, Q);
1196 break;
1197 }
1198 case Instruction::Add: {
1199 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1200 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1201 Known, Known2, Depth, Q);
1202 break;
1203 }
1204 case Instruction::SRem:
1205 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1206 APInt RA = Rem->getValue().abs();
1207 if (RA.isPowerOf2()) {
1208 APInt LowBits = RA - 1;
1209 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1210
1211 // The low bits of the first operand are unchanged by the srem.
1212 Known.Zero = Known2.Zero & LowBits;
1213 Known.One = Known2.One & LowBits;
1214
1215 // If the first operand is non-negative or has all low bits zero, then
1216 // the upper bits are all zero.
1217 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1218 Known.Zero |= ~LowBits;
1219
1220 // If the first operand is negative and not all low bits are zero, then
1221 // the upper bits are all one.
1222 if (Known2.isNegative() && LowBits.intersects(Known2.One))
1223 Known.One |= ~LowBits;
1224
1225 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1226 break;
1227 }
1228 }
1229
1230 // The sign bit is the LHS's sign bit, except when the result of the
1231 // remainder is zero.
1232 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1233 // If it's known zero, our sign bit is also zero.
1234 if (Known2.isNonNegative())
1235 Known.makeNonNegative();
1236
1237 break;
1238 case Instruction::URem: {
1239 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1240 const APInt &RA = Rem->getValue();
1241 if (RA.isPowerOf2()) {
1242 APInt LowBits = (RA - 1);
1243 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1244 Known.Zero |= ~LowBits;
1245 Known.One &= LowBits;
1246 break;
1247 }
1248 }
1249
1250 // Since the result is less than or equal to either operand, any leading
1251 // zero bits in either operand must also exist in the result.
1252 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1253 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1254
1255 unsigned Leaders =
1256 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1257 Known.resetAll();
1258 Known.Zero.setHighBits(Leaders);
1259 break;
1260 }
1261
1262 case Instruction::Alloca: {
1263 const AllocaInst *AI = cast<AllocaInst>(I);
1264 unsigned Align = AI->getAlignment();
1265 if (Align == 0)
1266 Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1267
1268 if (Align > 0)
1269 Known.Zero.setLowBits(countTrailingZeros(Align));
1270 break;
1271 }
1272 case Instruction::GetElementPtr: {
1273 // Analyze all of the subscripts of this getelementptr instruction
1274 // to determine if we can prove known low zero bits.
1275 KnownBits LocalKnown(BitWidth);
1276 computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1277 unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1278
1279 gep_type_iterator GTI = gep_type_begin(I);
1280 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1281 Value *Index = I->getOperand(i);
1282 if (StructType *STy = GTI.getStructTypeOrNull()) {
1283 // Handle struct member offset arithmetic.
1284
1285 // Handle the case when the index is a vector zeroinitializer.
1286 Constant *CIndex = cast<Constant>(Index);
1287 if (CIndex->isZeroValue())
1288 continue;
1289
1290 if (CIndex->getType()->isVectorTy())
1291 Index = CIndex->getSplatValue();
1292
1293 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1294 const StructLayout *SL = Q.DL.getStructLayout(STy);
1295 uint64_t Offset = SL->getElementOffset(Idx);
1296 TrailZ = std::min<unsigned>(TrailZ,
1297 countTrailingZeros(Offset));
1298 } else {
1299 // Handle array index arithmetic.
1300 Type *IndexedTy = GTI.getIndexedType();
1301 if (!IndexedTy->isSized()) {
1302 TrailZ = 0;
1303 break;
1304 }
1305 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1306 uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1307 LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1308 computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1309 TrailZ = std::min(TrailZ,
1310 unsigned(countTrailingZeros(TypeSize) +
1311 LocalKnown.countMinTrailingZeros()));
1312 }
1313 }
1314
1315 Known.Zero.setLowBits(TrailZ);
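// Worked example (hypothetical %p): a gep from an 8-byte-aligned base %p
// into an array of i64 keeps at least three trailing zero bits, since the
// element size contributes countTrailingZeros(8) == 3 for any index.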
1316 break;
1317 }
1318 case Instruction::PHI: {
1319 const PHINode *P = cast<PHINode>(I);
1320 // Handle the case of a simple two-predecessor recurrence PHI.
1321 // There's a lot more that could theoretically be done here, but
1322 // this is sufficient to catch some interesting cases.
1323 if (P->getNumIncomingValues() == 2) {
1324 for (unsigned i = 0; i != 2; ++i) {
1325 Value *L = P->getIncomingValue(i);
1326 Value *R = P->getIncomingValue(!i);
1327 Operator *LU = dyn_cast<Operator>(L);
1328 if (!LU)
1329 continue;
1330 unsigned Opcode = LU->getOpcode();
1331 // Check for operations that have the property that if
1332 // both their operands have low zero bits, the result
1333 // will have low zero bits.
1334 if (Opcode == Instruction::Add ||
1335 Opcode == Instruction::Sub ||
1336 Opcode == Instruction::And ||
1337 Opcode == Instruction::Or ||
1338 Opcode == Instruction::Mul) {
1339 Value *LL = LU->getOperand(0);
1340 Value *LR = LU->getOperand(1);
1341 // Find a recurrence.
1342 if (LL == I)
1343 L = LR;
1344 else if (LR == I)
1345 L = LL;
1346 else
1347 break;
1348 // Ok, we have a PHI of the form L op= R. Check for low
1349 // zero bits.
1350 computeKnownBits(R, Known2, Depth + 1, Q);
1351
1352 // We need to take the minimum number of known bits
1353 KnownBits Known3(Known);
1354 computeKnownBits(L, Known3, Depth + 1, Q);
1355
1356 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1357 Known3.countMinTrailingZeros()));
1358
1359 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1360 if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
1361 // If the initial value of the recurrence is nonnegative, and we are
1362 // adding a nonnegative number with nsw, the result can only be
1363 // nonnegative or poison, regardless of the number of times we execute
1364 // the add in the phi recurrence. If the initial value is negative and
1365 // we are adding a negative number with nsw, the result can only be
1366 // negative or poison. Similar arguments apply to sub and mul.
1367 //
1368 // (add non-negative, non-negative) --> non-negative
1369 // (add negative, negative) --> negative
1370 if (Opcode == Instruction::Add) {
1371 if (Known2.isNonNegative() && Known3.isNonNegative())
1372 Known.makeNonNegative();
1373 else if (Known2.isNegative() && Known3.isNegative())
1374 Known.makeNegative();
1375 }
1376
1377 // (sub nsw non-negative, negative) --> non-negative
1378 // (sub nsw negative, non-negative) --> negative
1379 else if (Opcode == Instruction::Sub && LL == I) {
1380 if (Known2.isNonNegative() && Known3.isNegative())
1381 Known.makeNonNegative();
1382 else if (Known2.isNegative() && Known3.isNonNegative())
1383 Known.makeNegative();
1384 }
1385
1386 // (mul nsw non-negative, non-negative) --> non-negative
1387 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1388 Known3.isNonNegative())
1389 Known.makeNonNegative();
1390 }
1391
1392 break;
1393 }
1394 }
1395 }
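// Worked example: for the canonical counter %i = phi [0, %entry],
// [%inc, %loop] with %inc = add nsw i32 %i, 1, both the start (0) and the
// step (1) are non-negative, so the phi is marked known non-negative.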
1396
1397 // Unreachable blocks may have zero-operand PHI nodes.
1398 if (P->getNumIncomingValues() == 0)
1399 break;
1400
1401 // Otherwise take the intersection of the known bit sets of the operands,
1402 // taking conservative care to avoid excessive recursion.
1403 if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1404 // Skip if every incoming value refers back to the PHI itself.
1405 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1406 break;
1407
1408 Known.Zero.setAllBits();
1409 Known.One.setAllBits();
1410 for (Value *IncValue : P->incoming_values()) {
1411 // Skip direct self references.
1412 if (IncValue == P) continue;
1413
1414 Known2 = KnownBits(BitWidth);
1415 // Recurse, but cap the recursion to one level, because we don't
1416 // want to waste time spinning around in loops.
1417 computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
1418 Known.Zero &= Known2.Zero;
1419 Known.One &= Known2.One;
1420 // If all bits have been ruled out, there's no need to check
1421 // more operands.
1422 if (!Known.Zero && !Known.One)
1423 break;
1424 }
1425 }
1426 break;
1427 }
1428 case Instruction::Call:
1429 case Instruction::Invoke:
1430 // If range metadata is attached to this call, set known bits from that,
1431 // and then intersect with known bits based on other properties of the
1432 // function.
1433 if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1434 computeKnownBitsFromRangeMetadata(*MD, Known);
1435 if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1436 computeKnownBits(RV, Known2, Depth + 1, Q);
1437 Known.Zero |= Known2.Zero;
1438 Known.One |= Known2.One;
1439 }
1440 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1441 switch (II->getIntrinsicID()) {
1442 default: break;
1443 case Intrinsic::bitreverse:
1444 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1445 Known.Zero |= Known2.Zero.reverseBits();
1446 Known.One |= Known2.One.reverseBits();
1447 break;
1448 case Intrinsic::bswap:
1449 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1450 Known.Zero |= Known2.Zero.byteSwap();
1451 Known.One |= Known2.One.byteSwap();
1452 break;
1453 case Intrinsic::ctlz: {
1454 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1455 // If we have a known 1, its position is our upper bound.
1456 unsigned PossibleLZ = Known2.One.countLeadingZeros();
1457 // If this call is undefined for 0, the result will be less than 2^n.
1458 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1459 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1460 unsigned LowBits = Log2_32(PossibleLZ)+1;
1461 Known.Zero.setBitsFrom(LowBits);
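// Worked example: for i32 the ctlz result is at most 32, which fits in
// Log2_32(32) + 1 == 6 bits, so bits 6..31 are always known zero.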
1462 break;
1463 }
1464 case Intrinsic::cttz: {
1465 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1466 // If we have a known 1, its position is our upper bound.
1467 unsigned PossibleTZ = Known2.One.countTrailingZeros();
1468 // If this call is undefined for 0, the result will be less than 2^n.
1469 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1470 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1471 unsigned LowBits = Log2_32(PossibleTZ)+1;
1472 Known.Zero.setBitsFrom(LowBits);
1473 break;
1474 }
1475 case Intrinsic::ctpop: {
1476 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1477 // We can bound the space the count needs. Also, bits known to be zero
1478 // can't contribute to the population.
1479 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1480 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1481 Known.Zero.setBitsFrom(LowBits);
1482 // TODO: we could bound KnownOne using the lower bound on the number
1483 // of bits which might be set provided by popcnt KnownOne2.
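// Worked example: if at most three bits of an i8 operand can be set, the
// count is at most 3 and fits in Log2_32(3) + 1 == 2 bits, so bits 2..7
// of the result are known zero.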
1484 break;
1485 }
1486 case Intrinsic::x86_sse42_crc32_64_64:
1487 Known.Zero.setBitsFrom(32);
1488 break;
1489 }
1490 }
1491 break;
1492 case Instruction::ExtractElement:
1493 // Look through extract element. At the moment we keep this simple and skip
1494 // tracking the specific element. But at least we might find information
1495 // valid for all elements of the vector (for example if the vector is sign
1496 // extended, shifted, etc.).
1497 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1498 break;
1499 case Instruction::ExtractValue:
1500 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1501 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1502 if (EVI->getNumIndices() != 1) break;
1503 if (EVI->getIndices()[0] == 0) {
1504 switch (II->getIntrinsicID()) {
1505 default: break;
1506 case Intrinsic::uadd_with_overflow:
1507 case Intrinsic::sadd_with_overflow:
1508 computeKnownBitsAddSub(true, II->getArgOperand(0),
1509 II->getArgOperand(1), false, Known, Known2,
1510 Depth, Q);
1511 break;
1512 case Intrinsic::usub_with_overflow:
1513 case Intrinsic::ssub_with_overflow:
1514 computeKnownBitsAddSub(false, II->getArgOperand(0),
1515 II->getArgOperand(1), false, Known, Known2,
1516 Depth, Q);
1517 break;
1518 case Intrinsic::umul_with_overflow:
1519 case Intrinsic::smul_with_overflow:
1520 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1521 Known, Known2, Depth, Q);
1522 break;
1523 }
1524 }
1525 }
1526 }
1527}
1528
1529/// Determine which bits of V are known to be either zero or one and return
1530/// them.
1531KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1532 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1533 computeKnownBits(V, Known, Depth, Q);
1534 return Known;
1535}
1536
1537/// Determine which bits of V are known to be either zero or one and return
1538/// them in the Known bit set.
1539///
1540/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1541/// we cannot optimize based on the assumption that it is zero without changing
1542/// it to be an explicit zero. If we don't change it to zero, other code could
1543/// be optimized based on the contradictory assumption that it is non-zero.
1544/// Because instcombine aggressively folds operations with undef args anyway,
1545/// this won't lose us code quality.
1546///
1547/// This function is defined on values with integer type, values with pointer
1548/// type, and vectors of integers. In the case
1549/// where V is a vector, the known zero and known one values are the
1550/// same width as the vector element, and the bit is set only if it is true
1551/// for all of the elements in the vector.
1552void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1553 const Query &Q) {
1554 assert(V && "No Value?");
1555 assert(Depth <= MaxDepth && "Limit Search Depth");
1556 unsigned BitWidth = Known.getBitWidth();
1557
1558 assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1559 V->getType()->isPtrOrPtrVectorTy()) &&
1560 "Not integer or pointer type!");
1561
1562 Type *ScalarTy = V->getType()->getScalarType();
1563 unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1564 Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1565 assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1566 (void)BitWidth;
1567 (void)ExpectedWidth;
1568
1569 const APInt *C;
1570 if (match(V, m_APInt(C))) {
1571 // We know all of the bits for a scalar constant or a splat vector constant!
1572 Known.One = *C;
1573 Known.Zero = ~Known.One;
1574 return;
1575 }
1576 // Null and aggregate-zero are all-zeros.
1577 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1578 Known.setAllZero();
1579 return;
1580 }
1581 // Handle a constant vector by taking the intersection of the known bits of
1582 // each element.
1583 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1584 // We know that CDS must be a vector of integers. Take the intersection of
1585 // each element.
1586 Known.Zero.setAllBits(); Known.One.setAllBits();
1587 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1588 APInt Elt = CDS->getElementAsAPInt(i);
1589 Known.Zero &= ~Elt;
1590 Known.One &= Elt;
1591 }
1592 return;
1593 }
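// Worked example: for the constant vector <2 x i8> <i8 5, i8 7>, the loop
// above leaves Known.One == 0b00000101 (set in both elements) and
// Known.Zero == 0b11111000 (clear in both elements).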
1594
1595 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1596 // We know that CV must be a vector of integers. Take the intersection of
1597 // each element.
1598 Known.Zero.setAllBits(); Known.One.setAllBits();
1599 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1600 Constant *Element = CV->getAggregateElement(i);
1601 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1602 if (!ElementCI) {
1603 Known.resetAll();
1604 return;
1605 }
1606 const APInt &Elt = ElementCI->getValue();
1607 Known.Zero &= ~Elt;
1608 Known.One &= Elt;
1609 }
1610 return;
1611 }
1612
1613 // Start out not knowing anything.
1614 Known.resetAll();
1615
1616 // We can't imply anything about undefs.
1617 if (isa<UndefValue>(V))
1618 return;
1619
1620 // There's no point in looking through other users of ConstantData for
1621 // assumptions. Confirm that we've handled them all.
1622 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1623
1624 // Limit search depth.
1625 // All recursive calls that increase depth must come after this.
1626 if (Depth == MaxDepth)
1627 return;
1628
1629 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1630 // the bits of its aliasee.
1631 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1632 if (!GA->isInterposable())
1633 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1634 return;
1635 }
1636
1637 if (const Operator *I = dyn_cast<Operator>(V))
1638 computeKnownBitsFromOperator(I, Known, Depth, Q);
1639
1640 // Aligned pointers have trailing zeros - refine Known.Zero set
1641 if (V->getType()->isPointerTy()) {
1642 unsigned Align = V->getPointerAlignment(Q.DL);
1643 if (Align)
1644 Known.Zero.setLowBits(countTrailingZeros(Align));
1645 }
1646
1647 // computeKnownBitsFromAssume strictly refines Known.
1648 // Therefore, we run them after computeKnownBitsFromOperator.
1649
1650 // Check whether a nearby assume intrinsic can determine some known bits.
1651 computeKnownBitsFromAssume(V, Known, Depth, Q);
1652
1653 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1654}
1655
1656/// Return true if the given value is known to have exactly one
1657/// bit set when defined. For vectors return true if every element is known to
1658/// be a power of two when defined. Supports values with integer or pointer
1659/// types and vectors of integers.
1660bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1661 const Query &Q) {
1662 assert(Depth <= MaxDepth && "Limit Search Depth");
1663
1664 // Attempt to match against constants.
1665 if (OrZero && match(V, m_Power2OrZero()))
1666 return true;
1667 if (match(V, m_Power2()))
1668 return true;
1669
1670 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1671 // it is shifted off the end then the result is undefined.
1672 if (match(V, m_Shl(m_One(), m_Value())))
1673 return true;
1674
1675 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1676 // the bottom. If it is shifted off the bottom then the result is undefined.
1677 if (match(V, m_LShr(m_SignMask(), m_Value())))
1678 return true;
1679
1680 // The remaining tests are all recursive, so bail out if we hit the limit.
1681 if (Depth++ == MaxDepth)
1682 return false;
1683
1684 Value *X = nullptr, *Y = nullptr;
1685 // A shift left or a logical shift right of a power of two is a power of two
1686 // or zero.
1687 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1688 match(V, m_LShr(m_Value(X), m_Value()))))
1689 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1690
1691 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1692 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1693
1694 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1695 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1696 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1697
1698 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1699 // A power of two and'd with anything is a power of two or zero.
1700 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1701 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1702 return true;
1703 // X & (-X) is always a power of two or zero.
1704 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1705 return true;
1706 return false;
1707 }
1708
1709 // Adding a power-of-two or zero to the same power-of-two or zero yields
1710 // either the original power-of-two, a larger power-of-two or zero.
1711 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1712 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1713 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1714 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1715 match(X, m_And(m_Value(), m_Specific(Y))))
1716 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1717 return true;
1718 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1719 match(Y, m_And(m_Value(), m_Specific(X))))
1720 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1721 return true;
1722
1723 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1724 KnownBits LHSBits(BitWidth);
1725 computeKnownBits(X, LHSBits, Depth, Q);
1726
1727 KnownBits RHSBits(BitWidth);
1728 computeKnownBits(Y, RHSBits, Depth, Q);
1729 // If i8 V is a power of two or zero:
1730 // ZeroBits: 1 1 1 0 1 1 1 1
1731 // ~ZeroBits: 0 0 0 1 0 0 0 0
1732 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1733 // If OrZero isn't set, we cannot give back a zero result.
1734 // Make sure either the LHS or RHS has a bit set.
1735 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1736 return true;
1737 }
1738 }
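// Worked example: if Y is a power of two or zero and X == (Y & M) for some
// M, then X is either Y or 0, so X + Y is either 2*Y or Y -- in both cases
// a power of two or zero.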
1739
1740 // An exact divide or right shift can only shift off zero bits, so the result
1741 // is a power of two only if the first operand is a power of two and not
1742 // copying a sign bit (sdiv int_min, 2).
1743 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1744 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1745 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1746 Depth, Q);
1747 }
1748
1749 return false;
1750}
1751
1752/// \brief Test whether a GEP's result is known to be non-null.
1753///
1754/// Uses properties inherent in a GEP to try to determine whether it is known
1755/// to be non-null.
1756///
1757/// Currently this routine does not support vector GEPs.
1758static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1759 const Query &Q) {
1760 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1761 return false;
1762
1763 // FIXME: Support vector-GEPs.
1764 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1765
1766 // If the base pointer is non-null, we cannot walk to a null address with an
1767 // inbounds GEP in address space zero.
1768 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1769 return true;
1770
1771 // Walk the GEP operands and see if any operand introduces a non-zero offset.
1772 // If so, then the GEP cannot produce a null pointer, as doing so would
1773 // inherently violate the inbounds contract within address space zero.
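// Worked example (hypothetical %base): "gep inbounds i32, i32* %base, i64 1"
// in address space 0 adds a known non-zero offset, so the loop below proves
// the result non-null even when nothing is known about %base.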
1774 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1775 GTI != GTE; ++GTI) {
1776 // Struct types are easy -- they must always be indexed by a constant.
1777 if (StructType *STy = GTI.getStructTypeOrNull()) {
1778 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1779 unsigned ElementIdx = OpC->getZExtValue();
1780 const StructLayout *SL = Q.DL.getStructLayout(STy);
1781 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1782 if (ElementOffset > 0)
1783 return true;
1784 continue;
1785 }
1786
1787 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1788 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1789 continue;
1790
1791 // Fast path the constant operand case both for efficiency and so we don't
1792 // increment Depth when just zipping down an all-constant GEP.
1793 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1794 if (!OpC->isZero())
1795 return true;
1796 continue;
1797 }
1798
1799 // We post-increment Depth here because while isKnownNonZero increments it
1800 // as well, when we pop back up that increment won't persist. We don't want
1801 // to recurse 10k times just because we have 10k GEP operands. We don't
1802 // bail out completely because we want to handle constant GEPs regardless
1803 // of depth.
1804 if (Depth++ >= MaxDepth)
1805 continue;
1806
1807 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1808 return true;
1809 }
1810
1811 return false;
1812}
1813
1814static bool isKnownNonNullFromDominatingCondition(const Value *V,
1815 const Instruction *CtxI,
1816 const DominatorTree *DT) {
1817 assert(V->getType()->isPointerTy() && "V must be pointer type");
1818 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1819
1820 if (!CtxI || !DT)
1821 return false;
1822
1823 unsigned NumUsesExplored = 0;
1824 for (auto *U : V->users()) {
1825 // Avoid massive lists
1826 if (NumUsesExplored >= DomConditionsMaxUses)
1827 break;
1828 NumUsesExplored++;
1829
1830 // If the value is used as an argument to a call or invoke, then argument
1831 // attributes may provide an answer about null-ness.
1832 if (auto CS = ImmutableCallSite(U))
1833 if (auto *CalledFunc = CS.getCalledFunction())
1834 for (const Argument &Arg : CalledFunc->args())
1835 if (CS.getArgOperand(Arg.getArgNo()) == V &&
1836 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1837 return true;
1838
1839 // Consider only compare instructions uniquely controlling a branch
1840 CmpInst::Predicate Pred;
1841 if (!match(const_cast<User *>(U),
1842 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1843 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1844 continue;
1845
1846 for (auto *CmpU : U->users()) {
1847 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
1848 assert(BI->isConditional() && "uses a comparison!");
1849
1850 BasicBlock *NonNullSuccessor =
1851 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1852 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1853 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1854 return true;
1855 } else if (Pred == ICmpInst::ICMP_NE &&
1856 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1857 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
1858 return true;
1859 }
1860 }
1861 }
1862
1863 return false;
1864}
1865
1866/// Does the 'Range' metadata (which must be a valid MD_range operand list)
1867/// ensure that the value it's attached to is never equal to 'Value'?
1868/// 'RangeType' is the type of the value described by the range.
1869static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1870 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1871 assert(NumRanges >= 1);
1872 for (unsigned i = 0; i < NumRanges; ++i) {
1873 ConstantInt *Lower =
1874 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1875 ConstantInt *Upper =
1876 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1877 ConstantRange Range(Lower->getValue(), Upper->getValue());
1878 if (Range.contains(Value))
1879 return false;
1880 }
1881 return true;
1882}
1883
1884/// Return true if the given value is known to be non-zero when defined. For
1885/// vectors, return true if every element is known to be non-zero when
1886/// defined. For pointers, if the context instruction and dominator tree are
1887/// specified, perform context-sensitive analysis and return true if the
1888/// pointer couldn't possibly be null at the specified instruction.
1889/// Supports values with integer or pointer type and vectors of integers.
1890bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1891 if (auto *C = dyn_cast<Constant>(V)) {
1892 if (C->isNullValue())
1893 return false;
1894 if (isa<ConstantInt>(C))
1895 // Must be non-zero due to null test above.
1896 return true;
1897
1898 // For constant vectors, check that all elements are undefined or known
1899 // non-zero to determine that the whole vector is known non-zero.
1900 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1901 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1902 Constant *Elt = C->getAggregateElement(i);
1903 if (!Elt || Elt->isNullValue())
1904 return false;
1905 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1906 return false;
1907 }
1908 return true;
1909 }
1910
1911 // A global variable in address space 0 is non-null unless it is extern weak
1912 // or an absolute symbol reference. Other address spaces may have null as a
1913 // valid address for a global, so we can't assume anything.
1914 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1915 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
1916 GV->getType()->getAddressSpace() == 0)
1917 return true;
1918 } else
1919 return false;
1920 }
1921
1922 if (auto *I = dyn_cast<Instruction>(V)) {
1923 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1924 // If the possible ranges don't contain zero, then the value is
1925 // definitely non-zero.
1926 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1927 const APInt ZeroValue(Ty->getBitWidth(), 0);
1928 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1929 return true;
1930 }
1931 }
1932 }
1933
1934 // Check for pointer simplifications.
1935 if (V->getType()->isPointerTy()) {
1936 // Alloca never returns null, malloc might.
1937 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
1938 return true;
1939
1940 // A byval, inalloca, or nonnull argument is never null.
1941 if (const Argument *A = dyn_cast<Argument>(V))
1942 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
1943 return true;
1944
1945 // A Load tagged with nonnull metadata is never null.
1946 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
1947 if (LI->getMetadata(LLVMContext::MD_nonnull))
1948 return true;
1949
1950 if (auto CS = ImmutableCallSite(V))
1951 if (CS.isReturnNonNull())
1952 return true;
1953 }
1954
1955 // The remaining tests are all recursive, so bail out if we hit the limit.
1956 if (Depth++ >= MaxDepth)
1957 return false;
1958
1959 // Check for recursive pointer simplifications.
1960 if (V->getType()->isPointerTy()) {
1961 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
1962 return true;
1963
1964 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1965 if (isGEPKnownNonNull(GEP, Depth, Q))
1966 return true;
1967 }
1968
1969 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1970
1971 // X | Y != 0 if X != 0 or Y != 0.
1972 Value *X = nullptr, *Y = nullptr;
1973 if (match(V, m_Or(m_Value(X), m_Value(Y))))
1974 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1975
1976 // ext X != 0 if X != 0.
1977 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1978 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1979
1980 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
1981 // if the lowest bit is shifted off the end.
1982 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1983 // shl nuw can't remove any non-zero bits.
1984 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1985 if (BO->hasNoUnsignedWrap())
1986 return isKnownNonZero(X, Depth, Q);
1987
1988 KnownBits Known(BitWidth);
1989 computeKnownBits(X, Known, Depth, Q);
1990 if (Known.One[0])
1991 return true;
1992 }
1993 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
1994 // defined if the sign bit is shifted off the end.
1995 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1996 // shr exact can only shift out zero bits.
1997 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1998 if (BO->isExact())
1999 return isKnownNonZero(X, Depth, Q);
2000
2001 KnownBits Known = computeKnownBits(X, Depth, Q);
2002 if (Known.isNegative())
2003 return true;
2004
2005 // If the shifter operand is a constant, and all of the bits shifted
2006 // out are known to be zero, and X is known non-zero then at least one
2007 // non-zero bit must remain.
2008 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2009 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2010 // Is there a known one in the portion not shifted out?
2011 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2012 return true;
2013 // Are all the bits to be shifted out known zero?
2014 if (Known.countMinTrailingZeros() >= ShiftVal)
2015 return isKnownNonZero(X, Depth, Q);
2016 }
2017 }
2018 // div exact can only produce a zero if the dividend is zero.
2019 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2020 return isKnownNonZero(X, Depth, Q);
2021 }
2022 // X + Y.
2023 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2024 KnownBits XKnown = computeKnownBits(X, Depth, Q);
2025 KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2026
2027 // If X and Y are both non-negative (as signed values) then their sum is not
2028 // zero unless both X and Y are zero.
2029 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2030 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2031 return true;
2032
2033 // If X and Y are both negative (as signed values) then their sum is not
2034 // zero unless both X and Y equal INT_MIN.
2035 if (XKnown.isNegative() && YKnown.isNegative()) {
2036 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2037 // The sign bit of X is set. If some other bit is set then X is not equal
2038 // to INT_MIN.
2039 if (XKnown.One.intersects(Mask))
2040 return true;
2041 // The sign bit of Y is set. If some other bit is set then Y is not equal
2042 // to INT_MIN.
2043 if (YKnown.One.intersects(Mask))
2044 return true;
2045 }
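// Worked example in i8: X + Y == 0 with X and Y both negative forces
// X == Y == -128 (INT_MIN), so a known one bit besides the sign bit in
// either operand proves the sum is non-zero.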
2046
2047 // The sum of a non-negative number and a power of two is not zero.
2048 if (XKnown.isNonNegative() &&
2049 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2050 return true;
2051 if (YKnown.isNonNegative() &&
2052 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2053 return true;
2054 }
2055 // X * Y.
2056 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2057 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2058 // If X and Y are non-zero then so is X * Y as long as the multiplication
2059 // does not overflow.
2060 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
2061 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2062 return true;
2063 }
2064 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2065 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2066 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2067 isKnownNonZero(SI->getFalseValue(), Depth, Q))
2068 return true;
2069 }
2070 // PHI
2071 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2072 // Try to detect a recurrence that monotonically increases from a
2073 // starting value, as these are common as induction variables.
2074 if (PN->getNumIncomingValues() == 2) {
2075 Value *Start = PN->getIncomingValue(0);
2076 Value *Induction = PN->getIncomingValue(1);
2077 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2078 std::swap(Start, Induction);
2079 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2080 if (!C->isZero() && !C->isNegative()) {
2081 ConstantInt *X;
2082 if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2083 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2084 !X->isNegative())
2085 return true;
2086 }
2087 }
2088 }
2089 // Check if all incoming values are non-zero constants.
2090 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2091 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2092 });
2093 if (AllNonZeroConstants)
2094 return true;
2095 }
2096
2097 KnownBits Known(BitWidth);
2098 computeKnownBits(V, Known, Depth, Q);
2099 return Known.One != 0;
2100}
2101
2102/// Return true if V1 == V2 + X, where X is known non-zero.
2103static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2104 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2105 if (!BO || BO->getOpcode() != Instruction::Add)
2106 return false;
2107 Value *Op = nullptr;
2108 if (V2 == BO->getOperand(0))
2109 Op = BO->getOperand(1);
2110 else if (V2 == BO->getOperand(1))
2111 Op = BO->getOperand(0);
2112 else
2113 return false;
2114 return isKnownNonZero(Op, 0, Q);
2115}
2116
2117/// Return true if it is known that V1 != V2.
2118static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2119 if (V1 == V2)
2120 return false;
2121 if (V1->getType() != V2->getType())
2122 // We can't look through casts yet.
2123 return false;
2124 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2125 return true;
2126
2127 if (V1->getType()->isIntOrIntVectorTy()) {
2128 // Are any known bits in V1 contradictory to known bits in V2? If V1
2129 // has a known zero where V2 has a known one, they must not be equal.
2130 KnownBits Known1 = computeKnownBits(V1, 0, Q);
2131 KnownBits Known2 = computeKnownBits(V2, 0, Q);
2132
2133 if (Known1.Zero.intersects(Known2.One) ||
2134 Known2.Zero.intersects(Known1.One))
2135 return true;
2136 }
2137 return false;
2138}
2139
2140/// Return true if 'V & Mask' is known to be zero. We use this predicate to
2141/// simplify operations downstream. Mask is known to be zero for bits that V
2142/// cannot have.
2143///
2144/// This function is defined on values with integer type, values with pointer
2145/// type, and vectors of integers. In the case
2146/// where V is a vector, the mask, known zero, and known one values are the
2147/// same width as the vector element, and the bit is set only if it is true
2148/// for all of the elements in the vector.
2149bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2150 const Query &Q) {
2151 KnownBits Known(Mask.getBitWidth());
2152 computeKnownBits(V, Known, Depth, Q);
2153 return Mask.isSubsetOf(Known.Zero);
2154}
2155
2156/// For vector constants, loop over the elements and find the constant with the
2157/// minimum number of sign bits. Return 0 if the value is not a vector constant
2158/// or if any element was not analyzed; otherwise, return the count for the
2159/// element with the minimum number of sign bits.
2160static unsigned computeNumSignBitsVectorConstant(const Value *V,
2161 unsigned TyBits) {
2162 const auto *CV = dyn_cast<Constant>(V);
2163 if (!CV || !CV->getType()->isVectorTy())
2164 return 0;
2165
2166 unsigned MinSignBits = TyBits;
2167 unsigned NumElts = CV->getType()->getVectorNumElements();
2168 for (unsigned i = 0; i != NumElts; ++i) {
2169 // If we find a non-ConstantInt, bail out.
2170 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2171 if (!Elt)
2172 return 0;
2173
2174 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2175 }
2176
2177 return MinSignBits;
2178}
2179
2180static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2181 const Query &Q);
2182
2183static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2184 const Query &Q) {
2185 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2186 assert(Result > 0 && "At least one sign bit needs to be present!");
2187 return Result;
2188}
2189
2190/// Return the number of times the sign bit of the register is replicated into
2191/// the other bits. We know that at least 1 bit is always equal to the sign bit
2192/// (itself), but other cases can give us information. For example, immediately
2193/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2194/// other, so we return 3. For vectors, return the number of sign bits for the
2195/// vector element with the minimum number of known sign bits.
2196static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2197 const Query &Q) {
2198 assert(Depth <= MaxDepth && "Limit Search Depth");
2199
2200 // We return the minimum number of sign bits that are guaranteed to be present
2201 // in V, so for undef we have to conservatively return 1. We don't have the
2202 // same behavior for poison though -- that's a FIXME today.
2203
2204 Type *ScalarTy = V->getType()->getScalarType();
2205 unsigned TyBits = ScalarTy->isPointerTy() ?
2206 Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2207 Q.DL.getTypeSizeInBits(ScalarTy);
2208
2209 unsigned Tmp, Tmp2;
2210 unsigned FirstAnswer = 1;
2211
2212 // Note that ConstantInt is handled by the general computeKnownBits case
2213 // below.
2214
2215 if (Depth == MaxDepth)
2216 return 1; // Limit search depth.
2217
2218 const Operator *U = dyn_cast<Operator>(V);
2219 switch (Operator::getOpcode(V)) {
2220 default: break;
2221 case Instruction::SExt:
2222 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2223 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2224
2225 case Instruction::SDiv: {
2226 const APInt *Denominator;
2227 // sdiv X, C -> adds log(C) sign bits.
2228 if (match(U->getOperand(1), m_APInt(Denominator))) {
2229
2230 // Ignore non-positive denominator.
2231 if (!Denominator->isStrictlyPositive())
2232 break;
2233
2234 // Calculate the incoming numerator bits.
2235 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2236
2237 // Add floor(log(C)) bits to the numerator bits.
2238 return std::min(TyBits, NumBits + Denominator->logBase2());
2239 }
2240 break;
2241 }
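// Worked example: sdiv i32 %x, 16 adds logBase2(16) == 4 sign bits, so a
// numerator with one known sign bit yields at least five in the quotient.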
2242
2243 case Instruction::SRem: {
2244 const APInt *Denominator;
2245 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2246 // positive constant. This let us put a lower bound on the number of sign
2247 // bits.
2248 if (match(U->getOperand(1), m_APInt(Denominator))) {
2249
2250 // Ignore non-positive denominator.
2251 if (!Denominator->isStrictlyPositive())
2252 break;
2253
2254 // Calculate the incoming numerator bits. SRem by a positive constant
2255 // can't lower the number of sign bits.
2256 unsigned NumrBits =
2257 ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2258
2259 // Calculate the leading sign bit constraints by examining the
2260 // denominator. Given that the denominator is positive, there are two
2261 // cases:
2262 //
2263 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2264 // (1 << ceilLogBase2(C)).
2265 //
2266 // 2. the numerator is negative. Then the result range is (-C,0] and
2267 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2268 //
2269 // Thus a lower bound on the number of sign bits is `TyBits -
2270 // ceilLogBase2(C)`.
2271
2272 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2273 return std::max(NumrBits, ResBits);
2274 }
2275 break;
2276 }
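// Worked example: srem i32 %x, 10 yields a result in (-10, 10), giving at
// least 32 - ceilLogBase2(10) == 32 - 4 == 28 sign bits.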
2277
2278 case Instruction::AShr: {
2279 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2280 // ashr X, C -> adds C sign bits. Vectors too.
2281 const APInt *ShAmt;
2282 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2283 if (ShAmt->uge(TyBits))
2284 break; // Bad shift.
2285 unsigned ShAmtLimited = ShAmt->getZExtValue();
2286 Tmp += ShAmtLimited;
2287 if (Tmp > TyBits) Tmp = TyBits;
2288 }
2289 return Tmp;
2290 }
2291 case Instruction::Shl: {
2292 const APInt *ShAmt;
2293 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2294 // shl destroys sign bits.
2295 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2296 if (ShAmt->uge(TyBits) || // Bad shift.
2297 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2298 Tmp2 = ShAmt->getZExtValue();
2299 return Tmp - Tmp2;
2300 }
2301 break;
2302 }
2303 case Instruction::And:
2304 case Instruction::Or:
2305 case Instruction::Xor: // NOT is handled here.
2306 // Logical binary ops preserve the number of sign bits at the worst.
2307 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2308 if (Tmp != 1) {
2309 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2310 FirstAnswer = std::min(Tmp, Tmp2);
2311 // We computed what we know about the sign bits as our first
2312 // answer. Now proceed to the generic code that uses
2313 // computeKnownBits, and pick whichever answer is better.
2314 }
2315 break;
2316
2317 case Instruction::Select:
2318 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2319 if (Tmp == 1) return 1; // Early out.
2320 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2321 return std::min(Tmp, Tmp2);
2322
2323 case Instruction::Add:
2324 // Add can have at most one carry bit. Thus we know that the output
2325 // is, at worst, one more bit than the inputs.
2326 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2327 if (Tmp == 1) return 1; // Early out.
2328
2329 // Special case decrementing a value (ADD X, -1):
2330 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2331 if (CRHS->isAllOnesValue()) {
2332 KnownBits Known(TyBits);
2333 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2334
2335 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2336 // sign bits set.
2337 if ((Known.Zero | 1).isAllOnesValue())
2338 return TyBits;
2339
2340 // If we are subtracting one from a positive number, there is no carry
2341 // out of the result.
2342 if (Known.isNonNegative())
2343 return Tmp;
2344 }
2345
2346 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2347 if (Tmp2 == 1) return 1;
2348 return std::min(Tmp, Tmp2)-1;
2349
2350 case Instruction::Sub:
2351 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2352 if (Tmp2 == 1) return 1;
2353
2354 // Handle NEG.
2355 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2356 if (CLHS->isNullValue()) {
2357 KnownBits Known(TyBits);
2358 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2359 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2360 // sign bits set.
2361 if ((Known.Zero | 1).isAllOnesValue())
2362 return TyBits;
2363
2364 // If the input is known to be positive (the sign bit is known clear),
2365 // the output of the NEG has the same number of sign bits as the input.
2366 if (Known.isNonNegative())
2367 return Tmp2;
2368
2369 // Otherwise, we treat this like a SUB.
2370 }
2371
2372 // Sub can have at most one carry bit. Thus we know that the output
2373 // is, at worst, one more bit than the inputs.
2374 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2375 if (Tmp == 1) return 1; // Early out.
2376 return std::min(Tmp, Tmp2)-1;
2377
2378 case Instruction::Mul: {
2379 // The output of the Mul can be at most twice the valid bits in the inputs.
2380 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2381 if (SignBitsOp0 == 1) return 1; // Early out.
2382 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2383 if (SignBitsOp1 == 1) return 1;
2384 unsigned OutValidBits =
2385 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2386 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2387 }
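// Worked example: two i32 operands with 20 sign bits each have 13 valid
// bits apiece, so OutValidBits == 26 and the product keeps at least
// 32 - 26 + 1 == 7 sign bits.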
2388
2389 case Instruction::PHI: {
2390 const PHINode *PN = cast<PHINode>(U);
2391 unsigned NumIncomingValues = PN->getNumIncomingValues();
2392 // Don't analyze large in-degree PHIs.
2393 if (NumIncomingValues > 4) break;
2394 // Unreachable blocks may have zero-operand PHI nodes.
2395 if (NumIncomingValues == 0) break;
2396
2397 // Take the minimum of all incoming values. This can't infinitely loop
2398 // because of our depth threshold.
2399 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2400 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2401 if (Tmp == 1) return Tmp;
2402 Tmp = std::min(
2403 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2404 }
2405 return Tmp;
2406 }
2407
2408 case Instruction::Trunc:
2409 // FIXME: it's tricky to do anything useful for this, but it is an important
2410 // case for targets like X86.
2411 break;
2412
2413 case Instruction::ExtractElement:
2414 // Look through extract element. At the moment we keep this simple and skip
2415 // tracking the specific element. But at least we might find information
2416 // valid for all elements of the vector (for example if the vector is sign
2417 // extended, shifted, etc.).
2418 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2419 }
2420
2421 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2422 // use this information.
2423
2424 // If we can examine all elements of a vector constant successfully, we're
2425 // done (we can't do any better than that). If not, keep trying.
2426 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2427 return VecSignBits;
2428
2429 KnownBits Known(TyBits);
2430 computeKnownBits(V, Known, Depth, Q);
2431
2432 // If we know that the sign bit is either zero or one, determine the number of
2433 // identical bits in the top of the input value.
2434 return std::max(FirstAnswer, Known.countMinSignBits());
2435}
2436
2437/// This function computes the integer multiple of Base that equals V.
2438/// If successful, it returns true and stores the multiple in
2439/// Multiple. If unsuccessful, it returns false. It looks
2440/// through SExt instructions only if LookThroughSExt is true.
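/// Worked example (hypothetical %x): for V = shl i32 %x, 3 and Base == 8,
/// the shift is rewritten below as a multiply by 8, and Multiple is set
/// to %x.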
2441bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2442 bool LookThroughSExt, unsigned Depth) {
2443 const unsigned MaxDepth = 6;
2444
2445 assert(V && "No Value?");
2446 assert(Depth <= MaxDepth && "Limit Search Depth");
2447 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2448
2449 Type *T = V->getType();
2450
2451 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2452
2453 if (Base == 0)
2454 return false;
2455
2456 if (Base == 1) {
2457 Multiple = V;
2458 return true;
2459 }
2460
2461 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2462 Constant *BaseVal = ConstantInt::get(T, Base);
2463 if (CO && CO == BaseVal) {
2464 // Multiple is 1.
2465 Multiple = ConstantInt::get(T, 1);
2466 return true;
2467 }
2468
2469 if (CI && CI->getZExtValue() % Base == 0) {
2470 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2471 return true;
2472 }
2473
2474 if (Depth == MaxDepth) return false; // Limit search depth.
2475
2476 Operator *I = dyn_cast<Operator>(V);
2477 if (!I) return false;
2478
2479 switch (I->getOpcode()) {
2480 default: break;
2481 case Instruction::SExt:
2482 if (!LookThroughSExt) return false;
2483 // otherwise fall through to ZExt
2484 LLVM_FALLTHROUGH;
2485 case Instruction::ZExt:
2486 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2487 LookThroughSExt, Depth+1);
2488 case Instruction::Shl:
2489 case Instruction::Mul: {
2490 Value *Op0 = I->getOperand(0);
2491 Value *Op1 = I->getOperand(1);
2492
2493 if (I->getOpcode() == Instruction::Shl) {
2494 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2495 if (!Op1CI) return false;
2496 // Turn Op0 << Op1 into Op0 * 2^Op1
2497 APInt Op1Int = Op1CI->getValue();
2498 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2499 APInt API(Op1Int.getBitWidth(), 0);
2500 API.setBit(BitToSet);
2501 Op1 = ConstantInt::get(V->getContext(), API);
2502 }
2503
2504 Value *Mul0 = nullptr;
2505 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2506 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2507 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2508 if (Op1C->getType()->getPrimitiveSizeInBits() <
2509 MulC->getType()->getPrimitiveSizeInBits())
2510 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2511 if (Op1C->getType()->getPrimitiveSizeInBits() >
2512 MulC->getType()->getPrimitiveSizeInBits())
2513 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2514
2515 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2516 Multiple = ConstantExpr::getMul(MulC, Op1C);
2517 return true;
2518 }
2519
2520 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2521 if (Mul0CI->getValue() == 1) {
2522 // V == Base * Op1, so return Op1
2523 Multiple = Op1;
2524 return true;
2525 }
2526 }
2527
2528 Value *Mul1 = nullptr;
2529 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2530 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2531 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2532 if (Op0C->getType()->getPrimitiveSizeInBits() <
2533 MulC->getType()->getPrimitiveSizeInBits())
2534 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2535 if (Op0C->getType()->getPrimitiveSizeInBits() >
2536 MulC->getType()->getPrimitiveSizeInBits())
2537 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2538
2539 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2540 Multiple = ConstantExpr::getMul(MulC, Op0C);
2541 return true;
2542 }
2543
2544 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2545 if (Mul1CI->getValue() == 1) {
2546 // V == Base * Op0, so return Op0
2547 Multiple = Op0;
2548 return true;
2549 }
2550 }
2551 }
2552 }
2553
2554 // We could not determine if V is a multiple of Base.
2555 return false;
2556}
2557
2558Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2559 const TargetLibraryInfo *TLI) {
2560 const Function *F = ICS.getCalledFunction();
2561 if (!F)
2562 return Intrinsic::not_intrinsic;
2563
2564 if (F->isIntrinsic())
2565 return F->getIntrinsicID();
2566
2567 if (!TLI)
2568 return Intrinsic::not_intrinsic;
2569
2570 LibFunc Func;
2571 // We're going to make assumptions about the semantics of the functions, so
2572 // check that the target knows that it's available in this environment and
2573 // that it does not have local linkage.
2574 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2575 return Intrinsic::not_intrinsic;
2576
2577 if (!ICS.onlyReadsMemory())
2578 return Intrinsic::not_intrinsic;
2579
2580 // Otherwise check if we have a call to a function that can be turned into a
2581 // vector intrinsic.
2582 switch (Func) {
2583 default:
2584 break;
2585 case LibFunc_sin:
2586 case LibFunc_sinf:
2587 case LibFunc_sinl:
2588 return Intrinsic::sin;
2589 case LibFunc_cos:
2590 case LibFunc_cosf:
2591 case LibFunc_cosl:
2592 return Intrinsic::cos;
2593 case LibFunc_exp:
2594 case LibFunc_expf:
2595 case LibFunc_expl:
2596 return Intrinsic::exp;
2597 case LibFunc_exp2:
2598 case LibFunc_exp2f:
2599 case LibFunc_exp2l:
2600 return Intrinsic::exp2;
2601 case LibFunc_log:
2602 case LibFunc_logf:
2603 case LibFunc_logl:
2604 return Intrinsic::log;
2605 case LibFunc_log10:
2606 case LibFunc_log10f:
2607 case LibFunc_log10l:
2608 return Intrinsic::log10;
2609 case LibFunc_log2:
2610 case LibFunc_log2f:
2611 case LibFunc_log2l:
2612 return Intrinsic::log2;
2613 case LibFunc_fabs:
2614 case LibFunc_fabsf:
2615 case LibFunc_fabsl:
2616 return Intrinsic::fabs;
2617 case LibFunc_fmin:
2618 case LibFunc_fminf:
2619 case LibFunc_fminl:
2620 return Intrinsic::minnum;
2621 case LibFunc_fmax:
2622 case LibFunc_fmaxf:
2623 case LibFunc_fmaxl:
2624 return Intrinsic::maxnum;
2625 case LibFunc_copysign:
2626 case LibFunc_copysignf:
2627 case LibFunc_copysignl:
2628 return Intrinsic::copysign;
2629 case LibFunc_floor:
2630 case LibFunc_floorf:
2631 case LibFunc_floorl:
2632 return Intrinsic::floor;
2633 case LibFunc_ceil:
2634 case LibFunc_ceilf:
2635 case LibFunc_ceill:
2636 return Intrinsic::ceil;
2637 case LibFunc_trunc:
2638 case LibFunc_truncf:
2639 case LibFunc_truncl:
2640 return Intrinsic::trunc;
2641 case LibFunc_rint:
2642 case LibFunc_rintf:
2643 case LibFunc_rintl:
2644 return Intrinsic::rint;
2645 case LibFunc_nearbyint:
2646 case LibFunc_nearbyintf:
2647 case LibFunc_nearbyintl:
2648 return Intrinsic::nearbyint;
2649 case LibFunc_round:
2650 case LibFunc_roundf:
2651 case LibFunc_roundl:
2652 return Intrinsic::round;
2653 case LibFunc_pow:
2654 case LibFunc_powf:
2655 case LibFunc_powl:
2656 return Intrinsic::pow;
2657 case LibFunc_sqrt:
2658 case LibFunc_sqrtf:
2659 case LibFunc_sqrtl:
2660 return Intrinsic::sqrt;
2661 }
2662
2663 return Intrinsic::not_intrinsic;
2664}
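
A minimal sketch of how a caller might use the mapping above, assuming a CallInst and TargetLibraryInfo are already in hand; the helper name isRecognizedSinCall is illustrative, not part of this file.

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// True only when CI is a readonly call to sin/sinf/sinl that the target
// knows about, i.e. exactly the cases the switch above maps to
// Intrinsic::sin.
static bool isRecognizedSinCall(const CallInst *CI,
                                const TargetLibraryInfo *TLI) {
  return getIntrinsicForCallSite(ImmutableCallSite(CI), TLI) ==
         Intrinsic::sin;
}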
2665
2666/// Return true if we can prove that the specified FP value is never equal to
2667/// -0.0.
2668///
2669/// NOTE: this function will need to be revisited when we support non-default
2670/// rounding modes!
2671bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2672 unsigned Depth) {
2673 if (auto *CFP = dyn_cast<ConstantFP>(V))
2674 return !CFP->getValueAPF().isNegZero();
2675
2676 // Limit search depth.
2677 if (Depth == MaxDepth)
2678 return false;
2679
2680 auto *Op = dyn_cast<Operator>(V);
2681 if (!Op)
2682 return false;
2683
2684 // Check if the nsz fast-math flag is set.
2685 if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2686 if (FPO->hasNoSignedZeros())
2687 return true;
2688
2689 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2690 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2691 return true;
2692
2693 // sitofp and uitofp turn into +0.0 for zero.
2694 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2695 return true;
2696
2697 if (auto *Call = dyn_cast<CallInst>(Op)) {
2698 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2699 switch (IID) {
2700 default:
2701 break;
2702 // sqrt(-0.0) = -0.0, no other negative results are possible.
2703 case Intrinsic::sqrt:
2704 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2705 // fabs(x) != -0.0
2706 case Intrinsic::fabs:
2707 return true;
2708 }
2709 }
2710
2711 return false;
2712}
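
A minimal sketch of the (fadd x, +0.0) rule above, assuming a non-constant FP operand X and an IRBuilder positioned inside a function; the helper name is illustrative.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static bool demoNoNegZero(Value *X, IRBuilder<> &B) {
  // (fadd x, +0.0) can only produce +0.0 when the result is zero, so the
  // m_FAdd/m_PosZeroFP match above returns true for it.
  Value *Add = B.CreateFAdd(X, ConstantFP::get(X->getType(), 0.0));
  return CannotBeNegativeZero(Add, /*TLI=*/nullptr); // true
}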
2713
2714/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2715/// standard ordered compare: e.g. treat -0.0 as less than 0.0 because of its
2716/// sign bit, even though the two values compare equal.
2717static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2718 const TargetLibraryInfo *TLI,
2719 bool SignBitOnly,
2720 unsigned Depth) {
2721 // TODO: This function does not do the right thing when SignBitOnly is true
2722 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2723 // which flips the sign bits of NaNs. See
2724 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2725
2726 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2727 return !CFP->getValueAPF().isNegative() ||
2728 (!SignBitOnly && CFP->getValueAPF().isZero());
2729 }
2730
2731 // Handle vector of constants.
2732 if (auto *CV = dyn_cast<Constant>(V)) {
2733 if (CV->getType()->isVectorTy()) {
2734 unsigned NumElts = CV->getType()->getVectorNumElements();
2735 for (unsigned i = 0; i != NumElts; ++i) {
2736 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2737 if (!CFP)
2738 return false;
2739 if (CFP->getValueAPF().isNegative() &&
2740 (SignBitOnly || !CFP->getValueAPF().isZero()))
2741 return false;
2742 }
2743
2744 // All non-negative ConstantFPs.
2745 return true;
2746 }
2747 }
2748
2749 if (Depth == MaxDepth)
2750 return false; // Limit search depth.
2751
2752 const Operator *I = dyn_cast<Operator>(V);
2753 if (!I)
2754 return false;
2755
2756 switch (I->getOpcode()) {
2757 default:
2758 break;
2759 // Unsigned integers are always nonnegative.
2760 case Instruction::UIToFP:
2761 return true;
2762 case Instruction::FMul:
2763 // x*x is always non-negative or a NaN.
2764 if (I->getOperand(0) == I->getOperand(1) &&
2765 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2766 return true;
2767
2768    LLVM_FALLTHROUGH;
2769 case Instruction::FAdd:
2770 case Instruction::FDiv:
2771 case Instruction::FRem:
2772 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2773 Depth + 1) &&
2774 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2775 Depth + 1);
2776 case Instruction::Select:
2777 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2778 Depth + 1) &&
2779 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2780 Depth + 1);
2781 case Instruction::FPExt:
2782 case Instruction::FPTrunc:
2783 // Widening/narrowing never change sign.
2784 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2785 Depth + 1);
2786 case Instruction::ExtractElement:
2787 // Look through extract element. At the moment we keep this simple and skip
2788 // tracking the specific element. But at least we might find information
2789 // valid for all elements of the vector.
2790 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2791 Depth + 1);
2792 case Instruction::Call:
2793 const auto *CI = cast<CallInst>(I);
2794 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2795 switch (IID) {
2796 default:
2797 break;
2798 case Intrinsic::maxnum:
2799 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2800 Depth + 1) ||
2801 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2802 Depth + 1);
2803 case Intrinsic::minnum:
2804 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2805 Depth + 1) &&
2806 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2807 Depth + 1);
2808 case Intrinsic::exp:
2809 case Intrinsic::exp2:
2810 case Intrinsic::fabs:
2811 return true;
2812
2813 case Intrinsic::sqrt:
2814 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
2815 if (!SignBitOnly)
2816 return true;
2817 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2818 CannotBeNegativeZero(CI->getOperand(0), TLI));
2819
2820 case Intrinsic::powi:
2821 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2822 // powi(x,n) is non-negative if n is even.
2823 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2824 return true;
2825 }
2826 // TODO: This is not correct. Given that exp is an integer, here are the
2827 // ways that pow can return a negative value:
2828 //
2829 // pow(x, exp) --> negative if exp is odd and x is negative.
2830 // pow(-0, exp) --> -inf if exp is negative odd.
2831 // pow(-0, exp) --> -0 if exp is positive odd.
2832 // pow(-inf, exp) --> -0 if exp is negative odd.
2833 // pow(-inf, exp) --> -inf if exp is positive odd.
2834 //
2835 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2836 // but we must return false if x == -0. Unfortunately we do not currently
2837 // have a way of expressing this constraint. See details in
2838 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2839 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2840 Depth + 1);
2841
2842 case Intrinsic::fma:
2843 case Intrinsic::fmuladd:
2844 // x*x+y is non-negative if y is non-negative.
2845 return I->getOperand(0) == I->getOperand(1) &&
2846 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2847 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2848 Depth + 1);
2849 }
2850 break;
2851 }
2852 return false;
2853}
2854
2855bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2856 const TargetLibraryInfo *TLI) {
2857 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2858}
2859
2860bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2861 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2862}
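
A hedged illustration of what separates the two wrappers, using the constant -0.0 (the setup names are illustrative): -0.0 is not ordered less than zero, but its sign bit is set.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

static void demoOrderedVsSignBit(LLVMContext &Ctx) {
  Constant *NegZero = ConstantFP::get(Type::getDoubleTy(Ctx), -0.0);
  // -0.0 compares equal to 0.0, so it cannot be ordered less than zero...
  bool NotLT = CannotBeOrderedLessThanZero(NegZero, /*TLI=*/nullptr); // true
  // ...but its sign bit is set, so SignBitMustBeZero rejects it.
  bool SignClear = SignBitMustBeZero(NegZero, /*TLI=*/nullptr);       // false
  (void)NotLT; (void)SignClear;
}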
2863
2864bool llvm::isKnownNeverNaN(const Value *V) {
2865  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
2866
2867 // If we're told that NaNs won't happen, assume they won't.
2868 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
2869 if (FPMathOp->hasNoNaNs())
2870 return true;
2871
2872 // TODO: Handle instructions and potentially recurse like other 'isKnown'
2873 // functions. For example, the result of sitofp is never NaN.
2874
2875 // Handle scalar constants.
2876 if (auto *CFP = dyn_cast<ConstantFP>(V))
2877 return !CFP->isNaN();
2878
2879 // Bail out for constant expressions, but try to handle vector constants.
2880 if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2881 return false;
2882
2883 // For vectors, verify that each element is not NaN.
2884 unsigned NumElts = V->getType()->getVectorNumElements();
2885 for (unsigned i = 0; i != NumElts; ++i) {
2886 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2887 if (!Elt)
2888 return false;
2889 if (isa<UndefValue>(Elt))
2890 continue;
2891 auto *CElt = dyn_cast<ConstantFP>(Elt);
2892 if (!CElt || CElt->isNaN())
2893 return false;
2894 }
2895 // All elements were confirmed not-NaN or undefined.
2896 return true;
2897}
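
A small sketch of the vector path just above, assuming only an LLVMContext: an undef lane is skipped, and a non-NaN constant lane passes.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

static bool demoVectorNeverNaN(LLVMContext &Ctx) {
  Type *FloatTy = Type::getFloatTy(Ctx);
  Constant *Elts[] = {ConstantFP::get(FloatTy, 1.0),
                      UndefValue::get(FloatTy)};
  // <2 x float> <float 1.0, float undef>: 1.0 is not NaN and the undef
  // lane is explicitly allowed by the element loop above.
  return isKnownNeverNaN(ConstantVector::get(Elts)); // true
}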
2898
2899/// If the specified value can be set by repeating the same byte in memory,
2900/// return the i8 value that it is represented with. This is
2901/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2902/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2903/// byte store (e.g. i16 0x1234), return null.
2904Value *llvm::isBytewiseValue(Value *V) {
2905 // All byte-wide stores are splatable, even of arbitrary variables.
2906 if (V->getType()->isIntegerTy(8)) return V;
2907
2908 // Handle 'null' ConstantArrayZero etc.
2909 if (Constant *C = dyn_cast<Constant>(V))
2910 if (C->isNullValue())
2911 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2912
2913 // Constant float and double values can be handled as integer values if the
2914 // corresponding integer value is "byteable". An important case is 0.0.
2915 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2916 if (CFP->getType()->isFloatTy())
2917 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2918 if (CFP->getType()->isDoubleTy())
2919 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2920 // Don't handle long double formats, which have strange constraints.
2921 }
2922
2923 // We can handle constant integers that are multiple of 8 bits.
2924 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2925 if (CI->getBitWidth() % 8 == 0) {
2926      assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2927
2928 if (!CI->getValue().isSplat(8))
2929 return nullptr;
2930 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2931 }
2932 }
2933
2934 // A ConstantDataArray/Vector is splatable if all its members are equal and
2935 // also splatable.
2936 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2937 Value *Elt = CA->getElementAsConstant(0);
2938 Value *Val = isBytewiseValue(Elt);
2939 if (!Val)
2940 return nullptr;
2941
2942 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2943 if (CA->getElementAsConstant(I) != Elt)
2944 return nullptr;
2945
2946 return Val;
2947 }
2948
2949 // Conceptually, we could handle things like:
2950 // %a = zext i8 %X to i16
2951 // %b = shl i16 %a, 8
2952 // %c = or i16 %a, %b
2953 // but until there is an example that actually needs this, it doesn't seem
2954 // worth worrying about.
2955 return nullptr;
2956}
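
Two quick constant probes of the splat rule above, assuming only an LLVMContext (names illustrative): i16 0xF0F0 repeats the byte 0xF0, while i16 0x1234 does not.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

static void demoBytewise(LLVMContext &Ctx) {
  Type *I16 = Type::getInt16Ty(Ctx);
  Value *Splat = isBytewiseValue(ConstantInt::get(I16, 0xF0F0)); // i8 0xF0
  Value *None  = isBytewiseValue(ConstantInt::get(I16, 0x1234)); // nullptr
  (void)Splat; (void)None;
}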
2957
2958// This is the recursive version of BuildSubAggregate. It takes a few
2959// arguments. Idxs is the index within the nested struct From that we are
2960// looking at now (which is of type IndexedType). IdxSkip is the number of
2961// indices from Idxs that should be left out when inserting into the resulting
2962// struct. To is the result struct built so far, which new insertvalue
2963// instructions build upon.
2964static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2965 SmallVectorImpl<unsigned> &Idxs,
2966 unsigned IdxSkip,
2967 Instruction *InsertBefore) {
2968 StructType *STy = dyn_cast<StructType>(IndexedType);
2969 if (STy) {
2970 // Save the original To argument so we can modify it
2971 Value *OrigTo = To;
2972 // General case, the type indexed by Idxs is a struct
2973 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2974 // Process each struct element recursively
2975 Idxs.push_back(i);
2976 Value *PrevTo = To;
2977 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2978 InsertBefore);
2979 Idxs.pop_back();
2980 if (!To) {
2981 // Couldn't find any inserted value for this index? Cleanup
2982 while (PrevTo != OrigTo) {
2983 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2984 PrevTo = Del->getAggregateOperand();
2985 Del->eraseFromParent();
2986 }
2987 // Stop processing elements
2988 break;
2989 }
2990 }
2991 // If we successfully found a value for each of our subaggregates
2992 if (To)
2993 return To;
2994 }
2995 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2996 // the struct's elements had a value that was inserted directly. In the latter
2997 // case, perhaps we can't determine each of the subelements individually, but
2998 // we might be able to find the complete struct somewhere.
2999
3000 // Find the value that is at that particular spot
3001 Value *V = FindInsertedValue(From, Idxs);
3002
3003 if (!V)
3004 return nullptr;
3005
3006 // Insert the value in the new (sub) aggregate
3007 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3008 "tmp", InsertBefore);
3009}
3010
3011// This helper takes a nested struct and extracts a part of it (which is again a
3012// struct) into a new value. For example, given the struct:
3013// { a, { b, { c, d }, e } }
3014// and the indices "1, 1" this returns
3015// { c, d }.
3016//
3017// It does this by inserting an insertvalue for each element in the resulting
3018// struct, as opposed to just inserting a single struct. This will only work if
3019// each of the elements of the substruct is known (i.e., inserted into From by
3020// an insertvalue instruction somewhere).
3021//
3022// All inserted insertvalue instructions are inserted before InsertBefore
3023static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3024 Instruction *InsertBefore) {
3025  assert(InsertBefore && "Must have someplace to insert!");
3026 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3027 idx_range);
3028 Value *To = UndefValue::get(IndexedType);
3029 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3030 unsigned IdxSkip = Idxs.size();
3031
3032 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3033}
3034
3035/// Given an aggregate and a sequence of indices, see if the scalar value
3036/// indexed is already around as a register, for example if it was inserted
3037/// directly into the aggregate.
3038///
3039/// If InsertBefore is not null, this function will duplicate (modified)
3040/// insertvalues when a part of a nested struct is extracted.
3041Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3042 Instruction *InsertBefore) {
3043 // Nothing to index? Just return V then (this is useful at the end of our
3044 // recursion).
3045 if (idx_range.empty())
3046 return V;
3047 // We have indices, so V should have an indexable type.
3048  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3049         "Not looking at a struct or array?");
3050  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3051         "Invalid indices for type?");
3052
3053 if (Constant *C = dyn_cast<Constant>(V)) {
3054 C = C->getAggregateElement(idx_range[0]);
3055 if (!C) return nullptr;
3056 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3057 }
3058
3059 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3060 // Loop the indices for the insertvalue instruction in parallel with the
3061 // requested indices
3062 const unsigned *req_idx = idx_range.begin();
3063 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3064 i != e; ++i, ++req_idx) {
3065 if (req_idx == idx_range.end()) {
3066 // We can't handle this without inserting insertvalues
3067 if (!InsertBefore)
3068 return nullptr;
3069
3070 // The requested index identifies a part of a nested aggregate. Handle
3071 // this specially. For example,
3072 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3073 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3074 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3075 // This can be changed into
3076 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3077 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3078 // which allows the unused 0,0 element from the nested struct to be
3079 // removed.
3080 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3081 InsertBefore);
3082 }
3083
3084      // This insertvalue inserts something other than what we are looking
3085      // for. In that case, see if the (aggregate) value it inserts into
3086      // contains the value we are looking for.
3087 if (*req_idx != *i)
3088 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3089 InsertBefore);
3090 }
3091 // If we end up here, the indices of the insertvalue match with those
3092 // requested (though possibly only partially). Now we recursively look at
3093 // the inserted value, passing any remaining indices.
3094 return FindInsertedValue(I->getInsertedValueOperand(),
3095 makeArrayRef(req_idx, idx_range.end()),
3096 InsertBefore);
3097 }
3098
3099 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3100 // If we're extracting a value from an aggregate that was extracted from
3101 // something else, we can extract from that something else directly instead.
3102 // However, we will need to chain I's indices with the requested indices.
3103
3104 // Calculate the number of indices required
3105 unsigned size = I->getNumIndices() + idx_range.size();
3106 // Allocate some space to put the new indices in
3107 SmallVector<unsigned, 5> Idxs;
3108 Idxs.reserve(size);
3109 // Add indices from the extract value instruction
3110 Idxs.append(I->idx_begin(), I->idx_end());
3111
3112 // Add requested indices
3113 Idxs.append(idx_range.begin(), idx_range.end());
3114
3115    assert(Idxs.size() == size
3116           && "Number of indices added not correct?");
3117
3118 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3119 }
3120  // Otherwise, we don't know (such as when extracting from a function return
3121  // value or a load instruction).
3122 return nullptr;
3123}
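
A minimal round trip through FindInsertedValue, assuming STy is a struct type whose element 1 has Scalar's type and that the builder is positioned in a function (all names illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static Value *demoFindInserted(Value *Scalar, StructType *STy,
                               IRBuilder<> &B) {
  // %agg = insertvalue %STy undef, %Scalar, 1
  Value *Agg = B.CreateInsertValue(UndefValue::get(STy), Scalar, {1});
  // Walks the insertvalue chain back to %Scalar; no extractvalue needed.
  return FindInsertedValue(Agg, {1}); // == Scalar
}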
3124
3125/// Analyze the specified pointer to see if it can be expressed as a base
3126/// pointer plus a constant offset. Return the base and offset to the caller.
3127Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3128 const DataLayout &DL) {
3129 unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3130 APInt ByteOffset(BitWidth, 0);
3131
3132 // We walk up the defs but use a visited set to handle unreachable code. In
3133 // that case, we stop after accumulating the cycle once (not that it
3134 // matters).
3135 SmallPtrSet<Value *, 16> Visited;
3136 while (Visited.insert(Ptr).second) {
3137 if (Ptr->getType()->isVectorTy())
3138 break;
3139
3140 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3141 // If one of the values we have visited is an addrspacecast, then
3142 // the pointer type of this GEP may be different from the type
3143 // of the Ptr parameter which was passed to this function. This
3144 // means when we construct GEPOffset, we need to use the size
3145 // of GEP's pointer type rather than the size of the original
3146 // pointer type.
3147 APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3148 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3149 break;
3150
3151 ByteOffset += GEPOffset.getSExtValue();
3152
3153 Ptr = GEP->getPointerOperand();
3154 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3155 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3156 Ptr = cast<Operator>(Ptr)->getOperand(0);
3157 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3158 if (GA->isInterposable())
3159 break;
3160 Ptr = GA->getAliasee();
3161 } else {
3162 break;
3163 }
3164 }
3165 Offset = ByteOffset.getSExtValue();
3166 return Ptr;
3167}
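
A sketch of peeling a constant GEP, assuming BasePtr has type i8* so the GEP index is a byte offset (setup names illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static Value *demoBasePlusOffset(Value *BasePtr /* assumed i8* */,
                                 IRBuilder<> &B, const DataLayout &DL) {
  // %p = getelementptr i8, i8* %BasePtr, i64 16
  Value *P = B.CreateConstGEP1_64(BasePtr, 16);
  int64_t Offset = 0;
  Value *Base = GetPointerBaseWithConstantOffset(P, Offset, DL);
  // Base == BasePtr (unless BasePtr itself peels further) and Offset == 16.
  return Base;
}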
3168
3169bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3170 unsigned CharSize) {
3171 // Make sure the GEP has exactly three arguments.
3172 if (GEP->getNumOperands() != 3)
3173 return false;
3174
3175  // Make sure the index-ee is a pointer to an array of \p CharSize
3176  // integers.
3177 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3178 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3179 return false;
3180
3181 // Check to make sure that the first operand of the GEP is an integer and
3182 // has value 0 so that we are sure we're indexing into the initializer.
3183 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3184 if (!FirstIdx || !FirstIdx->isZero())
3185 return false;
3186
3187 return true;
3188}
3189
3190bool llvm::getConstantDataArrayInfo(const Value *V,
3191 ConstantDataArraySlice &Slice,
3192 unsigned ElementSize, uint64_t Offset) {
3193  assert(V);
3194
3195 // Look through bitcast instructions and geps.
3196 V = V->stripPointerCasts();
3197
3198 // If the value is a GEP instruction or constant expression, treat it as an
3199 // offset.
3200 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3201 // The GEP operator should be based on a pointer to string constant, and is
3202 // indexing into the string constant.
3203 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3204 return false;
3205
3206 // If the second index isn't a ConstantInt, then this is a variable index
3207 // into the array. If this occurs, we can't say anything meaningful about
3208 // the string.
3209 uint64_t StartIdx = 0;
3210 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3211 StartIdx = CI->getZExtValue();
3212 else
3213 return false;
3214 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3215 StartIdx + Offset);
3216 }
3217
3218 // The GEP instruction, constant or instruction, must reference a global
3219 // variable that is a constant and is initialized. The referenced constant
3220 // initializer is the array that we'll use for optimization.
3221 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3222 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3223 return false;
3224
3225 const ConstantDataArray *Array;
3226 ArrayType *ArrayTy;
3227 if (GV->getInitializer()->isNullValue()) {
3228 Type *GVTy = GV->getValueType();
3229    if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
3230 // A zeroinitializer for the array; there is no ConstantDataArray.
3231 Array = nullptr;
3232 } else {
3233 const DataLayout &DL = GV->getParent()->getDataLayout();
3234 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3235 uint64_t Length = SizeInBytes / (ElementSize / 8);
3236 if (Length <= Offset)
3237 return false;
3238
3239 Slice.Array = nullptr;
3240 Slice.Offset = 0;
3241 Slice.Length = Length - Offset;
3242 return true;
3243 }
3244 } else {
3245 // This must be a ConstantDataArray.
3246 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3247 if (!Array)
3248 return false;
3249 ArrayTy = Array->getType();
3250 }
3251 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3252 return false;
3253
3254 uint64_t NumElts = ArrayTy->getArrayNumElements();
3255 if (Offset > NumElts)
3256 return false;
3257
3258 Slice.Array = Array;
3259 Slice.Offset = Offset;
3260 Slice.Length = NumElts - Offset;
3261 return true;
3262}
3263
3264/// This function extracts the null-terminated C string pointed to by V.
3265/// If successful, it returns true and stores the string in Str.
3266/// If unsuccessful, it returns false.
3267bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3268 uint64_t Offset, bool TrimAtNul) {
3269 ConstantDataArraySlice Slice;
3270 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3271 return false;
3272
3273 if (Slice.Array == nullptr) {
3274 if (TrimAtNul) {
3275 Str = StringRef();
3276 return true;
3277 }
3278 if (Slice.Length == 1) {
3279 Str = StringRef("", 1);
3280 return true;
3281 }
3282 // We cannot instantiate a StringRef as we do not have an appropriate string
3283 // of 0s at hand.
3284 return false;
3285 }
3286
3287 // Start out with the entire array in the StringRef.
3288 Str = Slice.Array->getAsString();
3289 // Skip over 'offset' bytes.
3290 Str = Str.substr(Slice.Offset);
3291
3292 if (TrimAtNul) {
3293 // Trim off the \0 and anything after it. If the array is not nul
3294 // terminated, we just return the whole end of string. The client may know
3295 // some other way that the string is length-bound.
3296 Str = Str.substr(0, Str.find('\0'));
3297 }
3298 return true;
3299}
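
A sketch of reading back a constant string, assuming only a Module; the global-building boilerplate is illustrative, while the queried API is the one defined above.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static bool demoStringInfo(Module &M) {
  // @.str = private constant [6 x i8] c"hello\00"
  Constant *Init = ConstantDataArray::getString(M.getContext(), "hello");
  auto *GV = new GlobalVariable(M, Init->getType(), /*isConstant=*/true,
                                GlobalValue::PrivateLinkage, Init, ".str");
  StringRef S;
  bool OK = getConstantStringInfo(GV, S, /*Offset=*/1);
  // OK is true and S == "ello": the offset skips one byte, and the default
  // TrimAtNul drops the terminator.
  return OK && S == "ello";
}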
3300
3301// These next two are very similar to the above, but also look through PHI
3302// nodes.
3303// TODO: See if we can integrate these two together.
3304
3305/// If we can compute the length of the string pointed to by
3306/// the specified pointer, return 'len+1'. If we can't, return 0.
3307static uint64_t GetStringLengthH(const Value *V,
3308 SmallPtrSetImpl<const PHINode*> &PHIs,
3309 unsigned CharSize) {
3310 // Look through noop bitcast instructions.
3311 V = V->stripPointerCasts();
3312
3313 // If this is a PHI node, there are two cases: either we have already seen it
3314 // or we haven't.
3315 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3316 if (!PHIs.insert(PN).second)
3317 return ~0ULL; // already in the set.
3318
3319 // If it was new, see if all the input strings are the same length.
3320 uint64_t LenSoFar = ~0ULL;
3321 for (Value *IncValue : PN->incoming_values()) {
3322 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3323 if (Len == 0) return 0; // Unknown length -> unknown.
3324
3325 if (Len == ~0ULL) continue;
3326
3327 if (Len != LenSoFar && LenSoFar != ~0ULL)
3328 return 0; // Disagree -> unknown.
3329 LenSoFar = Len;
3330 }
3331
3332 // Success, all agree.
3333 return LenSoFar;
3334 }
3335
3336  // strlen(select(c,x,y)) -> strlen(x), provided strlen(x) == strlen(y)
3337 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3338 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3339 if (Len1 == 0) return 0;
3340 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3341 if (Len2 == 0) return 0;
3342 if (Len1 == ~0ULL) return Len2;
3343 if (Len2 == ~0ULL) return Len1;
3344 if (Len1 != Len2) return 0;
3345 return Len1;
3346 }
3347
3348 // Otherwise, see if we can read the string.
3349 ConstantDataArraySlice Slice;
3350 if (!getConstantDataArrayInfo(V, Slice, CharSize))
3351 return 0;
3352
3353 if (Slice.Array == nullptr)
3354 return 1;
3355
3356 // Search for nul characters
3357 unsigned NullIndex = 0;
3358 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3359 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3360 break;
3361 }
3362
3363 return NullIndex + 1;
3364}
3365
3366/// If we can compute the length of the string pointed to by
3367/// the specified pointer, return 'len+1'. If we can't, return 0.
3368uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3369 if (!V->getType()->isPointerTy()) return 0;
3370
3371 SmallPtrSet<const PHINode*, 32> PHIs;
3372 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3373 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3374 // an empty string as a length.
3375 return Len == ~0ULL ? 1 : Len;
3376}
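
A sketch of the select case handled in GetStringLengthH, assuming Hello and World are same-typed globals initialized with five-character strings (names illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static uint64_t demoStringLength(GlobalVariable *Hello, GlobalVariable *World,
                                 Value *Cond, IRBuilder<> &B) {
  // Both arms have length 5, so the arms agree and the walk succeeds.
  Value *Sel = B.CreateSelect(Cond, Hello, World);
  return GetStringLength(Sel); // 6: five characters plus the terminator
}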
3377
3378/// \brief \p PN defines a loop-variant pointer to an object. Check if the
3379/// previous iteration of the loop was referring to the same object as \p PN.
3380static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3381 const LoopInfo *LI) {
3382 // Find the loop-defined value.
3383 Loop *L = LI->getLoopFor(PN->getParent());
3384 if (PN->getNumIncomingValues() != 2)
3385 return true;
3386
3387 // Find the value from previous iteration.
3388 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3389 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3390 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3391 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3392 return true;
3393
3394 // If a new pointer is loaded in the loop, the pointer references a different
3395 // object in every iteration. E.g.:
3396 // for (i)
3397 // int *p = a[i];
3398 // ...
3399 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3400 if (!L->isLoopInvariant(Load->getPointerOperand()))
3401 return false;
3402 return true;
3403}
3404
3405Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3406 unsigned MaxLookup) {
3407 if (!V->getType()->isPointerTy())
3408 return V;
3409 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3410 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3411 V = GEP->getPointerOperand();
3412 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3413 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3414 V = cast<Operator>(V)->getOperand(0);
3415 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3416 if (GA->isInterposable())
3417 return V;
3418 V = GA->getAliasee();
3419 } else if (isa<AllocaInst>(V)) {
3420 // An alloca can't be further simplified.
3421 return V;
3422 } else {
3423 if (auto CS = CallSite(V))
3424 if (Value *RV = CS.getReturnedArgOperand()) {
3425 V = RV;
3426 continue;
3427 }
3428
3429 // See if InstructionSimplify knows any relevant tricks.
3430 if (Instruction *I = dyn_cast<Instruction>(V))
3431 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3432 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3433 V = Simplified;
3434 continue;
3435 }
3436
3437 return V;
3438 }
3439    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3440 }
3441 return V;
3442}
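
A quick sketch: GetUnderlyingObject strips the gep and bitcast below and stops at the alloca. The builder setup is illustrative.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static bool demoUnderlying(IRBuilder<> &B, const DataLayout &DL) {
  Value *A    = B.CreateAlloca(B.getInt32Ty());       // %a = alloca i32
  Value *Cast = B.CreateBitCast(A, B.getInt8PtrTy());
  Value *P    = B.CreateConstGEP1_64(Cast, 2);
  // Walks gep -> bitcast -> alloca within the default MaxLookup budget.
  return GetUnderlyingObject(P, DL) == A; // true
}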
3443
3444void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3445 const DataLayout &DL, LoopInfo *LI,
3446 unsigned MaxLookup) {
3447 SmallPtrSet<Value *, 4> Visited;
3448 SmallVector<Value *, 4> Worklist;
3449 Worklist.push_back(V);
3450 do {
3451 Value *P = Worklist.pop_back_val();
3452 P = GetUnderlyingObject(P, DL, MaxLookup);
3453
3454 if (!Visited.insert(P).second)
3455 continue;
3456
3457 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3458 Worklist.push_back(SI->getTrueValue());
3459 Worklist.push_back(SI->getFalseValue());
3460 continue;
3461 }
3462
3463 if (PHINode *PN = dyn_cast<PHINode>(P)) {
3464 // If this PHI changes the underlying object in every iteration of the
3465 // loop, don't look through it. Consider:
3466 // int **A;
3467 // for (i) {
3468 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3469 // Curr = A[i];
3470  //     *Prev, *Curr;
3471  //   }
3472 // Prev is tracking Curr one iteration behind so they refer to different
3473 // underlying objects.
3474 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3475 isSameUnderlyingObjectInLoop(PN, LI))
3476 for (Value *IncValue : PN->incoming_values())
3477 Worklist.push_back(IncValue);
3478 continue;
3479 }
3480
3481 Objects.push_back(P);
3482 } while (!Worklist.empty());
3483}
3484
3485/// This is the function that does the work of looking through basic
3486/// ptrtoint+arithmetic+inttoptr sequences.
3487static const Value *getUnderlyingObjectFromInt(const Value *V) {
3488 do {
3489 if (const Operator *U = dyn_cast<Operator>(V)) {
3490      // If we find a ptrtoint, we can transfer control back to the
3491      // regular pointer-based GetUnderlyingObjects walk in the caller.
3492 if (U->getOpcode() == Instruction::PtrToInt)
3493 return U->getOperand(0);
3494 // If we find an add of a constant, a multiplied value, or a phi, it's
3495 // likely that the other operand will lead us to the base
3496 // object. We don't have to worry about the case where the
3497 // object address is somehow being computed by the multiply,
3498 // because our callers only care when the result is an
3499 // identifiable object.
3500 if (U->getOpcode() != Instruction::Add ||
3501 (!isa<ConstantInt>(U->getOperand(1)) &&
3502 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3503 !isa<PHINode>(U->getOperand(1))))
3504 return V;
3505 V = U->getOperand(0);
3506 } else {
3507 return V;
3508 }
3509    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3510 } while (true);
3511}
3512
3513/// This is a wrapper around GetUnderlyingObjects that adds support for basic
3514/// ptrtoint+arithmetic+inttoptr sequences.
3515/// It returns false if an unidentified object is found by GetUnderlyingObjects.
3516bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3517 SmallVectorImpl<Value *> &Objects,
3518 const DataLayout &DL) {
3519 SmallPtrSet<const Value *, 16> Visited;
3520 SmallVector<const Value *, 4> Working(1, V);
3521 do {
3522 V = Working.pop_back_val();
3523
3524 SmallVector<Value *, 4> Objs;
3525 GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3526
3527 for (Value *V : Objs) {
3528 if (!Visited.insert(V).second)
3529 continue;
3530 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3531 const Value *O =
3532 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3533 if (O->getType()->isPointerTy()) {
3534 Working.push_back(O);
3535 continue;
3536 }
3537 }
3538 // If GetUnderlyingObjects fails to find an identifiable object,
3539 // getUnderlyingObjectsForCodeGen also fails for safety.
3540 if (!isIdentifiedObject(V)) {
3541 Objects.clear();
3542 return false;
3543 }
3544 Objects.push_back(const_cast<Value *>(V));
3545 }
3546 } while (!Working.empty());
3547 return true;
3548}
3549
3550/// Return true if the only users of this pointer are lifetime markers.
3551bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3552 for (const User *U : V->users()) {
3553 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3554 if (!II) return false;
3555
3556 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3557 II->getIntrinsicID() != Intrinsic::lifetime_end)
3558 return false;
3559 }
3560 return true;
3561}
3562
3563bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3564 const Instruction *CtxI,
3565 const DominatorTree *DT) {
3566 const Operator *Inst = dyn_cast<Operator>(V);
3567 if (!Inst)
3568 return false;
3569
3570 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3571 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3572 if (C->canTrap())
3573 return false;
3574
3575 switch (Inst->getOpcode()) {
3576 default:
3577 return true;
3578 case Instruction::UDiv:
3579 case Instruction::URem: {
3580 // x / y is undefined if y == 0.
3581 const APInt *V;
3582 if (match(Inst->getOperand(1), m_APInt(V)))
3583 return *V != 0;
3584 return false;
3585 }
3586 case Instruction::SDiv:
3587 case Instruction::SRem: {
3588    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
3589 const APInt *Numerator, *Denominator;
3590 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3591 return false;
3592 // We cannot hoist this division if the denominator is 0.
3593 if (*Denominator == 0)
3594 return false;
3595    // It's safe to hoist if the denominator is neither 0 nor -1.
3596 if (*Denominator != -1)
3597 return true;
3598    // At this point we know that the denominator is -1. It is safe to hoist as
3599    // long as we know that the numerator is not INT_MIN.
3600 if (match(Inst->getOperand(0), m_APInt(Numerator)))
3601 return !Numerator->isMinSignedValue();
3602 // The numerator *might* be MinSignedValue.
3603 return false;
3604 }
3605 case Instruction::Load: {
3606 const LoadInst *LI = cast<LoadInst>(Inst);
3607 if (!LI->isUnordered() ||
3608 // Speculative load may create a race that did not exist in the source.
3609 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3610 // Speculative load may load data from dirty regions.
3611 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3612 LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3613 return false;
3614 const DataLayout &DL = LI->getModule()->getDataLayout();
3615 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3616 LI->getAlignment(), DL, CtxI, DT);
3617 }
3618 case Instruction::Call: {
3619 auto *CI = cast<const CallInst>(Inst);
3620 const Function *Callee = CI->getCalledFunction();
3621
3622 // The called function could have undefined behavior or side-effects, even
3623 // if marked readnone nounwind.
3624 return Callee && Callee->isSpeculatable();
3625 }
3626 case Instruction::VAArg:
3627 case Instruction::Alloca:
3628 case Instruction::Invoke:
3629 case Instruction::PHI:
3630 case Instruction::Store:
3631 case Instruction::Ret:
3632 case Instruction::Br:
3633 case Instruction::IndirectBr:
3634 case Instruction::Switch:
3635 case Instruction::Unreachable:
3636 case Instruction::Fence:
3637 case Instruction::AtomicRMW:
3638 case Instruction::AtomicCmpXchg:
3639 case Instruction::LandingPad:
3640 case Instruction::Resume:
3641 case Instruction::CatchSwitch:
3642 case Instruction::CatchPad:
3643 case Instruction::CatchRet:
3644 case Instruction::CleanupPad:
3645 case Instruction::CleanupRet:
3646 return false; // Misc instructions which have effects
3647 }
3648}
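
A sketch of the SDiv rule above, assuming X is a non-constant i32 (names illustrative): division by 2 is hoistable, while division by -1 is not provably safe because X might be INT_MIN.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static void demoSpeculatable(Value *X /* unknown i32 */, IRBuilder<> &B) {
  bool SafeByTwo =
      isSafeToSpeculativelyExecute(B.CreateSDiv(X, B.getInt32(2)));  // true
  bool SafeByNegOne =
      isSafeToSpeculativelyExecute(B.CreateSDiv(X, B.getInt32(-1))); // false
  (void)SafeByTwo; (void)SafeByNegOne;
}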
3649
3650bool llvm::mayBeMemoryDependent(const Instruction &I) {
3651 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3652}
3653
3654OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3655 const Value *RHS,
3656 const DataLayout &DL,
3657 AssumptionCache *AC,
3658 const Instruction *CxtI,
3659 const DominatorTree *DT) {
3660  // Multiplying n * m significant bits yields a result of at most n + m
3661  // significant bits. If the total number of significant bits does not exceed
3662  // the result bit width, there is no overflow.
3663 // This means if we have enough leading zero bits in the operands
3664 // we can guarantee that the result does not overflow.
3665 // Ref: "Hacker's Delight" by Henry Warren
3666 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3667 KnownBits LHSKnown(BitWidth);
3668 KnownBits RHSKnown(BitWidth);
3669 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3670 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3671 // Note that underestimating the number of zero bits gives a more
3672 // conservative answer.
3673 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3674 RHSKnown.countMinLeadingZeros();
3675 // First handle the easy case: if we have enough zero bits there's
3676 // definitely no overflow.
3677 if (ZeroBits >= BitWidth)
3678 return OverflowResult::NeverOverflows;
3679
3680 // Get the largest possible values for each operand.
3681 APInt LHSMax = ~LHSKnown.Zero;
3682 APInt RHSMax = ~RHSKnown.Zero;
3683
3684 // We know the multiply operation doesn't overflow if the maximum values for
3685 // each operand will not overflow after we multiply them together.
3686 bool MaxOverflow;
3687 (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3688 if (!MaxOverflow)
3689 return OverflowResult::NeverOverflows;
3690
3691 // We know it always overflows if multiplying the smallest possible values for
3692 // the operands also results in overflow.
3693 bool MinOverflow;
3694 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3695 if (MinOverflow)
3696 return OverflowResult::AlwaysOverflows;
3697
3698 return OverflowResult::MayOverflow;
3699}
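
The leading-zero argument above, restated as plain 8-bit arithmetic (an illustrative check, independent of KnownBits): when the significant bits of the two operands sum to at most the bit width, the product cannot wrap.

#include <cstdint>

// 4 + 4 leading zeros in i8: operands are at most 15, and 15 * 15 = 225
// still fits in 8 bits, so such a multiply never overflows.
static_assert(15u * 15u <= 255u, "8 zero bits total: i8 mul cannot overflow");
// Only 3 + 4 leading zeros: 31 * 15 = 465 needs 9 bits, so overflow is
// possible and the analysis must answer MayOverflow.
static_assert(31u * 15u > 255u, "7 zero bits total: i8 mul may overflow");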
3700
3701OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3702 const Value *RHS,
3703 const DataLayout &DL,
3704 AssumptionCache *AC,
3705 const Instruction *CxtI,
3706 const DominatorTree *DT) {
3707 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3708 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3709 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3710
3711 if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3712      // The sign bit is set in both operands, so the sum carries out of the
3713      // bit width: this MUST overflow.
3714 return OverflowResult::AlwaysOverflows;
3715 }
3716
3717 if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3718      // The sign bit is clear in both operands, so the sum fits in the bit
3719      // width: this CANNOT overflow.
3720 return OverflowResult::NeverOverflows;
3721 }
3722 }
3723
3724 return OverflowResult::MayOverflow;
3725}
3726
3727/// \brief Return true if we can prove that adding two values with the given
3728/// known bits will not overflow.
3729/// Otherwise return false.
3730static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3731 const KnownBits &RHSKnown) {
3732 // Addition of two 2's complement numbers having opposite signs will never
3733 // overflow.
3734 if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3735 (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3736 return true;
3737
3738  // If either of the values is known to be non-negative, adding them can only
3739  // overflow if the second is also non-negative, so we can assume that.
3740  // Two non-negative numbers will only overflow if there is a carry to the
3741  // sign bit, so we can check whether, even when both values are as big as
3742  // possible, no carry reaches the sign bit.
3743 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3744 APInt MaxLHS = ~LHSKnown.Zero;
3745 MaxLHS.clearSignBit();
3746 APInt MaxRHS = ~RHSKnown.Zero;
3747 MaxRHS.clearSignBit();
3748 APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3749 return Result.isSignBitClear();
3750 }
3751
3752  // If either of the values is known to be negative, adding them can only
3753  // overflow if the second is also negative, so we can assume that.
3754  // Two negative numbers will only overflow if there is no carry to the sign
3755  // bit, so we can check whether, even when both values are as small as
3756  // possible, a carry still reaches the sign bit.
3757 if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3758 APInt MinLHS = LHSKnown.One;
3759 MinLHS.clearSignBit();
3760 APInt MinRHS = RHSKnown.One;
3761 MinRHS.clearSignBit();
3762 APInt Result = std::move(MinLHS) + std::move(MinRHS);
3763 return Result.isSignBitSet();
3764 }
3765
3766 // If we reached here it means that we know nothing about the sign bits.
3767 // In this case we can't know if there will be an overflow, since by
3768 // changing the sign bits any two values can be made to overflow.
3769 return false;
3770}
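
The non-negative branch of the ripple check, restated as plain i8 arithmetic (illustrative only): clear the sign bits, add the largest possible values, and inspect the sign bit of the sum.

// If known-zero bits cap both non-negative i8 operands at 63, the largest
// possible sum is 126, which keeps the sign bit clear: provably no overflow.
static_assert(63 + 63 <= 127, "sum of maxima keeps the i8 sign bit clear");
// If only the sign bits are known clear, 127 + 127 = 254 carries into the
// sign bit, so overflow cannot be ruled out.
static_assert(127 + 127 > 127, "carry into the sign bit is possible");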
3771
3772static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3773 const Value *RHS,
3774 const AddOperator *Add,
3775 const DataLayout &DL,
3776 AssumptionCache *AC,
3777 const Instruction *CxtI,
3778 const DominatorTree *DT) {
3779 if (Add && Add->hasNoSignedWrap()) {
3780 return OverflowResult::NeverOverflows;
3781 }
3782
3783 // If LHS and RHS each have at least two sign bits, the addition will look
3784 // like
3785 //
3786 // XX..... +
3787 // YY.....
3788 //
3789 // If the carry into the most significant position is 0, X and Y can't both
3790 // be 1 and therefore the carry out of the addition is also 0.
3791 //
3792 // If the carry into the most significant position is 1, X and Y can't both
3793 // be 0 and therefore the carry out of the addition is also 1.
3794 //
3795 // Since the carry into the most significant position is always equal to
3796 // the carry out of the addition, there is no signed overflow.
3797 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3798 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3799 return OverflowResult::NeverOverflows;
3800
3801 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3802 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3803
3804 if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3805 return OverflowResult::NeverOverflows;
3806
3807  // The remaining code needs Add to be available. Return early if it is not.
3808 if (!Add)
3809 return OverflowResult::MayOverflow;
3810
3811 // If the sign of Add is the same as at least one of the operands, this add
3812 // CANNOT overflow. This is particularly useful when the sum is
3813 // @llvm.assume'ed non-negative rather than proved so from analyzing its
3814 // operands.
3815 bool LHSOrRHSKnownNonNegative =
3816 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3817 bool LHSOrRHSKnownNegative =
3818 (LHSKnown.isNegative() || RHSKnown.isNegative());
3819 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3820 KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3821 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3822 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3823 return OverflowResult::NeverOverflows;
3824 }
3825 }
3826
3827 return OverflowResult::MayOverflow;
3828}
3829
3830bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3831 const DominatorTree &DT) {
3832#ifndef NDEBUG
3833 auto IID = II->getIntrinsicID();
3834  assert((IID == Intrinsic::sadd_with_overflow ||
3835          IID == Intrinsic::uadd_with_overflow ||
3836          IID == Intrinsic::ssub_with_overflow ||
3837          IID == Intrinsic::usub_with_overflow ||
3838          IID == Intrinsic::smul_with_overflow ||
3839          IID == Intrinsic::umul_with_overflow) &&
3840         "Not an overflow intrinsic!");
3841#endif
3842
3843 SmallVector<const BranchInst *, 2> GuardingBranches;
3844 SmallVector<const ExtractValueInst *, 2> Results;
3845
3846 for (const User *U : II->users()) {
3847 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3848      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3849
3850 if (EVI->getIndices()[0] == 0)
3851 Results.push_back(EVI);
3852 else {
3853        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3854
3855 for (const auto *U : EVI->users())
3856 if (const auto *B = dyn_cast<BranchInst>(U)) {
3857          assert(B->isConditional() && "How else is it using an i1?");
3858 GuardingBranches.push_back(B);
3859 }
3860 }
3861 } else {
3862 // We are using the aggregate directly in a way we don't want to analyze
3863 // here (storing it to a global, say).
3864 return false;
3865 }
3866 }
3867
3868 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3869 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3870 if (!NoWrapEdge.isSingleEdge())
3871 return false;
3872
3873 // Check if all users of the add are provably no-wrap.
3874 for (const auto *Result : Results) {
3875 // If the extractvalue itself is not executed on overflow, the we don't
3876 // need to check each use separately, since domination is transitive.
3877 if (DT.dominates(NoWrapEdge, Result->getParent()))
3878 continue;
3879
3880 for (auto &RU : Result->uses())
3881 if (!DT.dominates(NoWrapEdge, RU))
3882 return false;
3883 }
3884
3885 return true;
3886 };
3887
3888 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
3889}
3890
3891
3892OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3893 const DataLayout &DL,
3894 AssumptionCache *AC,
3895 const Instruction *CxtI,
3896 const DominatorTree *DT) {
3897 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3898 Add, DL, AC, CxtI, DT);
3899}
3900
3901OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3902 const Value *RHS,
3903 const DataLayout &DL,
3904 AssumptionCache *AC,
3905 const Instruction *CxtI,
3906 const DominatorTree *DT) {
3907 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3908}
3909
3910bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3911 // A memory operation returns normally if it isn't volatile. A volatile
3912 // operation is allowed to trap.
3913 //
3914 // An atomic operation isn't guaranteed to return in a reasonable amount of
3915 // time because it's possible for another thread to interfere with it for an
3916 // arbitrary length of time, but programs aren't allowed to rely on that.
3917 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3918 return !LI->isVolatile();
3919 if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3920 return !SI->isVolatile();
3921 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3922 return !CXI->isVolatile();
3923 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3924 return !RMWI->isVolatile();
3925 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3926 return !MII->isVolatile();
3927
3928 // If there is no successor, then execution can't transfer to it.
3929 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3930 return !CRI->unwindsToCaller();
3931 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3932 return !CatchSwitch->unwindsToCaller();
3933 if (isa<ResumeInst>(I))
3934 return false;
3935 if (isa<ReturnInst>(I))
3936 return false;
3937 if (isa<UnreachableInst>(I))
3938 return false;
3939
3940 // Calls can throw, or contain an infinite loop, or kill the process.
3941 if (auto CS = ImmutableCallSite(I)) {
3942 // Call sites that throw have implicit non-local control flow.
3943 if (!CS.doesNotThrow())
3944 return false;
3945
3946 // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3947 // etc. and thus not return. However, LLVM already assumes that
3948 //
3949 // - Thread exiting actions are modeled as writes to memory invisible to
3950 // the program.
3951 //
3952 // - Loops that don't have side effects (side effects are volatile/atomic
3953 // stores and IO) always terminate (see http://llvm.org/PR965).
3954 // Furthermore IO itself is also modeled as writes to memory invisible to
3955 // the program.
3956 //
3957 // We rely on those assumptions here, and use the memory effects of the call
3958 // target as a proxy for checking that it always returns.
3959
3960 // FIXME: This isn't aggressive enough; a call which only writes to a global
3961 // is guaranteed to return.
3962 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3963 match(I, m_Intrinsic<Intrinsic::assume>()) ||
3964 match(I, m_Intrinsic<Intrinsic::sideeffect>());
3965 }
3966
3967 // Other instructions return normally.
3968 return true;
3969}
3970
3971bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
3972  // TODO: This is slightly conservative for invoke instructions since exiting
3973  // via an exception *is* normal control flow for them.
3974 for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
3975 if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
3976 return false;
3977 return true;
3978}
3979
3980bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3981 const Loop *L) {
3982 // The loop header is guaranteed to be executed for every iteration.
3983 //
3984 // FIXME: Relax this constraint to cover all basic blocks that are
3985 // guaranteed to be executed at every iteration.
3986 if (I->getParent() != L->getHeader()) return false;
3987
3988 for (const Instruction &LI : *L->getHeader()) {
3989 if (&LI == I) return true;
3990 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3991 }
3992  llvm_unreachable("Instruction not contained in its own parent basic block.");
3993}
3994
3995bool llvm::propagatesFullPoison(const Instruction *I) {
3996 switch (I->getOpcode()) {
3997 case Instruction::Add:
3998 case Instruction::Sub:
3999 case Instruction::Xor:
4000 case Instruction::Trunc:
4001 case Instruction::BitCast:
4002 case Instruction::AddrSpaceCast:
4003 case Instruction::Mul:
4004 case Instruction::Shl:
4005 case Instruction::GetElementPtr:
4006 // These operations all propagate poison unconditionally. Note that poison
4007 // is not any particular value, so xor or subtraction of poison with
4008 // itself still yields poison, not zero.
4009 return true;
4010
4011 case Instruction::AShr:
4012 case Instruction::SExt:
4013 // For these operations, one bit of the input is replicated across
4014 // multiple output bits. A replicated poison bit is still poison.
4015 return true;
4016
4017 case Instruction::ICmp:
4018 // Comparing poison with any value yields poison. This is why, for
4019 // instance, x s< (x +nsw 1) can be folded to true.
4020 return true;
4021
4022 default:
4023 return false;
4024 }
4025}
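// Illustrative (hypothetical values): if %p is full poison, then by the rules
// above
//   %x = add i32 %p, 1       ; %x is poison
//   %y = xor i32 %p, %p      ; %y is still poison, not 0
//   %c = icmp slt i32 %p, 1  ; %c is poison
// while opcodes outside this switch (e.g. select) need not propagate it.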
4026
4027const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4028 switch (I->getOpcode()) {
4029 case Instruction::Store:
4030 return cast<StoreInst>(I)->getPointerOperand();
4031
4032 case Instruction::Load:
4033 return cast<LoadInst>(I)->getPointerOperand();
4034
4035 case Instruction::AtomicCmpXchg:
4036 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4037
4038 case Instruction::AtomicRMW:
4039 return cast<AtomicRMWInst>(I)->getPointerOperand();
4040
4041 case Instruction::UDiv:
4042 case Instruction::SDiv:
4043 case Instruction::URem:
4044 case Instruction::SRem:
4045 return I->getOperand(1);
4046
4047 default:
4048 return nullptr;
4049 }
4050}
4051
4052bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4053 // We currently only look for uses of poison values within the same basic
4054 // block, as that makes it easier to guarantee that the uses will be
4055 // executed given that PoisonI is executed.
4056 //
4057 // FIXME: Expand this to consider uses beyond the same basic block. To do
4058 // this, look out for the distinction between post-dominance and strong
4059 // post-dominance.
4060 const BasicBlock *BB = PoisonI->getParent();
4061
4062 // Set of instructions that we have proved will yield poison if PoisonI
4063 // does.
4064 SmallSet<const Value *, 16> YieldsPoison;
4065 SmallSet<const BasicBlock *, 4> Visited;
4066 YieldsPoison.insert(PoisonI);
4067 Visited.insert(PoisonI->getParent());
4068
4069 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4070
4071 unsigned Iter = 0;
4072 while (Iter++ < MaxDepth) {
4073 for (auto &I : make_range(Begin, End)) {
4074 if (&I != PoisonI) {
4075 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4076 if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4077 return true;
4078 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4079 return false;
4080 }
4081
4082 // Mark poison that propagates from I through uses of I.
4083 if (YieldsPoison.count(&I)) {
4084 for (const User *User : I.users()) {
4085 const Instruction *UserI = cast<Instruction>(User);
4086 if (propagatesFullPoison(UserI))
4087 YieldsPoison.insert(User);
4088 }
4089 }
4090 }
4091
4092 if (auto *NextBB = BB->getSingleSuccessor()) {
4093 if (Visited.insert(NextBB).second) {
4094 BB = NextBB;
4095 Begin = BB->getFirstNonPHI()->getIterator();
4096 End = BB->end();
4097 continue;
4098 }
4099 }
4100
4101 break;
4102 }
4103 return false;
4104}
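// Illustrative (hypothetical IR): given
//   %i = add nsw i32 %a, %b                       ; PoisonI
//   %g = getelementptr i32, i32* %base, i32 %i    ; GEP propagates poison
//   store i32 0, i32* %g                          ; non-poison pointer required
// the store triggers undefined behavior whenever %i is poison, so this
// function would return true for %i.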
4105
4106static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4107 if (FMF.noNaNs())
4108 return true;
4109
4110 if (auto *C = dyn_cast<ConstantFP>(V))
4111 return !C->isNaN();
4112 return false;
4113}
4114
4115static bool isKnownNonZero(const Value *V) {
4116 if (auto *C = dyn_cast<ConstantFP>(V))
4117 return !C->isZero();
4118 return false;
4119}
4120
4121/// Match a clamp pattern for float types, without regard for NaNs or signed
4122/// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
4123/// function recognizes whether it can be substituted by a "canonical" min/max
4124/// pattern.
4125static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4126 Value *CmpLHS, Value *CmpRHS,
4127 Value *TrueVal, Value *FalseVal,
4128 Value *&LHS, Value *&RHS) {
4129 // Try to match
4130 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4131 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4132 // and return description of the outer Max/Min.
4133
4134  // First, check if the select has its operands in inverse order:
4135 if (CmpRHS == FalseVal) {
4136 std::swap(TrueVal, FalseVal);
4137 Pred = CmpInst::getInversePredicate(Pred);
4138 }
4139
4140 // Assume success now. If there's no match, callers should not use these anyway.
4141 LHS = TrueVal;
4142 RHS = FalseVal;
4143
4144 const APFloat *FC1;
4145 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4146 return {SPF_UNKNOWN, SPNB_NA, false};
4147
4148 const APFloat *FC2;
4149 switch (Pred) {
4150 case CmpInst::FCMP_OLT:
4151 case CmpInst::FCMP_OLE:
4152 case CmpInst::FCMP_ULT:
4153 case CmpInst::FCMP_ULE:
4154 if (match(FalseVal,
4155 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4156 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4157 FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4158 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4159 break;
4160 case CmpInst::FCMP_OGT:
4161 case CmpInst::FCMP_OGE:
4162 case CmpInst::FCMP_UGT:
4163 case CmpInst::FCMP_UGE:
4164 if (match(FalseVal,
4165 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4166 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4167 FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4168 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4169 break;
4170 default:
4171 break;
4172 }
4173
4174 return {SPF_UNKNOWN, SPNB_NA, false};
4175}
4176
4177/// Recognize variations of:
4178/// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4179static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4180 Value *CmpLHS, Value *CmpRHS,
4181 Value *TrueVal, Value *FalseVal) {
4182 // Swap the select operands and predicate to match the patterns below.
4183 if (CmpRHS != TrueVal) {
4184 Pred = ICmpInst::getSwappedPredicate(Pred);
4185 std::swap(TrueVal, FalseVal);
4186 }
4187 const APInt *C1;
4188 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4189 const APInt *C2;
4190 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4191 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4192 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4193 return {SPF_SMAX, SPNB_NA, false};
4194
4195 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4196 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4197 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4198 return {SPF_SMIN, SPNB_NA, false};
4199
4200 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4201 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4202 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4203 return {SPF_UMAX, SPNB_NA, false};
4204
4205 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4206 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4207 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4208 return {SPF_UMIN, SPNB_NA, false};
4209 }
4210 return {SPF_UNKNOWN, SPNB_NA, false};
4211}
4212
4213/// Recognize variations of:
4214/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4215static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4216 Value *CmpLHS, Value *CmpRHS,
4217 Value *TVal, Value *FVal,
4218 unsigned Depth) {
4219 // TODO: Allow FP min/max with nnan/nsz.
4220  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4221
4222 Value *A, *B;
4223 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4224 if (!SelectPatternResult::isMinOrMax(L.Flavor))
4225 return {SPF_UNKNOWN, SPNB_NA, false};
4226
4227 Value *C, *D;
4228 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4229 if (L.Flavor != R.Flavor)
4230 return {SPF_UNKNOWN, SPNB_NA, false};
4231
4232 // We have something like: x Pred y ? min(a, b) : min(c, d).
4233 // Try to match the compare to the min/max operations of the select operands.
4234 // First, make sure we have the right compare predicate.
4235 switch (L.Flavor) {
4236 case SPF_SMIN:
4237 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4238 Pred = ICmpInst::getSwappedPredicate(Pred);
4239 std::swap(CmpLHS, CmpRHS);
4240 }
4241 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4242 break;
4243 return {SPF_UNKNOWN, SPNB_NA, false};
4244 case SPF_SMAX:
4245 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4246 Pred = ICmpInst::getSwappedPredicate(Pred);
4247 std::swap(CmpLHS, CmpRHS);
4248 }
4249 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4250 break;
4251 return {SPF_UNKNOWN, SPNB_NA, false};
4252 case SPF_UMIN:
4253 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4254 Pred = ICmpInst::getSwappedPredicate(Pred);
4255 std::swap(CmpLHS, CmpRHS);
4256 }
4257 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4258 break;
4259 return {SPF_UNKNOWN, SPNB_NA, false};
4260 case SPF_UMAX:
4261 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4262 Pred = ICmpInst::getSwappedPredicate(Pred);
4263 std::swap(CmpLHS, CmpRHS);
4264 }
4265 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4266 break;
4267 return {SPF_UNKNOWN, SPNB_NA, false};
4268 default:
4269 return {SPF_UNKNOWN, SPNB_NA, false};
4270 }
4271
4272 // If there is a common operand in the already matched min/max and the other
4273 // min/max operands match the compare operands (either directly or inverted),
4274 // then this is min/max of the same flavor.
4275
4276 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4277 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4278 if (D == B) {
4279 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4280 match(A, m_Not(m_Specific(CmpRHS)))))
4281 return {L.Flavor, SPNB_NA, false};
4282 }
4283 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4284 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4285 if (C == B) {
4286 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4287 match(A, m_Not(m_Specific(CmpRHS)))))
4288 return {L.Flavor, SPNB_NA, false};
4289 }
4290 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4291 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4292 if (D == A) {
4293 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4294 match(B, m_Not(m_Specific(CmpRHS)))))
4295 return {L.Flavor, SPNB_NA, false};
4296 }
4297 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4298 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4299 if (C == A) {
4300 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4301 match(B, m_Not(m_Specific(CmpRHS)))))
4302 return {L.Flavor, SPNB_NA, false};
4303 }
4304
4305 return {SPF_UNKNOWN, SPNB_NA, false};
4306}
4307
4308/// Match non-obvious integer minimum and maximum sequences.
4309static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4310 Value *CmpLHS, Value *CmpRHS,
4311 Value *TrueVal, Value *FalseVal,
4312 Value *&LHS, Value *&RHS,
4313 unsigned Depth) {
4314 // Assume success. If there's no match, callers should not use these anyway.
4315 LHS = TrueVal;
4316 RHS = FalseVal;
4317
4318 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4319 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4320 return SPR;
4321
4322 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4323 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4324 return SPR;
4325
4326 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4327 return {SPF_UNKNOWN, SPNB_NA, false};
4328
4329 // Z = X -nsw Y
4330 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4331 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4332 if (match(TrueVal, m_Zero()) &&
4333 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4334 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4335
4336 // Z = X -nsw Y
4337 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4338 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4339 if (match(FalseVal, m_Zero()) &&
4340 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4341 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4342
4343 const APInt *C1;
4344 if (!match(CmpRHS, m_APInt(C1)))
4345 return {SPF_UNKNOWN, SPNB_NA, false};
4346
4347 // An unsigned min/max can be written with a signed compare.
4348 const APInt *C2;
4349 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4350 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4351 // Is the sign bit set?
4352 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4353 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4354 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4355 C2->isMaxSignedValue())
4356 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4357
4358 // Is the sign bit clear?
4359 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4360 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4361 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4362 C2->isMinSignedValue())
4363 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4364 }
4365
4366 // Look through 'not' ops to find disguised signed min/max.
4367 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4368 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4369 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4370 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4371 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4372
4373 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4374 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4375 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4376 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4377 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4378
4379 return {SPF_UNKNOWN, SPNB_NA, false};
4380}
4381
4382static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4383 FastMathFlags FMF,
4384 Value *CmpLHS, Value *CmpRHS,
4385 Value *TrueVal, Value *FalseVal,
4386 Value *&LHS, Value *&RHS,
4387 unsigned Depth) {
4388 LHS = CmpLHS;
4389 RHS = CmpRHS;
4390
4391 // Signed zero may return inconsistent results between implementations.
4392 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4393 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4394 // Therefore, we behave conservatively and only proceed if at least one of the
4395 // operands is known to not be zero or if we don't care about signed zero.
4396 switch (Pred) {
4397 default: break;
4398 // FIXME: Include OGT/OLT/UGT/ULT.
4399 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4400 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4401 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4402 !isKnownNonZero(CmpRHS))
4403 return {SPF_UNKNOWN, SPNB_NA, false};
4404 }
4405
4406 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4407 bool Ordered = false;
4408
4409 // When given one NaN and one non-NaN input:
4410 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4411 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4412 // ordered comparison fails), which could be NaN or non-NaN.
4413 // so here we discover exactly what NaN behavior is required/accepted.
4414 if (CmpInst::isFPPredicate(Pred)) {
4415 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4416 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4417
4418 if (LHSSafe && RHSSafe) {
4419 // Both operands are known non-NaN.
4420 NaNBehavior = SPNB_RETURNS_ANY;
4421 } else if (CmpInst::isOrdered(Pred)) {
4422 // An ordered comparison will return false when given a NaN, so it
4423 // returns the RHS.
4424 Ordered = true;
4425 if (LHSSafe)
4426 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4427 NaNBehavior = SPNB_RETURNS_NAN;
4428 else if (RHSSafe)
4429 NaNBehavior = SPNB_RETURNS_OTHER;
4430 else
4431 // Completely unsafe.
4432 return {SPF_UNKNOWN, SPNB_NA, false};
4433 } else {
4434 Ordered = false;
4435 // An unordered comparison will return true when given a NaN, so it
4436 // returns the LHS.
4437 if (LHSSafe)
4438 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4439 NaNBehavior = SPNB_RETURNS_OTHER;
4440 else if (RHSSafe)
4441 NaNBehavior = SPNB_RETURNS_NAN;
4442 else
4443 // Completely unsafe.
4444 return {SPF_UNKNOWN, SPNB_NA, false};
4445 }
4446 }
4447
4448 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4449 std::swap(CmpLHS, CmpRHS);
4450 Pred = CmpInst::getSwappedPredicate(Pred);
4451 if (NaNBehavior == SPNB_RETURNS_NAN)
4452 NaNBehavior = SPNB_RETURNS_OTHER;
4453 else if (NaNBehavior == SPNB_RETURNS_OTHER)
4454 NaNBehavior = SPNB_RETURNS_NAN;
4455 Ordered = !Ordered;
4456 }
4457
4458 // ([if]cmp X, Y) ? X : Y
4459 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4460 switch (Pred) {
4461 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4462 case ICmpInst::ICMP_UGT:
4463 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4464 case ICmpInst::ICMP_SGT:
4465 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4466 case ICmpInst::ICMP_ULT:
4467 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4468 case ICmpInst::ICMP_SLT:
4469 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4470 case FCmpInst::FCMP_UGT:
4471 case FCmpInst::FCMP_UGE:
4472 case FCmpInst::FCMP_OGT:
4473 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4474 case FCmpInst::FCMP_ULT:
4475 case FCmpInst::FCMP_ULE:
4476 case FCmpInst::FCMP_OLT:
4477 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4478 }
4479 }
4480
4481 const APInt *C1;
4482 if (match(CmpRHS, m_APInt(C1))) {
4483 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
4484 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
4485
4486 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
4487 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
4488 if (Pred == ICmpInst::ICMP_SGT &&
4489 (C1->isNullValue() || C1->isAllOnesValue())) {
4490 return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4491 }
4492
4493 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
4494 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
4495 if (Pred == ICmpInst::ICMP_SLT &&
4496 (C1->isNullValue() || C1->isOneValue())) {
4497 return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4498 }
4499 }
4500 }
4501
4502 if (CmpInst::isIntPredicate(Pred))
4503 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4504
4505  // According to IEEE 754-2008 (5.3.1), minNum(0.0, -0.0) and similar calls
4506  // may return either -0.0 or 0.0, so the fcmp/select pair has stricter
4507  // semantics than minNum. Be conservative in such cases.
4508 if (NaNBehavior != SPNB_RETURNS_ANY ||
4509 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4510 !isKnownNonZero(CmpRHS)))
4511 return {SPF_UNKNOWN, SPNB_NA, false};
4512
4513 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4514}
4515
4516/// Helps to match a select pattern in case of a type mismatch.
4517///
4518/// The function handles the case where the types of the true and false values
4519/// of a select instruction differ from the types of the cmp instruction's
4520/// operands because of a cast instruction. It checks whether it is legal to
4521/// move the cast operation after the "select". If so, it returns the new
4522/// second value of the "select" (under the assumption that the cast is moved):
4523/// 1. As the operand of the cast instruction, when both values of the "select"
4524/// are the same cast instruction.
4525/// 2. As the restored constant (by applying the reverse cast operation), when
4526/// the first value of the "select" is a cast operation and the second value
4527/// is a constant.
4528/// NOTE: We return only the new second value because the first value can be
4529/// accessed as the operand of the cast instruction.
4530static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4531 Instruction::CastOps *CastOp) {
4532 auto *Cast1 = dyn_cast<CastInst>(V1);
4533 if (!Cast1)
4534 return nullptr;
4535
4536 *CastOp = Cast1->getOpcode();
4537 Type *SrcTy = Cast1->getSrcTy();
4538 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4539 // If V1 and V2 are both the same cast from the same type, look through V1.
4540 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4541 return Cast2->getOperand(0);
4542 return nullptr;
4543 }
4544
4545 auto *C = dyn_cast<Constant>(V2);
4546 if (!C)
4547 return nullptr;
4548
4549 Constant *CastedTo = nullptr;
4550 switch (*CastOp) {
4551 case Instruction::ZExt:
4552 if (CmpI->isUnsigned())
4553 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4554 break;
4555 case Instruction::SExt:
4556 if (CmpI->isSigned())
4557 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4558 break;
4559 case Instruction::Trunc:
4560 Constant *CmpConst;
4561 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4562 CmpConst->getType() == SrcTy) {
4563 // Here we have the following case:
4564 //
4565 // %cond = cmp iN %x, CmpConst
4566 // %tr = trunc iN %x to iK
4567 // %narrowsel = select i1 %cond, iK %t, iK C
4568 //
4569 // We can always move trunc after select operation:
4570 //
4571 // %cond = cmp iN %x, CmpConst
4572 // %widesel = select i1 %cond, iN %x, iN CmpConst
4573 // %tr = trunc iN %widesel to iK
4574 //
4575        // Note that C could be extended in any way because we don't care about
4576        // the upper bits after truncation. This can't be an abs pattern, because
4577        // that would look like:
4578        //
4579        //   select i1 %cond, x, -x.
4580        //
4581        // So only a min/max pattern can be matched here. Such a match requires
4582        // the widened C to equal CmpConst; that is why we set the widened C to
4583        // CmpConst, and the condition "trunc CmpConst == C" is checked below.
4584 CastedTo = CmpConst;
4585 } else {
4586 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4587 }
4588 break;
4589 case Instruction::FPTrunc:
4590 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4591 break;
4592 case Instruction::FPExt:
4593 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4594 break;
4595 case Instruction::FPToUI:
4596 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4597 break;
4598 case Instruction::FPToSI:
4599 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4600 break;
4601 case Instruction::UIToFP:
4602 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4603 break;
4604 case Instruction::SIToFP:
4605 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4606 break;
4607 default:
4608 break;
4609 }
4610
4611 if (!CastedTo)
4612 return nullptr;
4613
4614 // Make sure the cast doesn't lose any information.
4615 Constant *CastedBack =
4616 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4617 if (CastedBack != C)
4618 return nullptr;
4619
4620 return CastedTo;
4621}
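// Illustrative example (a sketch) for the ZExt case above:
//   %w = zext i8 %x to i32
//   %c = icmp ult i32 %w, 42
//   %s = select i1 %c, i32 %w, i32 42
// Truncating i32 42 to i8 and zero-extending it back reproduces 42 exactly, so
// nothing is lost and the select can be rewritten on i8 with the zext moved
// after it.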
4622
4623SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4624 Instruction::CastOps *CastOp,
4625 unsigned Depth) {
4626 if (Depth >= MaxDepth)
4627 return {SPF_UNKNOWN, SPNB_NA, false};
4628
4629 SelectInst *SI = dyn_cast<SelectInst>(V);
4630 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4631
4632 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4633 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4634
4635 CmpInst::Predicate Pred = CmpI->getPredicate();
4636 Value *CmpLHS = CmpI->getOperand(0);
4637 Value *CmpRHS = CmpI->getOperand(1);
4638 Value *TrueVal = SI->getTrueValue();
4639 Value *FalseVal = SI->getFalseValue();
4640 FastMathFlags FMF;
4641 if (isa<FPMathOperator>(CmpI))
4642 FMF = CmpI->getFastMathFlags();
4643
4644 // Bail out early.
4645 if (CmpI->isEquality())
4646 return {SPF_UNKNOWN, SPNB_NA, false};
4647
4648 // Deal with type mismatches.
4649 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4650 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
4651 // If this is a potential fmin/fmax with a cast to integer, then ignore
4652 // -0.0 because there is no corresponding integer value.
4653 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4654 FMF.setNoSignedZeros();
4655 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4656 cast<CastInst>(TrueVal)->getOperand(0), C,
4657 LHS, RHS, Depth);
4658 }
4659 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
4660 // If this is a potential fmin/fmax with a cast to integer, then ignore
4661 // -0.0 because there is no corresponding integer value.
4662 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4663 FMF.setNoSignedZeros();
4664 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4665 C, cast<CastInst>(FalseVal)->getOperand(0),
4666 LHS, RHS, Depth);
4667 }
4668 }
4669 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4670 LHS, RHS, Depth);
4671}
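// Illustrative use (a sketch; Sel stands for some SelectInst the caller is
// analyzing):
//
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
//   if (SPR.Flavor == SPF_SMAX) {
//     // Sel computes smax(LHS, RHS) with the matched operands filled in.
//   }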
4672
4673CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
4674 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
4675 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
4676 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
4677 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
4678 if (SPF == SPF_FMINNUM)
4679 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
4680 if (SPF == SPF_FMAXNUM)
4681 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
4682  llvm_unreachable("unhandled!");
4683}
4684
4685SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
4686 if (SPF == SPF_SMIN) return SPF_SMAX;
4687 if (SPF == SPF_UMIN) return SPF_UMAX;
4688 if (SPF == SPF_SMAX) return SPF_SMIN;
4689 if (SPF == SPF_UMAX) return SPF_UMIN;
4690  llvm_unreachable("unhandled!");
4691}
4692
4693CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
4694 return getMinMaxPred(getInverseMinMaxFlavor(SPF));
4695}
4696
4697/// Return true if "icmp Pred LHS RHS" is always true.
4698static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
4699 const Value *RHS, const DataLayout &DL,
4700 unsigned Depth) {
4701  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4702 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4703 return true;
4704
4705 switch (Pred) {
4706 default:
4707 return false;
4708
4709 case CmpInst::ICMP_SLE: {
4710 const APInt *C;
4711
4712 // LHS s<= LHS +_{nsw} C if C >= 0
4713 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4714 return !C->isNegative();
4715 return false;
4716 }
4717
4718 case CmpInst::ICMP_ULE: {
4719 const APInt *C;
4720
4721 // LHS u<= LHS +_{nuw} C for any C
4722 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4723 return true;
4724
4725 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4726 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4727 const Value *&X,
4728 const APInt *&CA, const APInt *&CB) {
4729 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4730 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4731 return true;
4732
4733 // If X & C == 0 then (X | C) == X +_{nuw} C
4734 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4735 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4736 KnownBits Known(CA->getBitWidth());
4737 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
4738 /*CxtI*/ nullptr, /*DT*/ nullptr);
4739 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
4740 return true;
4741 }
4742
4743 return false;
4744 };
4745
4746 const Value *X;
4747 const APInt *CLHS, *CRHS;
4748 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4749 return CLHS->ule(*CRHS);
4750
4751 return false;
4752 }
4753 }
4754}
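// Illustrative (hypothetical values): for "%y = add nuw i32 %x, 5",
// isTruePredicate(ICMP_ULE, %x, %y) is true. Via the or-trick, if bits 0 and
// 1 of %x are known zero, "%a = or i32 %x, 1" and "%b = or i32 %x, 3" act as
// nuw adds of 1 and 3, so %a u<= %b holds as well.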
4755
4756/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4757/// ALHS ARHS" is true. Otherwise, return None.
4758static Optional<bool>
4759isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4760 const Value *ARHS, const Value *BLHS, const Value *BRHS,
4761 const DataLayout &DL, unsigned Depth) {
4762 switch (Pred) {
4763 default:
4764 return None;
4765
4766 case CmpInst::ICMP_SLT:
4767 case CmpInst::ICMP_SLE:
4768 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
4769 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
4770 return true;
4771 return None;
4772
4773 case CmpInst::ICMP_ULT:
4774 case CmpInst::ICMP_ULE:
4775 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
4776 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
4777 return true;
4778 return None;
4779 }
4780}
4781
4782/// Return true if the operands of the two compares match. IsSwappedOps is true
4783/// when the operands match, but are swapped.
4784static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4785 const Value *BLHS, const Value *BRHS,
4786 bool &IsSwappedOps) {
4787
4788 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4789 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4790 return IsMatchingOps || IsSwappedOps;
4791}
4792
4793/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4794/// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4795/// BRHS" is false. Otherwise, return None if we can't infer anything.
4796static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4797 const Value *ALHS,
4798 const Value *ARHS,
4799 CmpInst::Predicate BPred,
4800 const Value *BLHS,
4801 const Value *BRHS,
4802 bool IsSwappedOps) {
4803 // Canonicalize the operands so they're matching.
4804 if (IsSwappedOps) {
4805 std::swap(BLHS, BRHS);
4806 BPred = ICmpInst::getSwappedPredicate(BPred);
4807 }
4808 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4809 return true;
4810 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4811 return false;
4812
4813 return None;
4814}
4815
4816/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4817/// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4818/// C2" is false. Otherwise, return None if we can't infer anything.
4819static Optional<bool>
4820isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4821 const ConstantInt *C1,
4822 CmpInst::Predicate BPred,
4823 const Value *BLHS, const ConstantInt *C2) {
4824  assert(ALHS == BLHS && "LHS operands must match.");
4825 ConstantRange DomCR =
4826 ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4827 ConstantRange CR =
4828 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4829 ConstantRange Intersection = DomCR.intersectWith(CR);
4830 ConstantRange Difference = DomCR.difference(CR);
4831 if (Intersection.isEmptySet())
4832 return false;
4833 if (Difference.isEmptySet())
4834 return true;
4835 return None;
4836}
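// Worked example (illustrative): APred = s<, C1 = 5, BPred = s<, C2 = 10.
// DomCR = [INT_MIN, 5) and CR = [INT_MIN, 10), so the difference DomCR \ CR is
// empty and "x s< 5" implies "x s< 10" (return true). With C2 = 3 instead, the
// intersection [INT_MIN, 3) and the difference [3, 5) are both non-empty, so
// nothing can be inferred (return None).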
4837
4838/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
4839/// false. Otherwise, return None if we can't infer anything.
4840static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
4841 const ICmpInst *RHS,
4842 const DataLayout &DL, bool LHSIsTrue,
4843 unsigned Depth) {
4844 Value *ALHS = LHS->getOperand(0);
4845 Value *ARHS = LHS->getOperand(1);
4846 // The rest of the logic assumes the LHS condition is true. If that's not the
4847 // case, invert the predicate to make it so.
4848 ICmpInst::Predicate APred =
4849 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
4850
4851 Value *BLHS = RHS->getOperand(0);
4852 Value *BRHS = RHS->getOperand(1);
4853 ICmpInst::Predicate BPred = RHS->getPredicate();
4854
4855 // Can we infer anything when the two compares have matching operands?
4856 bool IsSwappedOps;
4857 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4858 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4859 APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4860 return Implication;
4861 // No amount of additional analysis will infer the second condition, so
4862 // early exit.
4863 return None;
4864 }
4865
4866 // Can we infer anything when the LHS operands match and the RHS operands are
4867 // constants (not necessarily matching)?
4868 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4869 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4870 APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4871 cast<ConstantInt>(BRHS)))
4872 return Implication;
4873 // No amount of additional analysis will infer the second condition, so
4874 // early exit.
4875 return None;
4876 }
4877
4878 if (APred == BPred)
4879 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
4880 return None;
4881}
4882
4883/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
4884/// false. Otherwise, return None if we can't infer anything. We expect the
4885/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
4886static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
4887 const ICmpInst *RHS,
4888 const DataLayout &DL, bool LHSIsTrue,
4889 unsigned Depth) {
4890 // The LHS must be an 'or' or an 'and' instruction.
4891  assert((LHS->getOpcode() == Instruction::And ||
4892          LHS->getOpcode() == Instruction::Or) &&
4893         "Expected LHS to be 'and' or 'or'.");
4894
4895  assert(Depth <= MaxDepth && "Hit recursion limit");
4896
4897 // If the result of an 'or' is false, then we know both legs of the 'or' are
4898 // false. Similarly, if the result of an 'and' is true, then we know both
4899 // legs of the 'and' are true.
4900 Value *ALHS, *ARHS;
4901 if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
4902 (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
4903    // FIXME: Make this non-recursive.
4904 if (Optional<bool> Implication =
4905 isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
4906 return Implication;
4907 if (Optional<bool> Implication =
4908 isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
4909 return Implication;
4910 return None;
4911 }
4912 return None;
4913}
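// Illustrative: if "%t = and i1 %c1, %c2" is known true, then %c1 and %c2 are
// each true, so it suffices for either one alone to imply RHS; dually, a false
// 'or' makes both of its legs false.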
4914
4915Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
4916 const DataLayout &DL, bool LHSIsTrue,
4917 unsigned Depth) {
4918 // Bail out when we hit the limit.
4919 if (Depth == MaxDepth)
4920 return None;
4921
4922 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
4923 // example.
4924 if (LHS->getType() != RHS->getType())
4925 return None;
4926
4927 Type *OpTy = LHS->getType();
4928  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
4929
4930 // LHS ==> RHS by definition
4931 if (LHS == RHS)
4932 return LHSIsTrue;
4933
4934  // FIXME: Extend the code below to handle vectors.
4935 if (OpTy->isVectorTy())
4936 return None;
4937
4938  assert(OpTy->isIntegerTy(1) && "implied by above");
4939
4940 // Both LHS and RHS are icmps.
4941 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
4942 const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
4943 if (LHSCmp && RHSCmp)
4944 return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
4945
4946 // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
4947 // an icmp. FIXME: Add support for and/or on the RHS.
4948 const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
4949 if (LHSBO && RHSCmp) {
4950 if ((LHSBO->getOpcode() == Instruction::And ||
4951 LHSBO->getOpcode() == Instruction::Or))
4952 return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
4953 }
4954 return None;
4955}

/build/llvm-toolchain-snapshot-7~svn329677/include/llvm/Analysis/ValueTracking.h

1//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains routines that help analyze properties that chains of
11// computations have.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_ANALYSIS_VALUETRACKING_H
16#define LLVM_ANALYSIS_VALUETRACKING_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/IR/CallSite.h"
21#include "llvm/IR/Constants.h"
22#include "llvm/IR/Instruction.h"
23#include "llvm/IR/Intrinsics.h"
24#include <cassert>
25#include <cstdint>
26
27namespace llvm {
28
29class AddOperator;
30class APInt;
31class AssumptionCache;
32class DataLayout;
33class DominatorTree;
34class GEPOperator;
35class IntrinsicInst;
36struct KnownBits;
37class Loop;
38class LoopInfo;
39class MDNode;
40class OptimizationRemarkEmitter;
41class StringRef;
42class TargetLibraryInfo;
43class Value;
44
45 /// Determine which bits of V are known to be either zero or one and return
46 /// them in the KnownZero/KnownOne bit sets.
47 ///
48 /// This function is defined on values with integer type, values with pointer
49 /// type, and vectors of integers. In the case
50 /// where V is a vector, the known zero and known one values are the
51 /// same width as the vector element, and the bit is set only if it is true
52 /// for all of the elements in the vector.
53 void computeKnownBits(const Value *V, KnownBits &Known,
54 const DataLayout &DL, unsigned Depth = 0,
55 AssumptionCache *AC = nullptr,
56 const Instruction *CxtI = nullptr,
57 const DominatorTree *DT = nullptr,
58 OptimizationRemarkEmitter *ORE = nullptr);
59
60 /// Returns the known bits rather than passing by reference.
61 KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
62 unsigned Depth = 0, AssumptionCache *AC = nullptr,
63 const Instruction *CxtI = nullptr,
64 const DominatorTree *DT = nullptr,
65 OptimizationRemarkEmitter *ORE = nullptr);
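// Illustrative use (a sketch; V and DL stand for some value and its module's
// DataLayout):
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.isNonNegative())
//     ; // the sign bit of V is known to be zero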
66
67 /// Compute known bits from the range metadata.
68 /// \p KnownZero the set of bits that are known to be zero
69 /// \p KnownOne the set of bits that are known to be one
70 void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
71 KnownBits &Known);
72
73 /// Return true if LHS and RHS have no common bits set.
74 bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
75 const DataLayout &DL,
76 AssumptionCache *AC = nullptr,
77 const Instruction *CxtI = nullptr,
78 const DominatorTree *DT = nullptr);
79
80 /// Return true if the given value is known to have exactly one bit set when
81 /// defined. For vectors return true if every element is known to be a power
82 /// of two when defined. Supports values with integer or pointer type and
83 /// vectors of integers. If 'OrZero' is set, then return true if the given
84 /// value is either a power of two or zero.
85 bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
86 bool OrZero = false, unsigned Depth = 0,
87 AssumptionCache *AC = nullptr,
88 const Instruction *CxtI = nullptr,
89 const DominatorTree *DT = nullptr);
90
91 bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
92
93 /// Return true if the given value is known to be non-zero when defined. For
94 /// vectors, return true if every element is known to be non-zero when
95 /// defined. For pointers, if the context instruction and dominator tree are
96 /// specified, perform context-sensitive analysis and return true if the
97 /// pointer couldn't possibly be null at the specified instruction.
98 /// Supports values with integer or pointer type and vectors of integers.
99 bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
100 AssumptionCache *AC = nullptr,
101 const Instruction *CxtI = nullptr,
102 const DominatorTree *DT = nullptr);
103
104 /// Returns true if the given value is known to be non-negative.
105 bool isKnownNonNegative(const Value *V, const DataLayout &DL,
106 unsigned Depth = 0,
107 AssumptionCache *AC = nullptr,
108 const Instruction *CxtI = nullptr,
109 const DominatorTree *DT = nullptr);
110
111 /// Returns true if the given value is known to be positive (i.e. non-negative
112 /// and non-zero).
113 bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
114 AssumptionCache *AC = nullptr,
115 const Instruction *CxtI = nullptr,
116 const DominatorTree *DT = nullptr);
117
118 /// Returns true if the given value is known to be negative (i.e. non-positive
119 /// and non-zero).
120 bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
121 AssumptionCache *AC = nullptr,
122 const Instruction *CxtI = nullptr,
123 const DominatorTree *DT = nullptr);
124
125 /// Return true if the given values are known to be non-equal when defined.
126 /// Supports scalar integer types only.
127 bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
128 AssumptionCache *AC = nullptr,
129 const Instruction *CxtI = nullptr,
130 const DominatorTree *DT = nullptr);
131
132 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
133 /// simplify operations downstream. Mask is known to be zero for bits that V
134 /// cannot have.
135 ///
136 /// This function is defined on values with integer type, values with pointer
137 /// type, and vectors of integers. In the case
138 /// where V is a vector, the mask, known zero, and known one values are the
139 /// same width as the vector element, and the bit is set only if it is true
140 /// for all of the elements in the vector.
141 bool MaskedValueIsZero(const Value *V, const APInt &Mask,
142 const DataLayout &DL,
143 unsigned Depth = 0, AssumptionCache *AC = nullptr,
144 const Instruction *CxtI = nullptr,
145 const DominatorTree *DT = nullptr);
146
147 /// Return the number of times the sign bit of the register is replicated into
148 /// the other bits. We know that at least 1 bit is always equal to the sign
149 /// bit (itself), but other cases can give us information. For example,
150 /// immediately after an "ashr X, 2", we know that the top 3 bits are all
151 /// equal to each other, so we return 3. For vectors, return the number of
152 /// sign bits for the vector element with the minimum number of known sign
153 /// bits.
154 unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
155 unsigned Depth = 0, AssumptionCache *AC = nullptr,
156 const Instruction *CxtI = nullptr,
157 const DominatorTree *DT = nullptr);
158
159 /// This function computes the integer multiple of Base that equals V. If
160 /// successful, it returns true and returns the multiple in Multiple. If
161 /// unsuccessful, it returns false. Also, if V can be simplified to an
162 /// integer, then the simplified V is returned in Val. Look through sext only
163 /// if LookThroughSExt=true.
164 bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
165 bool LookThroughSExt = false,
166 unsigned Depth = 0);
167
168 /// Map a call instruction to an intrinsic ID. Libcalls which have equivalent
169 /// intrinsics are treated as-if they were intrinsics.
170 Intrinsic::ID getIntrinsicForCallSite(ImmutableCallSite ICS,
171 const TargetLibraryInfo *TLI);
172
173 /// Return true if we can prove that the specified FP value is never equal to
174 /// -0.0.
175 bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
176 unsigned Depth = 0);
177
178 /// Return true if we can prove that the specified FP value is either NaN or
179 /// never less than -0.0.
180 ///
181 /// NaN --> true
182 /// +0 --> true
183 /// -0 --> true
184 /// x > +0 --> true
185 /// x < -0 --> false
186 bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);
187
188 /// Return true if the floating-point scalar value is not a NaN or if the
189 /// floating-point vector value has no NaN elements. Return false if a value
190 /// could ever be NaN.
191 bool isKnownNeverNaN(const Value *V);
192
193 /// Return true if we can prove that the specified FP value's sign bit is 0.
194 ///
195 /// NaN --> true/false (depending on the NaN's sign bit)
196 /// +0 --> true
197 /// -0 --> false
198 /// x > +0 --> true
199 /// x < -0 --> false
200 bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);
201
202 /// If the specified value can be set by repeating the same byte in memory,
203 /// return the i8 value that it is represented with. This is true for all i8
204 /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
205 /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
206 /// i16 0x1234), return null.
207 Value *isBytewiseValue(Value *V);
208
209 /// Given an aggregate and a sequence of indices, see if the scalar value
210 /// indexed is already around as a register, for example if it were inserted
211 /// directly into the aggregate.
212 ///
213 /// If InsertBefore is not null, this function will duplicate (modified)
214 /// insertvalues when a part of a nested struct is extracted.
215 Value *FindInsertedValue(Value *V,
216 ArrayRef<unsigned> idx_range,
217 Instruction *InsertBefore = nullptr);
218
219 /// Analyze the specified pointer to see if it can be expressed as a base
220 /// pointer plus a constant offset. Return the base and offset to the caller.
221 Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
222 const DataLayout &DL);
223 inline const Value *GetPointerBaseWithConstantOffset(const Value *Ptr,
224 int64_t &Offset,
225 const DataLayout &DL) {
226 return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset,
227 DL);
228 }
229
230 /// Returns true if the GEP is based on a pointer to a string (array of
231 /// \p CharSize integers) and is indexing into this string.
232 bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
233 unsigned CharSize = 8);
234
235 /// Represents offset+length into a ConstantDataArray.
236 struct ConstantDataArraySlice {
237 /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
238 /// initializer, it just doesn't fit the ConstantDataArray interface).
239 const ConstantDataArray *Array;
240
241 /// Slice starts at this Offset.
242 uint64_t Offset;
243
244 /// Length of the slice.
245 uint64_t Length;
246
247 /// Moves the Offset and adjusts Length accordingly.
248 void move(uint64_t Delta) {
249      assert(Delta < Length);
250 Offset += Delta;
251 Length -= Delta;
252 }
253
254 /// Convenience accessor for elements in the slice.
255 uint64_t operator[](unsigned I) const {
256 return Array==nullptr ? 0 : Array->getElementAsInteger(I + Offset);
257 }
258 };
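// Illustrative: a slice viewing "hello" starts with Offset = 0, Length = 5;
// after move(2) it views "llo" (Offset = 2, Length = 3), and Slice[0] then
// reads the element 'l'.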
259
260 /// Returns true if the value \p V is a pointer into a ConstantDataArray.
261 /// If successful \p Slice will point to a ConstantDataArray info object
262 /// with an appropriate offset.
263 bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
264 unsigned ElementSize, uint64_t Offset = 0);
265
266 /// This function computes the length of a null-terminated C string pointed to
267 /// by V. If successful, it returns true and returns the string in Str. If
268 /// unsuccessful, it returns false. This does not include the trailing null
269 /// character by default. If TrimAtNul is set to false, then this returns any
270 /// trailing null characters as well as any other characters that come after
271 /// it.
272 bool getConstantStringInfo(const Value *V, StringRef &Str,
273 uint64_t Offset = 0, bool TrimAtNul = true);
274
275 /// If we can compute the length of the string pointed to by the specified
276 /// pointer, return 'len+1'. If we can't, return 0.
277 uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
278
279 /// This method strips off any GEP address adjustments and pointer casts from
280 /// the specified value, returning the original object being addressed. Note
281 /// that the returned value has pointer type if the specified value does. If
282 /// the MaxLookup value is non-zero, it limits the number of instructions to
283 /// be stripped off.
284 Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
285 unsigned MaxLookup = 6);
286 inline const Value *GetUnderlyingObject(const Value *V, const DataLayout &DL,
287 unsigned MaxLookup = 6) {
288 return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
289 }
290
291 /// \brief This method is similar to GetUnderlyingObject except that it can
292 /// look through phi and select instructions and return multiple objects.
293 ///
294 /// If LoopInfo is passed, loop phis are further analyzed. If a pointer
295 /// accesses different objects in each iteration, we don't look through the
296 /// phi node. E.g. consider this loop nest:
297 ///
298 /// int **A;
299 /// for (i)
300 /// for (j) {
301 /// A[i][j] = A[i-1][j] * B[j]
302 /// }
303 ///
304 /// This is transformed by Load-PRE to stash away A[i] for the next iteration
305 /// of the outer loop:
306 ///
307 /// Curr = A[0]; // Prev_0
308 /// for (i: 1..N) {
309 /// Prev = Curr; // Prev = PHI (Prev_0, Curr)
310 /// Curr = A[i];
311 /// for (j: 0..N) {
312 /// Curr[j] = Prev[j] * B[j]
313 /// }
314 /// }
315 ///
316 /// Since A[i] and A[i-1] are independent pointers, GetUnderlyingObjects
317 /// should not assume that Curr and Prev share the same underlying object;
318 /// thus it shouldn't look through the phi above.
319 void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
320 const DataLayout &DL, LoopInfo *LI = nullptr,
321 unsigned MaxLookup = 6);
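// Example (illustrative sketch; `Ptr`, `DL` and `LI` are assumed to be
// in scope):
//
//   SmallVector<Value *, 4> Objects;
//   GetUnderlyingObjects(Ptr, Objects, DL, LI);
//   for (Value *Obj : Objects)
//     ; // Each Obj is one possible base object of Ptr.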
322
323 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
324 /// ptrtoint+arithmetic+inttoptr sequences.
325 bool getUnderlyingObjectsForCodeGen(const Value *V,
326 SmallVectorImpl<Value *> &Objects,
327 const DataLayout &DL);
328
329 /// Return true if the only users of this pointer are lifetime markers.
330 bool onlyUsedByLifetimeMarkers(const Value *V);
331
332 /// Return true if the instruction does not have any effects besides
333 /// calculating the result and does not have undefined behavior.
334 ///
335 /// This method never returns true for an instruction that returns true for
336 /// mayHaveSideEffects; however, it also performs several additional
337 /// checks. It checks for undefined behavior, like dividing by zero or
338 /// loading from an invalid pointer (but not for undefined results, like a
339 /// shift with a shift amount larger than the width of the result). It checks
340 /// for malloc and alloca because speculatively executing them might cause a
341 /// memory leak. It also returns false for instructions related to control
342 /// flow, specifically terminators and PHI nodes.
343 ///
344 /// If the CtxI is specified this method performs context-sensitive analysis
345 /// and returns true if it is safe to execute the instruction immediately
346 /// before the CtxI.
347 ///
348 /// If the CtxI is NOT specified this method only looks at the instruction
349 /// itself and its operands, so if this method returns true, it is safe to
350 /// move the instruction as long as the correct dominance relationships for
351 /// the operands and users hold.
352 ///
353 /// This method can return true for instructions that read memory;
354 /// for such instructions, moving them may change the resulting value.
355 bool isSafeToSpeculativelyExecute(const Value *V,
356 const Instruction *CtxI = nullptr,
357 const DominatorTree *DT = nullptr);
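// Example (illustrative hoisting sketch; Instruction *I, Instruction
// *InsertPt and DominatorTree *DT are assumed). This only establishes
// freedom from side effects and UB; operand dominance at the new
// position must be checked separately.
//
//   if (isSafeToSpeculativelyExecute(I, InsertPt, DT))
//     I->moveBefore(InsertPt);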
358
359 /// Returns true if the result or effects of the given instruction \p I
360 /// depend on or influence global memory.
361 /// Memory dependence arises for example if the instruction reads from
362 /// memory or may produce effects or undefined behavior. Memory dependent
363 /// instructions generally cannot be reordered with respect to other memory
364 /// dependent instructions or moved into non-dominated basic blocks.
365 /// Instructions which just compute a value based on the values of their
366 /// operands are not memory dependent.
367 bool mayBeMemoryDependent(const Instruction &I);
368
369 /// Return true if it is an intrinsic that cannot be speculated but also
370 /// cannot trap.
371 bool isAssumeLikeIntrinsic(const Instruction *I);
372
373 /// Return true if it is valid to use the assumptions provided by an
374 /// assume intrinsic, I, at the point in the control-flow identified by the
375 /// context instruction, CxtI.
376 bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
377 const DominatorTree *DT = nullptr);
378
379 enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
380
381 OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
382 const Value *RHS,
383 const DataLayout &DL,
384 AssumptionCache *AC,
385 const Instruction *CxtI,
386 const DominatorTree *DT);
387 OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
388 const Value *RHS,
389 const DataLayout &DL,
390 AssumptionCache *AC,
391 const Instruction *CxtI,
392 const DominatorTree *DT);
393 OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
394 const DataLayout &DL,
395 AssumptionCache *AC = nullptr,
396 const Instruction *CxtI = nullptr,
397 const DominatorTree *DT = nullptr);
398 /// This version also leverages the sign bit of Add if known.
399 OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
400 const DataLayout &DL,
401 AssumptionCache *AC = nullptr,
402 const Instruction *CxtI = nullptr,
403 const DominatorTree *DT = nullptr);
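// Example (illustrative sketch; values A, B and analyses DL, AC, CxtI,
// DT are assumed to be in scope):
//
//   OverflowResult OR = computeOverflowForUnsignedAdd(A, B, DL, AC, CxtI, DT);
//   if (OR == OverflowResult::NeverOverflows)
//     ; // It would, for example, be valid to mark the add as nuw.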
404
405 /// Given an <op>.with.overflow intrinsic \p II, returns true if the
406 /// arithmetic part of its result is used only along paths that are
407 /// control dependent on the computation not overflowing.
408 bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
409 const DominatorTree &DT);
410
411 /// Return true if this function can prove that the instruction I will
412 /// always transfer execution to one of its successors (including the next
413 /// instruction that follows within a basic block). E.g. this is not
414 /// guaranteed for function calls that could loop infinitely.
415 ///
416 /// In other words, this function returns false for instructions that may
417 /// transfer execution or fail to transfer execution in a way that is not
418 /// captured in the CFG or in the sequence of instructions within a basic
419 /// block.
420 ///
421 /// Undefined behavior is assumed not to happen, so e.g. division is
422 /// guaranteed to transfer execution to the following instruction even
423 /// though division by zero might cause undefined behavior.
424 bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
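// Example (illustrative sketch; `BB` assumed): a forward scan over a
// block is only sound while each instruction is guaranteed to reach the
// next one.
//
//   for (const Instruction &I : *BB) {
//     // ... reason about I having executed ...
//     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
//       break; // I may throw, not terminate, or exit some other way.
//   }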
425
426 /// Returns true if this block does not contain a potential implicit exit.
427 /// This is equivalent to saying that all instructions within the basic block
428 /// are guaranteed to transfer execution to their successor within the basic
429 /// block. This has the same assumptions w.r.t. undefined behavior as the
430 /// instruction variant of this function.
431 bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
432
433 /// Return true if this function can prove that the instruction I
434 /// is executed for every iteration of the loop L.
435 ///
436 /// Note that this currently only considers the loop header.
437 bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
438 const Loop *L);
439
440 /// Return true if this function can prove that I is guaranteed to yield
441 /// full-poison (all bits poison) if at least one of its operands is
442 /// full-poison (all bits poison).
443 ///
444 /// The exact rules for how poison propagates through instructions have
445 /// not been settled as of 2015-07-10, so this function is conservative
446 /// and only considers poison to be propagated in uncontroversial
447 /// cases. There is no attempt to track values that may be only partially
448 /// poison.
449 bool propagatesFullPoison(const Instruction *I);
450
451 /// Return either nullptr or an operand of I such that I will trigger
452 /// undefined behavior if I is executed and that operand has a full-poison
453 /// value (all bits poison).
454 const Value *getGuaranteedNonFullPoisonOp(const Instruction *I);
455
456 /// Return true if this function can prove that if PoisonI is executed
457 /// and yields a full-poison value (all bits poison), then that will
458 /// trigger undefined behavior.
459 ///
460 /// Note that this currently only considers the basic block that is
461 /// the parent of PoisonI.
462 bool programUndefinedIfFullPoison(const Instruction *PoisonI);
463
464 /// \brief Specific patterns of select instructions we can match.
465 enum SelectPatternFlavor {
466 SPF_UNKNOWN = 0,
467 SPF_SMIN, /// Signed minimum
468 SPF_UMIN, /// Unsigned minimum
469 SPF_SMAX, /// Signed maximum
470 SPF_UMAX, /// Unsigned maximum
471 SPF_FMINNUM, /// Floating point minnum
472 SPF_FMAXNUM, /// Floating point maxnum
473 SPF_ABS, /// Absolute value
474 SPF_NABS /// Negated absolute value
475 };
476
477 /// \brief Behavior when a floating point min/max is given one NaN and one
478 /// non-NaN as input.
479 enum SelectPatternNaNBehavior {
480 SPNB_NA = 0, /// NaN behavior not applicable.
481 SPNB_RETURNS_NAN, /// Given one NaN input, returns the NaN.
482 SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
483 SPNB_RETURNS_ANY /// Given one NaN input, can return either (or
484 /// it has been determined that no operands can
485 /// be NaN).
486 };
487
488 struct SelectPatternResult {
489 SelectPatternFlavor Flavor;
490 SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
491 /// SPF_FMINNUM or SPF_FMAXNUM.
492 bool Ordered; /// When implementing this min/max pattern as
493 /// fcmp; select, does the fcmp have to be
494 /// ordered?
495
496 /// Return true if \p SPF is a min or a max pattern.
497 static bool isMinOrMax(SelectPatternFlavor SPF) {
498 return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
499 }
500 };
501
502 /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
503 /// and providing the out parameter results if we successfully match.
504 ///
505 /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
506 /// not match that of the original select. If this is the case, the cast
507 /// operation (one of Trunc, SExt, ZExt) that must be done to transform the
508 /// type of LHS and RHS into the type of V is returned in CastOp.
509 ///
510 /// For example:
511 /// %1 = icmp slt i32 %a, 4
512 /// %2 = sext i32 %a to i64
513 /// %3 = select i1 %1, i64 %2, i64 4
514 ///
515 /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
516 ///
517 SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
518 Instruction::CastOps *CastOp = nullptr,
519 unsigned Depth = 0);
520 inline SelectPatternResult
521 matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS,
522 Instruction::CastOps *CastOp = nullptr) {
523 Value *L = const_cast<Value*>(LHS);
[5] Assigned value is garbage or undefined
524 Value *R = const_cast<Value*>(RHS);
525 auto Result = matchSelectPattern(const_cast<Value*>(V), L, R);
526 LHS = L;
527 RHS = R;
528 return Result;
529 }
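// Example (illustrative sketch; `V` hypothetical): recognizing a
// signed-maximum idiom.
//
//   Value *A, *B;
//   SelectPatternResult SPR = matchSelectPattern(V, A, B);
//   if (SPR.Flavor == SPF_SMAX)
//     ; // V computes smax(A, B).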
530
531 /// Return the canonical comparison predicate for the specified
532 /// minimum/maximum flavor.
533 CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
534 bool Ordered = false);
535
536 /// Return the inverse minimum/maximum flavor of the specified flavor.
537 /// For example, signed minimum is the inverse of signed maximum.
538 SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);
539
540 /// Return the canonical inverse comparison predicate for the specified
541 /// minimum/maximum flavor.
542 CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF);
543
544 /// Return true if RHS is known to be implied true by LHS. Return false if
545 /// RHS is known to be implied false by LHS. Otherwise, return None if no
546 /// implication can be made.
547 /// LHS and RHS must be i1 (boolean) values or a vector of such values. Note that
548 /// the truth table for implication is the same as <=u on i1 values (but not
549 /// <=s!). The truth table for both is:
550 /// | T | F (B)
551 /// T | T | F
552 /// F | T | T
553 /// (A)
554 Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
555 const DataLayout &DL, bool LHSIsTrue = true,
556 unsigned Depth = 0);
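// Example (illustrative sketch; i1 values `Cond` and `C2` plus a
// DataLayout `DL` are assumed):
//
//   if (Optional<bool> Implied = isImpliedCondition(Cond, C2, DL))
//     ; // *Implied: true if C2 must be true, false if it must be false.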
557} // end namespace llvm
558
559#endif // LLVM_ANALYSIS_VALUETRACKING_H