Bug Summary

File: /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Analysis/ValueTracking.cpp
Warning: line 211, column 31
Called C++ object pointer is null
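
This diagnostic means the analyzer found an execution path (the numbered steps in the annotated source below) on which the object pointer used for a member call is null at the point of the call; here the call at line 211, column 31 is V->getType(), i.e. the analyzer considers V possibly null on that path. As an illustration only, and not the actual fix for ValueTracking.cpp, the following minimal sketch shows the general shape of such a finding and the kind of null check that silences it; every name in it is hypothetical.

  struct Node {
    int kind() const { return 0; }
  };

  // Hypothetical helper: returns null on one path, which is what the analyzer
  // tracks step by step in a report like this one.
  Node *lookup(bool Present, Node *N) { return Present ? N : nullptr; }

  int classify(bool Present, Node *N) {
    Node *P = lookup(Present, N);
    // Without this guard, the call P->kind() below is reported as
    // "Called C++ object pointer is null" on the Present == false path.
    if (!P)
      return -1;
    return P->kind();
  }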

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ValueTracking.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Analysis -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Analysis -I include -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-10-03-140002-15933-1 -x c++ /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Analysis/ValueTracking.cpp
1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains routines that help analyze properties that chains of
10// computations have.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/ValueTracking.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Analysis/AssumeBundleQueries.h"
28#include "llvm/Analysis/AssumptionCache.h"
29#include "llvm/Analysis/ConstantFolding.h"
30#include "llvm/Analysis/EHPersonalities.h"
31#include "llvm/Analysis/GuardUtils.h"
32#include "llvm/Analysis/InstructionSimplify.h"
33#include "llvm/Analysis/Loads.h"
34#include "llvm/Analysis/LoopInfo.h"
35#include "llvm/Analysis/OptimizationRemarkEmitter.h"
36#include "llvm/Analysis/TargetLibraryInfo.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/Attributes.h"
39#include "llvm/IR/BasicBlock.h"
40#include "llvm/IR/Constant.h"
41#include "llvm/IR/ConstantRange.h"
42#include "llvm/IR/Constants.h"
43#include "llvm/IR/DerivedTypes.h"
44#include "llvm/IR/DiagnosticInfo.h"
45#include "llvm/IR/Dominators.h"
46#include "llvm/IR/Function.h"
47#include "llvm/IR/GetElementPtrTypeIterator.h"
48#include "llvm/IR/GlobalAlias.h"
49#include "llvm/IR/GlobalValue.h"
50#include "llvm/IR/GlobalVariable.h"
51#include "llvm/IR/InstrTypes.h"
52#include "llvm/IR/Instruction.h"
53#include "llvm/IR/Instructions.h"
54#include "llvm/IR/IntrinsicInst.h"
55#include "llvm/IR/Intrinsics.h"
56#include "llvm/IR/IntrinsicsAArch64.h"
57#include "llvm/IR/IntrinsicsRISCV.h"
58#include "llvm/IR/IntrinsicsX86.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/Metadata.h"
61#include "llvm/IR/Module.h"
62#include "llvm/IR/Operator.h"
63#include "llvm/IR/PatternMatch.h"
64#include "llvm/IR/Type.h"
65#include "llvm/IR/User.h"
66#include "llvm/IR/Value.h"
67#include "llvm/Support/Casting.h"
68#include "llvm/Support/CommandLine.h"
69#include "llvm/Support/Compiler.h"
70#include "llvm/Support/ErrorHandling.h"
71#include "llvm/Support/KnownBits.h"
72#include "llvm/Support/MathExtras.h"
73#include <algorithm>
74#include <cassert>
75#include <cstdint>
76#include <utility>
77
78using namespace llvm;
79using namespace llvm::PatternMatch;
80
81// Controls the number of uses of the value searched for possible
82// dominating comparisons.
83static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
84 cl::Hidden, cl::init(20));
85
86// According to the LangRef, branching on a poison condition is absolutely
87// immediate full UB. However, historically we haven't implemented that
88// consistently as we had an important transformation (non-trivial unswitch)
89// which introduced instances of branch on poison/undef to otherwise well
90// defined programs. This issue has since been fixed, but the flag is
91// temporarily retained to easily diagnose potential regressions.
92static cl::opt<bool> BranchOnPoisonAsUB("branch-on-poison-as-ub",
93 cl::Hidden, cl::init(true));
94
95
96/// Returns the bitwidth of the given scalar or pointer type. For vector types,
97/// returns the element type's bitwidth.
98static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
99 if (unsigned BitWidth = Ty->getScalarSizeInBits())
100 return BitWidth;
101
102 return DL.getPointerTypeSizeInBits(Ty);
103}
104
105namespace {
106
107// Simplifying using an assume can only be done in a particular control-flow
108// context (the context instruction provides that context). If an assume and
109// the context instruction are not in the same block then the DT helps in
110// figuring out if we can use it.
111struct Query {
112 const DataLayout &DL;
113 AssumptionCache *AC;
114 const Instruction *CxtI;
115 const DominatorTree *DT;
116
117 // Unlike the other analyses, this may be a nullptr because not all clients
118 // provide it currently.
119 OptimizationRemarkEmitter *ORE;
120
121 /// If true, it is safe to use metadata during simplification.
122 InstrInfoQuery IIQ;
123
124 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
125 const DominatorTree *DT, bool UseInstrInfo,
126 OptimizationRemarkEmitter *ORE = nullptr)
127 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
128};
129
130} // end anonymous namespace
131
132// Given the provided Value and, potentially, a context instruction, return
133// the preferred context instruction (if any).
134static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
135 // If we've been provided with a context instruction, then use that (provided
136 // it has been inserted).
137 if (CxtI && CxtI->getParent())
138 return CxtI;
139
140 // If the value is really an already-inserted instruction, then use that.
141 CxtI = dyn_cast<Instruction>(V);
142 if (CxtI && CxtI->getParent())
143 return CxtI;
144
145 return nullptr;
146}
147
148static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
149 // If we've been provided with a context instruction, then use that (provided
150 // it has been inserted).
151 if (CxtI && CxtI->getParent())
152 return CxtI;
153
154 // If the value is really an already-inserted instruction, then use that.
155 CxtI = dyn_cast<Instruction>(V1);
156 if (CxtI && CxtI->getParent())
157 return CxtI;
158
159 CxtI = dyn_cast<Instruction>(V2);
160 if (CxtI && CxtI->getParent())
161 return CxtI;
162
163 return nullptr;
164}
165
166static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
167 const APInt &DemandedElts,
168 APInt &DemandedLHS, APInt &DemandedRHS) {
169 // The length of scalable vectors is unknown at compile time, thus we
170 // cannot check their values
171 if (isa<ScalableVectorType>(Shuf->getType()))
172 return false;
173
174 int NumElts =
175 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
176 int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
177 DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
178 if (DemandedElts.isZero())
179 return true;
180 // Simple case of a shuffle with zeroinitializer.
181 if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
182 DemandedLHS.setBit(0);
183 return true;
184 }
185 for (int i = 0; i != NumMaskElts; ++i) {
186 if (!DemandedElts[i])
187 continue;
188 int M = Shuf->getMaskValue(i);
189 assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
190
191 // For undef elements, we don't know anything about the common state of
192 // the shuffle result.
193 if (M == -1)
194 return false;
195 if (M < NumElts)
196 DemandedLHS.setBit(M % NumElts);
197 else
198 DemandedRHS.setBit(M % NumElts);
199 }
200
201 return true;
202}
203
204static void computeKnownBits(const Value *V, const APInt &DemandedElts,
205 KnownBits &Known, unsigned Depth, const Query &Q);
206
207static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
208 const Query &Q) {
209 // FIXME: We currently have no way to represent the DemandedElts of a scalable
210 // vector
211 if (isa<ScalableVectorType>(V->getType())) {
Step 19: Called C++ object pointer is null
212 Known.resetAll();
213 return;
214 }
215
216 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
217 APInt DemandedElts =
218 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
219 computeKnownBits(V, DemandedElts, Known, Depth, Q);
220}
221
222void llvm::computeKnownBits(const Value *V, KnownBits &Known,
223 const DataLayout &DL, unsigned Depth,
224 AssumptionCache *AC, const Instruction *CxtI,
225 const DominatorTree *DT,
226 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
227 ::computeKnownBits(V, Known, Depth,
228 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
229}
230
231void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
232 KnownBits &Known, const DataLayout &DL,
233 unsigned Depth, AssumptionCache *AC,
234 const Instruction *CxtI, const DominatorTree *DT,
235 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
236 ::computeKnownBits(V, DemandedElts, Known, Depth,
237 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
238}
239
240static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
241 unsigned Depth, const Query &Q);
242
243static KnownBits computeKnownBits(const Value *V, unsigned Depth,
244 const Query &Q);
245
246KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
247 unsigned Depth, AssumptionCache *AC,
248 const Instruction *CxtI,
249 const DominatorTree *DT,
250 OptimizationRemarkEmitter *ORE,
251 bool UseInstrInfo) {
252 return ::computeKnownBits(
253 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
254}
255
256KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
257 const DataLayout &DL, unsigned Depth,
258 AssumptionCache *AC, const Instruction *CxtI,
259 const DominatorTree *DT,
260 OptimizationRemarkEmitter *ORE,
261 bool UseInstrInfo) {
262 return ::computeKnownBits(
263 V, DemandedElts, Depth,
264 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
265}
266
267bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
268 const DataLayout &DL, AssumptionCache *AC,
269 const Instruction *CxtI, const DominatorTree *DT,
270 bool UseInstrInfo) {
271 assert(LHS->getType() == RHS->getType() &&
272 "LHS and RHS should have the same type");
273 assert(LHS->getType()->isIntOrIntVectorTy() &&
274 "LHS and RHS should be integers");
275 // Look for an inverted mask: (X & ~M) op (Y & M).
276 {
277 Value *M;
278 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
279 match(RHS, m_c_And(m_Specific(M), m_Value())))
280 return true;
281 if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
282 match(LHS, m_c_And(m_Specific(M), m_Value())))
283 return true;
284 }
285
286 // X op (Y & ~X)
287 if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
288 match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
289 return true;
290
291 // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
292 // for constant Y.
293 Value *Y;
294 if (match(RHS,
295 m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
296 match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
297 return true;
298
299 // Look for: (A & B) op ~(A | B)
300 {
301 Value *A, *B;
302 if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
303 match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
304 return true;
305 if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
306 match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
307 return true;
308 }
309 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
310 KnownBits LHSKnown(IT->getBitWidth());
311 KnownBits RHSKnown(IT->getBitWidth());
312 computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
313 computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
314 return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
315}
316
317bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
318 return !I->user_empty() && all_of(I->users(), [](const User *U) {
319 ICmpInst::Predicate P;
320 return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
321 });
322}
323
324static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
325 const Query &Q);
326
327bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
328 bool OrZero, unsigned Depth,
329 AssumptionCache *AC, const Instruction *CxtI,
330 const DominatorTree *DT, bool UseInstrInfo) {
331 return ::isKnownToBeAPowerOfTwo(
332 V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
333}
334
335static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
336 unsigned Depth, const Query &Q);
337
338static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
339
340bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
341 AssumptionCache *AC, const Instruction *CxtI,
342 const DominatorTree *DT, bool UseInstrInfo) {
343 return ::isKnownNonZero(V, Depth,
344 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
345}
346
347bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
348 unsigned Depth, AssumptionCache *AC,
349 const Instruction *CxtI, const DominatorTree *DT,
350 bool UseInstrInfo) {
351 KnownBits Known =
352 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
353 return Known.isNonNegative();
354}
355
356bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
357 AssumptionCache *AC, const Instruction *CxtI,
358 const DominatorTree *DT, bool UseInstrInfo) {
359 if (auto *CI = dyn_cast<ConstantInt>(V))
360 return CI->getValue().isStrictlyPositive();
361
362 // TODO: We're doing two recursive queries here. We should factor this such
363 // that only a single query is needed.
364 return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
365 isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
366}
367
368bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
369 AssumptionCache *AC, const Instruction *CxtI,
370 const DominatorTree *DT, bool UseInstrInfo) {
371 KnownBits Known =
372 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
373 return Known.isNegative();
374}
375
376static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
377 const Query &Q);
378
379bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
380 const DataLayout &DL, AssumptionCache *AC,
381 const Instruction *CxtI, const DominatorTree *DT,
382 bool UseInstrInfo) {
383 return ::isKnownNonEqual(V1, V2, 0,
384 Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
385 UseInstrInfo, /*ORE=*/nullptr));
386}
387
388static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
389 const Query &Q);
390
391bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
392 const DataLayout &DL, unsigned Depth,
393 AssumptionCache *AC, const Instruction *CxtI,
394 const DominatorTree *DT, bool UseInstrInfo) {
395 return ::MaskedValueIsZero(
396 V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
397}
398
399static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
400 unsigned Depth, const Query &Q);
401
402static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
403 const Query &Q) {
404 // FIXME: We currently have no way to represent the DemandedElts of a scalable
405 // vector
406 if (isa<ScalableVectorType>(V->getType()))
407 return 1;
408
409 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
410 APInt DemandedElts =
411 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
412 return ComputeNumSignBits(V, DemandedElts, Depth, Q);
413}
414
415unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
416 unsigned Depth, AssumptionCache *AC,
417 const Instruction *CxtI,
418 const DominatorTree *DT, bool UseInstrInfo) {
419 return ::ComputeNumSignBits(
420 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
421}
422
423unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
424 unsigned Depth, AssumptionCache *AC,
425 const Instruction *CxtI,
426 const DominatorTree *DT) {
427 unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
428 return V->getType()->getScalarSizeInBits() - SignBits + 1;
429}
430
431static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
432 bool NSW, const APInt &DemandedElts,
433 KnownBits &KnownOut, KnownBits &Known2,
434 unsigned Depth, const Query &Q) {
435 computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
436
437 // If one operand is unknown and we have no nowrap information,
438 // the result will be unknown independently of the second operand.
439 if (KnownOut.isUnknown() && !NSW)
440 return;
441
442 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
443 KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
444}
445
446static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
447 const APInt &DemandedElts, KnownBits &Known,
448 KnownBits &Known2, unsigned Depth,
449 const Query &Q) {
450 computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
451 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
452
453 bool isKnownNegative = false;
454 bool isKnownNonNegative = false;
455 // If the multiplication is known not to overflow, compute the sign bit.
456 if (NSW) {
457 if (Op0 == Op1) {
458 // The product of a number with itself is non-negative.
459 isKnownNonNegative = true;
460 } else {
461 bool isKnownNonNegativeOp1 = Known.isNonNegative();
462 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
463 bool isKnownNegativeOp1 = Known.isNegative();
464 bool isKnownNegativeOp0 = Known2.isNegative();
465 // The product of two numbers with the same sign is non-negative.
466 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
467 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
468 // The product of a negative number and a non-negative number is either
469 // negative or zero.
470 if (!isKnownNonNegative)
471 isKnownNegative =
472 (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
473 Known2.isNonZero()) ||
474 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
475 }
476 }
477
478 bool SelfMultiply = Op0 == Op1;
479 // TODO: SelfMultiply can be poison, but not undef.
480 if (SelfMultiply)
481 SelfMultiply &=
482 isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
483 Known = KnownBits::mul(Known, Known2, SelfMultiply);
484
485 // Only make use of no-wrap flags if we failed to compute the sign bit
486 // directly. This matters if the multiplication always overflows, in
487 // which case we prefer to follow the result of the direct computation,
488 // though as the program is invoking undefined behaviour we can choose
489 // whatever we like here.
490 if (isKnownNonNegative && !Known.isNegative())
491 Known.makeNonNegative();
492 else if (isKnownNegative && !Known.isNonNegative())
493 Known.makeNegative();
494}
495
496void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
497 KnownBits &Known) {
498 unsigned BitWidth = Known.getBitWidth();
499 unsigned NumRanges = Ranges.getNumOperands() / 2;
500 assert(NumRanges >= 1);
501
502 Known.Zero.setAllBits();
503 Known.One.setAllBits();
504
505 for (unsigned i = 0; i < NumRanges; ++i) {
506 ConstantInt *Lower =
507 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
508 ConstantInt *Upper =
509 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
510 ConstantRange Range(Lower->getValue(), Upper->getValue());
511
512 // The first CommonPrefixBits of all values in Range are equal.
513 unsigned CommonPrefixBits =
514 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
515 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
516 APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
517 Known.One &= UnsignedMax & Mask;
518 Known.Zero &= ~UnsignedMax & Mask;
519 }
520}
521
522static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
523 SmallVector<const Value *, 16> WorkSet(1, I);
524 SmallPtrSet<const Value *, 32> Visited;
525 SmallPtrSet<const Value *, 16> EphValues;
526
527 // The instruction defining an assumption's condition itself is always
528 // considered ephemeral to that assumption (even if it has other
529 // non-ephemeral users). See r246696's test case for an example.
530 if (is_contained(I->operands(), E))
531 return true;
532
533 while (!WorkSet.empty()) {
534 const Value *V = WorkSet.pop_back_val();
535 if (!Visited.insert(V).second)
536 continue;
537
538 // If all uses of this value are ephemeral, then so is this value.
539 if (llvm::all_of(V->users(), [&](const User *U) {
540 return EphValues.count(U);
541 })) {
542 if (V == E)
543 return true;
544
545 if (V == I || (isa<Instruction>(V) &&
546 !cast<Instruction>(V)->mayHaveSideEffects() &&
547 !cast<Instruction>(V)->isTerminator())) {
548 EphValues.insert(V);
549 if (const User *U = dyn_cast<User>(V))
550 append_range(WorkSet, U->operands());
551 }
552 }
553 }
554
555 return false;
556}
557
558// Is this an intrinsic that cannot be speculated but also cannot trap?
559bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
560 if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
561 return CI->isAssumeLikeIntrinsic();
562
563 return false;
564}
565
566bool llvm::isValidAssumeForContext(const Instruction *Inv,
567 const Instruction *CxtI,
568 const DominatorTree *DT) {
569 // There are two restrictions on the use of an assume:
570 // 1. The assume must dominate the context (or the control flow must
571 // reach the assume whenever it reaches the context).
572 // 2. The context must not be in the assume's set of ephemeral values
573 // (otherwise we will use the assume to prove that the condition
574 // feeding the assume is trivially true, thus causing the removal of
575 // the assume).
576
577 if (Inv->getParent() == CxtI->getParent()) {
578 // If Inv and CtxI are in the same block, check if the assume (Inv) is first
579 // in the BB.
580 if (Inv->comesBefore(CxtI))
581 return true;
582
583 // Don't let an assume affect itself - this would cause the problems
584 // `isEphemeralValueOf` is trying to prevent, and it would also make
585 // the loop below go out of bounds.
586 if (Inv == CxtI)
587 return false;
588
589 // The context comes first, but they're both in the same block.
590 // Make sure there is nothing in between that might interrupt
591 // the control flow, not even CxtI itself.
592 // We limit the scan distance between the assume and its context instruction
593 // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
594 // it can be adjusted if needed (could be turned into a cl::opt).
595 auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
596 if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
597 return false;
598
599 return !isEphemeralValueOf(Inv, CxtI);
600 }
601
602 // Inv and CxtI are in different blocks.
603 if (DT) {
604 if (DT->dominates(Inv, CxtI))
605 return true;
606 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
607 // We don't have a DT, but this trivially dominates.
608 return true;
609 }
610
611 return false;
612}
613
614static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
615 // v u> y implies v != 0.
616 if (Pred == ICmpInst::ICMP_UGT)
617 return true;
618
619 // Special-case v != 0 to also handle v != null.
620 if (Pred == ICmpInst::ICMP_NE)
621 return match(RHS, m_Zero());
622
623 // All other predicates - rely on generic ConstantRange handling.
624 const APInt *C;
625 if (!match(RHS, m_APInt(C)))
626 return false;
627
628 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
629 return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
630}
631
632static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
633 // Use of assumptions is context-sensitive. If we don't have a context, we
634 // cannot use them!
635 if (!Q.AC || !Q.CxtI)
636 return false;
637
638 if (Q.CxtI && V->getType()->isPointerTy()) {
639 SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
640 if (!NullPointerIsDefined(Q.CxtI->getFunction(),
641 V->getType()->getPointerAddressSpace()))
642 AttrKinds.push_back(Attribute::Dereferenceable);
643
644 if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
645 return true;
646 }
647
648 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
649 if (!AssumeVH)
650 continue;
651 CallInst *I = cast<CallInst>(AssumeVH);
652 assert(I->getFunction() == Q.CxtI->getFunction() &&
653 "Got assumption for the wrong function!");
654
655 // Warning: This loop can end up being somewhat performance sensitive.
656 // We're running this loop once for each value queried, resulting in a
657 // runtime of ~O(#assumes * #values).
658
659 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
660 "must be an assume intrinsic");
661
662 Value *RHS;
663 CmpInst::Predicate Pred;
664 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
665 if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
666 return false;
667
668 if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
669 return true;
670 }
671
672 return false;
673}
674
675static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
676 unsigned Depth, const Query &Q) {
677 // Use of assumptions is context-sensitive. If we don't have a context, we
678 // cannot use them!
679 if (!Q.AC || !Q.CxtI)
680 return;
681
682 unsigned BitWidth = Known.getBitWidth();
683
684 // Refine Known set if the pointer alignment is set by assume bundles.
685 if (V->getType()->isPointerTy()) {
686 if (RetainedKnowledge RK = getKnowledgeValidInContext(
687 V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
688 if (isPowerOf2_64(RK.ArgValue))
689 Known.Zero.setLowBits(Log2_64(RK.ArgValue));
690 }
691 }
692
693 // Note that the patterns below need to be kept in sync with the code
694 // in AssumptionCache::updateAffectedValues.
695
696 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
697 if (!AssumeVH)
698 continue;
699 CallInst *I = cast<CallInst>(AssumeVH);
700 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
701 "Got assumption for the wrong function!");
702
703 // Warning: This loop can end up being somewhat performance sensitive.
704 // We're running this loop once for each value queried, resulting in a
705 // runtime of ~O(#assumes * #values).
706
707 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
708 "must be an assume intrinsic");
709
710 Value *Arg = I->getArgOperand(0);
711
712 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
713 assert(BitWidth == 1 && "assume operand is not i1?");
714 Known.setAllOnes();
715 return;
716 }
717 if (match(Arg, m_Not(m_Specific(V))) &&
718 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
719 assert(BitWidth == 1 && "assume operand is not i1?");
720 Known.setAllZero();
721 return;
722 }
723
724 // The remaining tests are all recursive, so bail out if we hit the limit.
725 if (Depth == MaxAnalysisRecursionDepth)
726 continue;
727
728 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
729 if (!Cmp)
730 continue;
731
732 // We are attempting to compute known bits for the operands of an assume.
733 // Do not try to use other assumptions for those recursive calls because
734 // that can lead to mutual recursion and a compile-time explosion.
735 // An example of the mutual recursion: computeKnownBits can call
736 // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
737 // and so on.
738 Query QueryNoAC = Q;
739 QueryNoAC.AC = nullptr;
740
741 // Note that ptrtoint may change the bitwidth.
742 Value *A, *B;
743 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
744
745 CmpInst::Predicate Pred;
746 uint64_t C;
747 switch (Cmp->getPredicate()) {
748 default:
749 break;
750 case ICmpInst::ICMP_EQ:
751 // assume(v = a)
752 if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
753 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
754 KnownBits RHSKnown =
755 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
756 Known.Zero |= RHSKnown.Zero;
757 Known.One |= RHSKnown.One;
758 // assume(v & b = a)
759 } else if (match(Cmp,
760 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
761 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
762 KnownBits RHSKnown =
763 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
764 KnownBits MaskKnown =
765 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
766
767 // For those bits in the mask that are known to be one, we can propagate
768 // known bits from the RHS to V.
769 Known.Zero |= RHSKnown.Zero & MaskKnown.One;
770 Known.One |= RHSKnown.One & MaskKnown.One;
771 // assume(~(v & b) = a)
772 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
773 m_Value(A))) &&
774 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
775 KnownBits RHSKnown =
776 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
777 KnownBits MaskKnown =
778 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
779
780 // For those bits in the mask that are known to be one, we can propagate
781 // inverted known bits from the RHS to V.
782 Known.Zero |= RHSKnown.One & MaskKnown.One;
783 Known.One |= RHSKnown.Zero & MaskKnown.One;
784 // assume(v | b = a)
785 } else if (match(Cmp,
786 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
787 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
788 KnownBits RHSKnown =
789 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
790 KnownBits BKnown =
791 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
792
793 // For those bits in B that are known to be zero, we can propagate known
794 // bits from the RHS to V.
795 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
796 Known.One |= RHSKnown.One & BKnown.Zero;
797 // assume(~(v | b) = a)
798 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
799 m_Value(A))) &&
800 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
801 KnownBits RHSKnown =
802 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
803 KnownBits BKnown =
804 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
805
806 // For those bits in B that are known to be zero, we can propagate
807 // inverted known bits from the RHS to V.
808 Known.Zero |= RHSKnown.One & BKnown.Zero;
809 Known.One |= RHSKnown.Zero & BKnown.Zero;
810 // assume(v ^ b = a)
811 } else if (match(Cmp,
812 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
813 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
814 KnownBits RHSKnown =
815 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
816 KnownBits BKnown =
817 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
818
819 // For those bits in B that are known to be zero, we can propagate known
820 // bits from the RHS to V. For those bits in B that are known to be one,
821 // we can propagate inverted known bits from the RHS to V.
822 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
823 Known.One |= RHSKnown.One & BKnown.Zero;
824 Known.Zero |= RHSKnown.One & BKnown.One;
825 Known.One |= RHSKnown.Zero & BKnown.One;
826 // assume(~(v ^ b) = a)
827 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
828 m_Value(A))) &&
829 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
830 KnownBits RHSKnown =
831 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
832 KnownBits BKnown =
833 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
834
835 // For those bits in B that are known to be zero, we can propagate
836 // inverted known bits from the RHS to V. For those bits in B that are
837 // known to be one, we can propagate known bits from the RHS to V.
838 Known.Zero |= RHSKnown.One & BKnown.Zero;
839 Known.One |= RHSKnown.Zero & BKnown.Zero;
840 Known.Zero |= RHSKnown.Zero & BKnown.One;
841 Known.One |= RHSKnown.One & BKnown.One;
842 // assume(v << c = a)
843 } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
844 m_Value(A))) &&
845 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
846 KnownBits RHSKnown =
847 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
848
849 // For those bits in RHS that are known, we can propagate them to known
850 // bits in V shifted to the right by C.
851 RHSKnown.Zero.lshrInPlace(C);
852 Known.Zero |= RHSKnown.Zero;
853 RHSKnown.One.lshrInPlace(C);
854 Known.One |= RHSKnown.One;
855 // assume(~(v << c) = a)
856 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
857 m_Value(A))) &&
858 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
859 KnownBits RHSKnown =
860 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
861 // For those bits in RHS that are known, we can propagate them inverted
862 // to known bits in V shifted to the right by C.
863 RHSKnown.One.lshrInPlace(C);
864 Known.Zero |= RHSKnown.One;
865 RHSKnown.Zero.lshrInPlace(C);
866 Known.One |= RHSKnown.Zero;
867 // assume(v >> c = a)
868 } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
869 m_Value(A))) &&
870 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
871 KnownBits RHSKnown =
872 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
873 // For those bits in RHS that are known, we can propagate them to known
874 // bits in V shifted to the right by C.
875 Known.Zero |= RHSKnown.Zero << C;
876 Known.One |= RHSKnown.One << C;
877 // assume(~(v >> c) = a)
878 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
879 m_Value(A))) &&
880 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
881 KnownBits RHSKnown =
882 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
883 // For those bits in RHS that are known, we can propagate them inverted
884 // to known bits in V shifted to the right by C.
885 Known.Zero |= RHSKnown.One << C;
886 Known.One |= RHSKnown.Zero << C;
887 }
888 break;
889 case ICmpInst::ICMP_SGE:
890 // assume(v >=_s c) where c is non-negative
891 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
892 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
893 KnownBits RHSKnown =
894 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
895
896 if (RHSKnown.isNonNegative()) {
897 // We know that the sign bit is zero.
898 Known.makeNonNegative();
899 }
900 }
901 break;
902 case ICmpInst::ICMP_SGT:
903 // assume(v >_s c) where c is at least -1.
904 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
905 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
906 KnownBits RHSKnown =
907 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
908
909 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
910 // We know that the sign bit is zero.
911 Known.makeNonNegative();
912 }
913 }
914 break;
915 case ICmpInst::ICMP_SLE:
916 // assume(v <=_s c) where c is negative
917 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
918 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
919 KnownBits RHSKnown =
920 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
921
922 if (RHSKnown.isNegative()) {
923 // We know that the sign bit is one.
924 Known.makeNegative();
925 }
926 }
927 break;
928 case ICmpInst::ICMP_SLT:
929 // assume(v <_s c) where c is non-positive
930 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
931 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
932 KnownBits RHSKnown =
933 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
934
935 if (RHSKnown.isZero() || RHSKnown.isNegative()) {
936 // We know that the sign bit is one.
937 Known.makeNegative();
938 }
939 }
940 break;
941 case ICmpInst::ICMP_ULE:
942 // assume(v <=_u c)
943 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
944 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
945 KnownBits RHSKnown =
946 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
947
948 // Whatever high bits in c are zero are known to be zero.
949 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
950 }
951 break;
952 case ICmpInst::ICMP_ULT:
953 // assume(v <_u c)
954 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
955 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
956 KnownBits RHSKnown =
957 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
958
959 // If the RHS is known zero, then this assumption must be wrong (nothing
960 // is unsigned less than zero). Signal a conflict and get out of here.
961 if (RHSKnown.isZero()) {
962 Known.Zero.setAllBits();
963 Known.One.setAllBits();
964 break;
965 }
966
967 // Whatever high bits in c are zero are known to be zero (if c is a power
968 // of 2, then one more).
969 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
970 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
971 else
972 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
973 }
974 break;
975 }
976 }
977
978 // If assumptions conflict with each other or previous known bits, then we
979 // have a logical fallacy. It's possible that the assumption is not reachable,
980 // so this isn't a real bug. On the other hand, the program may have undefined
981 // behavior, or we might have a bug in the compiler. We can't assert/crash, so
982 // clear out the known bits, try to warn the user, and hope for the best.
983 if (Known.Zero.intersects(Known.One)) {
984 Known.resetAll();
985
986 if (Q.ORE)
987 Q.ORE->emit([&]() {
988 auto *CxtI = const_cast<Instruction *>(Q.CxtI);
989 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
990 CxtI)
991 << "Detected conflicting code assumptions. Program may "
992 "have undefined behavior, or compiler may have "
993 "internal error.";
994 });
995 }
996}
997
998/// Compute known bits from a shift operator, including those with a
999/// non-constant shift amount. Known is the output of this function. Known2 is a
1000/// pre-allocated temporary with the same bit width as Known and on return
1001/// contains the known bits of the shift value source. KF is an
1002/// operator-specific function that, given the known-bits and a shift amount,
1003/// computes the implied known-bits of the shift operator's result
1004/// for that shift amount. The results from calling KF are conservatively
1005/// combined for all permitted shift amounts.
1006static void computeKnownBitsFromShiftOperator(
1007 const Operator *I, const APInt &DemandedElts, KnownBits &Known,
1008 KnownBits &Known2, unsigned Depth, const Query &Q,
1009 function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
1010 unsigned BitWidth = Known.getBitWidth();
1011 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1012 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1013
1014 // Note: We cannot use Known.Zero.getLimitedValue() here, because if
1015 // BitWidth > 64 and any upper bits are known, we'll end up returning the
1016 // limit value (which implies all bits are known).
1017 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
1018 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
1019 bool ShiftAmtIsConstant = Known.isConstant();
1020 bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
1021
1022 if (ShiftAmtIsConstant) {
1023 Known = KF(Known2, Known);
1024
1025 // If the known bits conflict, this must be an overflowing left shift, so
1026 // the shift result is poison. We can return anything we want. Choose 0 for
1027 // the best folding opportunity.
1028 if (Known.hasConflict())
1029 Known.setAllZero();
1030
1031 return;
1032 }
1033
1034 // If the shift amount could be greater than or equal to the bit-width of the
1035 // LHS, the value could be poison, but bail out because the check below is
1036 // expensive.
1037 // TODO: Should we just carry on?
1038 if (MaxShiftAmtIsOutOfRange) {
1039 Known.resetAll();
1040 return;
1041 }
1042
1043 // It would be more-clearly correct to use the two temporaries for this
1044 // calculation. Reusing the APInts here to prevent unnecessary allocations.
1045 Known.resetAll();
1046
1047 // If we know the shifter operand is nonzero, we can sometimes infer more
1048 // known bits. However this is expensive to compute, so be lazy about it and
1049 // only compute it when absolutely necessary.
1050 Optional<bool> ShifterOperandIsNonZero;
1051
1052 // Early exit if we can't constrain any well-defined shift amount.
1053 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1054 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1055 ShifterOperandIsNonZero =
1056 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1057 if (!*ShifterOperandIsNonZero)
1058 return;
1059 }
1060
1061 Known.Zero.setAllBits();
1062 Known.One.setAllBits();
1063 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1064 // Combine the shifted known input bits only for those shift amounts
1065 // compatible with its known constraints.
1066 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1067 continue;
1068 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1069 continue;
1070 // If we know the shifter is nonzero, we may be able to infer more known
1071 // bits. This check is sunk down as far as possible to avoid the expensive
1072 // call to isKnownNonZero if the cheaper checks above fail.
1073 if (ShiftAmt == 0) {
1074 if (!ShifterOperandIsNonZero)
1075 ShifterOperandIsNonZero =
1076 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1077 if (*ShifterOperandIsNonZero)
1078 continue;
1079 }
1080
1081 Known = KnownBits::commonBits(
1082 Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1083 }
1084
1085 // If the known bits conflict, the result is poison. Return a 0 and hope the
1086 // caller can further optimize that.
1087 if (Known.hasConflict())
1088 Known.setAllZero();
1089}
1090
1091static void computeKnownBitsFromOperator(const Operator *I,
1092 const APInt &DemandedElts,
1093 KnownBits &Known, unsigned Depth,
1094 const Query &Q) {
1095 unsigned BitWidth = Known.getBitWidth();
1096
1097 KnownBits Known2(BitWidth);
1098 switch (I->getOpcode()) {
Step 1: Control jumps to 'case PHI:' at line 1438
1099 default: break;
1100 case Instruction::Load:
1101 if (MDNode *MD =
1102 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1103 computeKnownBitsFromRangeMetadata(*MD, Known);
1104 break;
1105 case Instruction::And: {
1106 // If either the LHS or the RHS are Zero, the result is zero.
1107 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1108 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1109
1110 Known &= Known2;
1111
1112 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1113 // here we handle the more general case of adding any odd number by
1114 // matching the form add(x, add(x, y)) where y is odd.
1115 // TODO: This could be generalized to clearing any bit set in y where the
1116 // following bit is known to be unset in y.
1117 Value *X = nullptr, *Y = nullptr;
1118 if (!Known.Zero[0] && !Known.One[0] &&
1119 match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1120 Known2.resetAll();
1121 computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1122 if (Known2.countMinTrailingOnes() > 0)
1123 Known.Zero.setBit(0);
1124 }
1125 break;
1126 }
1127 case Instruction::Or:
1128 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1129 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1130
1131 Known |= Known2;
1132 break;
1133 case Instruction::Xor:
1134 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1135 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1136
1137 Known ^= Known2;
1138 break;
1139 case Instruction::Mul: {
1140 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1141 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1142 Known, Known2, Depth, Q);
1143 break;
1144 }
1145 case Instruction::UDiv: {
1146 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1147 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1148 Known = KnownBits::udiv(Known, Known2);
1149 break;
1150 }
1151 case Instruction::Select: {
1152 const Value *LHS = nullptr, *RHS = nullptr;
1153 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1154 if (SelectPatternResult::isMinOrMax(SPF)) {
1155 computeKnownBits(RHS, Known, Depth + 1, Q);
1156 computeKnownBits(LHS, Known2, Depth + 1, Q);
1157 switch (SPF) {
1158 default:
1159 llvm_unreachable("Unhandled select pattern flavor!");
1160 case SPF_SMAX:
1161 Known = KnownBits::smax(Known, Known2);
1162 break;
1163 case SPF_SMIN:
1164 Known = KnownBits::smin(Known, Known2);
1165 break;
1166 case SPF_UMAX:
1167 Known = KnownBits::umax(Known, Known2);
1168 break;
1169 case SPF_UMIN:
1170 Known = KnownBits::umin(Known, Known2);
1171 break;
1172 }
1173 break;
1174 }
1175
1176 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1177 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1178
1179 // Only known if known in both the LHS and RHS.
1180 Known = KnownBits::commonBits(Known, Known2);
1181
1182 if (SPF == SPF_ABS) {
1183 // RHS from matchSelectPattern returns the negation part of abs pattern.
1184 // If the negate has an NSW flag we can assume the sign bit of the result
1185 // will be 0 because that makes abs(INT_MIN) undefined.
1186 if (match(RHS, m_Neg(m_Specific(LHS))) &&
1187 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
1188 Known.Zero.setSignBit();
1189 }
1190
1191 break;
1192 }
1193 case Instruction::FPTrunc:
1194 case Instruction::FPExt:
1195 case Instruction::FPToUI:
1196 case Instruction::FPToSI:
1197 case Instruction::SIToFP:
1198 case Instruction::UIToFP:
1199 break; // Can't work with floating point.
1200 case Instruction::PtrToInt:
1201 case Instruction::IntToPtr:
1202 // Fall through and handle them the same as zext/trunc.
1203 [[fallthrough]];
1204 case Instruction::ZExt:
1205 case Instruction::Trunc: {
1206 Type *SrcTy = I->getOperand(0)->getType();
1207
1208 unsigned SrcBitWidth;
1209 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1210 // which fall through here.
1211 Type *ScalarTy = SrcTy->getScalarType();
1212 SrcBitWidth = ScalarTy->isPointerTy() ?
1213 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1214 Q.DL.getTypeSizeInBits(ScalarTy);
1215
1216 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1217 Known = Known.anyextOrTrunc(SrcBitWidth);
1218 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1219 Known = Known.zextOrTrunc(BitWidth);
1220 break;
1221 }
1222 case Instruction::BitCast: {
1223 Type *SrcTy = I->getOperand(0)->getType();
1224 if (SrcTy->isIntOrPtrTy() &&
1225 // TODO: For now, not handling conversions like:
1226 // (bitcast i64 %x to <2 x i32>)
1227 !I->getType()->isVectorTy()) {
1228 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1229 break;
1230 }
1231
1232 // Handle cast from vector integer type to scalar or vector integer.
1233 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1234 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1235 !I->getType()->isIntOrIntVectorTy())
1236 break;
1237
1238 // Look through a cast from narrow vector elements to wider type.
1239 // Examples: v4i32 -> v2i64, v3i8 -> v24
1240 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1241 if (BitWidth % SubBitWidth == 0) {
1242 // Known bits are automatically intersected across demanded elements of a
1243 // vector. So for example, if a bit is computed as known zero, it must be
1244 // zero across all demanded elements of the vector.
1245 //
1246 // For this bitcast, each demanded element of the output is sub-divided
1247 // across a set of smaller vector elements in the source vector. To get
1248 // the known bits for an entire element of the output, compute the known
1249 // bits for each sub-element sequentially. This is done by shifting the
1250 // one-set-bit demanded elements parameter across the sub-elements for
1251 // consecutive calls to computeKnownBits. We are using the demanded
1252 // elements parameter as a mask operator.
1253 //
1254 // The known bits of each sub-element are then inserted into place
1255 // (dependent on endian) to form the full result of known bits.
1256 unsigned NumElts = DemandedElts.getBitWidth();
1257 unsigned SubScale = BitWidth / SubBitWidth;
1258 APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
1259 for (unsigned i = 0; i != NumElts; ++i) {
1260 if (DemandedElts[i])
1261 SubDemandedElts.setBit(i * SubScale);
1262 }
1263
1264 KnownBits KnownSrc(SubBitWidth);
1265 for (unsigned i = 0; i != SubScale; ++i) {
1266 computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
1267 Depth + 1, Q);
1268 unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
1269 Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
1270 }
1271 }
1272 break;
1273 }
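// Illustrative note (not part of ValueTracking.cpp): for
// `bitcast <4 x i8> %v to i32` on a little-endian target, SubBitWidth is 8 and
// SubScale is 4; iteration i computes the known bits of vector element i and
// inserts them at bit offset i * 8 of the result, whereas a big-endian target
// places element 0 in the highest byte (ShiftElt = SubScale - 1 - i).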
1274 case Instruction::SExt: {
1275 // Compute the bits in the result that are not present in the input.
1276 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1277
1278 Known = Known.trunc(SrcBitWidth);
1279 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1280 // If the sign bit of the input is known set or clear, then we know the
1281 // top bits of the result.
1282 Known = Known.sext(BitWidth);
1283 break;
1284 }
1285 case Instruction::Shl: {
1286 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1287 auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1288 KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1289 // If this shift has "nsw" keyword, then the result is either a poison
1290 // value or has the same sign bit as the first operand.
1291 if (NSW) {
1292 if (KnownVal.Zero.isSignBitSet())
1293 Result.Zero.setSignBit();
1294 if (KnownVal.One.isSignBitSet())
1295 Result.One.setSignBit();
1296 }
1297 return Result;
1298 };
1299 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1300 KF);
1301 // Trailing zeros of a left-shifted constant never decrease.
1302 const APInt *C;
1303 if (match(I->getOperand(0), m_APInt(C)))
1304 Known.Zero.setLowBits(C->countTrailingZeros());
1305 break;
1306 }
1307 case Instruction::LShr: {
1308 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1309 return KnownBits::lshr(KnownVal, KnownAmt);
1310 };
1311 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1312 KF);
1313 // Leading zeros of a right-shifted constant never decrease.
1314 const APInt *C;
1315 if (match(I->getOperand(0), m_APInt(C)))
1316 Known.Zero.setHighBits(C->countLeadingZeros());
1317 break;
1318 }
1319 case Instruction::AShr: {
1320 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1321 return KnownBits::ashr(KnownVal, KnownAmt);
1322 };
1323 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1324 KF);
1325 break;
1326 }
1327 case Instruction::Sub: {
1328 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1329 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1330 DemandedElts, Known, Known2, Depth, Q);
1331 break;
1332 }
1333 case Instruction::Add: {
1334 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1335 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1336 DemandedElts, Known, Known2, Depth, Q);
1337 break;
1338 }
1339 case Instruction::SRem:
1340 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1341 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1342 Known = KnownBits::srem(Known, Known2);
1343 break;
1344
1345 case Instruction::URem:
1346 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1347 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1348 Known = KnownBits::urem(Known, Known2);
1349 break;
1350 case Instruction::Alloca:
1351 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1352 break;
1353 case Instruction::GetElementPtr: {
1354 // Analyze all of the subscripts of this getelementptr instruction
1355 // to determine if we can prove known low zero bits.
1356 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1357 // Accumulate the constant indices in a separate variable
1358 // to minimize the number of calls to computeForAddSub.
1359 APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1360
1361 gep_type_iterator GTI = gep_type_begin(I);
1362 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1363 // TrailZ can only become smaller, short-circuit if we hit zero.
1364 if (Known.isUnknown())
1365 break;
1366
1367 Value *Index = I->getOperand(i);
1368
1369 // Handle case when index is zero.
1370 Constant *CIndex = dyn_cast<Constant>(Index);
1371 if (CIndex && CIndex->isZeroValue())
1372 continue;
1373
1374 if (StructType *STy = GTI.getStructTypeOrNull()) {
1375 // Handle struct member offset arithmetic.
1376
1377 assert(CIndex &&
1378        "Access to structure field must be known at compile time");
1379
1380 if (CIndex->getType()->isVectorTy())
1381 Index = CIndex->getSplatValue();
1382
1383 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1384 const StructLayout *SL = Q.DL.getStructLayout(STy);
1385 uint64_t Offset = SL->getElementOffset(Idx);
1386 AccConstIndices += Offset;
1387 continue;
1388 }
1389
1390 // Handle array index arithmetic.
1391 Type *IndexedTy = GTI.getIndexedType();
1392 if (!IndexedTy->isSized()) {
1393 Known.resetAll();
1394 break;
1395 }
1396
1397 unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1398 KnownBits IndexBits(IndexBitWidth);
1399 computeKnownBits(Index, IndexBits, Depth + 1, Q);
1400 TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1401 uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1402 KnownBits ScalingFactor(IndexBitWidth);
1403 // Multiply by current sizeof type.
1404 // &A[i] == A + i * sizeof(*A[i]).
1405 if (IndexTypeSize.isScalable()) {
1406 // For scalable types the only thing we know about sizeof is
1407 // that this is a multiple of the minimum size.
1408 ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1409 } else if (IndexBits.isConstant()) {
1410 APInt IndexConst = IndexBits.getConstant();
1411 APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1412 IndexConst *= ScalingFactor;
1413 AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1414 continue;
1415 } else {
1416 ScalingFactor =
1417 KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1418 }
1419 IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
1420
1421 // If the offsets have a different width from the pointer, according
1422 // to the language reference we need to sign-extend or truncate them
1423 // to the width of the pointer.
1424 IndexBits = IndexBits.sextOrTrunc(BitWidth);
1425
1426 // Note that inbounds does *not* guarantee nsw for the addition, as only
1427 // the offset is signed, while the base address is unsigned.
1428 Known = KnownBits::computeForAddSub(
1429 /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1430 }
1431 if (!Known.isUnknown() && !AccConstIndices.isZero()) {
1432 KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1433 Known = KnownBits::computeForAddSub(
1434 /*Add=*/true, /*NSW=*/false, Known, Index);
1435 }
1436 break;
1437 }
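// Worked example (illustration only, assuming a typical 64-bit data layout):
// for `getelementptr inbounds {i32, i64}, ptr %p, i64 %i, i32 1`, the array
// index %i is scaled by sizeof({i32, i64}) = 16, so its contribution has at
// least four trailing zero bits and is added to the known bits of %p inside
// the loop, while the struct field index contributes the constant offset 8 to
// AccConstIndices, which is added once after the loop via computeForAddSub.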
1438 case Instruction::PHI: {
1439 const PHINode *P = cast<PHINode>(I);
2. 'I' is a 'CastReturnType'
1440 BinaryOperator *BO = nullptr;
1441 Value *R = nullptr, *L = nullptr;
1442 if (matchSimpleRecurrence(P, BO, R, L)) {
3. Value assigned to 'R'
4. Assuming the condition is true
5. Taking true branch
1443 // Handle the case of a simple two-predecessor recurrence PHI.
1444 // There's a lot more that could theoretically be done here, but
1445 // this is sufficient to catch some interesting cases.
1446 unsigned Opcode = BO->getOpcode();
1447
1448 // If this is a shift recurrence, we know the bits being shifted in.
1449 // We can combine that with information about the start value of the
1450 // recurrence to conclude facts about the result.
1451 if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
6. Assuming 'Opcode' is not equal to LShr
7. Assuming 'Opcode' is not equal to AShr
1452 Opcode == Instruction::Shl) &&
8. Assuming 'Opcode' is not equal to Shl
1453 BO->getOperand(0) == I) {
1454
1455 // We have matched a recurrence of the form:
1456 // %iv = [R, %entry], [%iv.next, %backedge]
1457 // %iv.next = shift_op %iv, L
1458
1459 // Recurse with the phi context to avoid concern about whether facts
1460 // inferred hold at original context instruction. TODO: It may be
1461 // correct to use the original context. IF warranted, explore and
1462 // add sufficient tests to cover.
1463 Query RecQ = Q;
1464 RecQ.CxtI = P;
1465 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1466 switch (Opcode) {
1467 case Instruction::Shl:
1468 // A shl recurrence will only increase the trailing zeros
1469 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1470 break;
1471 case Instruction::LShr:
1472 // A lshr recurrence will preserve the leading zeros of the
1473 // start value
1474 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1475 break;
1476 case Instruction::AShr:
1477 // An ashr recurrence will extend the initial sign bit
1478 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1479 Known.One.setHighBits(Known2.countMinLeadingOnes());
1480 break;
1481 };
1482 }
1483
1484 // Check for operations that have the property that if
1485 // both their operands have low zero bits, the result
1486 // will have low zero bits.
1487 if (Opcode == Instruction::Add ||
9. Assuming 'Opcode' is not equal to Add
14. Taking true branch
1488 Opcode == Instruction::Sub ||
10. Assuming 'Opcode' is not equal to Sub
1489 Opcode == Instruction::And ||
11. Assuming 'Opcode' is not equal to And
1490 Opcode == Instruction::Or ||
12. Assuming 'Opcode' is not equal to Or
1491 Opcode == Instruction::Mul) {
13. Assuming 'Opcode' is equal to Mul
1492 // Change the context instruction to the "edge" that flows into the
1493 // phi. This is important because that is where the value is actually
1494 // "evaluated" even though it is used later somewhere else. (see also
1495 // D69571).
1496 Query RecQ = Q;
1497
1498 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
15. Assuming pointer value is null
16. '?' condition is true
1499 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1500 Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1501
1502 // Ok, we have a PHI of the form L op= R. Check for low
1503 // zero bits.
1504 RecQ.CxtI = RInst;
1505 computeKnownBits(R, Known2, Depth + 1, RecQ);
17. Passing null pointer value via 1st parameter 'V'
18. Calling 'computeKnownBits'
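Note on the path above (editorial summary, not an analyzer event): step 3
records the value stored into 'R' by matchSimpleRecurrence(), step 15 assumes
that value is null, and steps 17-18 forward it as the first parameter 'V' of
computeKnownBits(R, Known2, Depth + 1, RecQ), which is the call on which the
null-pointer warning in this report is raised.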
1506
1507 // We need to take the minimum number of known bits
1508 KnownBits Known3(BitWidth);
1509 RecQ.CxtI = LInst;
1510 computeKnownBits(L, Known3, Depth + 1, RecQ);
1511
1512 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1513 Known3.countMinTrailingZeros()));
1514
1515 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1516 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1517 // If initial value of recurrence is nonnegative, and we are adding
1518 // a nonnegative number with nsw, the result can only be nonnegative
1519 // or poison value regardless of the number of times we execute the
1520 // add in phi recurrence. If initial value is negative and we are
1521 // adding a negative number with nsw, the result can only be
1522 // negative or poison value. Similar arguments apply to sub and mul.
1523 //
1524 // (add non-negative, non-negative) --> non-negative
1525 // (add negative, negative) --> negative
1526 if (Opcode == Instruction::Add) {
1527 if (Known2.isNonNegative() && Known3.isNonNegative())
1528 Known.makeNonNegative();
1529 else if (Known2.isNegative() && Known3.isNegative())
1530 Known.makeNegative();
1531 }
1532
1533 // (sub nsw non-negative, negative) --> non-negative
1534 // (sub nsw negative, non-negative) --> negative
1535 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1536 if (Known2.isNonNegative() && Known3.isNegative())
1537 Known.makeNonNegative();
1538 else if (Known2.isNegative() && Known3.isNonNegative())
1539 Known.makeNegative();
1540 }
1541
1542 // (mul nsw non-negative, non-negative) --> non-negative
1543 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1544 Known3.isNonNegative())
1545 Known.makeNonNegative();
1546 }
1547
1548 break;
1549 }
1550 }
1551
1552 // Unreachable blocks may have zero-operand PHI nodes.
1553 if (P->getNumIncomingValues() == 0)
1554 break;
1555
1556 // Otherwise take the unions of the known bit sets of the operands,
1557 // taking conservative care to avoid excessive recursion.
1558 if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1560 // Skip if every incoming value refers back to the PHI itself.
1560 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1561 break;
1562
1563 Known.Zero.setAllBits();
1564 Known.One.setAllBits();
1565 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1566 Value *IncValue = P->getIncomingValue(u);
1567 // Skip direct self references.
1568 if (IncValue == P) continue;
1569
1570 // Change the context instruction to the "edge" that flows into the
1571 // phi. This is important because that is where the value is actually
1572 // "evaluated" even though it is used later somewhere else. (see also
1573 // D69571).
1574 Query RecQ = Q;
1575 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1576
1577 Known2 = KnownBits(BitWidth);
1578
1579 // Recurse, but cap the recursion to one level, because we don't
1580 // want to waste time spinning around in loops.
1581 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1582
1583 // If this failed, see if we can use a conditional branch into the phi
1584 // to help us determine the range of the value.
1585 if (Known2.isUnknown()) {
1586 ICmpInst::Predicate Pred;
1587 const APInt *RHSC;
1588 BasicBlock *TrueSucc, *FalseSucc;
1589 // TODO: Use RHS Value and compute range from its known bits.
1590 if (match(RecQ.CxtI,
1591 m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)),
1592 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
1593 // Check for cases of duplicate successors.
1594 if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) {
1595 // If we're using the false successor, invert the predicate.
1596 if (FalseSucc == P->getParent())
1597 Pred = CmpInst::getInversePredicate(Pred);
1598
1599 switch (Pred) {
1600 case CmpInst::Predicate::ICMP_EQ:
1601 Known2 = KnownBits::makeConstant(*RHSC);
1602 break;
1603 case CmpInst::Predicate::ICMP_ULE:
1604 Known2.Zero.setHighBits(RHSC->countLeadingZeros());
1605 break;
1606 case CmpInst::Predicate::ICMP_ULT:
1607 Known2.Zero.setHighBits((*RHSC - 1).countLeadingZeros());
1608 break;
1609 default:
1610 // TODO - add additional integer predicate handling.
1611 break;
1612 }
1613 }
1614 }
1615 }
1616
1617 Known = KnownBits::commonBits(Known, Known2);
1618 // If all bits have been ruled out, there's no need to check
1619 // more operands.
1620 if (Known.isUnknown())
1621 break;
1622 }
1623 }
1624 break;
1625 }
1626 case Instruction::Call:
1627 case Instruction::Invoke:
1628 // If range metadata is attached to this call, set known bits from that,
1629 // and then intersect with known bits based on other properties of the
1630 // function.
1631 if (MDNode *MD =
1632 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1633 computeKnownBitsFromRangeMetadata(*MD, Known);
1634 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1635 computeKnownBits(RV, Known2, Depth + 1, Q);
1636 Known.Zero |= Known2.Zero;
1637 Known.One |= Known2.One;
1638 }
1639 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1640 switch (II->getIntrinsicID()) {
1641 default: break;
1642 case Intrinsic::abs: {
1643 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1644 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1645 Known = Known2.abs(IntMinIsPoison);
1646 break;
1647 }
1648 case Intrinsic::bitreverse:
1649 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1650 Known.Zero |= Known2.Zero.reverseBits();
1651 Known.One |= Known2.One.reverseBits();
1652 break;
1653 case Intrinsic::bswap:
1654 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1655 Known.Zero |= Known2.Zero.byteSwap();
1656 Known.One |= Known2.One.byteSwap();
1657 break;
1658 case Intrinsic::ctlz: {
1659 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1660 // If we have a known 1, its position is our upper bound.
1661 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1662 // If this call is poison for 0 input, the result will be less than 2^n.
1663 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1664 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1665 unsigned LowBits = Log2_32(PossibleLZ)+1;
1666 Known.Zero.setBitsFrom(LowBits);
1667 break;
1668 }
1669 case Intrinsic::cttz: {
1670 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1671 // If we have a known 1, its position is our upper bound.
1672 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1673 // If this call is poison for 0 input, the result will be less than 2^n.
1674 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1675 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1676 unsigned LowBits = Log2_32(PossibleTZ)+1;
1677 Known.Zero.setBitsFrom(LowBits);
1678 break;
1679 }
1680 case Intrinsic::ctpop: {
1681 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1682 // We can bound the space the count needs. Also, bits known to be zero
1683 // can't contribute to the population.
1684 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1685 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1686 Known.Zero.setBitsFrom(LowBits);
1687 // TODO: we could bound KnownOne using the lower bound on the number
1688 // of bits which might be set provided by popcnt KnownOne2.
1689 break;
1690 }
1691 case Intrinsic::fshr:
1692 case Intrinsic::fshl: {
1693 const APInt *SA;
1694 if (!match(I->getOperand(2), m_APInt(SA)))
1695 break;
1696
1697 // Normalize to funnel shift left.
1698 uint64_t ShiftAmt = SA->urem(BitWidth);
1699 if (II->getIntrinsicID() == Intrinsic::fshr)
1700 ShiftAmt = BitWidth - ShiftAmt;
1701
1702 KnownBits Known3(BitWidth);
1703 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1704 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1705
1706 Known.Zero =
1707 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1708 Known.One =
1709 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1710 break;
1711 }
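// Illustrative note (not part of ValueTracking.cpp): `fshl i8 %a, i8 %b, i8 3`
// evaluates to (%a << 3) | (%b >> 5), so the known bits of %a slide up by the
// shift amount and the top three known bits of %b fill the low three result
// bits; `fshr` with amount 3 is handled by rewriting it as an fshl by
// 8 - 3 = 5, which is exactly the normalization performed above.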
1712 case Intrinsic::uadd_sat:
1713 case Intrinsic::usub_sat: {
1714 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1715 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1716 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1717
1718 // Add: Leading ones of either operand are preserved.
1719 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1720 // as leading zeros in the result.
1721 unsigned LeadingKnown;
1722 if (IsAdd)
1723 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1724 Known2.countMinLeadingOnes());
1725 else
1726 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1727 Known2.countMinLeadingOnes());
1728
1729 Known = KnownBits::computeForAddSub(
1730 IsAdd, /* NSW */ false, Known, Known2);
1731
1732 // We select between the operation result and all-ones/zero
1733 // respectively, so we can preserve known ones/zeros.
1734 if (IsAdd) {
1735 Known.One.setHighBits(LeadingKnown);
1736 Known.Zero.clearAllBits();
1737 } else {
1738 Known.Zero.setHighBits(LeadingKnown);
1739 Known.One.clearAllBits();
1740 }
1741 break;
1742 }
1743 case Intrinsic::umin:
1744 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1745 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1746 Known = KnownBits::umin(Known, Known2);
1747 break;
1748 case Intrinsic::umax:
1749 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1750 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1751 Known = KnownBits::umax(Known, Known2);
1752 break;
1753 case Intrinsic::smin:
1754 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1755 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1756 Known = KnownBits::smin(Known, Known2);
1757 break;
1758 case Intrinsic::smax:
1759 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1760 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1761 Known = KnownBits::smax(Known, Known2);
1762 break;
1763 case Intrinsic::x86_sse42_crc32_64_64:
1764 Known.Zero.setBitsFrom(32);
1765 break;
1766 case Intrinsic::riscv_vsetvli:
1767 case Intrinsic::riscv_vsetvlimax:
1768 // Assume that VL output is positive and would fit in an int32_t.
1769 // TODO: VLEN might be capped at 16 bits in a future V spec update.
1770 if (BitWidth >= 32)
1771 Known.Zero.setBitsFrom(31);
1772 break;
1773 case Intrinsic::vscale: {
1774 if (!II->getParent() || !II->getFunction() ||
1775 !II->getFunction()->hasFnAttribute(Attribute::VScaleRange))
1776 break;
1777
1778 auto Attr = II->getFunction()->getFnAttribute(Attribute::VScaleRange);
1779 Optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
1780
1781 if (!VScaleMax)
1782 break;
1783
1784 unsigned VScaleMin = Attr.getVScaleRangeMin();
1785
1786 // If vscale min = max then we know the exact value at compile time
1787 // and hence we know the exact bits.
1788 if (VScaleMin == VScaleMax) {
1789 Known.One = VScaleMin;
1790 Known.Zero = VScaleMin;
1791 Known.Zero.flipAllBits();
1792 break;
1793 }
1794
1795 unsigned FirstZeroHighBit = 32 - countLeadingZeros(*VScaleMax);
1796 if (FirstZeroHighBit < BitWidth)
1797 Known.Zero.setBitsFrom(FirstZeroHighBit);
1798
1799 break;
1800 }
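// Illustrative note (not part of ValueTracking.cpp): with `vscale_range(4,4)`
// the call is known to return exactly 4, so Known.One = 4 and Known.Zero = ~4;
// with `vscale_range(1,8)` only the upper bound is usable:
// countLeadingZeros(8) = 28 gives FirstZeroHighBit = 4, so bits 4 and above
// are known zero (the result is known to be below 16).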
1801 }
1802 }
1803 break;
1804 case Instruction::ShuffleVector: {
1805 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1806 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1807 if (!Shuf) {
1808 Known.resetAll();
1809 return;
1810 }
1811 // For undef elements, we don't know anything about the common state of
1812 // the shuffle result.
1813 APInt DemandedLHS, DemandedRHS;
1814 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1815 Known.resetAll();
1816 return;
1817 }
1818 Known.One.setAllBits();
1819 Known.Zero.setAllBits();
1820 if (!!DemandedLHS) {
1821 const Value *LHS = Shuf->getOperand(0);
1822 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1823 // If we don't know any bits, early out.
1824 if (Known.isUnknown())
1825 break;
1826 }
1827 if (!!DemandedRHS) {
1828 const Value *RHS = Shuf->getOperand(1);
1829 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1830 Known = KnownBits::commonBits(Known, Known2);
1831 }
1832 break;
1833 }
1834 case Instruction::InsertElement: {
1835 const Value *Vec = I->getOperand(0);
1836 const Value *Elt = I->getOperand(1);
1837 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1838 // Early out if the index is non-constant or out-of-range.
1839 unsigned NumElts = DemandedElts.getBitWidth();
1840 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1841 Known.resetAll();
1842 return;
1843 }
1844 Known.One.setAllBits();
1845 Known.Zero.setAllBits();
1846 unsigned EltIdx = CIdx->getZExtValue();
1847 // Do we demand the inserted element?
1848 if (DemandedElts[EltIdx]) {
1849 computeKnownBits(Elt, Known, Depth + 1, Q);
1850 // If we don't know any bits, early out.
1851 if (Known.isUnknown())
1852 break;
1853 }
1854 // We don't need the base vector element that has been inserted.
1855 APInt DemandedVecElts = DemandedElts;
1856 DemandedVecElts.clearBit(EltIdx);
1857 if (!!DemandedVecElts) {
1858 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1859 Known = KnownBits::commonBits(Known, Known2);
1860 }
1861 break;
1862 }
1863 case Instruction::ExtractElement: {
1864 // Look through extract element. If the index is non-constant or
1865 // out-of-range demand all elements, otherwise just the extracted element.
1866 const Value *Vec = I->getOperand(0);
1867 const Value *Idx = I->getOperand(1);
1868 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1869 if (isa<ScalableVectorType>(Vec->getType())) {
1870 // FIXME: there's probably *something* we can do with scalable vectors
1871 Known.resetAll();
1872 break;
1873 }
1874 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1875 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1876 if (CIdx && CIdx->getValue().ult(NumElts))
1877 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1878 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1879 break;
1880 }
1881 case Instruction::ExtractValue:
1882 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1883 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1884 if (EVI->getNumIndices() != 1) break;
1885 if (EVI->getIndices()[0] == 0) {
1886 switch (II->getIntrinsicID()) {
1887 default: break;
1888 case Intrinsic::uadd_with_overflow:
1889 case Intrinsic::sadd_with_overflow:
1890 computeKnownBitsAddSub(true, II->getArgOperand(0),
1891 II->getArgOperand(1), false, DemandedElts,
1892 Known, Known2, Depth, Q);
1893 break;
1894 case Intrinsic::usub_with_overflow:
1895 case Intrinsic::ssub_with_overflow:
1896 computeKnownBitsAddSub(false, II->getArgOperand(0),
1897 II->getArgOperand(1), false, DemandedElts,
1898 Known, Known2, Depth, Q);
1899 break;
1900 case Intrinsic::umul_with_overflow:
1901 case Intrinsic::smul_with_overflow:
1902 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1903 DemandedElts, Known, Known2, Depth, Q);
1904 break;
1905 }
1906 }
1907 }
1908 break;
1909 case Instruction::Freeze:
1910 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1911 Depth + 1))
1912 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1913 break;
1914 }
1915}
1916
1917/// Determine which bits of V are known to be either zero or one and return
1918/// them.
1919KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1920 unsigned Depth, const Query &Q) {
1921 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1922 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1923 return Known;
1924}
1925
1926/// Determine which bits of V are known to be either zero or one and return
1927/// them.
1928KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1929 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1930 computeKnownBits(V, Known, Depth, Q);
1931 return Known;
1932}
1933
1934/// Determine which bits of V are known to be either zero or one and return
1935/// them in the Known bit set.
1936///
1937/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1938/// we cannot optimize based on the assumption that it is zero without changing
1939/// it to be an explicit zero. If we don't change it to zero, other code could
1940 /// be optimized based on the contradictory assumption that it is non-zero.
1941/// Because instcombine aggressively folds operations with undef args anyway,
1942/// this won't lose us code quality.
1943///
1944/// This function is defined on values with integer type, values with pointer
1945 /// type, and vectors of integers. In the case where V is a vector, the known
1946 /// zero and known one values are the same width as the vector element, and a
1947 /// bit is set only if it is true for all of the demanded elements in the
1948 /// vector specified by DemandedElts.
1949void computeKnownBits(const Value *V, const APInt &DemandedElts,
1950 KnownBits &Known, unsigned Depth, const Query &Q) {
1951 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1952 // No demanded elts or V is a scalable vector, better to assume we don't
1953 // know anything.
1954 Known.resetAll();
1955 return;
1956 }
1957
1958 assert(V && "No Value?");
1959 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1960
1961#ifndef NDEBUG
1962 Type *Ty = V->getType();
1963 unsigned BitWidth = Known.getBitWidth();
1964
1965 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1966        "Not integer or pointer type!");
1967
1968 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1969 assert(
1970     FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1971     "DemandedElt width should equal the fixed vector number of elements");
1972 } else {
1973 assert(DemandedElts == APInt(1, 1) &&
1974        "DemandedElt width should be 1 for scalars");
1975 }
1976
1977 Type *ScalarTy = Ty->getScalarType();
1978 if (ScalarTy->isPointerTy()) {
1979 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1980        "V and Known should have same BitWidth");
1981 } else {
1982 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1983        "V and Known should have same BitWidth");
1984 }
1985#endif
1986
1987 const APInt *C;
1988 if (match(V, m_APInt(C))) {
1989 // We know all of the bits for a scalar constant or a splat vector constant!
1990 Known = KnownBits::makeConstant(*C);
1991 return;
1992 }
1993 // Null and aggregate-zero are all-zeros.
1994 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1995 Known.setAllZero();
1996 return;
1997 }
1998 // Handle a constant vector by taking the intersection of the known bits of
1999 // each element.
2000 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
2001 // We know that CDV must be a vector of integers. Take the intersection of
2002 // each element.
2003 Known.Zero.setAllBits(); Known.One.setAllBits();
2004 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
2005 if (!DemandedElts[i])
2006 continue;
2007 APInt Elt = CDV->getElementAsAPInt(i);
2008 Known.Zero &= ~Elt;
2009 Known.One &= Elt;
2010 }
2011 return;
2012 }
2013
2014 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
2015 // We know that CV must be a vector of integers. Take the intersection of
2016 // each element.
2017 Known.Zero.setAllBits(); Known.One.setAllBits();
2018 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
2019 if (!DemandedElts[i])
2020 continue;
2021 Constant *Element = CV->getAggregateElement(i);
2022 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
2023 if (!ElementCI) {
2024 Known.resetAll();
2025 return;
2026 }
2027 const APInt &Elt = ElementCI->getValue();
2028 Known.Zero &= ~Elt;
2029 Known.One &= Elt;
2030 }
2031 return;
2032 }
2033
2034 // Start out not knowing anything.
2035 Known.resetAll();
2036
2037 // We can't imply anything about undefs.
2038 if (isa<UndefValue>(V))
2039 return;
2040
2041 // There's no point in looking through other users of ConstantData for
2042 // assumptions. Confirm that we've handled them all.
2043 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
2044
2045 // All recursive calls that increase depth must come after this.
2046 if (Depth == MaxAnalysisRecursionDepth)
2047 return;
2048
2049 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2050 // the bits of its aliasee.
2051 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2052 if (!GA->isInterposable())
2053 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2054 return;
2055 }
2056
2057 if (const Operator *I = dyn_cast<Operator>(V))
2058 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2059
2060 // Aligned pointers have trailing zeros - refine Known.Zero set
2061 if (isa<PointerType>(V->getType())) {
2062 Align Alignment = V->getPointerAlignment(Q.DL);
2063 Known.Zero.setLowBits(Log2(Alignment));
2064 }
2065
2066 // computeKnownBitsFromAssume strictly refines Known.
2067 // Therefore, we run them after computeKnownBitsFromOperator.
2068
2069 // Check whether a nearby assume intrinsic can determine some known bits.
2070 computeKnownBitsFromAssume(V, Known, Depth, Q);
2071
2072 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2073}
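// Usage sketch (illustration only; the exported overloads declared in
// ValueTracking.h wrap the Query-based helpers above):
//   KnownBits KB = llvm::computeKnownBits(V, DL);  // DL is the module's DataLayout
//   if (KB.countMinTrailingZeros() >= 3)
//     ;  // V is known to be a multiple of 8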
2074
2075 /// Try to detect a recurrence in which the value of the induction variable
2076 /// is always a power of two (or zero).
2077static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
2078 unsigned Depth, Query &Q) {
2079 BinaryOperator *BO = nullptr;
2080 Value *Start = nullptr, *Step = nullptr;
2081 if (!matchSimpleRecurrence(PN, BO, Start, Step))
2082 return false;
2083
2084 // Initial value must be a power of two.
2085 for (const Use &U : PN->operands()) {
2086 if (U.get() == Start) {
2087 // Initial value comes from a different BB, need to adjust context
2088 // instruction for analysis.
2089 Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
2090 if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q))
2091 return false;
2092 }
2093 }
2094
2095 // Except for Mul, the induction variable must be on the left side of the
2096 // increment expression, otherwise its value can be arbitrary.
2097 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step)
2098 return false;
2099
2100 Q.CxtI = BO->getParent()->getTerminator();
2101 switch (BO->getOpcode()) {
2102 case Instruction::Mul:
2103 // Power of two is closed under multiplication.
2104 return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
2105 Q.IIQ.hasNoSignedWrap(BO)) &&
2106 isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q);
2107 case Instruction::SDiv:
2108 // Start value must not be signmask for signed division, so simply being a
2109 // power of two is not sufficient, and it has to be a constant.
2110 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2111 return false;
2112 [[fallthrough]];
2113 case Instruction::UDiv:
2114 // Divisor must be a power of two.
2115 // If OrZero is false, we cannot guarantee that the induction variable is
2116 // non-zero after the division (same for Shr), unless it is an exact division.
2117 return (OrZero || Q.IIQ.isExact(BO)) &&
2118 isKnownToBeAPowerOfTwo(Step, false, Depth, Q);
2119 case Instruction::Shl:
2120 return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
2121 case Instruction::AShr:
2122 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2123 return false;
2124 [[fallthrough]];
2125 case Instruction::LShr:
2126 return OrZero || Q.IIQ.isExact(BO);
2127 default:
2128 return false;
2129 }
2130}
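// Illustrative note (not part of ValueTracking.cpp): a recurrence such as
//   %iv      = phi i32 [ 8, %entry ], [ %iv.next, %loop ]
//   %iv.next = shl nuw i32 %iv, 1
// is accepted here: the start value 8 is a power of two, the induction
// variable is the left operand, and the `shl nuw` step can only produce
// further powers of two (or poison), so the PHI is reported as a power of two.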
2131
2132/// Return true if the given value is known to have exactly one
2133/// bit set when defined. For vectors return true if every element is known to
2134/// be a power of two when defined. Supports values with integer or pointer
2135/// types and vectors of integers.
2136bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2137 const Query &Q) {
2138 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2139
2140 // Attempt to match against constants.
2141 if (OrZero && match(V, m_Power2OrZero()))
2142 return true;
2143 if (match(V, m_Power2()))
2144 return true;
2145
2146 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2147 // it is shifted off the end then the result is undefined.
2148 if (match(V, m_Shl(m_One(), m_Value())))
2149 return true;
2150
2151 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2152 // the bottom. If it is shifted off the bottom then the result is undefined.
2153 if (match(V, m_LShr(m_SignMask(), m_Value())))
2154 return true;
2155
2156 // The remaining tests are all recursive, so bail out if we hit the limit.
2157 if (Depth++ == MaxAnalysisRecursionDepth)
2158 return false;
2159
2160 Value *X = nullptr, *Y = nullptr;
2161 // A shift left or a logical shift right of a power of two is a power of two
2162 // or zero.
2163 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2164 match(V, m_LShr(m_Value(X), m_Value()))))
2165 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2166
2167 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2168 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2169
2170 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2171 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2172 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2173
2174 // Peek through min/max.
2175 if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
2176 return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
2177 isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
2178 }
2179
2180 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2181 // A power of two and'd with anything is a power of two or zero.
2182 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2183 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2184 return true;
2185 // X & (-X) is always a power of two or zero.
2186 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2187 return true;
2188 return false;
2189 }
2190
2191 // Adding a power-of-two or zero to the same power-of-two or zero yields
2192 // either the original power-of-two, a larger power-of-two or zero.
2193 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2194 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2195 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2196 Q.IIQ.hasNoSignedWrap(VOBO)) {
2197 if (match(X, m_And(m_Specific(Y), m_Value())) ||
2198 match(X, m_And(m_Value(), m_Specific(Y))))
2199 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2200 return true;
2201 if (match(Y, m_And(m_Specific(X), m_Value())) ||
2202 match(Y, m_And(m_Value(), m_Specific(X))))
2203 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2204 return true;
2205
2206 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2207 KnownBits LHSBits(BitWidth);
2208 computeKnownBits(X, LHSBits, Depth, Q);
2209
2210 KnownBits RHSBits(BitWidth);
2211 computeKnownBits(Y, RHSBits, Depth, Q);
2212 // If i8 V is a power of two or zero:
2213 // ZeroBits: 1 1 1 0 1 1 1 1
2214 // ~ZeroBits: 0 0 0 1 0 0 0 0
2215 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2216 // If OrZero isn't set, we cannot give back a zero result.
2217 // Make sure either the LHS or RHS has a bit set.
2218 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2219 return true;
2220 }
2221 }
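// Illustrative note (not part of ValueTracking.cpp): if the known bits show
// that X and Y can each only have bit 4 set (both are 0 or 16), then
// ~(LHSBits.Zero & RHSBits.Zero) == 0b10000, a single bit, and X + Y can only
// be 0, 16 or 32, i.e. a power of two whenever OrZero holds or one operand is
// known non-zero.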
2222
2223 // A PHI node is power of two if all incoming values are power of two, or if
2224 // it is an induction variable where in each step its value is a power of two.
2225 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2226 Query RecQ = Q;
2227
2228 // Check if it is an induction variable and always power of two.
2229 if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
2230 return true;
2231
2232 // Recursively check all incoming values. Limit recursion to 2 levels, so
2233 // that search complexity is limited to number of operands^2.
2234 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2235 return llvm::all_of(PN->operands(), [&](const Use &U) {
2236 // Value is power of 2 if it is coming from PHI node itself by induction.
2237 if (U.get() == PN)
2238 return true;
2239
2240 // Change the context instruction to the incoming block where it is
2241 // evaluated.
2242 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2243 return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
2244 });
2245 }
2246
2247 // An exact divide or right shift can only shift off zero bits, so the result
2248 // is a power of two only if the first operand is a power of two and not
2249 // copying a sign bit (sdiv int_min, 2).
2250 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2251 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2252 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2253 Depth, Q);
2254 }
2255
2256 return false;
2257}
2258
2259/// Test whether a GEP's result is known to be non-null.
2260///
2261/// Uses properties inherent in a GEP to try to determine whether it is known
2262/// to be non-null.
2263///
2264/// Currently this routine does not support vector GEPs.
2265static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2266 const Query &Q) {
2267 const Function *F = nullptr;
2268 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2269 F = I->getFunction();
2270
2271 if (!GEP->isInBounds() ||
2272 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2273 return false;
2274
2275 // FIXME: Support vector-GEPs.
2276 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2277
2278 // If the base pointer is non-null, we cannot walk to a null address with an
2279 // inbounds GEP in address space zero.
2280 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2281 return true;
2282
2283 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2284 // If so, then the GEP cannot produce a null pointer, as doing so would
2285 // inherently violate the inbounds contract within address space zero.
2286 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2287 GTI != GTE; ++GTI) {
2288 // Struct types are easy -- they must always be indexed by a constant.
2289 if (StructType *STy = GTI.getStructTypeOrNull()) {
2290 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2291 unsigned ElementIdx = OpC->getZExtValue();
2292 const StructLayout *SL = Q.DL.getStructLayout(STy);
2293 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2294 if (ElementOffset > 0)
2295 return true;
2296 continue;
2297 }
2298
2299 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2300 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2301 continue;
2302
2303 // Fast path the constant operand case both for efficiency and so we don't
2304 // increment Depth when just zipping down an all-constant GEP.
2305 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2306 if (!OpC->isZero())
2307 return true;
2308 continue;
2309 }
2310
2311 // We post-increment Depth here because while isKnownNonZero increments it
2312 // as well, when we pop back up that increment won't persist. We don't want
2313 // to recurse 10k times just because we have 10k GEP operands. We don't
2314 // bail completely out because we want to handle constant GEPs regardless
2315 // of depth.
2316 if (Depth++ >= MaxAnalysisRecursionDepth)
2317 continue;
2318
2319 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2320 return true;
2321 }
2322
2323 return false;
2324}
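// Illustrative note (not part of ValueTracking.cpp): for
// `getelementptr inbounds {i32, i32}, ptr %p, i64 0, i32 1` in address space
// zero, the struct field index selects a member at offset 4, so the loop above
// returns true: an inbounds GEP that adds a non-zero offset cannot produce
// null even when nothing is known about %p itself.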
2325
2326static bool isKnownNonNullFromDominatingCondition(const Value *V,
2327 const Instruction *CtxI,
2328 const DominatorTree *DT) {
2329 if (isa<Constant>(V))
2330 return false;
2331
2332 if (!CtxI || !DT)
2333 return false;
2334
2335 unsigned NumUsesExplored = 0;
2336 for (const auto *U : V->users()) {
2337 // Avoid massive lists
2338 if (NumUsesExplored >= DomConditionsMaxUses)
2339 break;
2340 NumUsesExplored++;
2341
2342 // If the value is used as an argument to a call or invoke, then argument
2343 // attributes may provide an answer about null-ness.
2344 if (const auto *CB = dyn_cast<CallBase>(U))
2345 if (auto *CalledFunc = CB->getCalledFunction())
2346 for (const Argument &Arg : CalledFunc->args())
2347 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2348 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2349 DT->dominates(CB, CtxI))
2350 return true;
2351
2352 // If the value is used as a load/store, then the pointer must be non null.
2353 if (V == getLoadStorePointerOperand(U)) {
2354 const Instruction *I = cast<Instruction>(U);
2355 if (!NullPointerIsDefined(I->getFunction(),
2356 V->getType()->getPointerAddressSpace()) &&
2357 DT->dominates(I, CtxI))
2358 return true;
2359 }
2360
2361 // Consider only compare instructions uniquely controlling a branch
2362 Value *RHS;
2363 CmpInst::Predicate Pred;
2364 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2365 continue;
2366
2367 bool NonNullIfTrue;
2368 if (cmpExcludesZero(Pred, RHS))
2369 NonNullIfTrue = true;
2370 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2371 NonNullIfTrue = false;
2372 else
2373 continue;
2374
2375 SmallVector<const User *, 4> WorkList;
2376 SmallPtrSet<const User *, 4> Visited;
2377 for (const auto *CmpU : U->users()) {
2378 assert(WorkList.empty() && "Should be!");
2379 if (Visited.insert(CmpU).second)
2380 WorkList.push_back(CmpU);
2381
2382 while (!WorkList.empty()) {
2383 auto *Curr = WorkList.pop_back_val();
2384
2385 // If a user is an AND, add all its users to the work list. We only
2386 // propagate "pred != null" condition through AND because it is only
2387 // correct to assume that all conditions of AND are met in true branch.
2388 // TODO: Support similar logic of OR and EQ predicate?
2389 if (NonNullIfTrue)
2390 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2391 for (const auto *CurrU : Curr->users())
2392 if (Visited.insert(CurrU).second)
2393 WorkList.push_back(CurrU);
2394 continue;
2395 }
2396
2397 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2398 assert(BI->isConditional() && "uses a comparison!");
2399
2400 BasicBlock *NonNullSuccessor =
2401 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2402 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2403 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2404 return true;
2405 } else if (NonNullIfTrue && isGuard(Curr) &&
2406 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2407 return true;
2408 }
2409 }
2410 }
2411 }
2412
2413 return false;
2414}
2415
2416/// Does the 'Range' metadata (which must be a valid MD_range operand list)
2417/// ensure that the value it's attached to is never Value? 'RangeType' is
2418 /// the type of the value described by the range.
2419static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2420 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2421 assert(NumRanges >= 1);
2422 for (unsigned i = 0; i < NumRanges; ++i) {
2423 ConstantInt *Lower =
2424 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2425 ConstantInt *Upper =
2426 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2427 ConstantRange Range(Lower->getValue(), Upper->getValue());
2428 if (Range.contains(Value))
2429 return false;
2430 }
2431 return true;
2432}
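// Illustrative note (not part of ValueTracking.cpp): range metadata such as
// !{i32 1, i32 256} describes the half-open interval [1, 256); since 0 is not
// contained in any of the ranges, this helper returns true and the value can
// be treated as known non-zero by isKnownNonZero below.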
2433
2434/// Try to detect a recurrence that monotonically increases/decreases from a
2435/// non-zero starting value. These are common as induction variables.
2436static bool isNonZeroRecurrence(const PHINode *PN) {
2437 BinaryOperator *BO = nullptr;
2438 Value *Start = nullptr, *Step = nullptr;
2439 const APInt *StartC, *StepC;
2440 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2441 !match(Start, m_APInt(StartC)) || StartC->isZero())
2442 return false;
2443
2444 switch (BO->getOpcode()) {
2445 case Instruction::Add:
2446 // Starting from non-zero and stepping away from zero can never wrap back
2447 // to zero.
2448 return BO->hasNoUnsignedWrap() ||
2449 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2450 StartC->isNegative() == StepC->isNegative());
2451 case Instruction::Mul:
2452 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2453 match(Step, m_APInt(StepC)) && !StepC->isZero();
2454 case Instruction::Shl:
2455 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2456 case Instruction::AShr:
2457 case Instruction::LShr:
2458 return BO->isExact();
2459 default:
2460 return false;
2461 }
2462}
2463
2464/// Return true if the given value is known to be non-zero when defined. For
2465/// vectors, return true if every demanded element is known to be non-zero when
2466/// defined. For pointers, if the context instruction and dominator tree are
2467/// specified, perform context-sensitive analysis and return true if the
2468/// pointer couldn't possibly be null at the specified instruction.
2469/// Supports values with integer or pointer type and vectors of integers.
2470bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2471 const Query &Q) {
2472 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2473 // vector
2474 if (isa<ScalableVectorType>(V->getType()))
2475 return false;
2476
2477 if (auto *C = dyn_cast<Constant>(V)) {
2478 if (C->isNullValue())
2479 return false;
2480 if (isa<ConstantInt>(C))
2481 // Must be non-zero due to null test above.
2482 return true;
2483
2484 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2485 // See the comment for IntToPtr/PtrToInt instructions below.
2486 if (CE->getOpcode() == Instruction::IntToPtr ||
2487 CE->getOpcode() == Instruction::PtrToInt)
2488 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2489 .getFixedSize() <=
2490 Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2491 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2492 }
2493
2494 // For constant vectors, check that all elements are undefined or known
2495 // non-zero to determine that the whole vector is known non-zero.
2496 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2497 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2498 if (!DemandedElts[i])
2499 continue;
2500 Constant *Elt = C->getAggregateElement(i);
2501 if (!Elt || Elt->isNullValue())
2502 return false;
2503 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2504 return false;
2505 }
2506 return true;
2507 }
2508
2509 // A global variable in address space 0 is non null unless extern weak
2510 // or an absolute symbol reference. Other address spaces may have null as a
2511 // valid address for a global, so we can't assume anything.
2512 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2513 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2514 GV->getType()->getAddressSpace() == 0)
2515 return true;
2516 } else
2517 return false;
2518 }
2519
2520 if (auto *I = dyn_cast<Instruction>(V)) {
2521 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2522 // If the possible ranges don't contain zero, then the value is
2523 // definitely non-zero.
2524 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2525 const APInt ZeroValue(Ty->getBitWidth(), 0);
2526 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2527 return true;
2528 }
2529 }
2530 }
2531
2532 if (isKnownNonZeroFromAssume(V, Q))
2533 return true;
2534
2535 // Some of the tests below are recursive, so bail out if we hit the limit.
2536 if (Depth++ >= MaxAnalysisRecursionDepth)
2537 return false;
2538
2539 // Check for pointer simplifications.
2540
2541 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2542 // Alloca never returns null, malloc might.
2543 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2544 return true;
2545
2546 // A byval or inalloca argument may not be null in a non-default address
2547 // space. A nonnull argument is assumed never 0.
2548 if (const Argument *A = dyn_cast<Argument>(V)) {
2549 if (((A->hasPassPointeeByValueCopyAttr() &&
2550 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2551 A->hasNonNullAttr()))
2552 return true;
2553 }
2554
2555 // A Load tagged with nonnull metadata is never null.
2556 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2557 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2558 return true;
2559
2560 if (const auto *Call = dyn_cast<CallBase>(V)) {
2561 if (Call->isReturnNonNull())
2562 return true;
2563 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2564 return isKnownNonZero(RP, Depth, Q);
2565 }
2566 }
2567
2568 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2569 return true;
2570
2571 // Check for recursive pointer simplifications.
2572 if (V->getType()->isPointerTy()) {
2573 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2574 // do not alter the value, or at least not the nullness property of the
2575 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2576 //
2577 // Note that we have to take special care to avoid looking through
2578 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2579 // as casts that can alter the value, e.g., AddrSpaceCasts.
2580 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2581 return isGEPKnownNonNull(GEP, Depth, Q);
2582
2583 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2584 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2585
2586 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2587 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2588 Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2589 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2590 }
2591
2592 // Similar to int2ptr above, we can look through ptr2int here if the cast
2593 // is a no-op or an extend and not a truncate.
2594 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2595 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2596 Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2597 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2598
2599 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2600
2601 // X | Y != 0 if X != 0 or Y != 0.
2602 Value *X = nullptr, *Y = nullptr;
2603 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2604 return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2605 isKnownNonZero(Y, DemandedElts, Depth, Q);
2606
2607 // ext X != 0 if X != 0.
2608 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2609 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2610
2611 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2612 // if the lowest bit is shifted off the end.
2613 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2614 // shl nuw can't remove any non-zero bits.
2615 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2616 if (Q.IIQ.hasNoUnsignedWrap(BO))
2617 return isKnownNonZero(X, Depth, Q);
2618
2619 KnownBits Known(BitWidth);
2620 computeKnownBits(X, DemandedElts, Known, Depth, Q);
2621 if (Known.One[0])
2622 return true;
2623 }
2624 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2625 // defined if the sign bit is shifted off the end.
2626 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2627 // shr exact can only shift out zero bits.
2628 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2629 if (BO->isExact())
2630 return isKnownNonZero(X, Depth, Q);
2631
2632 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2633 if (Known.isNegative())
2634 return true;
2635
2636 // If the shifter operand is a constant, and all of the bits shifted
2637 // out are known to be zero, and X is known non-zero then at least one
2638 // non-zero bit must remain.
2639 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2640 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2641 // Is there a known one in the portion not shifted out?
2642 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2643 return true;
2644 // Are all the bits to be shifted out known zero?
2645 if (Known.countMinTrailingZeros() >= ShiftVal)
2646 return isKnownNonZero(X, DemandedElts, Depth, Q);
2647 }
2648 }
2649 // div exact can only produce a zero if the dividend is zero.
2650 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2651 return isKnownNonZero(X, DemandedElts, Depth, Q);
2652 }
2653 // X + Y.
2654 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2655 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2656 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2657
2658 // If X and Y are both non-negative (as signed values) then their sum is not
2659 // zero unless both X and Y are zero.
2660 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2661 if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2662 isKnownNonZero(Y, DemandedElts, Depth, Q))
2663 return true;
2664
2665 // If X and Y are both negative (as signed values) then their sum is not
2666 // zero unless both X and Y equal INT_MIN.
2667 if (XKnown.isNegative() && YKnown.isNegative()) {
2668 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2669 // The sign bit of X is set. If some other bit is set then X is not equal
2670 // to INT_MIN.
2671 if (XKnown.One.intersects(Mask))
2672 return true;
2673 // The sign bit of Y is set. If some other bit is set then Y is not equal
2674 // to INT_MIN.
2675 if (YKnown.One.intersects(Mask))
2676 return true;
2677 }
2678
2679 // The sum of a non-negative number and a power of two is not zero.
2680 if (XKnown.isNonNegative() &&
2681 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2682 return true;
2683 if (YKnown.isNonNegative() &&
2684 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2685 return true;
2686 }
2687 // X * Y.
2688 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2689 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2690 // If X and Y are non-zero then so is X * Y as long as the multiplication
2691 // does not overflow.
2692 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2693 isKnownNonZero(X, DemandedElts, Depth, Q) &&
2694 isKnownNonZero(Y, DemandedElts, Depth, Q))
2695 return true;
2696 }
2697 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2698 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2699 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2700 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2701 return true;
2702 }
2703 // PHI
2704 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2705 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2706 return true;
2707
2708 // Check if all incoming values are non-zero using recursion.
2709 Query RecQ = Q;
2710 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2711 return llvm::all_of(PN->operands(), [&](const Use &U) {
2712 if (U.get() == PN)
2713 return true;
2714 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2715 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2716 });
2717 }
2718 // ExtractElement
2719 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2720 const Value *Vec = EEI->getVectorOperand();
2721 const Value *Idx = EEI->getIndexOperand();
2722 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2723 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2724 unsigned NumElts = VecTy->getNumElements();
2725 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2726 if (CIdx && CIdx->getValue().ult(NumElts))
2727 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2728 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2729 }
2730 }
2731 // Freeze
2732 else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2733 auto *Op = FI->getOperand(0);
2734 if (isKnownNonZero(Op, Depth, Q) &&
2735 isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2736 return true;
2737 } else if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
2738 if (II->getIntrinsicID() == Intrinsic::vscale)
2739 return true;
2740 }
2741
2742 KnownBits Known(BitWidth);
2743 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2744 return Known.One != 0;
2745}
2746
2747bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2748 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2749 // vector
2750 if (isa<ScalableVectorType>(V->getType()))
2751 return false;
2752
2753 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2754 APInt DemandedElts =
2755 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
2756 return isKnownNonZero(V, DemandedElts, Depth, Q);
2757}
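// Illustrative sketch (hypothetical IR, not taken from this file): a few values
// the rules above can prove non-zero, assuming %x and %y are arbitrary i32 and a
// default data layout where allocas live in address space 0:
//   %p = alloca i32               ; non-zero: an alloca in address space 0 is never null
//   %v = or i32 %x, 1             ; non-zero: one 'or' operand is a non-zero constant
//   %s = shl nuw i32 %v, %y       ; non-zero: shl nuw cannot shift away the non-zero bits of %v
// When none of the special cases apply, the routine falls back to the final
// computeKnownBits check and may conservatively return false.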
2758
2759/// If the pair of operators are the same invertible function, return the
2760/// operands of the function corresponding to each input. Otherwise,
2761/// return None. An invertible function is one that is 1-to-1 and maps
2762/// every input value to exactly one output value. This is equivalent to
2763/// saying that Op1 and Op2 are equal exactly when the specified pair of
2764/// operands are equal (except that Op1 and Op2 may be poison more often).
2765static Optional<std::pair<Value*, Value*>>
2766getInvertibleOperands(const Operator *Op1,
2767 const Operator *Op2) {
2768 if (Op1->getOpcode() != Op2->getOpcode())
2769 return None;
2770
2771 auto getOperands = [&](unsigned OpNum) -> auto {
2772 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2773 };
2774
2775 switch (Op1->getOpcode()) {
2776 default:
2777 break;
2778 case Instruction::Add:
2779 case Instruction::Sub:
2780 if (Op1->getOperand(0) == Op2->getOperand(0))
2781 return getOperands(1);
2782 if (Op1->getOperand(1) == Op2->getOperand(1))
2783 return getOperands(0);
2784 break;
2785 case Instruction::Mul: {
2786 // invertible if A * B == (A * B) mod 2^N where A and B are integers
2787 // and N is the bitwidth. The nsw case is non-obvious, but proven by
2788 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2789 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2790 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2791 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2792 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2793 break;
2794
2795 // Assume operand order has been canonicalized
2796 if (Op1->getOperand(1) == Op2->getOperand(1) &&
2797 isa<ConstantInt>(Op1->getOperand(1)) &&
2798 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2799 return getOperands(0);
2800 break;
2801 }
2802 case Instruction::Shl: {
2803 // Same as multiplies, with the difference that we don't need to check
2804 // for a non-zero multiply. Shifts always multiply by non-zero.
2805 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2806 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2807 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2808 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2809 break;
2810
2811 if (Op1->getOperand(1) == Op2->getOperand(1))
2812 return getOperands(0);
2813 break;
2814 }
2815 case Instruction::AShr:
2816 case Instruction::LShr: {
2817 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
2818 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
2819 if (!PEO1->isExact() || !PEO2->isExact())
2820 break;
2821
2822 if (Op1->getOperand(1) == Op2->getOperand(1))
2823 return getOperands(0);
2824 break;
2825 }
2826 case Instruction::SExt:
2827 case Instruction::ZExt:
2828 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
2829 return getOperands(0);
2830 break;
2831 case Instruction::PHI: {
2832 const PHINode *PN1 = cast<PHINode>(Op1);
2833 const PHINode *PN2 = cast<PHINode>(Op2);
2834
2835 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
2836 // are a single invertible function of the start values? Note that repeated
2837 // application of an invertible function is also invertible
2838 BinaryOperator *BO1 = nullptr;
2839 Value *Start1 = nullptr, *Step1 = nullptr;
2840 BinaryOperator *BO2 = nullptr;
2841 Value *Start2 = nullptr, *Step2 = nullptr;
2842 if (PN1->getParent() != PN2->getParent() ||
2843 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
2844 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
2845 break;
2846
2847 auto Values = getInvertibleOperands(cast<Operator>(BO1),
2848 cast<Operator>(BO2));
2849 if (!Values)
2850 break;
2851
2852 // We have to be careful of mutually defined recurrences here. Ex:
2853 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
2854 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
2855 // The invertibility of these is complicated, and not worth reasoning
2856 // about (yet?).
2857 if (Values->first != PN1 || Values->second != PN2)
2858 break;
2859
2860 return std::make_pair(Start1, Start2);
2861 }
2862 }
2863 return None;
2864}
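// Illustrative sketch (hypothetical IR, not taken from this file): for the Add
// case above, two adds that share their second operand form an invertible pair:
//   %a = add i32 %x, %c
//   %b = add i32 %y, %c
// getInvertibleOperands returns (%x, %y), so proving %x != %y is enough to
// conclude %a != %b, because adding the same %c is a 1-to-1 map modulo 2^32.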
2865
2866/// Return true if V2 == V1 + X, where X is known non-zero.
2867static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2868 const Query &Q) {
2869 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2870 if (!BO || BO->getOpcode() != Instruction::Add)
2871 return false;
2872 Value *Op = nullptr;
2873 if (V2 == BO->getOperand(0))
2874 Op = BO->getOperand(1);
2875 else if (V2 == BO->getOperand(1))
2876 Op = BO->getOperand(0);
2877 else
2878 return false;
2879 return isKnownNonZero(Op, Depth + 1, Q);
2880}
2881
2882/// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2883/// the multiplication is nuw or nsw.
2884static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2885 const Query &Q) {
2886 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2887 const APInt *C;
2888 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2889 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2890 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
2891 }
2892 return false;
2893}
2894
2895/// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2896/// the shift is nuw or nsw.
2897static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2898 const Query &Q) {
2899 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2900 const APInt *C;
2901 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2902 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2903 !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
2904 }
2905 return false;
2906}
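// Illustrative sketch (hypothetical IR, not taken from this file) of the three
// helpers above, assuming %v1 is already known non-zero:
//   %a = add i32 %v1, %n          ; %a != %v1 whenever %n is known non-zero (wrapping is fine)
//   %m = mul nsw i32 %v1, 3       ; %m != %v1: nsw multiply by a constant other than 0 or 1
//   %s = shl nuw i32 %v1, 2       ; %s != %v1: nuw shift by a non-zero amount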
2907
2908static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2909 unsigned Depth, const Query &Q) {
2910 // Check that the two PHIs are in the same block.
2911 if (PN1->getParent() != PN2->getParent())
2912 return false;
2913
2914 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2915 bool UsedFullRecursion = false;
2916 for (const BasicBlock *IncomBB : PN1->blocks()) {
2917 if (!VisitedBBs.insert(IncomBB).second)
2918 continue; // Don't reprocess blocks that we have dealt with already.
2919 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2920 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2921 const APInt *C1, *C2;
2922 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2923 continue;
2924
2925 // Only one pair of phi operands is allowed for full recursion.
2926 if (UsedFullRecursion)
2927 return false;
2928
2929 Query RecQ = Q;
2930 RecQ.CxtI = IncomBB->getTerminator();
2931 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2932 return false;
2933 UsedFullRecursion = true;
2934 }
2935 return true;
2936}
2937
2938/// Return true if it is known that V1 != V2.
2939static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2940 const Query &Q) {
2941 if (V1 == V2)
2942 return false;
2943 if (V1->getType() != V2->getType())
2944 // We can't look through casts yet.
2945 return false;
2946
2947 if (Depth >= MaxAnalysisRecursionDepth)
2948 return false;
2949
2950 // See if we can recurse through (exactly one of) our operands. This
2951 // requires our operation be 1-to-1 and map every input value to exactly
2952 // one output value. Such an operation is invertible.
2953 auto *O1 = dyn_cast<Operator>(V1);
2954 auto *O2 = dyn_cast<Operator>(V2);
2955 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2956 if (auto Values = getInvertibleOperands(O1, O2))
2957 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2958
2959 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2960 const PHINode *PN2 = cast<PHINode>(V2);
2961 // FIXME: This is missing a generalization to handle the case where one is
2962 // a PHI and another one isn't.
2963 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2964 return true;
2965 };
2966 }
2967
2968 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2969 return true;
2970
2971 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2972 return true;
2973
2974 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2975 return true;
2976
2977 if (V1->getType()->isIntOrIntVectorTy()) {
2978 // Are any known bits in V1 contradictory to known bits in V2? If V1
2979 // has a known zero where V2 has a known one, they must not be equal.
2980 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2981 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2982
2983 if (Known1.Zero.intersects(Known2.One) ||
2984 Known2.Zero.intersects(Known1.One))
2985 return true;
2986 }
2987 return false;
2988}
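// Illustrative sketch (hypothetical IR, not taken from this file) of the final
// known-bits contradiction test: if one value has a known-zero bit where the
// other has a known-one bit, the two can never be equal. For example:
//   %v1 = shl i32 %x, 1           ; bit 0 of %v1 is known zero
//   %v2 = or  i32 %y, 1           ; bit 0 of %v2 is known one
// Known1.Zero intersects Known2.One at bit 0, so isKnownNonEqual returns true.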
2989
2990/// Return true if 'V & Mask' is known to be zero. We use this predicate to
2991/// simplify operations downstream. Mask is known to be zero for bits that V
2992/// cannot have.
2993///
2994/// This function is defined on values with integer type, values with pointer
2995/// type, and vectors of integers. In the case
2996/// where V is a vector, the mask, known zero, and known one values are the
2997/// same width as the vector element, and the bit is set only if it is true
2998/// for all of the elements in the vector.
2999bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
3000 const Query &Q) {
3001 KnownBits Known(Mask.getBitWidth());
3002 computeKnownBits(V, Known, Depth, Q);
3003 return Mask.isSubsetOf(Known.Zero);
3004}
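// Illustrative sketch (hypothetical IR, not taken from this file): for
//   %v = and i32 %x, -4           ; clears bits 0 and 1
// computeKnownBits reports bits 0 and 1 of %v as known zero, so a query such as
// MaskedValueIsZero(%v, Mask = 0x3, ...) returns true because the mask is a
// subset of Known.Zero.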
3005
3006// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
3007// Returns the input and lower/upper bounds.
3008static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
3009 const APInt *&CLow, const APInt *&CHigh) {
3010 assert(isa<Operator>(Select) &&
3011        cast<Operator>(Select)->getOpcode() == Instruction::Select &&
3012        "Input should be a Select!");
3013
3014 const Value *LHS = nullptr, *RHS = nullptr;
3015 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
3016 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
3017 return false;
3018
3019 if (!match(RHS, m_APInt(CLow)))
3020 return false;
3021
3022 const Value *LHS2 = nullptr, *RHS2 = nullptr;
3023 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
3024 if (getInverseMinMaxFlavor(SPF) != SPF2)
3025 return false;
3026
3027 if (!match(RHS2, m_APInt(CHigh)))
3028 return false;
3029
3030 if (SPF == SPF_SMIN)
3031 std::swap(CLow, CHigh);
3032
3033 In = LHS2;
3034 return CLow->sle(*CHigh);
3035}
3036
3037static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
3038 const APInt *&CLow,
3039 const APInt *&CHigh) {
3040 assert((II->getIntrinsicID() == Intrinsic::smin ||
3041         II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
3043 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
3044 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
3045 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
3046 !match(II->getArgOperand(1), m_APInt(CLow)) ||
3047 !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
3048 return false;
3049
3050 if (II->getIntrinsicID() == Intrinsic::smin)
3051 std::swap(CLow, CHigh);
3052 return CLow->sle(*CHigh);
3053}
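// Illustrative sketch (hypothetical IR, not taken from this file): the intrinsic
// clamp matcher above recognizes a pattern such as
//   %lo = call i32 @llvm.smin.i32(i32 %x, i32 100)
//   %v  = call i32 @llvm.smax.i32(i32 %lo, i32 -50)
// yielding In = %x, CLow = -50, CHigh = 100. A value clamped into [-50, 100]
// fits in 8 signed bits, so ComputeNumSignBits can report at least
// 32 - 8 + 1 = 25 sign bits for the i32 result.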
3054
3055/// For vector constants, loop over the elements and find the constant with the
3056/// minimum number of sign bits. Return 0 if the value is not a vector constant
3057/// or if any element was not analyzed; otherwise, return the count for the
3058/// element with the minimum number of sign bits.
3059static unsigned computeNumSignBitsVectorConstant(const Value *V,
3060 const APInt &DemandedElts,
3061 unsigned TyBits) {
3062 const auto *CV = dyn_cast<Constant>(V);
3063 if (!CV || !isa<FixedVectorType>(CV->getType()))
3064 return 0;
3065
3066 unsigned MinSignBits = TyBits;
3067 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
3068 for (unsigned i = 0; i != NumElts; ++i) {
3069 if (!DemandedElts[i])
3070 continue;
3071 // If we find a non-ConstantInt, bail out.
3072 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
3073 if (!Elt)
3074 return 0;
3075
3076 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
3077 }
3078
3079 return MinSignBits;
3080}
3081
3082static unsigned ComputeNumSignBitsImpl(const Value *V,
3083 const APInt &DemandedElts,
3084 unsigned Depth, const Query &Q);
3085
3086static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
3087 unsigned Depth, const Query &Q) {
3088 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
3089 assert(Result > 0 && "At least one sign bit needs to be present!");
3090 return Result;
3091}
3092
3093/// Return the number of times the sign bit of the register is replicated into
3094/// the other bits. We know that at least 1 bit is always equal to the sign bit
3095/// (itself), but other cases can give us information. For example, immediately
3096/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
3097/// other, so we return 3. For vectors, return the number of sign bits for the
3098/// vector element with the minimum number of known sign bits of the demanded
3099/// elements in the vector specified by DemandedElts.
3100static unsigned ComputeNumSignBitsImpl(const Value *V,
3101 const APInt &DemandedElts,
3102 unsigned Depth, const Query &Q) {
3103 Type *Ty = V->getType();
3104
3105 // FIXME: We currently have no way to represent the DemandedElts of a scalable
3106 // vector
3107 if (isa<ScalableVectorType>(Ty))
3108 return 1;
3109
3110#ifndef NDEBUG
3111 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3112
3113 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3114 assert(
3115     FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3116     "DemandedElt width should equal the fixed vector number of elements");
3117 } else {
3118 assert(DemandedElts == APInt(1, 1) &&
3119        "DemandedElt width should be 1 for scalars");
3120 }
3121#endif
3122
3123 // We return the minimum number of sign bits that are guaranteed to be present
3124 // in V, so for undef we have to conservatively return 1. We don't have the
3125 // same behavior for poison though -- that's a FIXME today.
3126
3127 Type *ScalarTy = Ty->getScalarType();
3128 unsigned TyBits = ScalarTy->isPointerTy() ?
3129 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
3130 Q.DL.getTypeSizeInBits(ScalarTy);
3131
3132 unsigned Tmp, Tmp2;
3133 unsigned FirstAnswer = 1;
3134
3135 // Note that ConstantInt is handled by the general computeKnownBits case
3136 // below.
3137
3138 if (Depth == MaxAnalysisRecursionDepth)
3139 return 1;
3140
3141 if (auto *U = dyn_cast<Operator>(V)) {
3142 switch (Operator::getOpcode(V)) {
3143 default: break;
3144 case Instruction::SExt:
3145 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3146 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
3147
3148 case Instruction::SDiv: {
3149 const APInt *Denominator;
3150 // sdiv X, C -> adds log(C) sign bits.
3151 if (match(U->getOperand(1), m_APInt(Denominator))) {
3152
3153 // Ignore non-positive denominator.
3154 if (!Denominator->isStrictlyPositive())
3155 break;
3156
3157 // Calculate the incoming numerator bits.
3158 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3159
3160 // Add floor(log(C)) bits to the numerator bits.
3161 return std::min(TyBits, NumBits + Denominator->logBase2());
3162 }
3163 break;
3164 }
3165
3166 case Instruction::SRem: {
3167 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3168
3169 const APInt *Denominator;
3170 // srem X, C -> we know that the result is within [-C+1,C) when C is a
3171 // positive constant. This lets us put a lower bound on the number of sign
3172 // bits.
3173 if (match(U->getOperand(1), m_APInt(Denominator))) {
3174
3175 // Ignore non-positive denominator.
3176 if (Denominator->isStrictlyPositive()) {
3177 // Calculate the leading sign bit constraints by examining the
3178 // denominator. Given that the denominator is positive, there are two
3179 // cases:
3180 //
3181 // 1. The numerator is positive. The result range is [0,C) and
3182 // [0,C) u< (1 << ceilLogBase2(C)).
3183 //
3184 // 2. The numerator is negative. Then the result range is (-C,0] and
3185 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3186 //
3187 // Thus a lower bound on the number of sign bits is `TyBits -
3188 // ceilLogBase2(C)`.
3189
3190 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3191 Tmp = std::max(Tmp, ResBits);
3192 }
3193 }
3194 return Tmp;
3195 }
3196
3197 case Instruction::AShr: {
3198 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3199 // ashr X, C -> adds C sign bits. Vectors too.
3200 const APInt *ShAmt;
3201 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3202 if (ShAmt->uge(TyBits))
3203 break; // Bad shift.
3204 unsigned ShAmtLimited = ShAmt->getZExtValue();
3205 Tmp += ShAmtLimited;
3206 if (Tmp > TyBits) Tmp = TyBits;
3207 }
3208 return Tmp;
3209 }
3210 case Instruction::Shl: {
3211 const APInt *ShAmt;
3212 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3213 // shl destroys sign bits.
3214 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3215 if (ShAmt->uge(TyBits) || // Bad shift.
3216 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3217 Tmp2 = ShAmt->getZExtValue();
3218 return Tmp - Tmp2;
3219 }
3220 break;
3221 }
3222 case Instruction::And:
3223 case Instruction::Or:
3224 case Instruction::Xor: // NOT is handled here.
3225 // Logical binary ops preserve the number of sign bits at the worst.
3226 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3227 if (Tmp != 1) {
3228 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3229 FirstAnswer = std::min(Tmp, Tmp2);
3230 // We computed what we know about the sign bits as our first
3231 // answer. Now proceed to the generic code that uses
3232 // computeKnownBits, and pick whichever answer is better.
3233 }
3234 break;
3235
3236 case Instruction::Select: {
3237 // If we have a clamp pattern, we know that the number of sign bits will
3238 // be the minimum of the clamp min/max range.
3239 const Value *X;
3240 const APInt *CLow, *CHigh;
3241 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3242 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3243
3244 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3245 if (Tmp == 1) break;
3246 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3247 return std::min(Tmp, Tmp2);
3248 }
3249
3250 case Instruction::Add:
3251 // Add can have at most one carry bit. Thus we know that the output
3252 // is, at worst, one more bit than the inputs.
3253 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3254 if (Tmp == 1) break;
3255
3256 // Special case decrementing a value (ADD X, -1):
3257 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3258 if (CRHS->isAllOnesValue()) {
3259 KnownBits Known(TyBits);
3260 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3261
3262 // If the input is known to be 0 or 1, the output is 0/-1, which is
3263 // all sign bits set.
3264 if ((Known.Zero | 1).isAllOnes())
3265 return TyBits;
3266
3267 // If we are subtracting one from a positive number, there is no carry
3268 // out of the result.
3269 if (Known.isNonNegative())
3270 return Tmp;
3271 }
3272
3273 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3274 if (Tmp2 == 1) break;
3275 return std::min(Tmp, Tmp2) - 1;
3276
3277 case Instruction::Sub:
3278 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3279 if (Tmp2 == 1) break;
3280
3281 // Handle NEG.
3282 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3283 if (CLHS->isNullValue()) {
3284 KnownBits Known(TyBits);
3285 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3286 // If the input is known to be 0 or 1, the output is 0/-1, which is
3287 // all sign bits set.
3288 if ((Known.Zero | 1).isAllOnes())
3289 return TyBits;
3290
3291 // If the input is known to be positive (the sign bit is known clear),
3292 // the output of the NEG has the same number of sign bits as the
3293 // input.
3294 if (Known.isNonNegative())
3295 return Tmp2;
3296
3297 // Otherwise, we treat this like a SUB.
3298 }
3299
3300 // Sub can have at most one carry bit. Thus we know that the output
3301 // is, at worst, one more bit than the inputs.
3302 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3303 if (Tmp == 1) break;
3304 return std::min(Tmp, Tmp2) - 1;
3305
3306 case Instruction::Mul: {
3307 // The output of the Mul can be at most twice the valid bits in the
3308 // inputs.
3309 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3310 if (SignBitsOp0 == 1) break;
3311 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3312 if (SignBitsOp1 == 1) break;
3313 unsigned OutValidBits =
3314 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3315 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3316 }
3317
3318 case Instruction::PHI: {
3319 const PHINode *PN = cast<PHINode>(U);
3320 unsigned NumIncomingValues = PN->getNumIncomingValues();
3321 // Don't analyze large in-degree PHIs.
3322 if (NumIncomingValues > 4) break;
3323 // Unreachable blocks may have zero-operand PHI nodes.
3324 if (NumIncomingValues == 0) break;
3325
3326 // Take the minimum of all incoming values. This can't infinitely loop
3327 // because of our depth threshold.
3328 Query RecQ = Q;
3329 Tmp = TyBits;
3330 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3331 if (Tmp == 1) return Tmp;
3332 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3333 Tmp = std::min(
3334 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3335 }
3336 return Tmp;
3337 }
3338
3339 case Instruction::Trunc:
3340 // FIXME: it's tricky to do anything useful for this, but it is an
3341 // important case for targets like X86.
3342 break;
3343
3344 case Instruction::ExtractElement:
3345 // Look through extract element. At the moment we keep this simple and
3346 // skip tracking the specific element. But at least we might find
3347 // information valid for all elements of the vector (for example if vector
3348 // is sign extended, shifted, etc).
3349 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3350
3351 case Instruction::ShuffleVector: {
3352 // Collect the minimum number of sign bits that are shared by every vector
3353 // element referenced by the shuffle.
3354 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3355 if (!Shuf) {
3356 // FIXME: Add support for shufflevector constant expressions.
3357 return 1;
3358 }
3359 APInt DemandedLHS, DemandedRHS;
3360 // For undef elements, we don't know anything about the common state of
3361 // the shuffle result.
3362 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3363 return 1;
3364 Tmp = std::numeric_limits<unsigned>::max();
3365 if (!!DemandedLHS) {
3366 const Value *LHS = Shuf->getOperand(0);
3367 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3368 }
3369 // If we don't know anything, early out and try computeKnownBits
3370 // fall-back.
3371 if (Tmp == 1)
3372 break;
3373 if (!!DemandedRHS) {
3374 const Value *RHS = Shuf->getOperand(1);
3375 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3376 Tmp = std::min(Tmp, Tmp2);
3377 }
3378 // If we don't know anything, early out and try computeKnownBits
3379 // fall-back.
3380 if (Tmp == 1)
3381 break;
3382 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3383 return Tmp;
3384 }
3385 case Instruction::Call: {
3386 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3387 switch (II->getIntrinsicID()) {
3388 default: break;
3389 case Intrinsic::abs:
3390 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3391 if (Tmp == 1) break;
3392
3393 // Absolute value reduces number of sign bits by at most 1.
3394 return Tmp - 1;
3395 case Intrinsic::smin:
3396 case Intrinsic::smax: {
3397 const APInt *CLow, *CHigh;
3398 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
3399 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3400 }
3401 }
3402 }
3403 }
3404 }
3405 }
3406
3407 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3408 // use this information.
3409
3410 // If we can examine all elements of a vector constant successfully, we're
3411 // done (we can't do any better than that). If not, keep trying.
3412 if (unsigned VecSignBits =
3413 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3414 return VecSignBits;
3415
3416 KnownBits Known(TyBits);
3417 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3418
3419 // If we know that the sign bit is either zero or one, determine the number of
3420 // identical bits in the top of the input value.
3421 return std::max(FirstAnswer, Known.countMinSignBits());
3422}
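// Illustrative sketch (hypothetical IR, not taken from this file) of a few of
// the cases above, for i32 values:
//   %a = sext i8 %x to i32        ; at least 1 + (32 - 8) = 25 sign bits
//   %b = ashr i32 %y, 24          ; at least 1 + 24 = 25 sign bits
//   %c = add i32 %a, %b           ; at least min(25, 25) - 1 = 24 sign bits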
3423
3424Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3425 const TargetLibraryInfo *TLI) {
3426 const Function *F = CB.getCalledFunction();
3427 if (!F)
3428 return Intrinsic::not_intrinsic;
3429
3430 if (F->isIntrinsic())
3431 return F->getIntrinsicID();
3432
3433 // We are going to infer semantics of a library function based on mapping it
3434 // to an LLVM intrinsic. Check that the library function is available from
3435 // this callbase and in this environment.
3436 LibFunc Func;
3437 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3438 !CB.onlyReadsMemory())
3439 return Intrinsic::not_intrinsic;
3440
3441 switch (Func) {
3442 default:
3443 break;
3444 case LibFunc_sin:
3445 case LibFunc_sinf:
3446 case LibFunc_sinl:
3447 return Intrinsic::sin;
3448 case LibFunc_cos:
3449 case LibFunc_cosf:
3450 case LibFunc_cosl:
3451 return Intrinsic::cos;
3452 case LibFunc_exp:
3453 case LibFunc_expf:
3454 case LibFunc_expl:
3455 return Intrinsic::exp;
3456 case LibFunc_exp2:
3457 case LibFunc_exp2f:
3458 case LibFunc_exp2l:
3459 return Intrinsic::exp2;
3460 case LibFunc_log:
3461 case LibFunc_logf:
3462 case LibFunc_logl:
3463 return Intrinsic::log;
3464 case LibFunc_log10:
3465 case LibFunc_log10f:
3466 case LibFunc_log10l:
3467 return Intrinsic::log10;
3468 case LibFunc_log2:
3469 case LibFunc_log2f:
3470 case LibFunc_log2l:
3471 return Intrinsic::log2;
3472 case LibFunc_fabs:
3473 case LibFunc_fabsf:
3474 case LibFunc_fabsl:
3475 return Intrinsic::fabs;
3476 case LibFunc_fmin:
3477 case LibFunc_fminf:
3478 case LibFunc_fminl:
3479 return Intrinsic::minnum;
3480 case LibFunc_fmax:
3481 case LibFunc_fmaxf:
3482 case LibFunc_fmaxl:
3483 return Intrinsic::maxnum;
3484 case LibFunc_copysign:
3485 case LibFunc_copysignf:
3486 case LibFunc_copysignl:
3487 return Intrinsic::copysign;
3488 case LibFunc_floor:
3489 case LibFunc_floorf:
3490 case LibFunc_floorl:
3491 return Intrinsic::floor;
3492 case LibFunc_ceil:
3493 case LibFunc_ceilf:
3494 case LibFunc_ceill:
3495 return Intrinsic::ceil;
3496 case LibFunc_trunc:
3497 case LibFunc_truncf:
3498 case LibFunc_truncl:
3499 return Intrinsic::trunc;
3500 case LibFunc_rint:
3501 case LibFunc_rintf:
3502 case LibFunc_rintl:
3503 return Intrinsic::rint;
3504 case LibFunc_nearbyint:
3505 case LibFunc_nearbyintf:
3506 case LibFunc_nearbyintl:
3507 return Intrinsic::nearbyint;
3508 case LibFunc_round:
3509 case LibFunc_roundf:
3510 case LibFunc_roundl:
3511 return Intrinsic::round;
3512 case LibFunc_roundeven:
3513 case LibFunc_roundevenf:
3514 case LibFunc_roundevenl:
3515 return Intrinsic::roundeven;
3516 case LibFunc_pow:
3517 case LibFunc_powf:
3518 case LibFunc_powl:
3519 return Intrinsic::pow;
3520 case LibFunc_sqrt:
3521 case LibFunc_sqrtf:
3522 case LibFunc_sqrtl:
3523 return Intrinsic::sqrt;
3524 }
3525
3526 return Intrinsic::not_intrinsic;
3527}
3528
3529/// Return true if we can prove that the specified FP value is never equal to
3530/// -0.0.
3531/// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3532/// that a value is not -0.0. It only guarantees that -0.0 may be treated
3533/// the same as +0.0 in floating-point ops.
3534bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3535 unsigned Depth) {
3536 if (auto *CFP = dyn_cast<ConstantFP>(V))
3537 return !CFP->getValueAPF().isNegZero();
3538
3539 if (Depth == MaxAnalysisRecursionDepth)
3540 return false;
3541
3542 auto *Op = dyn_cast<Operator>(V);
3543 if (!Op)
3544 return false;
3545
3546 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3547 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3548 return true;
3549
3550 // sitofp and uitofp turn into +0.0 for zero.
3551 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3552 return true;
3553
3554 if (auto *Call = dyn_cast<CallInst>(Op)) {
3555 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3556 switch (IID) {
3557 default:
3558 break;
3559 // sqrt(-0.0) = -0.0, no other negative results are possible.
3560 case Intrinsic::sqrt:
3561 case Intrinsic::canonicalize:
3562 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3563 case Intrinsic::experimental_constrained_sqrt: {
3564 // NOTE: This rounding mode restriction may be too strict.
3565 const auto *CI = cast<ConstrainedFPIntrinsic>(Call);
3566 if (CI->getRoundingMode() == RoundingMode::NearestTiesToEven)
3567 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3568 else
3569 return false;
3570 }
3571 // fabs(x) != -0.0
3572 case Intrinsic::fabs:
3573 return true;
3574 // sitofp and uitofp turn into +0.0 for zero.
3575 case Intrinsic::experimental_constrained_sitofp:
3576 case Intrinsic::experimental_constrained_uitofp:
3577 return true;
3578 }
3579 }
3580
3581 return false;
3582}
3583
3584/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3585/// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3586/// bit despite comparing equal.
3587static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3588 const TargetLibraryInfo *TLI,
3589 bool SignBitOnly,
3590 unsigned Depth) {
3591 // TODO: This function does not do the right thing when SignBitOnly is true
3592 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3593 // which flips the sign bits of NaNs. See
3594 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3595
3596 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3597 return !CFP->getValueAPF().isNegative() ||
3598 (!SignBitOnly && CFP->getValueAPF().isZero());
3599 }
3600
3601 // Handle vector of constants.
3602 if (auto *CV = dyn_cast<Constant>(V)) {
3603 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3604 unsigned NumElts = CVFVTy->getNumElements();
3605 for (unsigned i = 0; i != NumElts; ++i) {
3606 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3607 if (!CFP)
3608 return false;
3609 if (CFP->getValueAPF().isNegative() &&
3610 (SignBitOnly || !CFP->getValueAPF().isZero()))
3611 return false;
3612 }
3613
3614 // All non-negative ConstantFPs.
3615 return true;
3616 }
3617 }
3618
3619 if (Depth == MaxAnalysisRecursionDepth)
3620 return false;
3621
3622 const Operator *I = dyn_cast<Operator>(V);
3623 if (!I)
3624 return false;
3625
3626 switch (I->getOpcode()) {
3627 default:
3628 break;
3629 // Unsigned integers are always nonnegative.
3630 case Instruction::UIToFP:
3631 return true;
3632 case Instruction::FDiv:
3633 // X / X is always exactly 1.0 or a NaN.
3634 if (I->getOperand(0) == I->getOperand(1) &&
3635 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3636 return true;
3637
3638 // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN).
3639 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3640 Depth + 1) &&
3641 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3642 /*SignBitOnly*/ true, Depth + 1);
3643 case Instruction::FMul:
3644 // X * X is always non-negative or a NaN.
3645 if (I->getOperand(0) == I->getOperand(1) &&
3646 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3647 return true;
3648
3649 [[fallthrough]];
3650 case Instruction::FAdd:
3651 case Instruction::FRem:
3652 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3653 Depth + 1) &&
3654 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3655 Depth + 1);
3656 case Instruction::Select:
3657 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3658 Depth + 1) &&
3659 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3660 Depth + 1);
3661 case Instruction::FPExt:
3662 case Instruction::FPTrunc:
3663 // Widening/narrowing never change sign.
3664 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3665 Depth + 1);
3666 case Instruction::ExtractElement:
3667 // Look through extract element. At the moment we keep this simple and skip
3668 // tracking the specific element. But at least we might find information
3669 // valid for all elements of the vector.
3670 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3671 Depth + 1);
3672 case Instruction::Call:
3673 const auto *CI = cast<CallInst>(I);
3674 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3675 switch (IID) {
3676 default:
3677 break;
3678 case Intrinsic::maxnum: {
3679 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3680 auto isPositiveNum = [&](Value *V) {
3681 if (SignBitOnly) {
3682 // With SignBitOnly, this is tricky because the result of
3683 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3684 // a constant strictly greater than 0.0.
3685 const APFloat *C;
3686 return match(V, m_APFloat(C)) &&
3687 *C > APFloat::getZero(C->getSemantics());
3688 }
3689
3690 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3691 // maxnum can't be ordered-less-than-zero.
3692 return isKnownNeverNaN(V, TLI) &&
3693 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3694 };
3695
3696 // TODO: This could be improved. We could also check that neither operand
3697 // has its sign bit set (and at least 1 is not-NAN?).
3698 return isPositiveNum(V0) || isPositiveNum(V1);
3699 }
3700
3701 case Intrinsic::maximum:
3702 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3703 Depth + 1) ||
3704 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3705 Depth + 1);
3706 case Intrinsic::minnum:
3707 case Intrinsic::minimum:
3708 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3709 Depth + 1) &&
3710 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3711 Depth + 1);
3712 case Intrinsic::exp:
3713 case Intrinsic::exp2:
3714 case Intrinsic::fabs:
3715 return true;
3716
3717 case Intrinsic::sqrt:
3718 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3719 if (!SignBitOnly)
3720 return true;
3721 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3722 CannotBeNegativeZero(CI->getOperand(0), TLI));
3723
3724 case Intrinsic::powi:
3725 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3726 // powi(x,n) is non-negative if n is even.
3727 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3728 return true;
3729 }
3730 // TODO: This is not correct. Given that exp is an integer, here are the
3731 // ways that pow can return a negative value:
3732 //
3733 // pow(x, exp) --> negative if exp is odd and x is negative.
3734 // pow(-0, exp) --> -inf if exp is negative odd.
3735 // pow(-0, exp) --> -0 if exp is positive odd.
3736 // pow(-inf, exp) --> -0 if exp is negative odd.
3737 // pow(-inf, exp) --> -inf if exp is positive odd.
3738 //
3739 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3740 // but we must return false if x == -0. Unfortunately we do not currently
3741 // have a way of expressing this constraint. See details in
3742 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3743 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3744 Depth + 1);
3745
3746 case Intrinsic::fma:
3747 case Intrinsic::fmuladd:
3748 // x*x+y is non-negative if y is non-negative.
3749 return I->getOperand(0) == I->getOperand(1) &&
3750 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3751 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3752 Depth + 1);
3753 }
3754 break;
3755 }
3756 return false;
3757}
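// Illustrative sketch (hypothetical IR, not taken from this file): values the
// routine above can prove are never ordered-less-than-zero:
//   %u = uitofp i32 %x to float               ; unsigned inputs convert to non-negative floats
//   %m = fmul float %y, %y                    ; %y * %y is either non-negative or NaN
//   %f = call float @llvm.fabs.f32(float %z)  ; fabs never has a negative ordered result
// With SignBitOnly set, the %y * %y case additionally requires the 'nnan' flag,
// because a NaN result may still carry a set sign bit.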
3758
3759bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3760 const TargetLibraryInfo *TLI) {
3761 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3762}
3763
3764bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3765 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3766}
3767
3768bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3769 unsigned Depth) {
3770 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3771
3772 // If we're told that infinities won't happen, assume they won't.
3773 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3774 if (FPMathOp->hasNoInfs())
3775 return true;
3776
3777 // Handle scalar constants.
3778 if (auto *CFP = dyn_cast<ConstantFP>(V))
3779 return !CFP->isInfinity();
3780
3781 if (Depth == MaxAnalysisRecursionDepth)
3782 return false;
3783
3784 if (auto *Inst = dyn_cast<Instruction>(V)) {
3785 switch (Inst->getOpcode()) {
3786 case Instruction::Select: {
3787 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3788 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3789 }
3790 case Instruction::SIToFP:
3791 case Instruction::UIToFP: {
3792 // Get width of largest magnitude integer (remove a bit if signed).
3793 // This still works for a signed minimum value because the largest FP
3794 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3795 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3796 if (Inst->getOpcode() == Instruction::SIToFP)
3797 --IntSize;
3798
3799 // If the exponent of the largest finite FP value can hold the largest
3800 // integer, the result of the cast must be finite.
3801 Type *FPTy = Inst->getType()->getScalarType();
3802 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3803 }
3804 case Instruction::FPExt: {
3805 // Peek through to source op. If it is not infinity, this is not infinity.
3806 return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1);
3807 }
3808 default:
3809 break;
3810 }
3811 }
3812
3813 // try to handle fixed width vector constants
3814 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3815 if (VFVTy && isa<Constant>(V)) {
3816 // For vectors, verify that each element is not infinity.
3817 unsigned NumElts = VFVTy->getNumElements();
3818 for (unsigned i = 0; i != NumElts; ++i) {
3819 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3820 if (!Elt)
3821 return false;
3822 if (isa<UndefValue>(Elt))
3823 continue;
3824 auto *CElt = dyn_cast<ConstantFP>(Elt);
3825 if (!CElt || CElt->isInfinity())
3826 return false;
3827 }
3828 // All elements were confirmed non-infinity or undefined.
3829 return true;
3830 }
3831
3832 // was not able to prove that V never contains infinity
3833 return false;
3834}
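// Illustrative sketch (hypothetical IR, not taken from this file): for
//   %f = sitofp i16 %x to double
// the widest possible magnitude is 2^15, which the double exponent range easily
// covers (ilogb of the largest finite double is 1023 >= 15), so the cast can
// never produce an infinity and isKnownNeverInfinity returns true.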
3835
3836bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3837 unsigned Depth) {
3838 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3839
3840 // If we're told that NaNs won't happen, assume they won't.
3841 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3842 if (FPMathOp->hasNoNaNs())
3843 return true;
3844
3845 // Handle scalar constants.
3846 if (auto *CFP = dyn_cast<ConstantFP>(V))
3847 return !CFP->isNaN();
3848
3849 if (Depth == MaxAnalysisRecursionDepth)
3850 return false;
3851
3852 if (auto *Inst = dyn_cast<Instruction>(V)) {
3853 switch (Inst->getOpcode()) {
3854 case Instruction::FAdd:
3855 case Instruction::FSub:
3856 // Adding positive and negative infinity produces NaN.
3857 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3858 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3859 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3860 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3861
3862 case Instruction::FMul:
3863 // Zero multiplied with infinity produces NaN.
3864 // FIXME: If neither side can be zero fmul never produces NaN.
3865 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3866 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3867 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3868 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3869
3870 case Instruction::FDiv:
3871 case Instruction::FRem:
3872 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3873 return false;
3874
3875 case Instruction::Select: {
3876 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3877 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3878 }
3879 case Instruction::SIToFP:
3880 case Instruction::UIToFP:
3881 return true;
3882 case Instruction::FPTrunc:
3883 case Instruction::FPExt:
3884 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3885 default:
3886 break;
3887 }
3888 }
3889
3890 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3891 switch (II->getIntrinsicID()) {
3892 case Intrinsic::canonicalize:
3893 case Intrinsic::fabs:
3894 case Intrinsic::copysign:
3895 case Intrinsic::exp:
3896 case Intrinsic::exp2:
3897 case Intrinsic::floor:
3898 case Intrinsic::ceil:
3899 case Intrinsic::trunc:
3900 case Intrinsic::rint:
3901 case Intrinsic::nearbyint:
3902 case Intrinsic::round:
3903 case Intrinsic::roundeven:
3904 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3905 case Intrinsic::sqrt:
3906 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3907 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3908 case Intrinsic::minnum:
3909 case Intrinsic::maxnum:
3910 // If either operand is not NaN, the result is not NaN.
3911 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3912 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3913 default:
3914 return false;
3915 }
3916 }
3917
3918 // Try to handle fixed width vector constants
3919 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3920 if (VFVTy && isa<Constant>(V)) {
3921 // For vectors, verify that each element is not NaN.
3922 unsigned NumElts = VFVTy->getNumElements();
3923 for (unsigned i = 0; i != NumElts; ++i) {
3924 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3925 if (!Elt)
3926 return false;
3927 if (isa<UndefValue>(Elt))
3928 continue;
3929 auto *CElt = dyn_cast<ConstantFP>(Elt);
3930 if (!CElt || CElt->isNaN())
3931 return false;
3932 }
3933 // All elements were confirmed not-NaN or undefined.
3934 return true;
3935 }
3936
3937 // Was not able to prove that V never contains NaN
3938 return false;
3939}
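// Illustrative sketch (hypothetical IR, not taken from this file): values the
// routine above can prove are never NaN:
//   %u = uitofp i32 %x to float                            ; integer conversions never yield NaN
//   %m = call float @llvm.maxnum.f32(float %u, float %y)   ; maxnum is not NaN if either operand is not NaN
//   %s = call float @llvm.sqrt.f32(float %u)               ; sqrt of a value known non-NaN and not less than zero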
3940
3941Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3942
3943 // All byte-wide stores are splatable, even of arbitrary variables.
3944 if (V->getType()->isIntegerTy(8))
3945 return V;
3946
3947 LLVMContext &Ctx = V->getContext();
3948
3949 // Undef don't care.
3950 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3951 if (isa<UndefValue>(V))
3952 return UndefInt8;
3953
3954 // Return Undef for zero-sized type.
3955 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3956 return UndefInt8;
3957
3958 Constant *C = dyn_cast<Constant>(V);
3959 if (!C) {
3960 // Conceptually, we could handle things like:
3961 // %a = zext i8 %X to i16
3962 // %b = shl i16 %a, 8
3963 // %c = or i16 %a, %b
3964 // but until there is an example that actually needs this, it doesn't seem
3965 // worth worrying about.
3966 return nullptr;
3967 }
3968
3969   // Handle 'null' ConstantAggregateZero etc.
3970 if (C->isNullValue())
3971 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3972
3973 // Constant floating-point values can be handled as integer values if the
3974 // corresponding integer value is "byteable". An important case is 0.0.
3975 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3976 Type *Ty = nullptr;
3977 if (CFP->getType()->isHalfTy())
3978 Ty = Type::getInt16Ty(Ctx);
3979 else if (CFP->getType()->isFloatTy())
3980 Ty = Type::getInt32Ty(Ctx);
3981 else if (CFP->getType()->isDoubleTy())
3982 Ty = Type::getInt64Ty(Ctx);
3983 // Don't handle long double formats, which have strange constraints.
3984 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3985 : nullptr;
3986 }
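  // For illustration (not from the original source): float 0.0 bitcasts to
  // i32 0, a splat of the byte 0x00, so isBytewiseValue returns the i8
  // constant 0; float 1.0 bitcasts to i32 0x3F800000, which is not a byte
  // splat, so nullptr is returned.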
3987
3988   // We can handle constant integers whose width is a multiple of 8 bits.
3989 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3990 if (CI->getBitWidth() % 8 == 0) {
3991       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3992 if (!CI->getValue().isSplat(8))
3993 return nullptr;
3994 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3995 }
3996 }
3997
3998 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3999 if (CE->getOpcode() == Instruction::IntToPtr) {
4000 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
4001 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
4002 return isBytewiseValue(
4003 ConstantExpr::getIntegerCast(CE->getOperand(0),
4004 Type::getIntNTy(Ctx, BitWidth), false),
4005 DL);
4006 }
4007 }
4008 }
4009
4010 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
4011 if (LHS == RHS)
4012 return LHS;
4013 if (!LHS || !RHS)
4014 return nullptr;
4015 if (LHS == UndefInt8)
4016 return RHS;
4017 if (RHS == UndefInt8)
4018 return LHS;
4019 return nullptr;
4020 };
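  // For illustration (not from the original source): Merge(0x41, UndefInt8)
  // yields 0x41, Merge(0x41, 0x41) yields 0x41, and Merge(0x41, 0x42) yields
  // nullptr, so aggregates mixing distinct byte values are rejected below.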
4021
4022 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
4023 Value *Val = UndefInt8;
4024 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
4025 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
4026 return nullptr;
4027 return Val;
4028 }
4029
4030 if (isa<ConstantAggregate>(C)) {
4031 Value *Val = UndefInt8;
4032 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
4033 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
4034 return nullptr;
4035 return Val;
4036 }
4037
4038 // Don't try to handle the handful of other constants.
4039 return nullptr;
4040}
4041
4042// This is the recursive version of BuildSubAggregate. It takes a few different
4043// arguments. Idxs is the index within the nested struct From that we are
4044// looking at now (which is of type IndexedType). IdxSkip is the number of
4045// indices from Idxs that should be left out when inserting into the resulting
4046// struct. To is the result struct built so far, new insertvalue instructions
4047// build on that.
4048static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
4049 SmallVectorImpl<unsigned> &Idxs,
4050 unsigned IdxSkip,
4051 Instruction *InsertBefore) {
4052 StructType *STy = dyn_cast<StructType>(IndexedType);
4053 if (STy) {
4054 // Save the original To argument so we can modify it
4055 Value *OrigTo = To;
4056 // General case, the type indexed by Idxs is a struct
4057 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4058 // Process each struct element recursively
4059 Idxs.push_back(i);
4060 Value *PrevTo = To;
4061 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
4062 InsertBefore);
4063 Idxs.pop_back();
4064 if (!To) {
4065 // Couldn't find any inserted value for this index? Cleanup
4066 while (PrevTo != OrigTo) {
4067 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
4068 PrevTo = Del->getAggregateOperand();
4069 Del->eraseFromParent();
4070 }
4071 // Stop processing elements
4072 break;
4073 }
4074 }
4075 // If we successfully found a value for each of our subaggregates
4076 if (To)
4077 return To;
4078 }
4079 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
4080 // the struct's elements had a value that was inserted directly. In the latter
4081 // case, perhaps we can't determine each of the subelements individually, but
4082 // we might be able to find the complete struct somewhere.
4083
4084 // Find the value that is at that particular spot
4085 Value *V = FindInsertedValue(From, Idxs);
4086
4087 if (!V)
4088 return nullptr;
4089
4090 // Insert the value in the new (sub) aggregate
4091 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
4092 "tmp", InsertBefore);
4093}
4094
4095// This helper takes a nested struct and extracts a part of it (which is again a
4096// struct) into a new value. For example, given the struct:
4097// { a, { b, { c, d }, e } }
4098// and the indices "1, 1" this returns
4099// { c, d }.
4100//
4101// It does this by inserting an insertvalue for each element in the resulting
4102// struct, as opposed to just inserting a single struct. This will only work if
4103// each of the elements of the substruct are known (ie, inserted into From by an
4104// insertvalue instruction somewhere).
4105//
4106// All inserted insertvalue instructions are inserted before InsertBefore
4107static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
4108 Instruction *InsertBefore) {
4109   assert(InsertBefore && "Must have someplace to insert!");
4110 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
4111 idx_range);
4112 Value *To = UndefValue::get(IndexedType);
4113 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
4114 unsigned IdxSkip = Idxs.size();
4115
4116 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
4117}
4118
4119/// Given an aggregate and a sequence of indices, see if the scalar value
4120/// indexed is already around as a register, for example if it was inserted
4121/// directly into the aggregate.
4122///
4123/// If InsertBefore is not null, this function will duplicate (modified)
4124/// insertvalues when a part of a nested struct is extracted.
4125Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
4126 Instruction *InsertBefore) {
4127 // Nothing to index? Just return V then (this is useful at the end of our
4128 // recursion).
4129 if (idx_range.empty())
4130 return V;
4131 // We have indices, so V should have an indexable type.
4132   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
4133          "Not looking at a struct or array?");
4134   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
4135          "Invalid indices for type?");
4136
4137 if (Constant *C = dyn_cast<Constant>(V)) {
4138 C = C->getAggregateElement(idx_range[0]);
4139 if (!C) return nullptr;
4140 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
4141 }
4142
4143 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
4144 // Loop the indices for the insertvalue instruction in parallel with the
4145 // requested indices
4146 const unsigned *req_idx = idx_range.begin();
4147 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
4148 i != e; ++i, ++req_idx) {
4149 if (req_idx == idx_range.end()) {
4150 // We can't handle this without inserting insertvalues
4151 if (!InsertBefore)
4152 return nullptr;
4153
4154 // The requested index identifies a part of a nested aggregate. Handle
4155 // this specially. For example,
4156 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
4157 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
4158 // %C = extractvalue {i32, { i32, i32 } } %B, 1
4159 // This can be changed into
4160 // %A = insertvalue {i32, i32 } undef, i32 10, 0
4161 // %C = insertvalue {i32, i32 } %A, i32 11, 1
4162 // which allows the unused 0,0 element from the nested struct to be
4163 // removed.
4164 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
4165 InsertBefore);
4166 }
4167
4168     // This insertvalue inserts something other than what we are looking for.
4169 // See if the (aggregate) value inserted into has the value we are
4170 // looking for, then.
4171 if (*req_idx != *i)
4172 return FindInsertedValue(I->getAggregateOperand(), idx_range,
4173 InsertBefore);
4174 }
4175 // If we end up here, the indices of the insertvalue match with those
4176 // requested (though possibly only partially). Now we recursively look at
4177 // the inserted value, passing any remaining indices.
4178 return FindInsertedValue(I->getInsertedValueOperand(),
4179 makeArrayRef(req_idx, idx_range.end()),
4180 InsertBefore);
4181 }
4182
4183 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4184 // If we're extracting a value from an aggregate that was extracted from
4185 // something else, we can extract from that something else directly instead.
4186 // However, we will need to chain I's indices with the requested indices.
4187
4188 // Calculate the number of indices required
4189 unsigned size = I->getNumIndices() + idx_range.size();
4190 // Allocate some space to put the new indices in
4191 SmallVector<unsigned, 5> Idxs;
4192 Idxs.reserve(size);
4193 // Add indices from the extract value instruction
4194 Idxs.append(I->idx_begin(), I->idx_end());
4195
4196 // Add requested indices
4197 Idxs.append(idx_range.begin(), idx_range.end());
4198
4199     assert(Idxs.size() == size
4200            && "Number of indices added not correct?");
4201
4202 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4203 }
4204   // Otherwise, we don't know (such as extracting from a function return value
4205   // or a load instruction).
4206 return nullptr;
4207}
4208
4209bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4210 unsigned CharSize) {
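  // For illustration (hypothetical IR, not from the original source), a GEP
  // such as
  //   getelementptr inbounds [13 x i8], [13 x i8]* @str, i64 0, i64 %idx
  // passes the checks below for CharSize == 8: it has three operands, its
  // source element type is an array of i8, and its first index is 0.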
4211 // Make sure the GEP has exactly three arguments.
4212 if (GEP->getNumOperands() != 3)
4213 return false;
4214
4215   // Make sure the index-ee is a pointer to an array of \p CharSize
4216   // integers.
4217 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4218 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4219 return false;
4220
4221 // Check to make sure that the first operand of the GEP is an integer and
4222 // has value 0 so that we are sure we're indexing into the initializer.
4223 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4224 if (!FirstIdx || !FirstIdx->isZero())
4225 return false;
4226
4227 return true;
4228}
4229
4230// If V refers to an initialized global constant, set Slice either to
4231// its initializer if the size of its elements equals ElementSize, or,
4232 // for ElementSize == 8, to its representation as an array of unsigned
4233// char. Return true on success.
4234bool llvm::getConstantDataArrayInfo(const Value *V,
4235 ConstantDataArraySlice &Slice,
4236 unsigned ElementSize, uint64_t Offset) {
4237   assert(V);
4238
4239 // Drill down into the pointer expression V, ignoring any intervening
4240 // casts, and determine the identity of the object it references along
4241 // with the cumulative byte offset into it.
4242 const GlobalVariable *GV =
4243 dyn_cast<GlobalVariable>(getUnderlyingObject(V));
4244 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4245 // Fail if V is not based on constant global object.
4246 return false;
4247
4248 const DataLayout &DL = GV->getParent()->getDataLayout();
4249 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0);
4250
4251 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off,
4252 /*AllowNonInbounds*/ true))
4253 // Fail if a constant offset could not be determined.
4254 return false;
4255
4256 uint64_t StartIdx = Off.getLimitedValue();
4257   if (StartIdx == UINT64_MAX)
4258 // Fail if the constant offset is excessive.
4259 return false;
4260
4261 Offset += StartIdx;
4262
4263 ConstantDataArray *Array = nullptr;
4264 ArrayType *ArrayTy = nullptr;
4265
4266 if (GV->getInitializer()->isNullValue()) {
4267 Type *GVTy = GV->getValueType();
4268 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4269 uint64_t Length = SizeInBytes / (ElementSize / 8);
4270
4271 Slice.Array = nullptr;
4272 Slice.Offset = 0;
4273 // Return an empty Slice for undersized constants to let callers
4274 // transform even undefined library calls into simpler, well-defined
4275 // expressions. This is preferable to making the calls although it
4276 // prevents sanitizers from detecting such calls.
4277 Slice.Length = Length < Offset ? 0 : Length - Offset;
4278 return true;
4279 }
4280
4281 auto *Init = const_cast<Constant *>(GV->getInitializer());
4282 if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
4283 Type *InitElTy = ArrayInit->getElementType();
4284 if (InitElTy->isIntegerTy(ElementSize)) {
4285 // If Init is an initializer for an array of the expected type
4286 // and size, use it as is.
4287 Array = ArrayInit;
4288 ArrayTy = ArrayInit->getType();
4289 }
4290 }
4291
4292 if (!Array) {
4293 if (ElementSize != 8)
4294 // TODO: Handle conversions to larger integral types.
4295 return false;
4296
4297 // Otherwise extract the portion of the initializer starting
4298 // at Offset as an array of bytes, and reset Offset.
4299 Init = ReadByteArrayFromGlobal(GV, Offset);
4300 if (!Init)
4301 return false;
4302
4303 Offset = 0;
4304 Array = dyn_cast<ConstantDataArray>(Init);
4305 ArrayTy = dyn_cast<ArrayType>(Init->getType());
4306 }
4307
4308 uint64_t NumElts = ArrayTy->getArrayNumElements();
4309 if (Offset > NumElts)
4310 return false;
4311
4312 Slice.Array = Array;
4313 Slice.Offset = Offset;
4314 Slice.Length = NumElts - Offset;
4315 return true;
4316}
4317
4318/// Extract bytes from the initializer of the constant array V, which need
4319/// not be a nul-terminated string. On success, store the bytes in Str and
4320/// return true. When TrimAtNul is set, Str will contain only the bytes up
4321/// to but not including the first nul. Return false on failure.
4322bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4323 uint64_t Offset, bool TrimAtNul) {
4324 ConstantDataArraySlice Slice;
4325 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4326 return false;
4327
4328 if (Slice.Array == nullptr) {
4329 if (TrimAtNul) {
4330 // Return a nul-terminated string even for an empty Slice. This is
4331 // safe because all existing SimplifyLibcalls callers require string
4332 // arguments and the behavior of the functions they fold is undefined
4333 // otherwise. Folding the calls this way is preferable to making
4334 // the undefined library calls, even though it prevents sanitizers
4335 // from reporting such calls.
4336 Str = StringRef();
4337 return true;
4338 }
4339 if (Slice.Length == 1) {
4340 Str = StringRef("", 1);
4341 return true;
4342 }
4343 // We cannot instantiate a StringRef as we do not have an appropriate string
4344 // of 0s at hand.
4345 return false;
4346 }
4347
4348 // Start out with the entire array in the StringRef.
4349 Str = Slice.Array->getAsString();
4350 // Skip over 'offset' bytes.
4351 Str = Str.substr(Slice.Offset);
4352
4353 if (TrimAtNul) {
4354 // Trim off the \0 and anything after it. If the array is not nul
4355 // terminated, we just return the whole end of string. The client may know
4356 // some other way that the string is length-bound.
4357 Str = Str.substr(0, Str.find('\0'));
4358 }
4359 return true;
4360}
4361
4362// These next two are very similar to the above, but also look through PHI
4363// nodes.
4364// TODO: See if we can integrate these two together.
4365
4366/// If we can compute the length of the string pointed to by
4367/// the specified pointer, return 'len+1'. If we can't, return 0.
4368static uint64_t GetStringLengthH(const Value *V,
4369 SmallPtrSetImpl<const PHINode*> &PHIs,
4370 unsigned CharSize) {
4371 // Look through noop bitcast instructions.
4372 V = V->stripPointerCasts();
4373
4374 // If this is a PHI node, there are two cases: either we have already seen it
4375 // or we haven't.
4376 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4377 if (!PHIs.insert(PN).second)
4378 return ~0ULL; // already in the set.
4379
4380 // If it was new, see if all the input strings are the same length.
4381 uint64_t LenSoFar = ~0ULL;
4382 for (Value *IncValue : PN->incoming_values()) {
4383 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4384 if (Len == 0) return 0; // Unknown length -> unknown.
4385
4386 if (Len == ~0ULL) continue;
4387
4388 if (Len != LenSoFar && LenSoFar != ~0ULL)
4389 return 0; // Disagree -> unknown.
4390 LenSoFar = Len;
4391 }
4392
4393 // Success, all agree.
4394 return LenSoFar;
4395 }
4396
4397 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4398 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4399 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4400 if (Len1 == 0) return 0;
4401 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4402 if (Len2 == 0) return 0;
4403 if (Len1 == ~0ULL) return Len2;
4404 if (Len2 == ~0ULL) return Len1;
4405 if (Len1 != Len2) return 0;
4406 return Len1;
4407 }
4408
4409 // Otherwise, see if we can read the string.
4410 ConstantDataArraySlice Slice;
4411 if (!getConstantDataArrayInfo(V, Slice, CharSize))
4412 return 0;
4413
4414 if (Slice.Array == nullptr)
4415 // Zeroinitializer (including an empty one).
4416 return 1;
4417
4418 // Search for the first nul character. Return a conservative result even
4419 // when there is no nul. This is safe since otherwise the string function
4420   // being folded, such as strlen, is undefined, and folding can be preferable
4421   // to making the undefined library call.
4422 unsigned NullIndex = 0;
4423 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4424 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4425 break;
4426 }
4427
4428 return NullIndex + 1;
4429}
4430
4431/// If we can compute the length of the string pointed to by
4432/// the specified pointer, return 'len+1'. If we can't, return 0.
4433uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4434 if (!V->getType()->isPointerTy())
4435 return 0;
4436
4437 SmallPtrSet<const PHINode*, 32> PHIs;
4438 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4439 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4440 // an empty string as a length.
4441 return Len == ~0ULL ? 1 : Len;
4442}
4443
4444const Value *
4445llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4446 bool MustPreserveNullness) {
4447   assert(Call &&
4448          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4449 if (const Value *RV = Call->getReturnedArgOperand())
4450 return RV;
4451   // This can be used only as an aliasing property.
4452 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4453 Call, MustPreserveNullness))
4454 return Call->getArgOperand(0);
4455 return nullptr;
4456}
4457
4458bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4459 const CallBase *Call, bool MustPreserveNullness) {
4460 switch (Call->getIntrinsicID()) {
4461 case Intrinsic::launder_invariant_group:
4462 case Intrinsic::strip_invariant_group:
4463 case Intrinsic::aarch64_irg:
4464 case Intrinsic::aarch64_tagp:
4465 return true;
4466 case Intrinsic::ptrmask:
4467 return !MustPreserveNullness;
4468 default:
4469 return false;
4470 }
4471}
4472
4473/// \p PN defines a loop-variant pointer to an object. Check if the
4474/// previous iteration of the loop was referring to the same object as \p PN.
4475static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4476 const LoopInfo *LI) {
4477 // Find the loop-defined value.
4478 Loop *L = LI->getLoopFor(PN->getParent());
4479 if (PN->getNumIncomingValues() != 2)
4480 return true;
4481
4482 // Find the value from previous iteration.
4483 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4484 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4485 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4486 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4487 return true;
4488
4489 // If a new pointer is loaded in the loop, the pointer references a different
4490 // object in every iteration. E.g.:
4491 // for (i)
4492 // int *p = a[i];
4493 // ...
4494 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4495 if (!L->isLoopInvariant(Load->getPointerOperand()))
4496 return false;
4497 return true;
4498}
4499
4500const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4501 if (!V->getType()->isPointerTy())
4502 return V;
4503 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4504 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4505 V = GEP->getPointerOperand();
4506 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4507 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4508 V = cast<Operator>(V)->getOperand(0);
4509 if (!V->getType()->isPointerTy())
4510 return V;
4511 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4512 if (GA->isInterposable())
4513 return V;
4514 V = GA->getAliasee();
4515 } else {
4516 if (auto *PHI = dyn_cast<PHINode>(V)) {
4517 // Look through single-arg phi nodes created by LCSSA.
4518 if (PHI->getNumIncomingValues() == 1) {
4519 V = PHI->getIncomingValue(0);
4520 continue;
4521 }
4522 } else if (auto *Call = dyn_cast<CallBase>(V)) {
4523 // CaptureTracking can know about special capturing properties of some
4524 // intrinsics like launder.invariant.group, that can't be expressed with
4525 // the attributes, but have properties like returning aliasing pointer.
4526       // Because some analyses may assume that a nocapture pointer is not
4527       // returned from some special intrinsic (because the function would have
4528       // to be marked with the returns attribute), it is crucial to use this
4529       // function because it should be in sync with CaptureTracking. Not using
4530       // it may cause weird miscompilations where two aliasing pointers are
4531       // assumed to be noalias.
4532 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4533 V = RP;
4534 continue;
4535 }
4536 }
4537
4538 return V;
4539 }
4540     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4541 }
4542 return V;
4543}
4544
4545void llvm::getUnderlyingObjects(const Value *V,
4546 SmallVectorImpl<const Value *> &Objects,
4547 LoopInfo *LI, unsigned MaxLookup) {
4548 SmallPtrSet<const Value *, 4> Visited;
4549 SmallVector<const Value *, 4> Worklist;
4550 Worklist.push_back(V);
4551 do {
4552 const Value *P = Worklist.pop_back_val();
4553 P = getUnderlyingObject(P, MaxLookup);
4554
4555 if (!Visited.insert(P).second)
4556 continue;
4557
4558 if (auto *SI = dyn_cast<SelectInst>(P)) {
4559 Worklist.push_back(SI->getTrueValue());
4560 Worklist.push_back(SI->getFalseValue());
4561 continue;
4562 }
4563
4564 if (auto *PN = dyn_cast<PHINode>(P)) {
4565 // If this PHI changes the underlying object in every iteration of the
4566 // loop, don't look through it. Consider:
4567 // int **A;
4568 // for (i) {
4569 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
4570 // Curr = A[i];
4571 // *Prev, *Curr;
4572 //
4573 // Prev is tracking Curr one iteration behind so they refer to different
4574 // underlying objects.
4575 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4576 isSameUnderlyingObjectInLoop(PN, LI))
4577 append_range(Worklist, PN->incoming_values());
4578 continue;
4579 }
4580
4581 Objects.push_back(P);
4582 } while (!Worklist.empty());
4583}
4584
4585/// This is the function that does the work of looking through basic
4586/// ptrtoint+arithmetic+inttoptr sequences.
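/// For illustration (hypothetical IR, not from the original source): given
///   %i = ptrtoint i8* %p to i64
///   %j = add i64 %i, 32
/// the walk below moves from %j through the add (its second operand is a
/// ConstantInt) to %i, and the ptrtoint hands back %p.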
4587static const Value *getUnderlyingObjectFromInt(const Value *V) {
4588 do {
4589 if (const Operator *U = dyn_cast<Operator>(V)) {
4590 // If we find a ptrtoint, we can transfer control back to the
4591 // regular getUnderlyingObjectFromInt.
4592 if (U->getOpcode() == Instruction::PtrToInt)
4593 return U->getOperand(0);
4594 // If we find an add of a constant, a multiplied value, or a phi, it's
4595 // likely that the other operand will lead us to the base
4596 // object. We don't have to worry about the case where the
4597 // object address is somehow being computed by the multiply,
4598 // because our callers only care when the result is an
4599 // identifiable object.
4600 if (U->getOpcode() != Instruction::Add ||
4601 (!isa<ConstantInt>(U->getOperand(1)) &&
4602 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4603 !isa<PHINode>(U->getOperand(1))))
4604 return V;
4605 V = U->getOperand(0);
4606 } else {
4607 return V;
4608 }
4609     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4610 } while (true);
4611}
4612
4613/// This is a wrapper around getUnderlyingObjects and adds support for basic
4614/// ptrtoint+arithmetic+inttoptr sequences.
4615 /// It returns false if an unidentified object is found in getUnderlyingObjects.
4616bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4617 SmallVectorImpl<Value *> &Objects) {
4618 SmallPtrSet<const Value *, 16> Visited;
4619 SmallVector<const Value *, 4> Working(1, V);
4620 do {
4621 V = Working.pop_back_val();
4622
4623 SmallVector<const Value *, 4> Objs;
4624 getUnderlyingObjects(V, Objs);
4625
4626 for (const Value *V : Objs) {
4627 if (!Visited.insert(V).second)
4628 continue;
4629 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4630 const Value *O =
4631 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4632 if (O->getType()->isPointerTy()) {
4633 Working.push_back(O);
4634 continue;
4635 }
4636 }
4637 // If getUnderlyingObjects fails to find an identifiable object,
4638 // getUnderlyingObjectsForCodeGen also fails for safety.
4639 if (!isIdentifiedObject(V)) {
4640 Objects.clear();
4641 return false;
4642 }
4643 Objects.push_back(const_cast<Value *>(V));
4644 }
4645 } while (!Working.empty());
4646 return true;
4647}
4648
4649AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4650 AllocaInst *Result = nullptr;
4651 SmallPtrSet<Value *, 4> Visited;
4652 SmallVector<Value *, 4> Worklist;
4653
4654 auto AddWork = [&](Value *V) {
4655 if (Visited.insert(V).second)
4656 Worklist.push_back(V);
4657 };
4658
4659 AddWork(V);
4660 do {
4661 V = Worklist.pop_back_val();
4662     assert(Visited.count(V));
4663
4664 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4665 if (Result && Result != AI)
4666 return nullptr;
4667 Result = AI;
4668 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4669 AddWork(CI->getOperand(0));
4670 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4671 for (Value *IncValue : PN->incoming_values())
4672 AddWork(IncValue);
4673 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4674 AddWork(SI->getTrueValue());
4675 AddWork(SI->getFalseValue());
4676 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4677 if (OffsetZero && !GEP->hasAllZeroIndices())
4678 return nullptr;
4679 AddWork(GEP->getPointerOperand());
4680 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
4681 Value *Returned = CB->getReturnedArgOperand();
4682 if (Returned)
4683 AddWork(Returned);
4684 else
4685 return nullptr;
4686 } else {
4687 return nullptr;
4688 }
4689 } while (!Worklist.empty());
4690
4691 return Result;
4692}
4693
4694static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4695 const Value *V, bool AllowLifetime, bool AllowDroppable) {
4696 for (const User *U : V->users()) {
4697 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4698 if (!II)
4699 return false;
4700
4701 if (AllowLifetime && II->isLifetimeStartOrEnd())
4702 continue;
4703
4704 if (AllowDroppable && II->isDroppable())
4705 continue;
4706
4707 return false;
4708 }
4709 return true;
4710}
4711
4712bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4713 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4714 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4715}
4716bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4717 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4718 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4719}
4720
4721bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4722 if (!LI.isUnordered())
4723 return true;
4724 const Function &F = *LI.getFunction();
4725 // Speculative load may create a race that did not exist in the source.
4726 return F.hasFnAttribute(Attribute::SanitizeThread) ||
4727 // Speculative load may load data from dirty regions.
4728 F.hasFnAttribute(Attribute::SanitizeAddress) ||
4729 F.hasFnAttribute(Attribute::SanitizeHWAddress);
4730}
4731
4732bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
4733 const Instruction *CtxI,
4734 AssumptionCache *AC,
4735 const DominatorTree *DT,
4736 const TargetLibraryInfo *TLI) {
4737 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
4738 AC, DT, TLI);
4739}
4740
4741bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
4742 unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
4743 AssumptionCache *AC, const DominatorTree *DT,
4744 const TargetLibraryInfo *TLI) {
4745#ifndef NDEBUG
4746 if (Inst->getOpcode() != Opcode) {
4747 // Check that the operands are actually compatible with the Opcode override.
4748 auto hasEqualReturnAndLeadingOperandTypes =
4749 [](const Instruction *Inst, unsigned NumLeadingOperands) {
4750 if (Inst->getNumOperands() < NumLeadingOperands)
4751 return false;
4752 const Type *ExpectedType = Inst->getType();
4753 for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
4754 if (Inst->getOperand(ItOp)->getType() != ExpectedType)
4755 return false;
4756 return true;
4757 };
4758     assert(!Instruction::isBinaryOp(Opcode) ||
4759            hasEqualReturnAndLeadingOperandTypes(Inst, 2));
4760     assert(!Instruction::isUnaryOp(Opcode) ||
4761            hasEqualReturnAndLeadingOperandTypes(Inst, 1));
4762 }
4763#endif
4764
4765 switch (Opcode) {
4766 default:
4767 return true;
4768 case Instruction::UDiv:
4769 case Instruction::URem: {
4770 // x / y is undefined if y == 0.
4771 const APInt *V;
4772 if (match(Inst->getOperand(1), m_APInt(V)))
4773 return *V != 0;
4774 return false;
4775 }
4776 case Instruction::SDiv:
4777 case Instruction::SRem: {
4778 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4779 const APInt *Numerator, *Denominator;
4780 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4781 return false;
4782 // We cannot hoist this division if the denominator is 0.
4783 if (*Denominator == 0)
4784 return false;
4785 // It's safe to hoist if the denominator is not 0 or -1.
4786 if (!Denominator->isAllOnes())
4787 return true;
4788     // At this point we know that the denominator is -1. It is safe to hoist as
4789     // long as we know that the numerator is not INT_MIN.
4790 if (match(Inst->getOperand(0), m_APInt(Numerator)))
4791 return !Numerator->isMinSignedValue();
4792 // The numerator *might* be MinSignedValue.
4793 return false;
4794 }
4795 case Instruction::Load: {
4796 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
4797 if (!LI)
4798 return false;
4799 if (mustSuppressSpeculation(*LI))
4800 return false;
4801 const DataLayout &DL = LI->getModule()->getDataLayout();
4802 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
4803 LI->getType(), LI->getAlign(), DL,
4804 CtxI, AC, DT, TLI);
4805 }
4806 case Instruction::Call: {
4807 auto *CI = dyn_cast<const CallInst>(Inst);
4808 if (!CI)
4809 return false;
4810 const Function *Callee = CI->getCalledFunction();
4811
4812 // The called function could have undefined behavior or side-effects, even
4813 // if marked readnone nounwind.
4814 return Callee && Callee->isSpeculatable();
4815 }
4816 case Instruction::VAArg:
4817 case Instruction::Alloca:
4818 case Instruction::Invoke:
4819 case Instruction::CallBr:
4820 case Instruction::PHI:
4821 case Instruction::Store:
4822 case Instruction::Ret:
4823 case Instruction::Br:
4824 case Instruction::IndirectBr:
4825 case Instruction::Switch:
4826 case Instruction::Unreachable:
4827 case Instruction::Fence:
4828 case Instruction::AtomicRMW:
4829 case Instruction::AtomicCmpXchg:
4830 case Instruction::LandingPad:
4831 case Instruction::Resume:
4832 case Instruction::CatchSwitch:
4833 case Instruction::CatchPad:
4834 case Instruction::CatchRet:
4835 case Instruction::CleanupPad:
4836 case Instruction::CleanupRet:
4837 return false; // Misc instructions which have effects
4838 }
4839}
4840
4841bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {
4842 if (I.mayReadOrWriteMemory())
4843 // Memory dependency possible
4844 return true;
4845 if (!isSafeToSpeculativelyExecute(&I))
4846 // Can't move above a maythrow call or infinite loop. Or if an
4847 // inalloca alloca, above a stacksave call.
4848 return true;
4849 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4850 // 1) Can't reorder two inf-loop calls, even if readonly
4851   // 2) Also can't reorder an inf-loop call below an instruction which isn't
4852   //    safe to speculatively execute. (Inverse of above)
4853 return true;
4854 return false;
4855}
4856
4857/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4858static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4859 switch (OR) {
4860 case ConstantRange::OverflowResult::MayOverflow:
4861 return OverflowResult::MayOverflow;
4862 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4863 return OverflowResult::AlwaysOverflowsLow;
4864 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4865 return OverflowResult::AlwaysOverflowsHigh;
4866 case ConstantRange::OverflowResult::NeverOverflows:
4867 return OverflowResult::NeverOverflows;
4868 }
4869   llvm_unreachable("Unknown OverflowResult");
4870}
4871
4872/// Combine constant ranges from computeConstantRange() and computeKnownBits().
4873static ConstantRange computeConstantRangeIncludingKnownBits(
4874 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4875 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4876 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4877 KnownBits Known = computeKnownBits(
4878 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4879 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4880 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4881 ConstantRange::PreferredRangeType RangeType =
4882 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4883 return CR1.intersectWith(CR2, RangeType);
4884}
4885
4886OverflowResult llvm::computeOverflowForUnsignedMul(
4887 const Value *LHS, const Value *RHS, const DataLayout &DL,
4888 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4889 bool UseInstrInfo) {
4890 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4891 nullptr, UseInstrInfo);
4892 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4893 nullptr, UseInstrInfo);
4894 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4895 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4896 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4897}
4898
4899OverflowResult
4900llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4901 const DataLayout &DL, AssumptionCache *AC,
4902 const Instruction *CxtI,
4903 const DominatorTree *DT, bool UseInstrInfo) {
4904   // Multiplying a value with n significant bits by a value with m significant
4905   // bits yields a result with at most n + m significant bits. If that total
4906   // does not exceed the result bit width (minus 1), there is no overflow.
4907 // This means if we have enough leading sign bits in the operands
4908 // we can guarantee that the result does not overflow.
4909 // Ref: "Hacker's Delight" by Henry Warren
4910 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4911
4912 // Note that underestimating the number of sign bits gives a more
4913 // conservative answer.
4914 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4915 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4916
4917 // First handle the easy case: if we have enough sign bits there's
4918 // definitely no overflow.
4919 if (SignBits > BitWidth + 1)
4920 return OverflowResult::NeverOverflows;
4921
4922 // There are two ambiguous cases where there can be no overflow:
4923 // SignBits == BitWidth + 1 and
4924 // SignBits == BitWidth
4925 // The second case is difficult to check, therefore we only handle the
4926 // first case.
4927 if (SignBits == BitWidth + 1) {
4928 // It overflows only when both arguments are negative and the true
4929 // product is exactly the minimum negative number.
4930 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4931 // For simplicity we just check if at least one side is not negative.
4932 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4933 nullptr, UseInstrInfo);
4934 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4935 nullptr, UseInstrInfo);
4936 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4937 return OverflowResult::NeverOverflows;
4938 }
4939 return OverflowResult::MayOverflow;
4940}
4941
4942OverflowResult llvm::computeOverflowForUnsignedAdd(
4943 const Value *LHS, const Value *RHS, const DataLayout &DL,
4944 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4945 bool UseInstrInfo) {
4946 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4947 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4948 nullptr, UseInstrInfo);
4949 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4950 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4951 nullptr, UseInstrInfo);
4952 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4953}
4954
4955static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4956 const Value *RHS,
4957 const AddOperator *Add,
4958 const DataLayout &DL,
4959 AssumptionCache *AC,
4960 const Instruction *CxtI,
4961 const DominatorTree *DT) {
4962 if (Add && Add->hasNoSignedWrap()) {
4963 return OverflowResult::NeverOverflows;
4964 }
4965
4966 // If LHS and RHS each have at least two sign bits, the addition will look
4967 // like
4968 //
4969 // XX..... +
4970 // YY.....
4971 //
4972 // If the carry into the most significant position is 0, X and Y can't both
4973 // be 1 and therefore the carry out of the addition is also 0.
4974 //
4975 // If the carry into the most significant position is 1, X and Y can't both
4976 // be 0 and therefore the carry out of the addition is also 1.
4977 //
4978 // Since the carry into the most significant position is always equal to
4979 // the carry out of the addition, there is no signed overflow.
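  //
  // For illustration (not from the original source): with i8 operands that
  // each have at least two sign bits, both values lie in [-64, 63], so the
  // sum lies in [-128, 126] and always fits in i8.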
4980 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4981 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4982 return OverflowResult::NeverOverflows;
4983
4984 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4985 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4986 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4987 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4988 OverflowResult OR =
4989 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4990 if (OR != OverflowResult::MayOverflow)
4991 return OR;
4992
4993   // The remaining code needs Add to be available. Return early if it is not.
4994 if (!Add)
4995 return OverflowResult::MayOverflow;
4996
4997 // If the sign of Add is the same as at least one of the operands, this add
4998 // CANNOT overflow. If this can be determined from the known bits of the
4999 // operands the above signedAddMayOverflow() check will have already done so.
5000 // The only other way to improve on the known bits is from an assumption, so
5001 // call computeKnownBitsFromAssume() directly.
5002 bool LHSOrRHSKnownNonNegative =
5003 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
5004 bool LHSOrRHSKnownNegative =
5005 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
5006 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
5007 KnownBits AddKnown(LHSRange.getBitWidth());
5008 computeKnownBitsFromAssume(
5009 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
5010 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
5011 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
5012 return OverflowResult::NeverOverflows;
5013 }
5014
5015 return OverflowResult::MayOverflow;
5016}
5017
5018OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
5019 const Value *RHS,
5020 const DataLayout &DL,
5021 AssumptionCache *AC,
5022 const Instruction *CxtI,
5023 const DominatorTree *DT) {
5024 // X - (X % ?)
5025 // The remainder of a value can't have greater magnitude than itself,
5026 // so the subtraction can't overflow.
5027
5028 // X - (X -nuw ?)
5029 // In the minimal case, this would simplify to "?", so there's no subtract
5030 // at all. But if this analysis is used to peek through casts, for example,
5031 // then determining no-overflow may allow other transforms.
5032
5033 // TODO: There are other patterns like this.
5034 // See simplifyICmpWithBinOpOnLHS() for candidates.
5035 if (match(RHS, m_URem(m_Specific(LHS), m_Value())) ||
5036 match(RHS, m_NUWSub(m_Specific(LHS), m_Value())))
5037 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
5038 return OverflowResult::NeverOverflows;
5039
5040 // Checking for conditions implied by dominating conditions may be expensive.
5041 // Limit it to usub_with_overflow calls for now.
5042 if (match(CxtI,
5043 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
5044 if (auto C =
5045 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
5046 if (*C)
5047 return OverflowResult::NeverOverflows;
5048 return OverflowResult::AlwaysOverflowsLow;
5049 }
5050 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
5051 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
5052 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
5053 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
5054 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
5055}
5056
5057OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
5058 const Value *RHS,
5059 const DataLayout &DL,
5060 AssumptionCache *AC,
5061 const Instruction *CxtI,
5062 const DominatorTree *DT) {
5063 // X - (X % ?)
5064 // The remainder of a value can't have greater magnitude than itself,
5065 // so the subtraction can't overflow.
5066
5067 // X - (X -nsw ?)
5068 // In the minimal case, this would simplify to "?", so there's no subtract
5069 // at all. But if this analysis is used to peek through casts, for example,
5070 // then determining no-overflow may allow other transforms.
5071 if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) ||
5072 match(RHS, m_NSWSub(m_Specific(LHS), m_Value())))
5073 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
5074 return OverflowResult::NeverOverflows;
5075
5076 // If LHS and RHS each have at least two sign bits, the subtraction
5077 // cannot overflow.
5078 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
5079 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
5080 return OverflowResult::NeverOverflows;
5081
5082 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
5083 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5084 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
5085 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5086 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
5087}
5088
5089bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
5090 const DominatorTree &DT) {
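  // For illustration (hypothetical IR, not from the original source), the
  // shape this recognizes is:
  //   %s  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  //   %ov = extractvalue { i32, i1 } %s, 1
  //   br i1 %ov, label %trap, label %cont
  // Uses of 'extractvalue %s, 0' dominated by the edge to %cont cannot have
  // wrapped.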
5091 SmallVector<const BranchInst *, 2> GuardingBranches;
5092 SmallVector<const ExtractValueInst *, 2> Results;
5093
5094 for (const User *U : WO->users()) {
5095 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
5096       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
5097
5098 if (EVI->getIndices()[0] == 0)
5099 Results.push_back(EVI);
5100 else {
5101         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
5102
5103 for (const auto *U : EVI->users())
5104 if (const auto *B = dyn_cast<BranchInst>(U)) {
5105             assert(B->isConditional() && "How else is it using an i1?");
5106 GuardingBranches.push_back(B);
5107 }
5108 }
5109 } else {
5110 // We are using the aggregate directly in a way we don't want to analyze
5111 // here (storing it to a global, say).
5112 return false;
5113 }
5114 }
5115
5116 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
5117 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
5118 if (!NoWrapEdge.isSingleEdge())
5119 return false;
5120
5121 // Check if all users of the add are provably no-wrap.
5122 for (const auto *Result : Results) {
5123       // If the extractvalue itself is not executed on overflow, then we don't
5124       // need to check each use separately, since domination is transitive.
5125 if (DT.dominates(NoWrapEdge, Result->getParent()))
5126 continue;
5127
5128 for (const auto &RU : Result->uses())
5129 if (!DT.dominates(NoWrapEdge, RU))
5130 return false;
5131 }
5132
5133 return true;
5134 };
5135
5136 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
5137}
5138
5139static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
5140 bool ConsiderFlags) {
5141
5142 if (ConsiderFlags && Op->hasPoisonGeneratingFlags())
5143 return true;
5144
5145 unsigned Opcode = Op->getOpcode();
5146
5147 // Check whether opcode is a poison/undef-generating operation
5148 switch (Opcode) {
5149 case Instruction::Shl:
5150 case Instruction::AShr:
5151 case Instruction::LShr: {
5152     // Shifts return poison if the shift amount is not smaller than the bitwidth.
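    // For illustration (not from the original source): 'shl i32 %x, 33' is
    // poison for every %x, while 'shl i32 %x, 5' is not.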
5153 if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
5154 SmallVector<Constant *, 4> ShiftAmounts;
5155 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
5156 unsigned NumElts = FVTy->getNumElements();
5157 for (unsigned i = 0; i < NumElts; ++i)
5158 ShiftAmounts.push_back(C->getAggregateElement(i));
5159 } else if (isa<ScalableVectorType>(C->getType()))
5160 return true; // Can't tell, just return true to be safe
5161 else
5162 ShiftAmounts.push_back(C);
5163
5164 bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
5165 auto *CI = dyn_cast_or_null<ConstantInt>(C);
5166 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
5167 });
5168 return !Safe;
5169 }
5170 return true;
5171 }
5172 case Instruction::FPToSI:
5173 case Instruction::FPToUI:
5174 // fptosi/ui yields poison if the resulting value does not fit in the
5175 // destination type.
5176 return true;
5177 case Instruction::Call:
5178 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
5179 switch (II->getIntrinsicID()) {
5180 // TODO: Add more intrinsics.
5181 case Intrinsic::ctpop:
5182 case Intrinsic::sadd_with_overflow:
5183 case Intrinsic::ssub_with_overflow:
5184 case Intrinsic::smul_with_overflow:
5185 case Intrinsic::uadd_with_overflow:
5186 case Intrinsic::usub_with_overflow:
5187 case Intrinsic::umul_with_overflow:
5188 return false;
5189 }
5190 }
5191 [[fallthrough]];
5192 case Instruction::CallBr:
5193 case Instruction::Invoke: {
5194 const auto *CB = cast<CallBase>(Op);
5195 return !CB->hasRetAttr(Attribute::NoUndef);
5196 }
5197 case Instruction::InsertElement:
5198 case Instruction::ExtractElement: {
5199 // If index exceeds the length of the vector, it returns poison
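    // For illustration (not from the original source):
    // 'extractelement <4 x i32> %v, i64 7' returns poison.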
5200 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
5201 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
5202 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
5203 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
5204 return true;
5205 return false;
5206 }
5207 case Instruction::ShuffleVector: {
5208 // shufflevector may return undef.
5209 if (PoisonOnly)
5210 return false;
5211 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
5212 ? cast<ConstantExpr>(Op)->getShuffleMask()
5213 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
5214 return is_contained(Mask, UndefMaskElem);
5215 }
5216 case Instruction::FNeg:
5217 case Instruction::PHI:
5218 case Instruction::Select:
5219 case Instruction::URem:
5220 case Instruction::SRem:
5221 case Instruction::ExtractValue:
5222 case Instruction::InsertValue:
5223 case Instruction::Freeze:
5224 case Instruction::ICmp:
5225 case Instruction::FCmp:
5226 return false;
5227 case Instruction::GetElementPtr:
5228 // inbounds is handled above
5229 // TODO: what about inrange on constexpr?
5230 return false;
5231 default: {
5232 const auto *CE = dyn_cast<ConstantExpr>(Op);
5233 if (isa<CastInst>(Op) || (CE && CE->isCast()))
5234 return false;
5235 else if (Instruction::isBinaryOp(Opcode))
5236 return false;
5237 // Be conservative and return true.
5238 return true;
5239 }
5240 }
5241}
5242
5243bool llvm::canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlags) {
5244 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false, ConsiderFlags);
5245}
5246
5247bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlags) {
5248 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true, ConsiderFlags);
5249}
5250
5251static bool directlyImpliesPoison(const Value *ValAssumedPoison,
5252 const Value *V, unsigned Depth) {
5253 if (ValAssumedPoison == V)
5254 return true;
5255
5256 const unsigned MaxDepth = 2;
5257 if (Depth >= MaxDepth)
5258 return false;
5259
5260 if (const auto *I = dyn_cast<Instruction>(V)) {
5261 if (propagatesPoison(cast<Operator>(I)))
5262 return any_of(I->operands(), [=](const Value *Op) {
5263 return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
5264 });
5265
5266 // 'select ValAssumedPoison, _, _' is poison.
5267 if (const auto *SI = dyn_cast<SelectInst>(I))
5268 return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
5269 Depth + 1);
5270 // V = extractvalue V0, idx
5271 // V2 = extractvalue V0, idx2
5272 // V0's elements are all poison or not. (e.g., add_with_overflow)
5273 const WithOverflowInst *II;
5274 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
5275 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
5276 llvm::is_contained(II->args(), ValAssumedPoison)))
5277 return true;
5278 }
5279 return false;
5280}
5281
5282static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
5283 unsigned Depth) {
5284 if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
5285 return true;
5286
5287 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
5288 return true;
5289
5290 const unsigned MaxDepth = 2;
5291 if (Depth >= MaxDepth)
5292 return false;
5293
5294 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
5295 if (I && !canCreatePoison(cast<Operator>(I))) {
5296 return all_of(I->operands(), [=](const Value *Op) {
5297 return impliesPoison(Op, V, Depth + 1);
5298 });
5299 }
5300 return false;
5301}
5302
5303bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
5304 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
5305}
5306
5307static bool programUndefinedIfUndefOrPoison(const Value *V,
5308 bool PoisonOnly);
5309
5310static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
5311 AssumptionCache *AC,
5312 const Instruction *CtxI,
5313 const DominatorTree *DT,
5314 unsigned Depth, bool PoisonOnly) {
5315 if (Depth >= MaxAnalysisRecursionDepth)
5316 return false;
5317
5318 if (isa<MetadataAsValue>(V))
5319 return false;
5320
5321 if (const auto *A = dyn_cast<Argument>(V)) {
5322 if (A->hasAttribute(Attribute::NoUndef))
5323 return true;
5324 }
5325
5326 if (auto *C = dyn_cast<Constant>(V)) {
5327 if (isa<UndefValue>(C))
5328 return PoisonOnly && !isa<PoisonValue>(C);
5329
5330 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
5331 isa<ConstantPointerNull>(C) || isa<Function>(C))
5332 return true;
5333
5334 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
5335 return (PoisonOnly ? !C->containsPoisonElement()
5336 : !C->containsUndefOrPoisonElement()) &&
5337 !C->containsConstantExpression();
5338 }
5339
5340 // Strip cast operations from a pointer value.
5341 // Note that stripPointerCastsSameRepresentation can strip off getelementptr
5342 // inbounds with zero offset. To guarantee that the result isn't poison, the
5343 // stripped pointer is checked as it has to be pointing into an allocated
5344 // object or be null to ensure that `inbounds` getelementptrs with a
5345 // zero offset cannot produce poison.
5346 // It can also strip off addrspacecasts that do not change the bit
5347 // representation; we consider such an addrspacecast equivalent to a no-op.
5348 auto *StrippedV = V->stripPointerCastsSameRepresentation();
5349 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
5350 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
5351 return true;
5352
5353 auto OpCheck = [&](const Value *V) {
5354 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
5355 PoisonOnly);
5356 };
5357
5358 if (auto *Opr = dyn_cast<Operator>(V)) {
5359 // If the value is a freeze instruction, then it can never
5360 // be undef or poison.
5361 if (isa<FreezeInst>(V))
5362 return true;
5363
5364 if (const auto *CB = dyn_cast<CallBase>(V)) {
5365 if (CB->hasRetAttr(Attribute::NoUndef))
5366 return true;
5367 }
5368
5369 if (const auto *PN = dyn_cast<PHINode>(V)) {
5370 unsigned Num = PN->getNumIncomingValues();
5371 bool IsWellDefined = true;
5372 for (unsigned i = 0; i < Num; ++i) {
5373 auto *TI = PN->getIncomingBlock(i)->getTerminator();
5374 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
5375 DT, Depth + 1, PoisonOnly)) {
5376 IsWellDefined = false;
5377 break;
5378 }
5379 }
5380 if (IsWellDefined)
5381 return true;
5382 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5383 return true;
5384 }
5385
5386 if (auto *I = dyn_cast<LoadInst>(V))
5387 if (I->hasMetadata(LLVMContext::MD_noundef) ||
5388 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
5389 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
5390 return true;
5391
5392 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5393 return true;
5394
5395 // CtxI may be null or a cloned instruction.
5396 if (!CtxI || !CtxI->getParent() || !DT)
5397 return false;
5398
5399 auto *DNode = DT->getNode(CtxI->getParent());
5400 if (!DNode)
5401 // Unreachable block
5402 return false;
5403
5404 // If V is used as a branch condition before reaching CtxI, V cannot be
5405 // undef or poison.
5406 // br V, BB1, BB2
5407 // BB1:
5408 // CtxI ; V cannot be undef or poison here
5409 auto *Dominator = DNode->getIDom();
5410 while (Dominator) {
5411 auto *TI = Dominator->getBlock()->getTerminator();
5412
5413 Value *Cond = nullptr;
5414 if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
5415 if (BI->isConditional())
5416 Cond = BI->getCondition();
5417 } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
5418 Cond = SI->getCondition();
5419 }
5420
5421 if (Cond) {
5422 if (Cond == V)
5423 return true;
5424 else if (PoisonOnly && isa<Operator>(Cond)) {
5425 // For poison, we can analyze further
5426 auto *Opr = cast<Operator>(Cond);
5427 if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
5428 return true;
5429 }
5430 }
5431
5432 Dominator = Dominator->getIDom();
5433 }
5434
5435 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
5436 return true;
5437
5438 return false;
5439}
5440
5441bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5442 const Instruction *CtxI,
5443 const DominatorTree *DT,
5444 unsigned Depth) {
5445 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5446}
5447
5448bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5449 const Instruction *CtxI,
5450 const DominatorTree *DT, unsigned Depth) {
5451 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5452}
5453
5454OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5455 const DataLayout &DL,
5456 AssumptionCache *AC,
5457 const Instruction *CxtI,
5458 const DominatorTree *DT) {
5459 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5460 Add, DL, AC, CxtI, DT);
5461}
5462
5463OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5464 const Value *RHS,
5465 const DataLayout &DL,
5466 AssumptionCache *AC,
5467 const Instruction *CxtI,
5468 const DominatorTree *DT) {
5469 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5470}
5471
5472bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5473 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5474 // of time because it's possible for another thread to interfere with it for an
5475 // arbitrary length of time, but programs aren't allowed to rely on that.
5476
5477 // If there is no successor, then execution can't transfer to it.
5478 if (isa<ReturnInst>(I))
5479 return false;
5480 if (isa<UnreachableInst>(I))
5481 return false;
5482
5483 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
5484 // Instruction::willReturn.
5485 //
5486 // FIXME: Move this check into Instruction::willReturn.
5487 if (isa<CatchPadInst>(I)) {
5488 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
5489 default:
5490 // A catchpad may invoke exception object constructors and such, which
5491 // in some languages can be arbitrary code, so be conservative by default.
5492 return false;
5493 case EHPersonality::CoreCLR:
5494 // For CoreCLR, it just involves a type test.
5495 return true;
5496 }
5497 }
5498
5499 // An instruction that returns without throwing must transfer control flow
5500 // to a successor.
5501 return !I->mayThrow() && I->willReturn();
5502}
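// For illustration (a sketch, not an exhaustive list): a simple `add`
// instruction always transfers execution to its successor, while `ret`,
// `unreachable`, and calls that are not known to be both non-throwing and
// will-returning do not satisfy this predicate.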
5503
5504bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5505 // TODO: This is slightly conservative for invoke instructions since exiting
5506 // via an exception *is* normal control flow for them.
5507 for (const Instruction &I : *BB)
5508 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5509 return false;
5510 return true;
5511}
5512
5513bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5514 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
5515 unsigned ScanLimit) {
5516 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
5517 ScanLimit);
5518}
5519
5520bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5521 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
5522 assert(ScanLimit && "scan limit must be non-zero");
5523 for (const Instruction &I : Range) {
5524 if (isa<DbgInfoIntrinsic>(I))
5525 continue;
5526 if (--ScanLimit == 0)
5527 return false;
5528 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5529 return false;
5530 }
5531 return true;
5532}
5533
5534bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5535 const Loop *L) {
5536 // The loop header is guaranteed to be executed for every iteration.
5537 //
5538 // FIXME: Relax this constraint to cover all basic blocks that are
5539 // guaranteed to be executed at every iteration.
5540 if (I->getParent() != L->getHeader()) return false;
5541
5542 for (const Instruction &LI : *L->getHeader()) {
5543 if (&LI == I) return true;
5544 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5545 }
5546 llvm_unreachable("Instruction not contained in its own parent basic block.");
5547}
5548
5549bool llvm::propagatesPoison(const Operator *I) {
5550 switch (I->getOpcode()) {
5551 case Instruction::Freeze:
5552 case Instruction::Select:
5553 case Instruction::PHI:
5554 case Instruction::Invoke:
5555 return false;
5556 case Instruction::Call:
5557 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5558 switch (II->getIntrinsicID()) {
5559 // TODO: Add more intrinsics.
5560 case Intrinsic::sadd_with_overflow:
5561 case Intrinsic::ssub_with_overflow:
5562 case Intrinsic::smul_with_overflow:
5563 case Intrinsic::uadd_with_overflow:
5564 case Intrinsic::usub_with_overflow:
5565 case Intrinsic::umul_with_overflow:
5566 // If an input is a vector containing a poison element, the
5567 // corresponding lanes of the two output vectors (calculated results,
5568 // overflow bits) are poison.
5569 return true;
5570 case Intrinsic::ctpop:
5571 return true;
5572 }
5573 }
5574 return false;
5575 case Instruction::ICmp:
5576 case Instruction::FCmp:
5577 case Instruction::GetElementPtr:
5578 return true;
5579 default:
5580 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5581 return true;
5582
5583 // Be conservative and return false.
5584 return false;
5585 }
5586}
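// Illustrative example (hypothetical IR): for `%y = add i32 %x, 1`, a poison
// %x makes %y poison (add propagates poison), so this returns true for the
// add. For `%s = select i1 %c, i32 %x, i32 7`, a poison %x does not force %s
// to be poison, so select is treated as non-propagating here.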
5587
5588void llvm::getGuaranteedWellDefinedOps(
5589 const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5590 switch (I->getOpcode()) {
5591 case Instruction::Store:
5592 Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5593 break;
5594
5595 case Instruction::Load:
5596 Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5597 break;
5598
5599 // Since the dereferenceable attribute implies noundef, atomic operations
5600 // also implicitly have noundef pointers.
5601 case Instruction::AtomicCmpXchg:
5602 Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5603 break;
5604
5605 case Instruction::AtomicRMW:
5606 Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5607 break;
5608
5609 case Instruction::Call:
5610 case Instruction::Invoke: {
5611 const CallBase *CB = cast<CallBase>(I);
5612 if (CB->isIndirectCall())
5613 Operands.insert(CB->getCalledOperand());
5614 for (unsigned i = 0; i < CB->arg_size(); ++i) {
5615 if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5616 CB->paramHasAttr(i, Attribute::Dereferenceable))
5617 Operands.insert(CB->getArgOperand(i));
5618 }
5619 break;
5620 }
5621 case Instruction::Ret:
5622 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
5623 Operands.insert(I->getOperand(0));
5624 break;
5625 default:
5626 break;
5627 }
5628}
5629
5630void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5631 SmallPtrSetImpl<const Value *> &Operands) {
5632 getGuaranteedWellDefinedOps(I, Operands);
5633 switch (I->getOpcode()) {
5634 // Divisors of these operations are allowed to be partially undef.
5635 case Instruction::UDiv:
5636 case Instruction::SDiv:
5637 case Instruction::URem:
5638 case Instruction::SRem:
5639 Operands.insert(I->getOperand(1));
5640 break;
5641 case Instruction::Switch:
5642 if (BranchOnPoisonAsUB)
5643 Operands.insert(cast<SwitchInst>(I)->getCondition());
5644 break;
5645 case Instruction::Br: {
5646 auto *BR = cast<BranchInst>(I);
5647 if (BranchOnPoisonAsUB && BR->isConditional())
5648 Operands.insert(BR->getCondition());
5649 break;
5650 }
5651 default:
5652 break;
5653 }
5654}
5655
5656bool llvm::mustTriggerUB(const Instruction *I,
5657 const SmallSet<const Value *, 16>& KnownPoison) {
5658 SmallPtrSet<const Value *, 4> NonPoisonOps;
5659 getGuaranteedNonPoisonOps(I, NonPoisonOps);
5660
5661 for (const auto *V : NonPoisonOps)
5662 if (KnownPoison.count(V))
5663 return true;
5664
5665 return false;
5666}
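// Illustrative example (hypothetical IR): if %p is in KnownPoison, then
// `store i32 0, ptr %p` must trigger UB, because the store's pointer operand
// is collected by getGuaranteedNonPoisonOps (via getGuaranteedWellDefinedOps).
// Likewise `udiv i32 %x, %d` with a known-poison divisor %d must trigger UB.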
5667
5668static bool programUndefinedIfUndefOrPoison(const Value *V,
5669 bool PoisonOnly) {
5670 // We currently only look for uses of values within the same basic
5671 // block, as that makes it easier to guarantee that the uses will be
5672 // executed given that Inst is executed.
5673 //
5674 // FIXME: Expand this to consider uses beyond the same basic block. To do
5675 // this, look out for the distinction between post-dominance and strong
5676 // post-dominance.
5677 const BasicBlock *BB = nullptr;
5678 BasicBlock::const_iterator Begin;
5679 if (const auto *Inst = dyn_cast<Instruction>(V)) {
5680 BB = Inst->getParent();
5681 Begin = Inst->getIterator();
5682 Begin++;
5683 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5684 BB = &Arg->getParent()->getEntryBlock();
5685 Begin = BB->begin();
5686 } else {
5687 return false;
5688 }
5689
5690 // Limit number of instructions we look at, to avoid scanning through large
5691 // blocks. The current limit is chosen arbitrarily.
5692 unsigned ScanLimit = 32;
5693 BasicBlock::const_iterator End = BB->end();
5694
5695 if (!PoisonOnly) {
5696 // Since undef does not propagate eagerly, be conservative & just check
5697 // whether a value is directly passed to an instruction that must take
5698 // well-defined operands.
5699
5700 for (const auto &I : make_range(Begin, End)) {
5701 if (isa<DbgInfoIntrinsic>(I))
5702 continue;
5703 if (--ScanLimit == 0)
5704 break;
5705
5706 SmallPtrSet<const Value *, 4> WellDefinedOps;
5707 getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5708 if (WellDefinedOps.contains(V))
5709 return true;
5710
5711 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5712 break;
5713 }
5714 return false;
5715 }
5716
5717 // Set of instructions that we have proved will yield poison if Inst
5718 // does.
5719 SmallSet<const Value *, 16> YieldsPoison;
5720 SmallSet<const BasicBlock *, 4> Visited;
5721
5722 YieldsPoison.insert(V);
5723 auto Propagate = [&](const User *User) {
5724 if (propagatesPoison(cast<Operator>(User)))
5725 YieldsPoison.insert(User);
5726 };
5727 for_each(V->users(), Propagate);
5728 Visited.insert(BB);
5729
5730 while (true) {
5731 for (const auto &I : make_range(Begin, End)) {
5732 if (isa<DbgInfoIntrinsic>(I))
5733 continue;
5734 if (--ScanLimit == 0)
5735 return false;
5736 if (mustTriggerUB(&I, YieldsPoison))
5737 return true;
5738 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5739 return false;
5740
5741 // Mark poison that propagates from I through uses of I.
5742 if (YieldsPoison.count(&I))
5743 for_each(I.users(), Propagate);
5744 }
5745
5746 BB = BB->getSingleSuccessor();
5747 if (!BB || !Visited.insert(BB).second)
5748 break;
5749
5750 Begin = BB->getFirstNonPHI()->getIterator();
5751 End = BB->end();
5752 }
5753 return false;
5754}
5755
5756bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5757 return ::programUndefinedIfUndefOrPoison(Inst, false);
5758}
5759
5760bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5761 return ::programUndefinedIfUndefOrPoison(Inst, true);
5762}
5763
5764static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5765 if (FMF.noNaNs())
5766 return true;
5767
5768 if (auto *C = dyn_cast<ConstantFP>(V))
5769 return !C->isNaN();
5770
5771 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5772 if (!C->getElementType()->isFloatingPointTy())
5773 return false;
5774 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5775 if (C->getElementAsAPFloat(I).isNaN())
5776 return false;
5777 }
5778 return true;
5779 }
5780
5781 if (isa<ConstantAggregateZero>(V))
5782 return true;
5783
5784 return false;
5785}
5786
5787static bool isKnownNonZero(const Value *V) {
5788 if (auto *C = dyn_cast<ConstantFP>(V))
5789 return !C->isZero();
5790
5791 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5792 if (!C->getElementType()->isFloatingPointTy())
5793 return false;
5794 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5795 if (C->getElementAsAPFloat(I).isZero())
5796 return false;
5797 }
5798 return true;
5799 }
5800
5801 return false;
5802}
5803
5804 /// Match a clamp pattern for float types without caring about NaNs or signed
5805 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
5806 /// function recognizes whether it can be substituted by a "canonical" min/max
5807 /// pattern.
5808static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5809 Value *CmpLHS, Value *CmpRHS,
5810 Value *TrueVal, Value *FalseVal,
5811 Value *&LHS, Value *&RHS) {
5812 // Try to match
5813 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5814 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5815 // and return description of the outer Max/Min.
5816
5817 // First, check if select has inverse order:
5818 if (CmpRHS == FalseVal) {
5819 std::swap(TrueVal, FalseVal);
5820 Pred = CmpInst::getInversePredicate(Pred);
5821 }
5822
5823 // Assume success now. If there's no match, callers should not use these anyway.
5824 LHS = TrueVal;
5825 RHS = FalseVal;
5826
5827 const APFloat *FC1;
5828 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5829 return {SPF_UNKNOWN, SPNB_NA, false};
5830
5831 const APFloat *FC2;
5832 switch (Pred) {
5833 case CmpInst::FCMP_OLT:
5834 case CmpInst::FCMP_OLE:
5835 case CmpInst::FCMP_ULT:
5836 case CmpInst::FCMP_ULE:
5837 if (match(FalseVal,
5838 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5839 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5840 *FC1 < *FC2)
5841 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5842 break;
5843 case CmpInst::FCMP_OGT:
5844 case CmpInst::FCMP_OGE:
5845 case CmpInst::FCMP_UGT:
5846 case CmpInst::FCMP_UGE:
5847 if (match(FalseVal,
5848 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5849 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5850 *FC1 > *FC2)
5851 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5852 break;
5853 default:
5854 break;
5855 }
5856
5857 return {SPF_UNKNOWN, SPNB_NA, false};
5858}
5859
5860/// Recognize variations of:
5861/// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5862static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5863 Value *CmpLHS, Value *CmpRHS,
5864 Value *TrueVal, Value *FalseVal) {
5865 // Swap the select operands and predicate to match the patterns below.
5866 if (CmpRHS != TrueVal) {
5867 Pred = ICmpInst::getSwappedPredicate(Pred);
5868 std::swap(TrueVal, FalseVal);
5869 }
5870 const APInt *C1;
5871 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5872 const APInt *C2;
5873 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5874 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5875 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5876 return {SPF_SMAX, SPNB_NA, false};
5877
5878 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5879 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5880 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5881 return {SPF_SMIN, SPNB_NA, false};
5882
5883 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5884 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5885 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5886 return {SPF_UMAX, SPNB_NA, false};
5887
5888 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5889 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5890 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5891 return {SPF_UMIN, SPNB_NA, false};
5892 }
5893 return {SPF_UNKNOWN, SPNB_NA, false};
5894}
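// Worked example (hypothetical constants): with C1 = 5 and C2 = 100, the
// sequence `(X <s 5) ? 5 : smin(X, 100)` is recognized as
// `smax(smin(X, 100), 5)`, i.e. X clamped to the signed range [5, 100],
// and SPF_SMAX is returned.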
5895
5896/// Recognize variations of:
5897/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5898static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5899 Value *CmpLHS, Value *CmpRHS,
5900 Value *TVal, Value *FVal,
5901 unsigned Depth) {
5902 // TODO: Allow FP min/max with nnan/nsz.
5903 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5904
5905 Value *A = nullptr, *B = nullptr;
5906 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5907 if (!SelectPatternResult::isMinOrMax(L.Flavor))
5908 return {SPF_UNKNOWN, SPNB_NA, false};
5909
5910 Value *C = nullptr, *D = nullptr;
5911 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5912 if (L.Flavor != R.Flavor)
5913 return {SPF_UNKNOWN, SPNB_NA, false};
5914
5915 // We have something like: x Pred y ? min(a, b) : min(c, d).
5916 // Try to match the compare to the min/max operations of the select operands.
5917 // First, make sure we have the right compare predicate.
5918 switch (L.Flavor) {
5919 case SPF_SMIN:
5920 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5921 Pred = ICmpInst::getSwappedPredicate(Pred);
5922 std::swap(CmpLHS, CmpRHS);
5923 }
5924 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5925 break;
5926 return {SPF_UNKNOWN, SPNB_NA, false};
5927 case SPF_SMAX:
5928 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5929 Pred = ICmpInst::getSwappedPredicate(Pred);
5930 std::swap(CmpLHS, CmpRHS);
5931 }
5932 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5933 break;
5934 return {SPF_UNKNOWN, SPNB_NA, false};
5935 case SPF_UMIN:
5936 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5937 Pred = ICmpInst::getSwappedPredicate(Pred);
5938 std::swap(CmpLHS, CmpRHS);
5939 }
5940 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5941 break;
5942 return {SPF_UNKNOWN, SPNB_NA, false};
5943 case SPF_UMAX:
5944 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5945 Pred = ICmpInst::getSwappedPredicate(Pred);
5946 std::swap(CmpLHS, CmpRHS);
5947 }
5948 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5949 break;
5950 return {SPF_UNKNOWN, SPNB_NA, false};
5951 default:
5952 return {SPF_UNKNOWN, SPNB_NA, false};
5953 }
5954
5955 // If there is a common operand in the already matched min/max and the other
5956 // min/max operands match the compare operands (either directly or inverted),
5957 // then this is min/max of the same flavor.
5958
5959 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5960 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5961 if (D == B) {
5962 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5963 match(A, m_Not(m_Specific(CmpRHS)))))
5964 return {L.Flavor, SPNB_NA, false};
5965 }
5966 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5967 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5968 if (C == B) {
5969 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5970 match(A, m_Not(m_Specific(CmpRHS)))))
5971 return {L.Flavor, SPNB_NA, false};
5972 }
5973 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5974 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5975 if (D == A) {
5976 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5977 match(B, m_Not(m_Specific(CmpRHS)))))
5978 return {L.Flavor, SPNB_NA, false};
5979 }
5980 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5981 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5982 if (C == A) {
5983 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5984 match(B, m_Not(m_Specific(CmpRHS)))))
5985 return {L.Flavor, SPNB_NA, false};
5986 }
5987
5988 return {SPF_UNKNOWN, SPNB_NA, false};
5989}
5990
5991/// If the input value is the result of a 'not' op, constant integer, or vector
5992/// splat of a constant integer, return the bitwise-not source value.
5993/// TODO: This could be extended to handle non-splat vector integer constants.
5994static Value *getNotValue(Value *V) {
5995 Value *NotV;
5996 if (match(V, m_Not(m_Value(NotV))))
5997 return NotV;
5998
5999 const APInt *C;
6000 if (match(V, m_APInt(C)))
6001 return ConstantInt::get(V->getType(), ~(*C));
6002
6003 return nullptr;
6004}
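// Illustrative example (hypothetical values): getNotValue(`xor i32 %x, -1`)
// returns %x, and getNotValue(i32 5) returns the constant i32 -6 (~5).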
6005
6006/// Match non-obvious integer minimum and maximum sequences.
6007static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
6008 Value *CmpLHS, Value *CmpRHS,
6009 Value *TrueVal, Value *FalseVal,
6010 Value *&LHS, Value *&RHS,
6011 unsigned Depth) {
6012 // Assume success. If there's no match, callers should not use these anyway.
6013 LHS = TrueVal;
6014 RHS = FalseVal;
6015
6016 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
6017 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
6018 return SPR;
6019
6020 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
6021 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
6022 return SPR;
6023
6024 // Look through 'not' ops to find disguised min/max.
6025 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
6026 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
6027 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
6028 switch (Pred) {
6029 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
6030 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
6031 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
6032 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
6033 default: break;
6034 }
6035 }
6036
6037 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
6038 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
6039 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
6040 switch (Pred) {
6041 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
6042 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
6043 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
6044 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
6045 default: break;
6046 }
6047 }
6048
6049 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
6050 return {SPF_UNKNOWN, SPNB_NA, false};
6051
6052 const APInt *C1;
6053 if (!match(CmpRHS, m_APInt(C1)))
6054 return {SPF_UNKNOWN, SPNB_NA, false};
6055
6056 // An unsigned min/max can be written with a signed compare.
6057 const APInt *C2;
6058 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
6059 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
6060 // Is the sign bit set?
6061 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
6062 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
6063 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
6064 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
6065
6066 // Is the sign bit clear?
6067 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
6068 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
6069 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
6070 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
6071 }
6072
6073 return {SPF_UNKNOWN, SPNB_NA, false};
6074}
6075
6076bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
6077 assert(X && Y && "Invalid operand");
6078
6079 // X = sub (0, Y) || X = sub nsw (0, Y)
6080 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
6081 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
6082 return true;
6083
6084 // Y = sub (0, X) || Y = sub nsw (0, X)
6085 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
6086 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
6087 return true;
6088
6089 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
6090 Value *A, *B;
6091 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
6092 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
6093 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
6094 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
6095}
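// Illustrative example (hypothetical IR): isKnownNegation returns true for
// X = `sub i32 0, %y` and Y = %y, and for X = `sub i32 %a, %b` paired with
// Y = `sub i32 %b, %a`; with NeedNSW set, the subs must carry the nsw flag.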
6096
6097static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
6098 FastMathFlags FMF,
6099 Value *CmpLHS, Value *CmpRHS,
6100 Value *TrueVal, Value *FalseVal,
6101 Value *&LHS, Value *&RHS,
6102 unsigned Depth) {
6103 bool HasMismatchedZeros = false;
6104 if (CmpInst::isFPPredicate(Pred)) {
6105 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
6106 // 0.0 operand, set the compare's 0.0 operands to that same value for the
6107 // purpose of identifying min/max. Disregard vector constants with undefined
6108 // elements because those cannot be back-propagated for analysis.
6109 Value *OutputZeroVal = nullptr;
6110 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
6111 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
6112 OutputZeroVal = TrueVal;
6113 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
6114 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
6115 OutputZeroVal = FalseVal;
6116
6117 if (OutputZeroVal) {
6118 if (match(CmpLHS, m_AnyZeroFP()) && CmpLHS != OutputZeroVal) {
6119 HasMismatchedZeros = true;
6120 CmpLHS = OutputZeroVal;
6121 }
6122 if (match(CmpRHS, m_AnyZeroFP()) && CmpRHS != OutputZeroVal) {
6123 HasMismatchedZeros = true;
6124 CmpRHS = OutputZeroVal;
6125 }
6126 }
6127 }
6128
6129 LHS = CmpLHS;
6130 RHS = CmpRHS;
6131
6132 // Signed zero may return inconsistent results between implementations.
6133 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
6134 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
6135 // Therefore, we behave conservatively and only proceed if at least one of the
6136 // operands is known to not be zero or if we don't care about signed zero.
6137 switch (Pred) {
6138 default: break;
6139 case CmpInst::FCMP_OGT: case CmpInst::FCMP_OLT:
6140 case CmpInst::FCMP_UGT: case CmpInst::FCMP_ULT:
6141 if (!HasMismatchedZeros)
6142 break;
6143 [[fallthrough]];
6144 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
6145 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
6146 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6147 !isKnownNonZero(CmpRHS))
6148 return {SPF_UNKNOWN, SPNB_NA, false};
6149 }
6150
6151 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
6152 bool Ordered = false;
6153
6154 // When given one NaN and one non-NaN input:
6155 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
6156 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
6157 // ordered comparison fails), which could be NaN or non-NaN.
6158 // so here we discover exactly what NaN behavior is required/accepted.
6159 if (CmpInst::isFPPredicate(Pred)) {
6160 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
6161 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
6162
6163 if (LHSSafe && RHSSafe) {
6164 // Both operands are known non-NaN.
6165 NaNBehavior = SPNB_RETURNS_ANY;
6166 } else if (CmpInst::isOrdered(Pred)) {
6167 // An ordered comparison will return false when given a NaN, so it
6168 // returns the RHS.
6169 Ordered = true;
6170 if (LHSSafe)
6171 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
6172 NaNBehavior = SPNB_RETURNS_NAN;
6173 else if (RHSSafe)
6174 NaNBehavior = SPNB_RETURNS_OTHER;
6175 else
6176 // Completely unsafe.
6177 return {SPF_UNKNOWN, SPNB_NA, false};
6178 } else {
6179 Ordered = false;
6180 // An unordered comparison will return true when given a NaN, so it
6181 // returns the LHS.
6182 if (LHSSafe)
6183 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
6184 NaNBehavior = SPNB_RETURNS_OTHER;
6185 else if (RHSSafe)
6186 NaNBehavior = SPNB_RETURNS_NAN;
6187 else
6188 // Completely unsafe.
6189 return {SPF_UNKNOWN, SPNB_NA, false};
6190 }
6191 }
6192
6193 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
6194 std::swap(CmpLHS, CmpRHS);
6195 Pred = CmpInst::getSwappedPredicate(Pred);
6196 if (NaNBehavior == SPNB_RETURNS_NAN)
6197 NaNBehavior = SPNB_RETURNS_OTHER;
6198 else if (NaNBehavior == SPNB_RETURNS_OTHER)
6199 NaNBehavior = SPNB_RETURNS_NAN;
6200 Ordered = !Ordered;
6201 }
6202
6203 // ([if]cmp X, Y) ? X : Y
6204 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
6205 switch (Pred) {
6206 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
6207 case ICmpInst::ICMP_UGT:
6208 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
6209 case ICmpInst::ICMP_SGT:
6210 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
6211 case ICmpInst::ICMP_ULT:
6212 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
6213 case ICmpInst::ICMP_SLT:
6214 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
6215 case FCmpInst::FCMP_UGT:
6216 case FCmpInst::FCMP_UGE:
6217 case FCmpInst::FCMP_OGT:
6218 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
6219 case FCmpInst::FCMP_ULT:
6220 case FCmpInst::FCMP_ULE:
6221 case FCmpInst::FCMP_OLT:
6222 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
6223 }
6224 }
6225
6226 if (isKnownNegation(TrueVal, FalseVal)) {
6227 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
6228 // match against either LHS or sext(LHS).
6229 auto MaybeSExtCmpLHS =
6230 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
6231 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
6232 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
6233 if (match(TrueVal, MaybeSExtCmpLHS)) {
6234 // Set the return values. If the compare uses the negated value (-X >s 0),
6235 // swap the return values because the negated value is always 'RHS'.
6236 LHS = TrueVal;
6237 RHS = FalseVal;
6238 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
6239 std::swap(LHS, RHS);
6240
6241 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
6242 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
6243 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6244 return {SPF_ABS, SPNB_NA, false};
6245
6246 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
6247 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
6248 return {SPF_ABS, SPNB_NA, false};
6249
6250 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
6251 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
6252 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6253 return {SPF_NABS, SPNB_NA, false};
6254 }
6255 else if (match(FalseVal, MaybeSExtCmpLHS)) {
6256 // Set the return values. If the compare uses the negated value (-X >s 0),
6257 // swap the return values because the negated value is always 'RHS'.
6258 LHS = FalseVal;
6259 RHS = TrueVal;
6260 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
6261 std::swap(LHS, RHS);
6262
6263 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
6264 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
6265 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6266 return {SPF_NABS, SPNB_NA, false};
6267
6268 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
6269 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
6270 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6271 return {SPF_ABS, SPNB_NA, false};
6272 }
6273 }
6274
6275 if (CmpInst::isIntPredicate(Pred))
6276 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
6277
6278 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
6279 // may return either -0.0 or 0.0, so fcmp/select pair has stricter
6280 // semantics than minNum. Be conservative in such case.
6281 if (NaNBehavior != SPNB_RETURNS_ANY ||
6282 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6283 !isKnownNonZero(CmpRHS)))
6284 return {SPF_UNKNOWN, SPNB_NA, false};
6285
6286 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
6287}
6288
6289/// Helps to match a select pattern in case of a type mismatch.
6290///
6291 /// The function handles the case when the type of the true and false values of
6292 /// a select instruction differs from the type of the cmp instruction's operands
6293 /// because of a cast instruction. The function checks whether it is legal to
6294 /// move the cast operation after the "select". If so, it returns the new second
6295 /// value of the "select" (with the assumption that the cast is moved):
6296 /// 1. As the operand of the cast instruction when both values of the "select"
6297 /// are the same cast instruction.
6298 /// 2. As the restored constant (by applying the reverse cast operation) when
6299 /// the first value of the "select" is a cast operation and the second value
6300 /// is a constant.
6301 /// NOTE: We return only the new second value because the first value can be
6302 /// accessed as the operand of the cast instruction.
6303static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
6304 Instruction::CastOps *CastOp) {
6305 auto *Cast1 = dyn_cast<CastInst>(V1);
6306 if (!Cast1)
6307 return nullptr;
6308
6309 *CastOp = Cast1->getOpcode();
6310 Type *SrcTy = Cast1->getSrcTy();
6311 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
6312 // If V1 and V2 are both the same cast from the same type, look through V1.
6313 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
6314 return Cast2->getOperand(0);
6315 return nullptr;
6316 }
6317
6318 auto *C = dyn_cast<Constant>(V2);
6319 if (!C)
6320 return nullptr;
6321
6322 Constant *CastedTo = nullptr;
6323 switch (*CastOp) {
6324 case Instruction::ZExt:
6325 if (CmpI->isUnsigned())
6326 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
6327 break;
6328 case Instruction::SExt:
6329 if (CmpI->isSigned())
6330 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
6331 break;
6332 case Instruction::Trunc:
6333 Constant *CmpConst;
6334 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
6335 CmpConst->getType() == SrcTy) {
6336 // Here we have the following case:
6337 //
6338 // %cond = cmp iN %x, CmpConst
6339 // %tr = trunc iN %x to iK
6340 // %narrowsel = select i1 %cond, iK %t, iK C
6341 //
6342 // We can always move trunc after select operation:
6343 //
6344 // %cond = cmp iN %x, CmpConst
6345 // %widesel = select i1 %cond, iN %x, iN CmpConst
6346 // %tr = trunc iN %widesel to iK
6347 //
6348 // Note that C could be extended in any way because we don't care about
6349 // upper bits after truncation. It can't be abs pattern, because it would
6350 // look like:
6351 //
6352 // select i1 %cond, x, -x.
6353 //
6354 // So only a min/max pattern could be matched. Such a match requires the
6355 // widened C == CmpConst. That is why we set the widened C = CmpConst; the
6356 // condition trunc(CmpConst) == C is checked below.
6357 CastedTo = CmpConst;
6358 } else {
6359 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
6360 }
6361 break;
6362 case Instruction::FPTrunc:
6363 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
6364 break;
6365 case Instruction::FPExt:
6366 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
6367 break;
6368 case Instruction::FPToUI:
6369 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
6370 break;
6371 case Instruction::FPToSI:
6372 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
6373 break;
6374 case Instruction::UIToFP:
6375 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
6376 break;
6377 case Instruction::SIToFP:
6378 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
6379 break;
6380 default:
6381 break;
6382 }
6383
6384 if (!CastedTo)
6385 return nullptr;
6386
6387 // Make sure the cast doesn't lose any information.
6388 Constant *CastedBack =
6389 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
6390 if (CastedBack != C)
6391 return nullptr;
6392
6393 return CastedTo;
6394}
6395
6396SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
6397 Instruction::CastOps *CastOp,
6398 unsigned Depth) {
6399 if (Depth >= MaxAnalysisRecursionDepth)
6400 return {SPF_UNKNOWN, SPNB_NA, false};
6401
6402 SelectInst *SI = dyn_cast<SelectInst>(V);
6403 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
6404
6405 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
6406 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
6407
6408 Value *TrueVal = SI->getTrueValue();
6409 Value *FalseVal = SI->getFalseValue();
6410
6411 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
6412 CastOp, Depth);
6413}
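// Usage sketch (hypothetical caller code): given a select S of the form
//   %s = select i1 (icmp slt i32 %x, %y), i32 %x, i32 %y
// a client might write:
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(S, LHS, RHS);
//   if (SPR.Flavor == SPF_SMIN) { /* %s is smin(%x, %y); LHS=%x, RHS=%y */ }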
6414
6415SelectPatternResult llvm::matchDecomposedSelectPattern(
6416 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
6417 Instruction::CastOps *CastOp, unsigned Depth) {
6418 CmpInst::Predicate Pred = CmpI->getPredicate();
6419 Value *CmpLHS = CmpI->getOperand(0);
6420 Value *CmpRHS = CmpI->getOperand(1);
6421 FastMathFlags FMF;
6422 if (isa<FPMathOperator>(CmpI))
6423 FMF = CmpI->getFastMathFlags();
6424
6425 // Bail out early.
6426 if (CmpI->isEquality())
6427 return {SPF_UNKNOWN, SPNB_NA, false};
6428
6429 // Deal with type mismatches.
6430 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
6431 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
6432 // If this is a potential fmin/fmax with a cast to integer, then ignore
6433 // -0.0 because there is no corresponding integer value.
6434 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6435 FMF.setNoSignedZeros();
6436 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6437 cast<CastInst>(TrueVal)->getOperand(0), C,
6438 LHS, RHS, Depth);
6439 }
6440 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
6441 // If this is a potential fmin/fmax with a cast to integer, then ignore
6442 // -0.0 because there is no corresponding integer value.
6443 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6444 FMF.setNoSignedZeros();
6445 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6446 C, cast<CastInst>(FalseVal)->getOperand(0),
6447 LHS, RHS, Depth);
6448 }
6449 }
6450 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
6451 LHS, RHS, Depth);
6452}
6453
6454CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
6455 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
6456 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
6457 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
6458 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
6459 if (SPF == SPF_FMINNUM)
6460 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
6461 if (SPF == SPF_FMAXNUM)
6462 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
6463 llvm_unreachable("unhandled!");
6464}
6465
6466SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
6467 if (SPF == SPF_SMIN) return SPF_SMAX;
6468 if (SPF == SPF_UMIN) return SPF_UMAX;
6469 if (SPF == SPF_SMAX) return SPF_SMIN;
6470 if (SPF == SPF_UMAX) return SPF_UMIN;
6471 llvm_unreachable("unhandled!");
6472}
6473
6474Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
6475 switch (MinMaxID) {
6476 case Intrinsic::smax: return Intrinsic::smin;
6477 case Intrinsic::smin: return Intrinsic::smax;
6478 case Intrinsic::umax: return Intrinsic::umin;
6479 case Intrinsic::umin: return Intrinsic::umax;
6480 default: llvm_unreachable("Unexpected intrinsic");
6481 }
6482}
6483
6484CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6485 return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6486}
6487
6488APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
6489 switch (SPF) {
6490 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
6491 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
6492 case SPF_UMAX: return APInt::getMaxValue(BitWidth);
6493 case SPF_UMIN: return APInt::getMinValue(BitWidth);
6494 default: llvm_unreachable("Unexpected flavor");
6495 }
6496}
6497
6498std::pair<Intrinsic::ID, bool>
6499llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
6500 // Check if VL contains select instructions that can be folded into a min/max
6501 // vector intrinsic and return the intrinsic if it is possible.
6502 // TODO: Support floating point min/max.
6503 bool AllCmpSingleUse = true;
6504 SelectPatternResult SelectPattern;
6505 SelectPattern.Flavor = SPF_UNKNOWN;
6506 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6507 Value *LHS, *RHS;
6508 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6509 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6510 CurrentPattern.Flavor == SPF_FMINNUM ||
6511 CurrentPattern.Flavor == SPF_FMAXNUM ||
6512 !I->getType()->isIntOrIntVectorTy())
6513 return false;
6514 if (SelectPattern.Flavor != SPF_UNKNOWN &&
6515 SelectPattern.Flavor != CurrentPattern.Flavor)
6516 return false;
6517 SelectPattern = CurrentPattern;
6518 AllCmpSingleUse &=
6519 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6520 return true;
6521 })) {
6522 switch (SelectPattern.Flavor) {
6523 case SPF_SMIN:
6524 return {Intrinsic::smin, AllCmpSingleUse};
6525 case SPF_UMIN:
6526 return {Intrinsic::umin, AllCmpSingleUse};
6527 case SPF_SMAX:
6528 return {Intrinsic::smax, AllCmpSingleUse};
6529 case SPF_UMAX:
6530 return {Intrinsic::umax, AllCmpSingleUse};
6531 default:
6532 llvm_unreachable("unexpected select pattern flavor");
6533 }
6534 }
6535 return {Intrinsic::not_intrinsic, false};
6536}
6537
6538bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6539 Value *&Start, Value *&Step) {
6540 // Handle the case of a simple two-predecessor recurrence PHI.
6541 // There's a lot more that could theoretically be done here, but
6542 // this is sufficient to catch some interesting cases.
6543 if (P->getNumIncomingValues() != 2)
6544 return false;
6545
6546 for (unsigned i = 0; i != 2; ++i) {
6547 Value *L = P->getIncomingValue(i);
6548 Value *R = P->getIncomingValue(!i);
6549 Operator *LU = dyn_cast<Operator>(L);
6550 if (!LU)
6551 continue;
6552 unsigned Opcode = LU->getOpcode();
6553
6554 switch (Opcode) {
6555 default:
6556 continue;
6557 // TODO: Expand list -- xor, div, gep, uaddo, etc.
6558 case Instruction::LShr:
6559 case Instruction::AShr:
6560 case Instruction::Shl:
6561 case Instruction::Add:
6562 case Instruction::Sub:
6563 case Instruction::And:
6564 case Instruction::Or:
6565 case Instruction::Mul:
6566 case Instruction::FMul: {
6567 Value *LL = LU->getOperand(0);
6568 Value *LR = LU->getOperand(1);
6569 // Find a recurrence.
6570 if (LL == P)
6571 L = LR;
6572 else if (LR == P)
6573 L = LL;
6574 else
6575 continue; // Check for recurrence with L and R flipped.
6576
6577 break; // Match!
6578 }
6579 };
6580
6581 // We have matched a recurrence of the form:
6582 // %iv = [R, %entry], [%iv.next, %backedge]
6583 // %iv.next = binop %iv, L
6584 // OR
6585 // %iv = [R, %entry], [%iv.next, %backedge]
6586 // %iv.next = binop L, %iv
6587 BO = cast<BinaryOperator>(LU);
6588 Start = R;
6589 Step = L;
6590 return true;
6591 }
6592 return false;
6593}
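// Illustrative example (hypothetical IR): for the loop-header PHI
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
// matchSimpleRecurrence(%iv, BO, Start, Step) succeeds with BO = %iv.next,
// Start = 0 and Step = 1.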
6594
6595bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6596 Value *&Start, Value *&Step) {
6597 BinaryOperator *BO = nullptr;
6598 P = dyn_cast<PHINode>(I->getOperand(0));
6599 if (!P)
6600 P = dyn_cast<PHINode>(I->getOperand(1));
6601 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6602}
6603
6604/// Return true if "icmp Pred LHS RHS" is always true.
6605static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6606 const Value *RHS, const DataLayout &DL,
6607 unsigned Depth) {
6608 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6609 return true;
6610
6611 switch (Pred) {
6612 default:
6613 return false;
6614
6615 case CmpInst::ICMP_SLE: {
6616 const APInt *C;
6617
6618 // LHS s<= LHS +_{nsw} C if C >= 0
6619 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6620 return !C->isNegative();
6621 return false;
6622 }
6623
6624 case CmpInst::ICMP_ULE: {
6625 const APInt *C;
6626
6627 // LHS u<= LHS +_{nuw} C for any C
6628 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6629 return true;
6630
6631 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6632 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6633 const Value *&X,
6634 const APInt *&CA, const APInt *&CB) {
6635 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6636 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6637 return true;
6638
6639 // If X & C == 0 then (X | C) == X +_{nuw} C
6640 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6641 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6642 KnownBits Known(CA->getBitWidth());
6643 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6644 /*CxtI*/ nullptr, /*DT*/ nullptr);
6645 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6646 return true;
6647 }
6648
6649 return false;
6650 };
6651
6652 const Value *X;
6653 const APInt *CLHS, *CRHS;
6654 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6655 return CLHS->ule(*CRHS);
6656
6657 return false;
6658 }
6659 }
6660}
6661
6662/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6663/// ALHS ARHS" is true. Otherwise, return None.
6664static Optional<bool>
6665isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6666 const Value *ARHS, const Value *BLHS, const Value *BRHS,
6667 const DataLayout &DL, unsigned Depth) {
6668 switch (Pred) {
6669 default:
6670 return None;
6671
6672 case CmpInst::ICMP_SLT:
6673 case CmpInst::ICMP_SLE:
6674 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6675 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6676 return true;
6677 return None;
6678
6679 case CmpInst::ICMP_ULT:
6680 case CmpInst::ICMP_ULE:
6681 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6682 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6683 return true;
6684 return None;
6685 }
6686}
6687
6688/// Return true if the operands of two compares (expanded as "L0 pred L1" and
6689/// "R0 pred R1") match. IsSwappedOps is true when the operands match, but are
6690/// swapped.
6691static bool areMatchingOperands(const Value *L0, const Value *L1, const Value *R0,
6692 const Value *R1, bool &AreSwappedOps) {
6693 bool AreMatchingOps = (L0 == R0 && L1 == R1);
6694 AreSwappedOps = (L0 == R1 && L1 == R0);
6695 return AreMatchingOps || AreSwappedOps;
6696}
6697
6698/// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
6699/// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false.
6700/// Otherwise, return None if we can't infer anything.
6701static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
6702 CmpInst::Predicate RPred,
6703 bool AreSwappedOps) {
6704 // Canonicalize the predicate as if the operands were not commuted.
6705 if (AreSwappedOps)
6706 RPred = ICmpInst::getSwappedPredicate(RPred);
6707
6708 if (CmpInst::isImpliedTrueByMatchingCmp(LPred, RPred))
6709 return true;
6710 if (CmpInst::isImpliedFalseByMatchingCmp(LPred, RPred))
6711 return false;
6712
6713 return None;
6714}
6715
6716/// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
6717/// Return false if "icmp LPred X, LC" implies "icmp RPred X, RC" is false.
6718/// Otherwise, return None if we can't infer anything.
6719static Optional<bool> isImpliedCondCommonOperandWithConstants(
6720 CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred,
6721 const APInt &RC) {
6722 ConstantRange DomCR = ConstantRange::makeExactICmpRegion(LPred, LC);
6723 ConstantRange CR = ConstantRange::makeExactICmpRegion(RPred, RC);
6724 ConstantRange Intersection = DomCR.intersectWith(CR);
6725 ConstantRange Difference = DomCR.difference(CR);
6726 if (Intersection.isEmptySet())
6727 return false;
6728 if (Difference.isEmptySet())
6729 return true;
6730 return None;
6731}
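// Worked example (hypothetical constants): for LPred = slt, LC = 5 and
// RPred = slt, RC = 10, the dominating range [INT_MIN, 5) is fully contained
// in [INT_MIN, 10), so the difference is empty and true is returned
// ("x <s 5" implies "x <s 10"). With RPred = sgt, RC = 10 the intersection is
// empty instead, so false is returned.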
6732
6733/// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
6734/// is true. Return false if LHS implies RHS is false. Otherwise, return None
6735/// if we can't infer anything.
6736static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6737 CmpInst::Predicate RPred,
6738 const Value *R0, const Value *R1,
6739 const DataLayout &DL, bool LHSIsTrue,
6740 unsigned Depth) {
6741 Value *L0 = LHS->getOperand(0);
6742 Value *L1 = LHS->getOperand(1);
6743
6744 // The rest of the logic assumes the LHS condition is true. If that's not the
6745 // case, invert the predicate to make it so.
6746 CmpInst::Predicate LPred =
6747 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6748
6749 // Can we infer anything when the two compares have matching operands?
6750 bool AreSwappedOps;
6751 if (areMatchingOperands(L0, L1, R0, R1, AreSwappedOps))
6752 return isImpliedCondMatchingOperands(LPred, RPred, AreSwappedOps);
6753
6754 // Can we infer anything when the 0-operands match and the 1-operands are
6755 // constants (not necessarily matching)?
6756 const APInt *LC, *RC;
6757 if (L0 == R0 && match(L1, m_APInt(LC)) && match(R1, m_APInt(RC)))
6758 return isImpliedCondCommonOperandWithConstants(LPred, *LC, RPred, *RC);
6759
6760 if (LPred == RPred)
6761 return isImpliedCondOperands(LPred, L0, L1, R0, R1, DL, Depth);
6762
6763 return None;
6764}
6765
6766/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6767/// false. Otherwise, return None if we can't infer anything. We expect the
6768/// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
6769static Optional<bool>
6770isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6771 const Value *RHSOp0, const Value *RHSOp1,
6772 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6773 // The LHS must be an 'or', 'and', or a 'select' instruction.
6774 assert((LHS->getOpcode() == Instruction::And ||
6775         LHS->getOpcode() == Instruction::Or ||
6776         LHS->getOpcode() == Instruction::Select) &&
6777        "Expected LHS to be 'and', 'or', or 'select'.");
6778
6779 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6780
6781 // If the result of an 'or' is false, then we know both legs of the 'or' are
6782 // false. Similarly, if the result of an 'and' is true, then we know both
6783 // legs of the 'and' are true.
6784 const Value *ALHS, *ARHS;
6785 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6786 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6787 // FIXME: Make this non-recursive.
6788 if (Optional<bool> Implication = isImpliedCondition(
6789 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6790 return Implication;
6791 if (Optional<bool> Implication = isImpliedCondition(
6792 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6793 return Implication;
6794 return None;
6795 }
6796 return None;
6797}
6798
6799Optional<bool>
6800llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6801 const Value *RHSOp0, const Value *RHSOp1,
6802 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6803 // Bail out when we hit the limit.
6804 if (Depth == MaxAnalysisRecursionDepth)
6805 return None;
6806
6807 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6808 // example.
6809 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6810 return None;
6811
6812 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
6813 "Expected integer type only!");
6814
6815 // The RHS is already given as an icmp; handle the case where the LHS is one too.
6816 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6817 if (LHSCmp)
6818 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6819 Depth);
6820
6821 // The LHS should be an 'or', 'and', or 'select' instruction. We expect
6822 // the RHS to be an icmp.
6823 // FIXME: Add support for and/or/select on the RHS.
6824 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6825 if ((LHSI->getOpcode() == Instruction::And ||
6826 LHSI->getOpcode() == Instruction::Or ||
6827 LHSI->getOpcode() == Instruction::Select))
6828 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6829 Depth);
6830 }
6831 return None;
6832}
6833
6834Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6835 const DataLayout &DL, bool LHSIsTrue,
6836 unsigned Depth) {
6837 // LHS ==> RHS by definition
6838 if (LHS == RHS)
6839 return LHSIsTrue;
6840
6841 if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS))
6842 return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6843 RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6844 LHSIsTrue, Depth);
6845
6846 if (Depth == MaxAnalysisRecursionDepth)
6847 return None;
6848
6849 // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
6850 // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
6851 const Value *RHS1, *RHS2;
6852 if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
6853 if (Optional<bool> Imp =
6854 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
6855 if (*Imp)
6856 return true;
6857 if (Optional<bool> Imp =
6858 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
6859 if (*Imp)
6860 return true;
6861 }
6862 if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
6863 if (Optional<bool> Imp =
6864 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
6865 if (!*Imp)
6866 return false;
6867 if (Optional<bool> Imp =
6868 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
6869 if (!*Imp)
6870 return false;
6871 }
6872
6873 return None;
6874}
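The two overloads above are the public entry points. Below is a minimal usage sketch, not taken from the analyzed source: the module, function, and value names are invented, and it assumes a standalone tool linked against the LLVM libraries. It builds 'x > 5', 'x > 3', and their 'and', then asks for two implications that exercise the constant-operand path and the 'and' decomposition shown above.

// Illustrative sketch only; @f, %x, %c1, %c2, %both are invented names.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("implied-cond-demo", Ctx);
  IRBuilder<> B(Ctx);

  // define i1 @f(i32 %x) { %c1 = x > 5; %c2 = x > 3; %both = %c1 & %c2; ... }
  Function *F = Function::Create(
      FunctionType::get(B.getInt1Ty(), {B.getInt32Ty()}, /*isVarArg=*/false),
      Function::ExternalLinkage, "f", M);
  B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));
  Value *X = F->getArg(0);

  Value *C1 = B.CreateICmpSGT(X, B.getInt32(5), "c1");  // x > 5
  Value *C2 = B.CreateICmpSGT(X, B.getInt32(3), "c2");  // x > 3
  Value *Both = B.CreateAnd(C1, C2, "both");            // c1 & c2
  B.CreateRet(Both);

  const DataLayout &DL = M.getDataLayout();

  // (x > 5) being true implies (x > 3): common operand, constant other side.
  Optional<bool> R1 = isImpliedCondition(C1, C2, DL, /*LHSIsTrue=*/true);
  // (c1 & c2) being true implies c1: decomposed through the LogicalAnd match.
  Optional<bool> R2 = isImpliedCondition(Both, C1, DL, /*LHSIsTrue=*/true);

  auto Print = [](Optional<bool> R) {
    return R ? (*R ? "true" : "false") : "unknown";
  };
  errs() << "R1 = " << Print(R1) << ", R2 = " << Print(R2) << "\n";
  return 0;
}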
6875
6876// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6877// condition dominating ContextI, or nullptr if no condition is found.
6878static std::pair<Value *, bool>
6879getDomPredecessorCondition(const Instruction *ContextI) {
6880 if (!ContextI || !ContextI->getParent())
6881 return {nullptr, false};
6882
6883 // TODO: This is a poor/cheap way to determine dominance. Should we use a
6884 // dominator tree (e.g., from a SimplifyQuery) instead?
6885 const BasicBlock *ContextBB = ContextI->getParent();
6886 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6887 if (!PredBB)
6888 return {nullptr, false};
6889
6890 // We need a conditional branch in the predecessor.
6891 Value *PredCond;
6892 BasicBlock *TrueBB, *FalseBB;
6893 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6894 return {nullptr, false};
6895
6896 // A branch with identical successors tells us nothing and should get simplified away anyway; don't bother with it.
6897 if (TrueBB == FalseBB)
6898 return {nullptr, false};
6899
6900 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6901 "Predecessor block does not point to successor?");
6902
6903 // Is this condition implied by the predecessor condition?
6904 return {PredCond, TrueBB == ContextBB};
6905}
6906
6907Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6908 const Instruction *ContextI,
6909 const DataLayout &DL) {
6910 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6911 auto PredCond = getDomPredecessorCondition(ContextI);
6912 if (PredCond.first)
6913 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6914 return None;
6915}
6916
6917Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6918 const Value *LHS, const Value *RHS,
6919 const Instruction *ContextI,
6920 const DataLayout &DL) {
6921 auto PredCond = getDomPredecessorCondition(ContextI);
6922 if (PredCond.first)
6923 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6924 PredCond.second);
6925 return None;
6926}
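A companion sketch for the dominating-condition form (again with invented names, assuming a standalone tool): the context block must have a single predecessor that ends in a two-way conditional branch, which is exactly the shape getDomPredecessorCondition looks for.

// Illustrative sketch only; @f, %entry, %then, %else, %c, %c2 are invented names.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("dom-cond-demo", Ctx);
  IRBuilder<> B(Ctx);

  Function *F = Function::Create(
      FunctionType::get(B.getInt1Ty(), {B.getInt32Ty()}, /*isVarArg=*/false),
      Function::ExternalLinkage, "f", M);
  Value *X = F->getArg(0);

  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
  BasicBlock *Then = BasicBlock::Create(Ctx, "then", F);
  BasicBlock *Else = BasicBlock::Create(Ctx, "else", F);

  // entry: %c = icmp sgt i32 %x, 5 ; br i1 %c, label %then, label %else
  B.SetInsertPoint(Entry);
  Value *C = B.CreateICmpSGT(X, B.getInt32(5), "c");
  B.CreateCondBr(C, Then, Else);

  // then: %c2 = icmp sgt i32 %x, 0 ; ret i1 %c2
  B.SetInsertPoint(Then);
  Value *C2 = B.CreateICmpSGT(X, B.getInt32(0), "c2");
  Instruction *Ret = B.CreateRet(C2);

  B.SetInsertPoint(Else);
  B.CreateRet(B.getInt1(false));

  // %then has the single predecessor %entry, which branches on %c with
  // distinct successors, and %then is the true edge. x > 5 implies x > 0,
  // so this query should come back "true".
  Optional<bool> Imp = isImpliedByDomCondition(C2, Ret, M.getDataLayout());
  errs() << (Imp ? (*Imp ? "true" : "false") : "unknown") << "\n";
  return 0;
}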
6927
6928static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6929 APInt &Upper, const InstrInfoQuery &IIQ,
6930 bool PreferSignedRange) {
6931 unsigned Width = Lower.getBitWidth();
6932 const APInt *C;
6933 switch (BO.getOpcode()) {
6934 case Instruction::Add:
6935 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
6936 bool HasNSW = IIQ.hasNoSignedWrap(&BO);
6937 bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
6938
6939 // If the caller expects a signed compare, then try to use a signed range.
6940 // Otherwise if both no-wraps are set, use the unsigned range because it
6941 // is never larger than the signed range. Example:
6942 // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
6943 if (PreferSignedRange && HasNSW && HasNUW)
6944 HasNUW = false;
6945
6946 if (HasNUW) {
6947 // 'add nuw x, C' produces [C, UINT_MAX].
6948 Lower = *C;
6949 } else if (HasNSW) {
6950 if (C->isNegative()) {
6951 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6952 Lower = APInt::getSignedMinValue(Width);
6953 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6954 } else {
6955 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6956 Lower = APInt::getSignedMinValue(Width) + *C;
6957 Upper = APInt::getSignedMaxValue(Width) + 1;
6958 }
6959 }
6960 }
6961 break;
6962
6963 case Instruction::And:
6964 if (match(BO.getOperand(1), m_APInt(C)))
6965 // 'and x, C' produces [0, C].
6966 Upper = *C + 1;
6967 break;
6968
6969 case Instruction::Or:
6970 if (match(BO.getOperand(1), m_APInt(C)))
6971 // 'or x, C' produces [C, UINT_MAX].
6972 Lower = *C;
6973 break;
6974
6975 case Instruction::AShr:
6976 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6977 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6978 Lower = APInt::getSignedMinValue(Width).ashr(*C);
6979 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6980 } else if (match(BO.getOperand(0), m_APInt(C))) {
6981 unsigned ShiftAmount = Width - 1;
6982 if (!C->isZero() && IIQ.isExact(&BO))
6983 ShiftAmount = C->countTrailingZeros();
6984 if (C->isNegative()) {
6985 // 'ashr C, x' produces [C, C >> (Width-1)]
6986 Lower = *C;
6987 Upper = C->ashr(ShiftAmount) + 1;
6988 } else {
6989 // 'ashr C, x' produces [C >> (Width-1), C]
6990 Lower = C->ashr(ShiftAmount);
6991 Upper = *C + 1;
6992 }
6993 }
6994 break;
6995
6996 case Instruction::LShr:
6997 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6998 // 'lshr x, C' produces [0, UINT_MAX >> C].
6999 Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
7000 } else if (match(BO.getOperand(0), m_APInt(C))) {
7001 // 'lshr C, x' produces [C >> (Width-1), C].
7002 unsigned ShiftAmount = Width - 1;
7003 if (!C->isZero() && IIQ.isExact(&BO))
7004 ShiftAmount = C->countTrailingZeros();
7005 Lower = C->lshr(ShiftAmount);
7006 Upper = *C + 1;
7007 }
7008 break;
7009
7010 case Instruction::Shl:
7011 if (match(BO.getOperand(0), m_APInt(C))) {
7012 if (IIQ.hasNoUnsignedWrap(&BO)) {
7013 // 'shl nuw C, x' produces [C, C << CLZ(C)]
7014 Lower = *C;
7015 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
7016 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
7017 if (C->isNegative()) {
7018 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
7019 unsigned ShiftAmount = C->countLeadingOnes() - 1;
7020 Lower = C->shl(ShiftAmount);
7021 Upper = *C + 1;
7022 } else {
7023 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
7024 unsigned ShiftAmount = C->countLeadingZeros() - 1;
7025 Lower = *C;
7026 Upper = C->shl(ShiftAmount) + 1;
7027 }
7028 }
7029 }
7030 break;
7031
7032 case Instruction::SDiv:
7033 if (match(BO.getOperand(1), m_APInt(C))) {
7034 APInt IntMin = APInt::getSignedMinValue(Width);
7035 APInt IntMax = APInt::getSignedMaxValue(Width);
7036 if (C->isAllOnes()) {
7037 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
7038 // (x == INT_MIN is excluded because INT_MIN sdiv -1 overflows).
7039 Lower = IntMin + 1;
7040 Upper = IntMax + 1;
7041 } else if (C->countLeadingZeros() < Width - 1) {
7042 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
7043 // where C != -1 and C != 0 and C != 1
7044 Lower = IntMin.sdiv(*C);
7045 Upper = IntMax.sdiv(*C);
7046 if (Lower.sgt(Upper))
7047 std::swap(Lower, Upper);
7048 Upper = Upper + 1;
7049 assert(Upper != Lower && "Upper part of range has wrapped!");
7050 }
7051 } else if (match(BO.getOperand(0), m_APInt(C))) {
7052 if (C->isMinSignedValue()) {
7053 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
7054 Lower = *C;
7055 Upper = Lower.lshr(1) + 1;
7056 } else {
7057 // 'sdiv C, x' produces [-|C|, |C|].
7058 Upper = C->abs() + 1;
7059 Lower = (-Upper) + 1;
7060 }
7061 }
7062 break;
7063
7064 case Instruction::UDiv:
7065 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
7066 // 'udiv x, C' produces [0, UINT_MAX / C].
7067 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
7068 } else if (match(BO.getOperand(0), m_APInt(C))) {
7069 // 'udiv C, x' produces [0, C].
7070 Upper = *C + 1;
7071 }
7072 break;
7073
7074 case Instruction::SRem:
7075 if (match(BO.getOperand(1), m_APInt(C))) {
7076 // 'srem x, C' produces (-|C|, |C|).
7077 Upper = C->abs();
7078 Lower = (-Upper) + 1;
7079 }
7080 break;
7081
7082 case Instruction::URem:
7083 if (match(BO.getOperand(1), m_APInt(C)))
7084 // 'urem x, C' produces [0, C).
7085 Upper = *C;
7086 break;
7087
7088 default:
7089 break;
7090 }
7091}
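The Lower/Upper pairs computed here feed ConstantRange::getNonEmpty in computeConstantRange below. A small self-contained sketch, using only APInt and ConstantRange and not taken from the analyzed source, that spot-checks two of the commented claims at width 8:

// Illustrative sketch: reproduces two of the commented range claims with APInt.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cassert>

using namespace llvm;

int main() {
  unsigned Width = 8;

  // 'ashr x, 2' on i8 produces [INT_MIN >> 2, INT_MAX >> 2] = [-32, 31].
  APInt Lower = APInt::getSignedMinValue(Width).ashr(2);
  APInt Upper = APInt::getSignedMaxValue(Width).ashr(2) + 1;
  ConstantRange AShrCR = ConstantRange::getNonEmpty(Lower, Upper);
  assert(AShrCR.contains(APInt(Width, -32, /*isSigned=*/true)));
  assert(AShrCR.contains(APInt(Width, 31)));
  assert(!AShrCR.contains(APInt(Width, 32)));

  // 'udiv x, 3' on i8 produces [0, UINT_MAX / 3] = [0, 85].
  ConstantRange UDivCR = ConstantRange::getNonEmpty(
      APInt(Width, 0), APInt::getMaxValue(Width).udiv(APInt(Width, 3)) + 1);
  assert(UDivCR.contains(APInt(Width, 85)));
  assert(!UDivCR.contains(APInt(Width, 86)));
  return 0;
}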
7092
7093static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
7094 APInt &Upper) {
7095 unsigned Width = Lower.getBitWidth();
7096 const APInt *C;
7097 switch (II.getIntrinsicID()) {
7098 case Intrinsic::ctpop:
7099 case Intrinsic::ctlz:
7100 case Intrinsic::cttz:
7101 // Maximum of set/clear bits is the bit width.
7102 assert(Lower == 0 && "Expected lower bound to be zero");
7103 Upper = Width + 1;
7104 break;
7105 case Intrinsic::uadd_sat:
7106 // uadd.sat(x, C) produces [C, UINT_MAX].
7107 if (match(II.getOperand(0), m_APInt(C)) ||
7108 match(II.getOperand(1), m_APInt(C)))
7109 Lower = *C;
7110 break;
7111 case Intrinsic::sadd_sat:
7112 if (match(II.getOperand(0), m_APInt(C)) ||
7113 match(II.getOperand(1), m_APInt(C))) {
7114 if (C->isNegative()) {
7115 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
7116 Lower = APInt::getSignedMinValue(Width);
7117 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
7118 } else {
7119 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
7120 Lower = APInt::getSignedMinValue(Width) + *C;
7121 Upper = APInt::getSignedMaxValue(Width) + 1;
7122 }
7123 }
7124 break;
7125 case Intrinsic::usub_sat:
7126 // usub.sat(C, x) produces [0, C].
7127 if (match(II.getOperand(0), m_APInt(C)))
7128 Upper = *C + 1;
7129 // usub.sat(x, C) produces [0, UINT_MAX - C].
7130 else if (match(II.getOperand(1), m_APInt(C)))
7131 Upper = APInt::getMaxValue(Width) - *C + 1;
7132 break;
7133 case Intrinsic::ssub_sat:
7134 if (match(II.getOperand(0), m_APInt(C))) {
7135 if (C->isNegative()) {
7136 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
7137 Lower = APInt::getSignedMinValue(Width);
7138 Upper = *C - APInt::getSignedMinValue(Width) + 1;
7139 } else {
7140 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
7141 Lower = *C - APInt::getSignedMaxValue(Width);
7142 Upper = APInt::getSignedMaxValue(Width) + 1;
7143 }
7144 } else if (match(II.getOperand(1), m_APInt(C))) {
7145 if (C->isNegative()) {
7146 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
7147 Lower = APInt::getSignedMinValue(Width) - *C;
7148 Upper = APInt::getSignedMaxValue(Width) + 1;
7149 } else {
7150 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
7151 Lower = APInt::getSignedMinValue(Width);
7152 Upper = APInt::getSignedMaxValue(Width) - *C + 1;
7153 }
7154 }
7155 break;
7156 case Intrinsic::umin:
7157 case Intrinsic::umax:
7158 case Intrinsic::smin:
7159 case Intrinsic::smax:
7160 if (!match(II.getOperand(0), m_APInt(C)) &&
7161 !match(II.getOperand(1), m_APInt(C)))
7162 break;
7163
7164 switch (II.getIntrinsicID()) {
7165 case Intrinsic::umin:
7166 Upper = *C + 1;
7167 break;
7168 case Intrinsic::umax:
7169 Lower = *C;
7170 break;
7171 case Intrinsic::smin:
7172 Lower = APInt::getSignedMinValue(Width);
7173 Upper = *C + 1;
7174 break;
7175 case Intrinsic::smax:
7176 Lower = *C;
7177 Upper = APInt::getSignedMaxValue(Width) + 1;
7178 break;
7179 default:
7180 llvm_unreachable("Must be min/max intrinsic");
7181 }
7182 break;
7183 case Intrinsic::abs:
7184 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
7185 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
7186 if (match(II.getOperand(1), m_One()))
7187 Upper = APInt::getSignedMaxValue(Width) + 1;
7188 else
7189 Upper = APInt::getSignedMinValue(Width) + 1;
7190 break;
7191 default:
7192 break;
7193 }
7194}
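At a small bit width the saturating-arithmetic claims can be checked exhaustively. A sketch, not from the analyzed source, using APInt's own saturating add for the 'sadd.sat(x, -2)' case on i8, which the comment above says produces [SINT_MIN, SINT_MAX - 2] = [-128, 125]:

// Illustrative sketch: exhaustively checks the sadd.sat range claim for i8, C = -2.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cassert>

using namespace llvm;

int main() {
  unsigned Width = 8;
  APInt C(Width, -2, /*isSigned=*/true);

  // sadd.sat(x, -2) produces [SINT_MIN, SINT_MAX + (-2)] = [-128, 125].
  APInt Lower = APInt::getSignedMinValue(Width);
  APInt Upper = APInt::getSignedMaxValue(Width) + C + 1;
  ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);

  // Every saturating sum x + (-2) over all 256 i8 values must land in that range.
  for (uint64_t X = 0; X != 256; ++X) {
    APInt Res = APInt(Width, X).sadd_sat(C);
    assert(CR.contains(Res) && "sadd.sat result escaped the claimed range");
  }
  return 0;
}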
7195
7196static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
7197 APInt &Upper, const InstrInfoQuery &IIQ) {
7198 const Value *LHS = nullptr, *RHS = nullptr;
7199 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
7200 if (R.Flavor == SPF_UNKNOWN)
7201 return;
7202
7203 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
7204
7205 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
7206 // If the negation part of the abs (in RHS) has the NSW flag,
7207 // then the result of abs(X) is [0..SIGNED_MAX],
7208 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
7209 Lower = APInt::getZero(BitWidth);
7210 if (match(RHS, m_Neg(m_Specific(LHS))) &&
7211 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
7212 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
7213 else
7214 Upper = APInt::getSignedMinValue(BitWidth) + 1;
7215 return;
7216 }
7217
7218 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
7219 // The result of -abs(X) is <= 0.
7220 Lower = APInt::getSignedMinValue(BitWidth);
7221 Upper = APInt(BitWidth, 1);
7222 return;
7223 }
7224
7225 const APInt *C;
7226 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
7227 return;
7228
7229 switch (R.Flavor) {
7230 case SPF_UMIN:
7231 Upper = *C + 1;
7232 break;
7233 case SPF_UMAX:
7234 Lower = *C;
7235 break;
7236 case SPF_SMIN:
7237 Lower = APInt::getSignedMinValue(BitWidth);
7238 Upper = *C + 1;
7239 break;
7240 case SPF_SMAX:
7241 Lower = *C;
7242 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
7243 break;
7244 default:
7245 break;
7246 }
7247}
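The two possible upper bounds in the SPF_ABS case rest on the two's-complement identity -SIGNED_MIN == SIGNED_MIN. A short sketch of that identity and of the resulting ranges at width 8, for illustration only:

// Illustrative sketch: the two's-complement fact behind the SPF_ABS upper bounds.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cassert>

using namespace llvm;

int main() {
  unsigned BitWidth = 8;
  APInt SMin = APInt::getSignedMinValue(BitWidth); // -128 (128 as unsigned bits)
  APInt SMax = APInt::getSignedMaxValue(BitWidth); //  127

  // -SIGNED_MIN == SIGNED_MIN, so without an NSW negation abs(x) can still
  // produce SIGNED_MIN and the range has to be widened to [0, SIGNED_MIN].
  assert((-SMin) == SMin);

  ConstantRange WithNSW =
      ConstantRange::getNonEmpty(APInt::getZero(BitWidth), SMax + 1); // [0, 127]
  ConstantRange WithoutNSW =
      ConstantRange::getNonEmpty(APInt::getZero(BitWidth), SMin + 1); // [0, 128]
  assert(!WithNSW.contains(SMin));   // SIGNED_MIN excluded when the neg is nsw
  assert(WithoutNSW.contains(SMin)); // SIGNED_MIN included otherwise
  return 0;
}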
7248
7249static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
7250 // The maximum representable value of a half is 65504. For floats the maximum
7251 // value is 3.4e38, which requires roughly 129 bits.
7252 unsigned BitWidth = I->getType()->getScalarSizeInBits();
7253 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
7254 return;
7255 if (isa<FPToSIInst>(I) && BitWidth >= 17) {
7256 Lower = APInt(BitWidth, -65504);
7257 Upper = APInt(BitWidth, 65505);
7258 }
7259
7260 if (isa<FPToUIInst>(I) && BitWidth >= 16) {
7261 // For a fptoui the lower limit is left as 0.
7262 Upper = APInt(BitWidth, 65505);
7263 }
7264}
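Spelled out as the integer ranges the code builds: for a hypothetical 'fptosi half %h to i32' the result lies in [-65504, 65504], and for 'fptoui' in [0, 65504]. A sketch checking a few sample values, not taken from the analyzed source:

// Illustrative sketch: fptosi/fptoui from half can only produce values in these ranges.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cassert>

using namespace llvm;

int main() {
  unsigned BitWidth = 32; // hypothetical fptosi/fptoui half -> i32

  // Signed conversion: [-65504, 65504].
  ConstantRange SignedCR = ConstantRange::getNonEmpty(
      APInt(BitWidth, -65504, /*isSigned=*/true), APInt(BitWidth, 65505));
  assert(SignedCR.contains(APInt(BitWidth, 65504)));
  assert(SignedCR.contains(APInt(BitWidth, -65504, /*isSigned=*/true)));
  assert(!SignedCR.contains(APInt(BitWidth, 65536)));

  // Unsigned conversion: [0, 65504].
  ConstantRange UnsignedCR = ConstantRange::getNonEmpty(
      APInt(BitWidth, 0), APInt(BitWidth, 65505));
  assert(UnsignedCR.contains(APInt(BitWidth, 65504)));
  assert(!UnsignedCR.contains(APInt(BitWidth, -1, /*isSigned=*/true)));
  return 0;
}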
7265
7266ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
7267 bool UseInstrInfo, AssumptionCache *AC,
7268 const Instruction *CtxI,
7269 const DominatorTree *DT,
7270 unsigned Depth) {
7271 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
7272
7273 if (Depth == MaxAnalysisRecursionDepth)
7274 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
7275
7276 const APInt *C;
7277 if (match(V, m_APInt(C)))
7278 return ConstantRange(*C);
7279
7280 InstrInfoQuery IIQ(UseInstrInfo);
7281 unsigned BitWidth = V->getType()->getScalarSizeInBits();
7282 APInt Lower = APInt(BitWidth, 0);
7283 APInt Upper = APInt(BitWidth, 0);
7284 if (auto *BO = dyn_cast<BinaryOperator>(V))
7285 setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
7286 else if (auto *II = dyn_cast<IntrinsicInst>(V))
7287 setLimitsForIntrinsic(*II, Lower, Upper);
7288 else if (auto *SI = dyn_cast<SelectInst>(V))
7289 setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
7290 else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V))
7291 setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
7292
7293 ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
7294
7295 if (auto *I = dyn_cast<Instruction>(V))
7296 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
7297 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
7298
7299 if (CtxI && AC) {
7300 // Try to restrict the range based on information from assumptions.
7301 for (auto &AssumeVH : AC->assumptionsFor(V)) {
7302 if (!AssumeVH)
7303 continue;
7304 CallInst *I = cast<CallInst>(AssumeVH);
7305 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
7306 "Got assumption for the wrong function!");
7307 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
7308 "must be an assume intrinsic");
7309
7310 if (!isValidAssumeForContext(I, CtxI, DT))
7311 continue;
7312 Value *Arg = I->getArgOperand(0);
7313 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
7314 // Currently we just use information from comparisons.
7315 if (!Cmp || Cmp->getOperand(0) != V)
7316 continue;
7317 // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
7318 ConstantRange RHS =
7319 computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
7320 UseInstrInfo, AC, I, DT, Depth + 1);
7321 CR = CR.intersectWith(
7322 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
7323 }
7324 }
7325
7326 return CR;
7327}
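A minimal usage sketch of this entry point (invented names, standalone tool assumed, not part of the analyzed source): masking an i8 argument with 15 goes through the 'and x, C' rule in setLimitsForBinOp above, so the reported unsigned maximum should be 15.

// Illustrative sketch: computeConstantRange on 'and i8 %x, 15'.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("range-demo", Ctx);
  IRBuilder<> B(Ctx);

  Function *F = Function::Create(
      FunctionType::get(B.getInt8Ty(), {B.getInt8Ty()}, /*isVarArg=*/false),
      Function::ExternalLinkage, "f", M);
  B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));

  Value *Masked = B.CreateAnd(F->getArg(0), B.getInt8(15), "masked");
  B.CreateRet(Masked);

  // 'and x, C' produces [0, C], so the unsigned max should be 15.
  ConstantRange CR =
      computeConstantRange(Masked, /*ForSigned=*/false, /*UseInstrInfo=*/true);
  errs() << "unsigned max = " << CR.getUnsignedMax().getZExtValue() << "\n";
  return 0;
}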
7328
7329static Optional<int64_t>
7330getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
7331 // Skip over the first indices.
7332 gep_type_iterator GTI = gep_type_begin(GEP);
7333 for (unsigned i = 1; i != Idx; ++i, ++GTI)
7334 /*skip along*/;
7335
7336 // Compute the offset implied by the rest of the indices.
7337 int64_t Offset = 0;
7338 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
7339 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
7340 if (!OpC)
7341 return None;
7342 if (OpC->isZero())
7343 continue; // No offset.
7344
7345 // Handle struct indices, which add their field offset to the pointer.
7346 if (StructType *STy = GTI.getStructTypeOrNull()) {
7347 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
7348 continue;
7349 }
7350
7351 // Otherwise, we have a sequential type like an array or fixed-length
7352 // vector. Multiply the index by the ElementSize.
7353 TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
7354 if (Size.isScalable())
7355 return None;
7356 Offset += Size.getFixedSize() * OpC->getSExtValue();
7357 }
7358
7359 return Offset;
7360}
7361
7362Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
7363 const DataLayout &DL) {
7364 APInt Offset1(DL.getIndexTypeSizeInBits(Ptr1->getType()), 0);
7365 APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);
7366 Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset1, true);
7367 Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true);
7368
7369 // Handle the trivial case first.
7370 if (Ptr1 == Ptr2)
7371 return Offset2.getSExtValue() - Offset1.getSExtValue();
7372
7373 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
7374 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
7375
7376 // Right now we handle only the case where Ptr1/Ptr2 are both GEPs with an
7377 // identical base. After that base, they may have some number of common (and
7378 // potentially variable) indices. After those, each may add a constant
7379 // offset, and that constant offset determines their distance from each
7380 // other. We handle no other cases.
7381 if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0) ||
7382 GEP1->getSourceElementType() != GEP2->getSourceElementType())
7383 return None;
7384
7385 // Skip any common indices and track the GEP types.
7386 unsigned Idx = 1;
7387 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
7388 if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
7389 break;
7390
7391 auto IOffset1 = getOffsetFromIndex(GEP1, Idx, DL);
7392 auto IOffset2 = getOffsetFromIndex(GEP2, Idx, DL);
7393 if (!IOffset1 || !IOffset2)
7394 return None;
7395 return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
7396 Offset1.getSExtValue();
7397}
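A usage sketch for isPointerOffset, again with invented names and assuming a standalone tool: both pointers are constant GEPs off the same base, so stripAndAccumulateConstantOffsets reduces them to the same underlying pointer and the result is simply the difference of the accumulated byte offsets.

// Illustrative sketch: byte distance between &base[3] and &base[7] through i32*.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("ptr-offset-demo", Ctx);
  IRBuilder<> B(Ctx);

  Type *I32 = B.getInt32Ty();
  Function *F = Function::Create(
      FunctionType::get(B.getVoidTy(), {PointerType::getUnqual(I32)},
                        /*isVarArg=*/false),
      Function::ExternalLinkage, "f", M);
  B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));

  Value *Base = F->getArg(0);
  Value *P1 = B.CreateConstGEP1_64(I32, Base, 3, "p1"); // base + 12 bytes
  Value *P2 = B.CreateConstGEP1_64(I32, Base, 7, "p2"); // base + 28 bytes
  B.CreateRetVoid();

  // Both GEPs strip back to %base, so the answer is 28 - 12 = 16 bytes.
  if (Optional<int64_t> Diff = isPointerOffset(P1, P2, M.getDataLayout()))
    errs() << "offset = " << *Diff << "\n";
  return 0;
}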