Bug Summary

File:build/source/llvm/lib/Analysis/ValueTracking.cpp
Warning:line 5641, column 22
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ValueTracking.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm -resource-dir /usr/lib/llvm-17/lib/clang/17 -I lib/Analysis -I /build/source/llvm/lib/Analysis -I include -I /build/source/llvm/include -D _DEBUG -D _GLIBCXX_ASSERTIONS -D _GNU_SOURCE -D _LIBCPP_ENABLE_ASSERTIONS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm=build-llvm -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm=build-llvm -fcoverage-prefix-map=/build/source/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-05-10-133810-16478-1 -x c++ /build/source/llvm/lib/Analysis/ValueTracking.cpp
1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains routines that help analyze properties that chains of
10// computations have.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/ValueTracking.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/ScopeExit.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/iterator_range.h"
25#include "llvm/Analysis/AliasAnalysis.h"
26#include "llvm/Analysis/AssumeBundleQueries.h"
27#include "llvm/Analysis/AssumptionCache.h"
28#include "llvm/Analysis/ConstantFolding.h"
29#include "llvm/Analysis/GuardUtils.h"
30#include "llvm/Analysis/InstructionSimplify.h"
31#include "llvm/Analysis/Loads.h"
32#include "llvm/Analysis/LoopInfo.h"
33#include "llvm/Analysis/OptimizationRemarkEmitter.h"
34#include "llvm/Analysis/TargetLibraryInfo.h"
35#include "llvm/Analysis/VectorUtils.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
38#include "llvm/IR/BasicBlock.h"
39#include "llvm/IR/Constant.h"
40#include "llvm/IR/ConstantRange.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DerivedTypes.h"
43#include "llvm/IR/DiagnosticInfo.h"
44#include "llvm/IR/Dominators.h"
45#include "llvm/IR/EHPersonalities.h"
46#include "llvm/IR/Function.h"
47#include "llvm/IR/GetElementPtrTypeIterator.h"
48#include "llvm/IR/GlobalAlias.h"
49#include "llvm/IR/GlobalValue.h"
50#include "llvm/IR/GlobalVariable.h"
51#include "llvm/IR/InstrTypes.h"
52#include "llvm/IR/Instruction.h"
53#include "llvm/IR/Instructions.h"
54#include "llvm/IR/IntrinsicInst.h"
55#include "llvm/IR/Intrinsics.h"
56#include "llvm/IR/IntrinsicsAArch64.h"
57#include "llvm/IR/IntrinsicsRISCV.h"
58#include "llvm/IR/IntrinsicsX86.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/Metadata.h"
61#include "llvm/IR/Module.h"
62#include "llvm/IR/Operator.h"
63#include "llvm/IR/PatternMatch.h"
64#include "llvm/IR/Type.h"
65#include "llvm/IR/User.h"
66#include "llvm/IR/Value.h"
67#include "llvm/Support/Casting.h"
68#include "llvm/Support/CommandLine.h"
69#include "llvm/Support/Compiler.h"
70#include "llvm/Support/ErrorHandling.h"
71#include "llvm/Support/KnownBits.h"
72#include "llvm/Support/MathExtras.h"
73#include <algorithm>
74#include <cassert>
75#include <cstdint>
76#include <optional>
77#include <utility>
78
79using namespace llvm;
80using namespace llvm::PatternMatch;
81
82// Controls the number of uses of the value searched for possible
83// dominating comparisons.
84static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
85 cl::Hidden, cl::init(20));
86
87
88/// Returns the bitwidth of the given scalar or pointer type. For vector types,
89/// returns the element type's bitwidth.
90static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
91 if (unsigned BitWidth = Ty->getScalarSizeInBits())
92 return BitWidth;
93
94 return DL.getPointerTypeSizeInBits(Ty);
95}
96
97namespace {
98
99// Simplifying using an assume can only be done in a particular control-flow
100// context (the context instruction provides that context). If an assume and
101// the context instruction are not in the same block then the DT helps in
102// figuring out if we can use it.
103struct Query {
104 const DataLayout &DL;
105 AssumptionCache *AC;
106 const Instruction *CxtI;
107 const DominatorTree *DT;
108
109 // Unlike the other analyses, this may be a nullptr because not all clients
110 // provide it currently.
111 OptimizationRemarkEmitter *ORE;
112
113 /// If true, it is safe to use metadata during simplification.
114 InstrInfoQuery IIQ;
115
116 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
117 const DominatorTree *DT, bool UseInstrInfo,
118 OptimizationRemarkEmitter *ORE = nullptr)
119 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
120};
121
122} // end anonymous namespace
123
124// Given the provided Value and, potentially, a context instruction, return
125// the preferred context instruction (if any).
126static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
127 // If we've been provided with a context instruction, then use that (provided
128 // it has been inserted).
129 if (CxtI && CxtI->getParent())
130 return CxtI;
131
132 // If the value is really an already-inserted instruction, then use that.
133 CxtI = dyn_cast<Instruction>(V);
134 if (CxtI && CxtI->getParent())
135 return CxtI;
136
137 return nullptr;
138}
139
140static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
141 // If we've been provided with a context instruction, then use that (provided
142 // it has been inserted).
143 if (CxtI && CxtI->getParent())
144 return CxtI;
145
146 // If the value is really an already-inserted instruction, then use that.
147 CxtI = dyn_cast<Instruction>(V1);
148 if (CxtI && CxtI->getParent())
149 return CxtI;
150
151 CxtI = dyn_cast<Instruction>(V2);
152 if (CxtI && CxtI->getParent())
153 return CxtI;
154
155 return nullptr;
156}
157
158static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
159 const APInt &DemandedElts,
160 APInt &DemandedLHS, APInt &DemandedRHS) {
161 if (isa<ScalableVectorType>(Shuf->getType())) {
162 assert(DemandedElts == APInt(1,1))(static_cast <bool> (DemandedElts == APInt(1,1)) ? void
(0) : __assert_fail ("DemandedElts == APInt(1,1)", "llvm/lib/Analysis/ValueTracking.cpp"
, 162, __extension__ __PRETTY_FUNCTION__))
;
163 DemandedLHS = DemandedRHS = DemandedElts;
164 return true;
165 }
166
167 int NumElts =
168 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
169 return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(),
170 DemandedElts, DemandedLHS, DemandedRHS);
171}
172
173static void computeKnownBits(const Value *V, const APInt &DemandedElts,
174 KnownBits &Known, unsigned Depth, const Query &Q);
175
176static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
177 const Query &Q) {
178 // Since the number of lanes in a scalable vector is unknown at compile time,
179 // we track one bit which is implicitly broadcast to all lanes. This means
180 // that all lanes in a scalable vector are considered demanded.
181 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
182 APInt DemandedElts =
183 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
184 computeKnownBits(V, DemandedElts, Known, Depth, Q);
185}
186
187void llvm::computeKnownBits(const Value *V, KnownBits &Known,
188 const DataLayout &DL, unsigned Depth,
189 AssumptionCache *AC, const Instruction *CxtI,
190 const DominatorTree *DT,
191 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
192 ::computeKnownBits(V, Known, Depth,
193 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
194}
195
196void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
197 KnownBits &Known, const DataLayout &DL,
198 unsigned Depth, AssumptionCache *AC,
199 const Instruction *CxtI, const DominatorTree *DT,
200 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
201 ::computeKnownBits(V, DemandedElts, Known, Depth,
202 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
203}
204
205static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
206 unsigned Depth, const Query &Q);
207
208static KnownBits computeKnownBits(const Value *V, unsigned Depth,
209 const Query &Q);
210
211KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
212 unsigned Depth, AssumptionCache *AC,
213 const Instruction *CxtI,
214 const DominatorTree *DT,
215 OptimizationRemarkEmitter *ORE,
216 bool UseInstrInfo) {
217 return ::computeKnownBits(
218 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
219}
220
221KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
222 const DataLayout &DL, unsigned Depth,
223 AssumptionCache *AC, const Instruction *CxtI,
224 const DominatorTree *DT,
225 OptimizationRemarkEmitter *ORE,
226 bool UseInstrInfo) {
227 return ::computeKnownBits(
228 V, DemandedElts, Depth,
229 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
230}
231
232bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
233 const DataLayout &DL, AssumptionCache *AC,
234 const Instruction *CxtI, const DominatorTree *DT,
235 bool UseInstrInfo) {
236 assert(LHS->getType() == RHS->getType() &&(static_cast <bool> (LHS->getType() == RHS->getType
() && "LHS and RHS should have the same type") ? void
(0) : __assert_fail ("LHS->getType() == RHS->getType() && \"LHS and RHS should have the same type\""
, "llvm/lib/Analysis/ValueTracking.cpp", 237, __extension__ __PRETTY_FUNCTION__
))
237 "LHS and RHS should have the same type")(static_cast <bool> (LHS->getType() == RHS->getType
() && "LHS and RHS should have the same type") ? void
(0) : __assert_fail ("LHS->getType() == RHS->getType() && \"LHS and RHS should have the same type\""
, "llvm/lib/Analysis/ValueTracking.cpp", 237, __extension__ __PRETTY_FUNCTION__
))
;
238 assert(LHS->getType()->isIntOrIntVectorTy() &&(static_cast <bool> (LHS->getType()->isIntOrIntVectorTy
() && "LHS and RHS should be integers") ? void (0) : __assert_fail
("LHS->getType()->isIntOrIntVectorTy() && \"LHS and RHS should be integers\""
, "llvm/lib/Analysis/ValueTracking.cpp", 239, __extension__ __PRETTY_FUNCTION__
))
239 "LHS and RHS should be integers")(static_cast <bool> (LHS->getType()->isIntOrIntVectorTy
() && "LHS and RHS should be integers") ? void (0) : __assert_fail
("LHS->getType()->isIntOrIntVectorTy() && \"LHS and RHS should be integers\""
, "llvm/lib/Analysis/ValueTracking.cpp", 239, __extension__ __PRETTY_FUNCTION__
))
;
240 // Look for an inverted mask: (X & ~M) op (Y & M).
241 {
242 Value *M;
243 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
244 match(RHS, m_c_And(m_Specific(M), m_Value())))
245 return true;
246 if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
247 match(LHS, m_c_And(m_Specific(M), m_Value())))
248 return true;
249 }
250
251 // X op (Y & ~X)
252 if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
253 match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
254 return true;
255
256 // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
257 // for constant Y.
258 Value *Y;
259 if (match(RHS,
260 m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
261 match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
262 return true;
263
264 // Peek through extends to find a 'not' of the other side:
265 // (ext Y) op ext(~Y)
266 // (ext ~Y) op ext(Y)
267 if ((match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
268 match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))) ||
269 (match(RHS, m_ZExtOrSExt(m_Value(Y))) &&
270 match(LHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))))
271 return true;
272
273 // Look for: (A & B) op ~(A | B)
274 {
275 Value *A, *B;
276 if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
277 match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
278 return true;
279 if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
280 match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
281 return true;
282 }
283 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
284 KnownBits LHSKnown(IT->getBitWidth());
285 KnownBits RHSKnown(IT->getBitWidth());
286 computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
287 computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
288 return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
289}
290
291bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
292 return !I->user_empty() && all_of(I->users(), [](const User *U) {
293 ICmpInst::Predicate P;
294 return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
295 });
296}
297
298static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
299 const Query &Q);
300
301bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
302 bool OrZero, unsigned Depth,
303 AssumptionCache *AC, const Instruction *CxtI,
304 const DominatorTree *DT, bool UseInstrInfo) {
305 return ::isKnownToBeAPowerOfTwo(
306 V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
307}
308
309static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
310 unsigned Depth, const Query &Q);
311
312static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
313
314bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
315 AssumptionCache *AC, const Instruction *CxtI,
316 const DominatorTree *DT, bool UseInstrInfo) {
317 return ::isKnownNonZero(V, Depth,
318 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
319}
320
321bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
322 unsigned Depth, AssumptionCache *AC,
323 const Instruction *CxtI, const DominatorTree *DT,
324 bool UseInstrInfo) {
325 KnownBits Known =
326 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
327 return Known.isNonNegative();
328}
329
330bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
331 AssumptionCache *AC, const Instruction *CxtI,
332 const DominatorTree *DT, bool UseInstrInfo) {
333 if (auto *CI = dyn_cast<ConstantInt>(V))
334 return CI->getValue().isStrictlyPositive();
335
336 // TODO: We'd doing two recursive queries here. We should factor this such
337 // that only a single query is needed.
338 return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
339 isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
340}
341
342bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
343 AssumptionCache *AC, const Instruction *CxtI,
344 const DominatorTree *DT, bool UseInstrInfo) {
345 KnownBits Known =
346 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
347 return Known.isNegative();
348}
349
350static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
351 const Query &Q);
352
353bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
354 const DataLayout &DL, AssumptionCache *AC,
355 const Instruction *CxtI, const DominatorTree *DT,
356 bool UseInstrInfo) {
357 return ::isKnownNonEqual(V1, V2, 0,
358 Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
359 UseInstrInfo, /*ORE=*/nullptr));
360}
361
362static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
363 const Query &Q);
364
365bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
366 const DataLayout &DL, unsigned Depth,
367 AssumptionCache *AC, const Instruction *CxtI,
368 const DominatorTree *DT, bool UseInstrInfo) {
369 return ::MaskedValueIsZero(
370 V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
371}
372
373static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
374 unsigned Depth, const Query &Q);
375
376static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
377 const Query &Q) {
378 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
379 APInt DemandedElts =
380 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
381 return ComputeNumSignBits(V, DemandedElts, Depth, Q);
382}
383
384unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
385 unsigned Depth, AssumptionCache *AC,
386 const Instruction *CxtI,
387 const DominatorTree *DT, bool UseInstrInfo) {
388 return ::ComputeNumSignBits(
389 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
390}
391
392unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
393 unsigned Depth, AssumptionCache *AC,
394 const Instruction *CxtI,
395 const DominatorTree *DT) {
396 unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
397 return V->getType()->getScalarSizeInBits() - SignBits + 1;
398}
399
400static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
401 bool NSW, const APInt &DemandedElts,
402 KnownBits &KnownOut, KnownBits &Known2,
403 unsigned Depth, const Query &Q) {
404 computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
405
406 // If one operand is unknown and we have no nowrap information,
407 // the result will be unknown independently of the second operand.
408 if (KnownOut.isUnknown() && !NSW)
409 return;
410
411 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
412 KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
413}
414
415static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
416 const APInt &DemandedElts, KnownBits &Known,
417 KnownBits &Known2, unsigned Depth,
418 const Query &Q) {
419 computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
420 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
421
422 bool isKnownNegative = false;
423 bool isKnownNonNegative = false;
424 // If the multiplication is known not to overflow, compute the sign bit.
425 if (NSW) {
426 if (Op0 == Op1) {
427 // The product of a number with itself is non-negative.
428 isKnownNonNegative = true;
429 } else {
430 bool isKnownNonNegativeOp1 = Known.isNonNegative();
431 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
432 bool isKnownNegativeOp1 = Known.isNegative();
433 bool isKnownNegativeOp0 = Known2.isNegative();
434 // The product of two numbers with the same sign is non-negative.
435 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
436 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
437 // The product of a negative number and a non-negative number is either
438 // negative or zero.
439 if (!isKnownNonNegative)
440 isKnownNegative =
441 (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
442 Known2.isNonZero()) ||
443 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
444 }
445 }
446
447 bool SelfMultiply = Op0 == Op1;
448 // TODO: SelfMultiply can be poison, but not undef.
449 if (SelfMultiply)
450 SelfMultiply &=
451 isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
452 Known = KnownBits::mul(Known, Known2, SelfMultiply);
453
454 // Only make use of no-wrap flags if we failed to compute the sign bit
455 // directly. This matters if the multiplication always overflows, in
456 // which case we prefer to follow the result of the direct computation,
457 // though as the program is invoking undefined behaviour we can choose
458 // whatever we like here.
459 if (isKnownNonNegative && !Known.isNegative())
460 Known.makeNonNegative();
461 else if (isKnownNegative && !Known.isNonNegative())
462 Known.makeNegative();
463}
464
465void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
466 KnownBits &Known) {
467 unsigned BitWidth = Known.getBitWidth();
468 unsigned NumRanges = Ranges.getNumOperands() / 2;
469 assert(NumRanges >= 1)(static_cast <bool> (NumRanges >= 1) ? void (0) : __assert_fail
("NumRanges >= 1", "llvm/lib/Analysis/ValueTracking.cpp",
469, __extension__ __PRETTY_FUNCTION__))
;
470
471 Known.Zero.setAllBits();
472 Known.One.setAllBits();
473
474 for (unsigned i = 0; i < NumRanges; ++i) {
475 ConstantInt *Lower =
476 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
477 ConstantInt *Upper =
478 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
479 ConstantRange Range(Lower->getValue(), Upper->getValue());
480
481 // The first CommonPrefixBits of all values in Range are equal.
482 unsigned CommonPrefixBits =
483 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
484 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
485 APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
486 Known.One &= UnsignedMax & Mask;
487 Known.Zero &= ~UnsignedMax & Mask;
488 }
489}
490
491static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
492 SmallVector<const Value *, 16> WorkSet(1, I);
493 SmallPtrSet<const Value *, 32> Visited;
494 SmallPtrSet<const Value *, 16> EphValues;
495
496 // The instruction defining an assumption's condition itself is always
497 // considered ephemeral to that assumption (even if it has other
498 // non-ephemeral users). See r246696's test case for an example.
499 if (is_contained(I->operands(), E))
500 return true;
501
502 while (!WorkSet.empty()) {
503 const Value *V = WorkSet.pop_back_val();
504 if (!Visited.insert(V).second)
505 continue;
506
507 // If all uses of this value are ephemeral, then so is this value.
508 if (llvm::all_of(V->users(), [&](const User *U) {
509 return EphValues.count(U);
510 })) {
511 if (V == E)
512 return true;
513
514 if (V == I || (isa<Instruction>(V) &&
515 !cast<Instruction>(V)->mayHaveSideEffects() &&
516 !cast<Instruction>(V)->isTerminator())) {
517 EphValues.insert(V);
518 if (const User *U = dyn_cast<User>(V))
519 append_range(WorkSet, U->operands());
520 }
521 }
522 }
523
524 return false;
525}
526
527// Is this an intrinsic that cannot be speculated but also cannot trap?
528bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
529 if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
530 return CI->isAssumeLikeIntrinsic();
531
532 return false;
533}
534
535bool llvm::isValidAssumeForContext(const Instruction *Inv,
536 const Instruction *CxtI,
537 const DominatorTree *DT) {
538 // There are two restrictions on the use of an assume:
539 // 1. The assume must dominate the context (or the control flow must
540 // reach the assume whenever it reaches the context).
541 // 2. The context must not be in the assume's set of ephemeral values
542 // (otherwise we will use the assume to prove that the condition
543 // feeding the assume is trivially true, thus causing the removal of
544 // the assume).
545
546 if (Inv->getParent() == CxtI->getParent()) {
547 // If Inv and CtxI are in the same block, check if the assume (Inv) is first
548 // in the BB.
549 if (Inv->comesBefore(CxtI))
550 return true;
551
552 // Don't let an assume affect itself - this would cause the problems
553 // `isEphemeralValueOf` is trying to prevent, and it would also make
554 // the loop below go out of bounds.
555 if (Inv == CxtI)
556 return false;
557
558 // The context comes first, but they're both in the same block.
559 // Make sure there is nothing in between that might interrupt
560 // the control flow, not even CxtI itself.
561 // We limit the scan distance between the assume and its context instruction
562 // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
563 // it can be adjusted if needed (could be turned into a cl::opt).
564 auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
565 if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
566 return false;
567
568 return !isEphemeralValueOf(Inv, CxtI);
569 }
570
571 // Inv and CxtI are in different blocks.
572 if (DT) {
573 if (DT->dominates(Inv, CxtI))
574 return true;
575 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
576 // We don't have a DT, but this trivially dominates.
577 return true;
578 }
579
580 return false;
581}
582
583static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
584 // v u> y implies v != 0.
585 if (Pred == ICmpInst::ICMP_UGT)
586 return true;
587
588 // Special-case v != 0 to also handle v != null.
589 if (Pred == ICmpInst::ICMP_NE)
590 return match(RHS, m_Zero());
591
592 // All other predicates - rely on generic ConstantRange handling.
593 const APInt *C;
594 if (!match(RHS, m_APInt(C)))
595 return false;
596
597 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
598 return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
599}
600
601static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
602 // Use of assumptions is context-sensitive. If we don't have a context, we
603 // cannot use them!
604 if (!Q.AC || !Q.CxtI)
605 return false;
606
607 if (Q.CxtI && V->getType()->isPointerTy()) {
608 SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
609 if (!NullPointerIsDefined(Q.CxtI->getFunction(),
610 V->getType()->getPointerAddressSpace()))
611 AttrKinds.push_back(Attribute::Dereferenceable);
612
613 if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
614 return true;
615 }
616
617 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
618 if (!AssumeVH)
619 continue;
620 CallInst *I = cast<CallInst>(AssumeVH);
621 assert(I->getFunction() == Q.CxtI->getFunction() &&(static_cast <bool> (I->getFunction() == Q.CxtI->
getFunction() && "Got assumption for the wrong function!"
) ? void (0) : __assert_fail ("I->getFunction() == Q.CxtI->getFunction() && \"Got assumption for the wrong function!\""
, "llvm/lib/Analysis/ValueTracking.cpp", 622, __extension__ __PRETTY_FUNCTION__
))
622 "Got assumption for the wrong function!")(static_cast <bool> (I->getFunction() == Q.CxtI->
getFunction() && "Got assumption for the wrong function!"
) ? void (0) : __assert_fail ("I->getFunction() == Q.CxtI->getFunction() && \"Got assumption for the wrong function!\""
, "llvm/lib/Analysis/ValueTracking.cpp", 622, __extension__ __PRETTY_FUNCTION__
))
;
623
624 // Warning: This loop can end up being somewhat performance sensitive.
625 // We're running this loop for once for each value queried resulting in a
626 // runtime of ~O(#assumes * #values).
627
628 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&(static_cast <bool> (I->getCalledFunction()->getIntrinsicID
() == Intrinsic::assume && "must be an assume intrinsic"
) ? void (0) : __assert_fail ("I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && \"must be an assume intrinsic\""
, "llvm/lib/Analysis/ValueTracking.cpp", 629, __extension__ __PRETTY_FUNCTION__
))
629 "must be an assume intrinsic")(static_cast <bool> (I->getCalledFunction()->getIntrinsicID
() == Intrinsic::assume && "must be an assume intrinsic"
) ? void (0) : __assert_fail ("I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && \"must be an assume intrinsic\""
, "llvm/lib/Analysis/ValueTracking.cpp", 629, __extension__ __PRETTY_FUNCTION__
))
;
630
631 Value *RHS;
632 CmpInst::Predicate Pred;
633 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
634 if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
635 return false;
636
637 if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
638 return true;
639 }
640
641 return false;
642}
643
644static void computeKnownBitsFromCmp(const Value *V, const ICmpInst *Cmp,
645 KnownBits &Known, unsigned Depth,
646 const Query &Q) {
647 unsigned BitWidth = Known.getBitWidth();
648 // We are attempting to compute known bits for the operands of an assume.
649 // Do not try to use other assumptions for those recursive calls because
650 // that can lead to mutual recursion and a compile-time explosion.
651 // An example of the mutual recursion: computeKnownBits can call
652 // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
653 // and so on.
654 Query QueryNoAC = Q;
655 QueryNoAC.AC = nullptr;
656
657 // Note that ptrtoint may change the bitwidth.
658 Value *A, *B;
659 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
660
661 CmpInst::Predicate Pred;
662 uint64_t C;
663 switch (Cmp->getPredicate()) {
664 default:
665 break;
666 case ICmpInst::ICMP_EQ:
667 // assume(v = a)
668 if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A)))) {
669 KnownBits RHSKnown =
670 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
671 Known.Zero |= RHSKnown.Zero;
672 Known.One |= RHSKnown.One;
673 // assume(v & b = a)
674 } else if (match(Cmp,
675 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A)))) {
676 KnownBits RHSKnown =
677 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
678 KnownBits MaskKnown =
679 computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
680
681 // For those bits in the mask that are known to be one, we can propagate
682 // known bits from the RHS to V.
683 Known.Zero |= RHSKnown.Zero & MaskKnown.One;
684 Known.One |= RHSKnown.One & MaskKnown.One;
685 // assume(~(v & b) = a)
686 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
687 m_Value(A)))) {
688 KnownBits RHSKnown =
689 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
690 KnownBits MaskKnown =
691 computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
692
693 // For those bits in the mask that are known to be one, we can propagate
694 // inverted known bits from the RHS to V.
695 Known.Zero |= RHSKnown.One & MaskKnown.One;
696 Known.One |= RHSKnown.Zero & MaskKnown.One;
697 // assume(v | b = a)
698 } else if (match(Cmp,
699 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A)))) {
700 KnownBits RHSKnown =
701 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
702 KnownBits BKnown =
703 computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
704
705 // For those bits in B that are known to be zero, we can propagate known
706 // bits from the RHS to V.
707 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
708 Known.One |= RHSKnown.One & BKnown.Zero;
709 // assume(~(v | b) = a)
710 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
711 m_Value(A)))) {
712 KnownBits RHSKnown =
713 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
714 KnownBits BKnown =
715 computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
716
717 // For those bits in B that are known to be zero, we can propagate
718 // inverted known bits from the RHS to V.
719 Known.Zero |= RHSKnown.One & BKnown.Zero;
720 Known.One |= RHSKnown.Zero & BKnown.Zero;
721 // assume(v ^ b = a)
722 } else if (match(Cmp,
723 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A)))) {
724 KnownBits RHSKnown =
725 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
726 KnownBits BKnown =
727 computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
728
729 // For those bits in B that are known to be zero, we can propagate known
730 // bits from the RHS to V. For those bits in B that are known to be one,
731 // we can propagate inverted known bits from the RHS to V.
732 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
733 Known.One |= RHSKnown.One & BKnown.Zero;
734 Known.Zero |= RHSKnown.One & BKnown.One;
735 Known.One |= RHSKnown.Zero & BKnown.One;
736 // assume(~(v ^ b) = a)
737 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
738 m_Value(A)))) {
739 KnownBits RHSKnown =
740 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
741 KnownBits BKnown =
742 computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
743
744 // For those bits in B that are known to be zero, we can propagate
745 // inverted known bits from the RHS to V. For those bits in B that are
746 // known to be one, we can propagate known bits from the RHS to V.
747 Known.Zero |= RHSKnown.One & BKnown.Zero;
748 Known.One |= RHSKnown.Zero & BKnown.Zero;
749 Known.Zero |= RHSKnown.Zero & BKnown.One;
750 Known.One |= RHSKnown.One & BKnown.One;
751 // assume(v << c = a)
752 } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
753 m_Value(A))) &&
754 C < BitWidth) {
755 KnownBits RHSKnown =
756 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
757
758 // For those bits in RHS that are known, we can propagate them to known
759 // bits in V shifted to the right by C.
760 RHSKnown.Zero.lshrInPlace(C);
761 Known.Zero |= RHSKnown.Zero;
762 RHSKnown.One.lshrInPlace(C);
763 Known.One |= RHSKnown.One;
764 // assume(~(v << c) = a)
765 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
766 m_Value(A))) &&
767 C < BitWidth) {
768 KnownBits RHSKnown =
769 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
770 // For those bits in RHS that are known, we can propagate them inverted
771 // to known bits in V shifted to the right by C.
772 RHSKnown.One.lshrInPlace(C);
773 Known.Zero |= RHSKnown.One;
774 RHSKnown.Zero.lshrInPlace(C);
775 Known.One |= RHSKnown.Zero;
776 // assume(v >> c = a)
777 } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
778 m_Value(A))) &&
779 C < BitWidth) {
780 KnownBits RHSKnown =
781 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
782 // For those bits in RHS that are known, we can propagate them to known
783 // bits in V shifted to the right by C.
784 Known.Zero |= RHSKnown.Zero << C;
785 Known.One |= RHSKnown.One << C;
786 // assume(~(v >> c) = a)
787 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
788 m_Value(A))) &&
789 C < BitWidth) {
790 KnownBits RHSKnown =
791 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
792 // For those bits in RHS that are known, we can propagate them inverted
793 // to known bits in V shifted to the right by C.
794 Known.Zero |= RHSKnown.One << C;
795 Known.One |= RHSKnown.Zero << C;
796 }
797 break;
798 case ICmpInst::ICMP_SGE:
799 // assume(v >=_s c) where c is non-negative
800 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
801 KnownBits RHSKnown =
802 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
803
804 if (RHSKnown.isNonNegative()) {
805 // We know that the sign bit is zero.
806 Known.makeNonNegative();
807 }
808 }
809 break;
810 case ICmpInst::ICMP_SGT:
811 // assume(v >_s c) where c is at least -1.
812 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
813 KnownBits RHSKnown =
814 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
815
816 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
817 // We know that the sign bit is zero.
818 Known.makeNonNegative();
819 }
820 }
821 break;
822 case ICmpInst::ICMP_SLE:
823 // assume(v <=_s c) where c is negative
824 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
825 KnownBits RHSKnown =
826 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
827
828 if (RHSKnown.isNegative()) {
829 // We know that the sign bit is one.
830 Known.makeNegative();
831 }
832 }
833 break;
834 case ICmpInst::ICMP_SLT:
835 // assume(v <_s c) where c is non-positive
836 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
837 KnownBits RHSKnown =
838 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
839
840 if (RHSKnown.isZero() || RHSKnown.isNegative()) {
841 // We know that the sign bit is one.
842 Known.makeNegative();
843 }
844 }
845 break;
846 case ICmpInst::ICMP_ULE:
847 // assume(v <=_u c)
848 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
849 KnownBits RHSKnown =
850 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
851
852 // Whatever high bits in c are zero are known to be zero.
853 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
854 }
855 break;
856 case ICmpInst::ICMP_ULT:
857 // assume(v <_u c)
858 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
859 KnownBits RHSKnown =
860 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
861
862 // If the RHS is known zero, then this assumption must be wrong (nothing
863 // is unsigned less than zero). Signal a conflict and get out of here.
864 if (RHSKnown.isZero()) {
865 Known.Zero.setAllBits();
866 Known.One.setAllBits();
867 break;
868 }
869
870 // Whatever high bits in c are zero are known to be zero (if c is a power
871 // of 2, then one more).
872 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
873 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
874 else
875 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
876 }
877 break;
878 case ICmpInst::ICMP_NE: {
879 // assume (v & b != 0) where b is a power of 2
880 const APInt *BPow2;
881 if (match(Cmp, m_ICmp(Pred, m_c_And(m_V, m_Power2(BPow2)), m_Zero()))) {
882 Known.One |= BPow2->zextOrTrunc(BitWidth);
883 }
884 } break;
885 }
886}
887
888static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
889 unsigned Depth, const Query &Q) {
890 // Use of assumptions is context-sensitive. If we don't have a context, we
891 // cannot use them!
892 if (!Q.AC || !Q.CxtI)
893 return;
894
895 unsigned BitWidth = Known.getBitWidth();
896
897 // Refine Known set if the pointer alignment is set by assume bundles.
898 if (V->getType()->isPointerTy()) {
899 if (RetainedKnowledge RK = getKnowledgeValidInContext(
900 V, { Attribute::Alignment }, Q.CxtI, Q.DT, Q.AC)) {
901 if (isPowerOf2_64(RK.ArgValue))
902 Known.Zero.setLowBits(Log2_64(RK.ArgValue));
903 }
904 }
905
906 // Note that the patterns below need to be kept in sync with the code
907 // in AssumptionCache::updateAffectedValues.
908
909 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
910 if (!AssumeVH)
911 continue;
912 CallInst *I = cast<CallInst>(AssumeVH);
913 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&(static_cast <bool> (I->getParent()->getParent() ==
Q.CxtI->getParent()->getParent() && "Got assumption for the wrong function!"
) ? void (0) : __assert_fail ("I->getParent()->getParent() == Q.CxtI->getParent()->getParent() && \"Got assumption for the wrong function!\""
, "llvm/lib/Analysis/ValueTracking.cpp", 914, __extension__ __PRETTY_FUNCTION__
))
914 "Got assumption for the wrong function!")(static_cast <bool> (I->getParent()->getParent() ==
Q.CxtI->getParent()->getParent() && "Got assumption for the wrong function!"
) ? void (0) : __assert_fail ("I->getParent()->getParent() == Q.CxtI->getParent()->getParent() && \"Got assumption for the wrong function!\""
, "llvm/lib/Analysis/ValueTracking.cpp", 914, __extension__ __PRETTY_FUNCTION__
))
;
915
916 // Warning: This loop can end up being somewhat performance sensitive.
917 // We're running this loop for once for each value queried resulting in a
918 // runtime of ~O(#assumes * #values).
919
920 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&(static_cast <bool> (I->getCalledFunction()->getIntrinsicID
() == Intrinsic::assume && "must be an assume intrinsic"
) ? void (0) : __assert_fail ("I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && \"must be an assume intrinsic\""
, "llvm/lib/Analysis/ValueTracking.cpp", 921, __extension__ __PRETTY_FUNCTION__
))
921 "must be an assume intrinsic")(static_cast <bool> (I->getCalledFunction()->getIntrinsicID
() == Intrinsic::assume && "must be an assume intrinsic"
) ? void (0) : __assert_fail ("I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && \"must be an assume intrinsic\""
, "llvm/lib/Analysis/ValueTracking.cpp", 921, __extension__ __PRETTY_FUNCTION__
))
;
922
923 Value *Arg = I->getArgOperand(0);
924
925 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
926 assert(BitWidth == 1 && "assume operand is not i1?")(static_cast <bool> (BitWidth == 1 && "assume operand is not i1?"
) ? void (0) : __assert_fail ("BitWidth == 1 && \"assume operand is not i1?\""
, "llvm/lib/Analysis/ValueTracking.cpp", 926, __extension__ __PRETTY_FUNCTION__
))
;
927 (void)BitWidth;
928 Known.setAllOnes();
929 return;
930 }
931 if (match(Arg, m_Not(m_Specific(V))) &&
932 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
933 assert(BitWidth == 1 && "assume operand is not i1?")(static_cast <bool> (BitWidth == 1 && "assume operand is not i1?"
) ? void (0) : __assert_fail ("BitWidth == 1 && \"assume operand is not i1?\""
, "llvm/lib/Analysis/ValueTracking.cpp", 933, __extension__ __PRETTY_FUNCTION__
))
;
934 (void)BitWidth;
935 Known.setAllZero();
936 return;
937 }
938
939 // The remaining tests are all recursive, so bail out if we hit the limit.
940 if (Depth == MaxAnalysisRecursionDepth)
941 continue;
942
943 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
944 if (!Cmp)
945 continue;
946
947 if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
948 continue;
949
950 computeKnownBitsFromCmp(V, Cmp, Known, Depth, Q);
951 }
952
953 // If assumptions conflict with each other or previous known bits, then we
954 // have a logical fallacy. It's possible that the assumption is not reachable,
955 // so this isn't a real bug. On the other hand, the program may have undefined
956 // behavior, or we might have a bug in the compiler. We can't assert/crash, so
957 // clear out the known bits, try to warn the user, and hope for the best.
958 if (Known.Zero.intersects(Known.One)) {
959 Known.resetAll();
960
961 if (Q.ORE)
962 Q.ORE->emit([&]() {
963 auto *CxtI = const_cast<Instruction *>(Q.CxtI);
964 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
965 CxtI)
966 << "Detected conflicting code assumptions. Program may "
967 "have undefined behavior, or compiler may have "
968 "internal error.";
969 });
970 }
971}
972
973/// Compute known bits from a shift operator, including those with a
974/// non-constant shift amount. Known is the output of this function. Known2 is a
975/// pre-allocated temporary with the same bit width as Known and on return
976/// contains the known bit of the shift value source. KF is an
977/// operator-specific function that, given the known-bits and a shift amount,
978/// compute the implied known-bits of the shift operator's result respectively
979/// for that shift amount. The results from calling KF are conservatively
980/// combined for all permitted shift amounts.
981static void computeKnownBitsFromShiftOperator(
982 const Operator *I, const APInt &DemandedElts, KnownBits &Known,
983 KnownBits &Known2, unsigned Depth, const Query &Q,
984 function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
985 unsigned BitWidth = Known.getBitWidth();
986 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
987 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
988
989 // Note: We cannot use Known.Zero.getLimitedValue() here, because if
990 // BitWidth > 64 and any upper bits are known, we'll end up returning the
991 // limit value (which implies all bits are known).
992 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
993 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
994 bool ShiftAmtIsConstant = Known.isConstant();
995 bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
996
997 if (ShiftAmtIsConstant) {
998 Known = KF(Known2, Known);
999
1000 // If the known bits conflict, this must be an overflowing left shift, so
1001 // the shift result is poison. We can return anything we want. Choose 0 for
1002 // the best folding opportunity.
1003 if (Known.hasConflict())
1004 Known.setAllZero();
1005
1006 return;
1007 }
1008
1009 // If the shift amount could be greater than or equal to the bit-width of the
1010 // LHS, the value could be poison, but bail out because the check below is
1011 // expensive.
1012 // TODO: Should we just carry on?
1013 if (MaxShiftAmtIsOutOfRange) {
1014 Known.resetAll();
1015 return;
1016 }
1017
1018 // It would be more-clearly correct to use the two temporaries for this
1019 // calculation. Reusing the APInts here to prevent unnecessary allocations.
1020 Known.resetAll();
1021
1022 // If we know the shifter operand is nonzero, we can sometimes infer more
1023 // known bits. However this is expensive to compute, so be lazy about it and
1024 // only compute it when absolutely necessary.
1025 std::optional<bool> ShifterOperandIsNonZero;
1026
1027 // Early exit if we can't constrain any well-defined shift amount.
1028 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1029 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1030 ShifterOperandIsNonZero =
1031 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1032 if (!*ShifterOperandIsNonZero)
1033 return;
1034 }
1035
1036 Known.Zero.setAllBits();
1037 Known.One.setAllBits();
1038 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1039 // Combine the shifted known input bits only for those shift amounts
1040 // compatible with its known constraints.
1041 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1042 continue;
1043 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1044 continue;
1045 // If we know the shifter is nonzero, we may be able to infer more known
1046 // bits. This check is sunk down as far as possible to avoid the expensive
1047 // call to isKnownNonZero if the cheaper checks above fail.
1048 if (ShiftAmt == 0) {
1049 if (!ShifterOperandIsNonZero)
1050 ShifterOperandIsNonZero =
1051 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1052 if (*ShifterOperandIsNonZero)
1053 continue;
1054 }
1055
1056 Known = KnownBits::commonBits(
1057 Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1058 }
1059
1060 // If the known bits conflict, the result is poison. Return a 0 and hope the
1061 // caller can further optimize that.
1062 if (Known.hasConflict())
1063 Known.setAllZero();
1064}
1065
1066static KnownBits getKnownBitsFromAndXorOr(const Operator *I,
1067 const APInt &DemandedElts,
1068 const KnownBits &KnownLHS,
1069 const KnownBits &KnownRHS,
1070 unsigned Depth, const Query &Q) {
1071 unsigned BitWidth = KnownLHS.getBitWidth();
1072 KnownBits KnownOut(BitWidth);
1073 bool IsAnd = false;
1074 bool HasKnownOne = !KnownLHS.One.isZero() || !KnownRHS.One.isZero();
1075 Value *X = nullptr, *Y = nullptr;
1076
1077 switch (I->getOpcode()) {
1078 case Instruction::And:
1079 KnownOut = KnownLHS & KnownRHS;
1080 IsAnd = true;
1081 // and(x, -x) is common idioms that will clear all but lowest set
1082 // bit. If we have a single known bit in x, we can clear all bits
1083 // above it.
1084 // TODO: instcombine often reassociates independent `and` which can hide
1085 // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x).
1086 if (HasKnownOne && match(I, m_c_And(m_Value(X), m_Neg(m_Deferred(X))))) {
1087 // -(-x) == x so using whichever (LHS/RHS) gets us a better result.
1088 if (KnownLHS.countMaxTrailingZeros() <= KnownRHS.countMaxTrailingZeros())
1089 KnownOut = KnownLHS.blsi();
1090 else
1091 KnownOut = KnownRHS.blsi();
1092 }
1093 break;
1094 case Instruction::Or:
1095 KnownOut = KnownLHS | KnownRHS;
1096 break;
1097 case Instruction::Xor:
1098 KnownOut = KnownLHS ^ KnownRHS;
1099 // xor(x, x-1) is common idioms that will clear all but lowest set
1100 // bit. If we have a single known bit in x, we can clear all bits
1101 // above it.
1102 // TODO: xor(x, x-1) is often rewritting as xor(x, x-C) where C !=
1103 // -1 but for the purpose of demanded bits (xor(x, x-C) &
1104 // Demanded) == (xor(x, x-1) & Demanded). Extend the xor pattern
1105 // to use arbitrary C if xor(x, x-C) as the same as xor(x, x-1).
1106 if (HasKnownOne &&
1107 match(I, m_c_Xor(m_Value(X), m_c_Add(m_Deferred(X), m_AllOnes())))) {
1108 const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
1109 KnownOut = XBits.blsmsk();
1110 }
1111 break;
1112 default:
1113 llvm_unreachable("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'")::llvm::llvm_unreachable_internal("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'"
, "llvm/lib/Analysis/ValueTracking.cpp", 1113)
;
1114 }
1115
1116 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1117 // xor/or(x, add (x, -1)) is an idiom that will always set the low bit.
1118 // here we handle the more general case of adding any odd number by
1119 // matching the form and/xor/or(x, add(x, y)) where y is odd.
1120 // TODO: This could be generalized to clearing any bit set in y where the
1121 // following bit is known to be unset in y.
1122 if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
1123 (match(I, m_c_BinOp(m_Value(X), m_c_Add(m_Deferred(X), m_Value(Y)))) ||
1124 match(I, m_c_BinOp(m_Value(X), m_Sub(m_Deferred(X), m_Value(Y)))) ||
1125 match(I, m_c_BinOp(m_Value(X), m_Sub(m_Value(Y), m_Deferred(X)))))) {
1126 KnownBits KnownY(BitWidth);
1127 computeKnownBits(Y, DemandedElts, KnownY, Depth + 1, Q);
1128 if (KnownY.countMinTrailingOnes() > 0) {
1129 if (IsAnd)
1130 KnownOut.Zero.setBit(0);
1131 else
1132 KnownOut.One.setBit(0);
1133 }
1134 }
1135 return KnownOut;
1136}
1137
1138// Public so this can be used in `SimplifyDemandedUseBits`.
1139KnownBits llvm::analyzeKnownBitsFromAndXorOr(
1140 const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS,
1141 unsigned Depth, const DataLayout &DL, AssumptionCache *AC,
1142 const Instruction *CxtI, const DominatorTree *DT,
1143 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
1144 auto *FVTy = dyn_cast<FixedVectorType>(I->getType());
1145 APInt DemandedElts =
1146 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
1147
1148 return getKnownBitsFromAndXorOr(
1149 I, DemandedElts, KnownLHS, KnownRHS, Depth,
1150 Query(DL, AC, safeCxtI(I, CxtI), DT, UseInstrInfo, ORE));
1151}
1152
1153ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
1154 Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
1155 // Without vscale_range, we only know that vscale is non-zero.
1156 if (!Attr.isValid())
1157 return ConstantRange(APInt(BitWidth, 1), APInt::getZero(BitWidth));
1158
1159 unsigned AttrMin = Attr.getVScaleRangeMin();
1160 // Minimum is larger than vscale width, result is always poison.
1161 if ((unsigned)llvm::bit_width(AttrMin) > BitWidth)
1162 return ConstantRange::getEmpty(BitWidth);
1163
1164 APInt Min(BitWidth, AttrMin);
1165 std::optional<unsigned> AttrMax = Attr.getVScaleRangeMax();
1166 if (!AttrMax || (unsigned)llvm::bit_width(*AttrMax) > BitWidth)
1167 return ConstantRange(Min, APInt::getZero(BitWidth));
1168
1169 return ConstantRange(Min, APInt(BitWidth, *AttrMax) + 1);
1170}
1171
1172static void computeKnownBitsFromOperator(const Operator *I,
1173 const APInt &DemandedElts,
1174 KnownBits &Known, unsigned Depth,
1175 const Query &Q) {
1176 unsigned BitWidth = Known.getBitWidth();
1177
1178 KnownBits Known2(BitWidth);
1179 switch (I->getOpcode()) {
1180 default: break;
1181 case Instruction::Load:
1182 if (MDNode *MD =
1183 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1184 computeKnownBitsFromRangeMetadata(*MD, Known);
1185 break;
1186 case Instruction::And:
1187 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1188 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1189
1190 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
1191 break;
1192 case Instruction::Or:
1193 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1194 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1195
1196 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
1197 break;
1198 case Instruction::Xor:
1199 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1200 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1201
1202 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
1203 break;
1204 case Instruction::Mul: {
1205 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1206 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1207 Known, Known2, Depth, Q);
1208 break;
1209 }
1210 case Instruction::UDiv: {
1211 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1212 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1213 Known = KnownBits::udiv(Known, Known2);
1214 break;
1215 }
1216 case Instruction::Select: {
1217 const Value *LHS = nullptr, *RHS = nullptr;
1218 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1219 if (SelectPatternResult::isMinOrMax(SPF)) {
1220 computeKnownBits(RHS, Known, Depth + 1, Q);
1221 computeKnownBits(LHS, Known2, Depth + 1, Q);
1222 switch (SPF) {
1223 default:
1224 llvm_unreachable("Unhandled select pattern flavor!")::llvm::llvm_unreachable_internal("Unhandled select pattern flavor!"
, "llvm/lib/Analysis/ValueTracking.cpp", 1224)
;
1225 case SPF_SMAX:
1226 Known = KnownBits::smax(Known, Known2);
1227 break;
1228 case SPF_SMIN:
1229 Known = KnownBits::smin(Known, Known2);
1230 break;
1231 case SPF_UMAX:
1232 Known = KnownBits::umax(Known, Known2);
1233 break;
1234 case SPF_UMIN:
1235 Known = KnownBits::umin(Known, Known2);
1236 break;
1237 }
1238 break;
1239 }
1240
1241 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1242 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1243
1244 // Only known if known in both the LHS and RHS.
1245 Known = KnownBits::commonBits(Known, Known2);
1246
1247 if (SPF == SPF_ABS) {
1248 // RHS from matchSelectPattern returns the negation part of abs pattern.
1249 // If the negate has an NSW flag we can assume the sign bit of the result
1250 // will be 0 because that makes abs(INT_MIN) undefined.
1251 if (match(RHS, m_Neg(m_Specific(LHS))) &&
1252 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
1253 Known.Zero.setSignBit();
1254 }
1255
1256 break;
1257 }
1258 case Instruction::FPTrunc:
1259 case Instruction::FPExt:
1260 case Instruction::FPToUI:
1261 case Instruction::FPToSI:
1262 case Instruction::SIToFP:
1263 case Instruction::UIToFP:
1264 break; // Can't work with floating point.
1265 case Instruction::PtrToInt:
1266 case Instruction::IntToPtr:
1267 // Fall through and handle them the same as zext/trunc.
1268 [[fallthrough]];
1269 case Instruction::ZExt:
1270 case Instruction::Trunc: {
1271 Type *SrcTy = I->getOperand(0)->getType();
1272
1273 unsigned SrcBitWidth;
1274 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1275 // which fall through here.
1276 Type *ScalarTy = SrcTy->getScalarType();
1277 SrcBitWidth = ScalarTy->isPointerTy() ?
1278 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1279 Q.DL.getTypeSizeInBits(ScalarTy);
1280
1281 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1282 Known = Known.anyextOrTrunc(SrcBitWidth);
1283 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1284 Known = Known.zextOrTrunc(BitWidth);
1285 break;
1286 }
1287 case Instruction::BitCast: {
1288 Type *SrcTy = I->getOperand(0)->getType();
1289 if (SrcTy->isIntOrPtrTy() &&
1290 // TODO: For now, not handling conversions like:
1291 // (bitcast i64 %x to <2 x i32>)
1292 !I->getType()->isVectorTy()) {
1293 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1294 break;
1295 }
1296
1297 // Handle cast from vector integer type to scalar or vector integer.
1298 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1299 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1300 !I->getType()->isIntOrIntVectorTy() ||
1301 isa<ScalableVectorType>(I->getType()))
1302 break;
1303
1304 // Look through a cast from narrow vector elements to wider type.
1305 // Examples: v4i32 -> v2i64, v3i8 -> i24
1306 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1307 if (BitWidth % SubBitWidth == 0) {
1308 // Known bits are automatically intersected across demanded elements of a
1309 // vector. So for example, if a bit is computed as known zero, it must be
1310 // zero across all demanded elements of the vector.
1311 //
1312 // For this bitcast, each demanded element of the output is sub-divided
1313 // across a set of smaller vector elements in the source vector. To get
1314 // the known bits for an entire element of the output, compute the known
1315 // bits for each sub-element sequentially. This is done by shifting the
1316 // one-set-bit demanded elements parameter across the sub-elements for
1317 // consecutive calls to computeKnownBits. We are using the demanded
1318 // elements parameter as a mask operator.
1319 //
1320 // The known bits of each sub-element are then inserted into place
1321 // (dependent on endian) to form the full result of known bits.
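// Worked example (assuming a little-endian layout): bitcasting
// <2 x i32> to i64 gives NumElts = 1 and SubScale = 2, so
// SubDemandedElts starts as 0b01. Iteration 0 computes the known bits
// of source element 0 and inserts them at bits [0,32); iteration 1
// shifts the mask to 0b10 and inserts element 1 at bits [32,64).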
1322 unsigned NumElts = DemandedElts.getBitWidth();
1323 unsigned SubScale = BitWidth / SubBitWidth;
1324 APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
1325 for (unsigned i = 0; i != NumElts; ++i) {
1326 if (DemandedElts[i])
1327 SubDemandedElts.setBit(i * SubScale);
1328 }
1329
1330 KnownBits KnownSrc(SubBitWidth);
1331 for (unsigned i = 0; i != SubScale; ++i) {
1332 computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
1333 Depth + 1, Q);
1334 unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
1335 Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
1336 }
1337 }
1338 break;
1339 }
1340 case Instruction::SExt: {
1341 // Compute the bits in the result that are not present in the input.
1342 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1343
1344 Known = Known.trunc(SrcBitWidth);
1345 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1346 // If the sign bit of the input is known set or clear, then we know the
1347 // top bits of the result.
1348 Known = Known.sext(BitWidth);
1349 break;
1350 }
1351 case Instruction::Shl: {
1352 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1353 auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1354 KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1355 // If this shift has the "nsw" flag, then the result is either a poison
1356 // value or has the same sign bit as the first operand.
1357 if (NSW) {
1358 if (KnownVal.Zero.isSignBitSet())
1359 Result.Zero.setSignBit();
1360 if (KnownVal.One.isSignBitSet())
1361 Result.One.setSignBit();
1362 }
1363 return Result;
1364 };
1365 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1366 KF);
1367 // Trailing zeros of a left-shifted constant never decrease.
1368 const APInt *C;
1369 if (match(I->getOperand(0), m_APInt(C)))
1370 Known.Zero.setLowBits(C->countr_zero());
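// For instance, with a constant first operand such as shl i8 12, %n
// (12 = 0b00001100), any left shift keeps at least the two trailing
// zeros, so the low two bits of the result are known zero.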
1371 break;
1372 }
1373 case Instruction::LShr: {
1374 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1375 return KnownBits::lshr(KnownVal, KnownAmt);
1376 };
1377 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1378 KF);
1379 // Leading zeros of a right-shifted constant never decrease.
1380 const APInt *C;
1381 if (match(I->getOperand(0), m_APInt(C)))
1382 Known.Zero.setHighBits(C->countl_zero());
1383 break;
1384 }
1385 case Instruction::AShr: {
1386 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1387 return KnownBits::ashr(KnownVal, KnownAmt);
1388 };
1389 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1390 KF);
1391 break;
1392 }
1393 case Instruction::Sub: {
1394 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1395 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1396 DemandedElts, Known, Known2, Depth, Q);
1397 break;
1398 }
1399 case Instruction::Add: {
1400 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1401 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1402 DemandedElts, Known, Known2, Depth, Q);
1403 break;
1404 }
1405 case Instruction::SRem:
1406 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1407 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1408 Known = KnownBits::srem(Known, Known2);
1409 break;
1410
1411 case Instruction::URem:
1412 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1413 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1414 Known = KnownBits::urem(Known, Known2);
1415 break;
1416 case Instruction::Alloca:
1417 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1418 break;
1419 case Instruction::GetElementPtr: {
1420 // Analyze all of the subscripts of this getelementptr instruction
1421 // to determine if we can prove known low zero bits.
1422 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1423 // Accumulate the constant indices in a separate variable
1424 // to minimize the number of calls to computeForAddSub.
1425 APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1426
1427 gep_type_iterator GTI = gep_type_begin(I);
1428 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1429 // Known low zero bits can only shrink here; short-circuit if nothing is known.
1430 if (Known.isUnknown())
1431 break;
1432
1433 Value *Index = I->getOperand(i);
1434
1435 // Handle case when index is zero.
1436 Constant *CIndex = dyn_cast<Constant>(Index);
1437 if (CIndex && CIndex->isZeroValue())
1438 continue;
1439
1440 if (StructType *STy = GTI.getStructTypeOrNull()) {
1441 // Handle struct member offset arithmetic.
1442
1443 assert(CIndex &&
1444 "Access to structure field must be known at compile time");
1445
1446 if (CIndex->getType()->isVectorTy())
1447 Index = CIndex->getSplatValue();
1448
1449 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1450 const StructLayout *SL = Q.DL.getStructLayout(STy);
1451 uint64_t Offset = SL->getElementOffset(Idx);
1452 AccConstIndices += Offset;
1453 continue;
1454 }
1455
1456 // Handle array index arithmetic.
1457 Type *IndexedTy = GTI.getIndexedType();
1458 if (!IndexedTy->isSized()) {
1459 Known.resetAll();
1460 break;
1461 }
1462
1463 unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1464 KnownBits IndexBits(IndexBitWidth);
1465 computeKnownBits(Index, IndexBits, Depth + 1, Q);
1466 TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1467 uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
1468 KnownBits ScalingFactor(IndexBitWidth);
1469 // Multiply by current sizeof type.
1470 // &A[i] == A + i * sizeof(*A[i]).
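// E.g. (hypothetical) an index into an array of i32 is scaled by 4
// here, so the scaled offset always has at least two low zero bits.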
1471 if (IndexTypeSize.isScalable()) {
1472 // For scalable types the only thing we know about sizeof is
1473 // that this is a multiple of the minimum size.
1474 ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
1475 } else if (IndexBits.isConstant()) {
1476 APInt IndexConst = IndexBits.getConstant();
1477 APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1478 IndexConst *= ScalingFactor;
1479 AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1480 continue;
1481 } else {
1482 ScalingFactor =
1483 KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1484 }
1485 IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
1486
1487 // If the offsets have a different width from the pointer, according
1488 // to the language reference we need to sign-extend or truncate them
1489 // to the width of the pointer.
1490 IndexBits = IndexBits.sextOrTrunc(BitWidth);
1491
1492 // Note that inbounds does *not* guarantee nsw for the addition, as only
1493 // the offset is signed, while the base address is unsigned.
1494 Known = KnownBits::computeForAddSub(
1495 /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1496 }
1497 if (!Known.isUnknown() && !AccConstIndices.isZero()) {
1498 KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1499 Known = KnownBits::computeForAddSub(
1500 /*Add=*/true, /*NSW=*/false, Known, Index);
1501 }
1502 break;
1503 }
1504 case Instruction::PHI: {
1505 const PHINode *P = cast<PHINode>(I);
1506 BinaryOperator *BO = nullptr;
1507 Value *R = nullptr, *L = nullptr;
1508 if (matchSimpleRecurrence(P, BO, R, L)) {
1509 // Handle the case of a simple two-predecessor recurrence PHI.
1510 // There's a lot more that could theoretically be done here, but
1511 // this is sufficient to catch some interesting cases.
1512 unsigned Opcode = BO->getOpcode();
1513
1514 // If this is a shift recurrence, we know the bits being shifted in.
1515 // We can combine that with information about the start value of the
1516 // recurrence to conclude facts about the result.
1517 if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1518 Opcode == Instruction::Shl) &&
1519 BO->getOperand(0) == I) {
1520
1521 // We have matched a recurrence of the form:
1522 // %iv = [R, %entry], [%iv.next, %backedge]
1523 // %iv.next = shift_op %iv, L
1524
1525 // Recurse with the phi context to avoid concern about whether facts
1526 // inferred hold at original context instruction. TODO: It may be
1527 // correct to use the original context. If warranted, explore and
1528 // add sufficient tests to cover.
1529 Query RecQ = Q;
1530 RecQ.CxtI = P;
1531 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1532 switch (Opcode) {
1533 case Instruction::Shl:
1534 // A shl recurrence will only increase the trailing zeros.
1535 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1536 break;
1537 case Instruction::LShr:
1538 // A lshr recurrence will preserve the leading zeros of the
1539 // start value
1540 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1541 break;
1542 case Instruction::AShr:
1543 // An ashr recurrence will extend the initial sign bit
1544 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1545 Known.One.setHighBits(Known2.countMinLeadingOnes());
1546 break;
1547 };
1548 }
1549
1550 // Check for operations that have the property that if
1551 // both their operands have low zero bits, the result
1552 // will have low zero bits.
1553 if (Opcode == Instruction::Add ||
1554 Opcode == Instruction::Sub ||
1555 Opcode == Instruction::And ||
1556 Opcode == Instruction::Or ||
1557 Opcode == Instruction::Mul) {
1558 // Change the context instruction to the "edge" that flows into the
1559 // phi. This is important because that is where the value is actually
1560 // "evaluated" even though it is used later somewhere else. (see also
1561 // D69571).
1562 Query RecQ = Q;
1563
1564 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1565 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1566 Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1567
1568 // Ok, we have a PHI of the form L op= R. Check for low
1569 // zero bits.
1570 RecQ.CxtI = RInst;
1571 computeKnownBits(R, Known2, Depth + 1, RecQ);
1572
1573 // We need to take the minimum number of known bits
1574 KnownBits Known3(BitWidth);
1575 RecQ.CxtI = LInst;
1576 computeKnownBits(L, Known3, Depth + 1, RecQ);
1577
1578 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1579 Known3.countMinTrailingZeros()));
1580
1581 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1582 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1583 // If initial value of recurrence is nonnegative, and we are adding
1584 // a nonnegative number with nsw, the result can only be nonnegative
1585 // or poison value regardless of the number of times we execute the
1586 // add in phi recurrence. If initial value is negative and we are
1587 // adding a negative number with nsw, the result can only be
1588 // negative or poison value. Similar arguments apply to sub and mul.
1589 //
1590 // (add non-negative, non-negative) --> non-negative
1591 // (add negative, negative) --> negative
1592 if (Opcode == Instruction::Add) {
1593 if (Known2.isNonNegative() && Known3.isNonNegative())
1594 Known.makeNonNegative();
1595 else if (Known2.isNegative() && Known3.isNegative())
1596 Known.makeNegative();
1597 }
1598
1599 // (sub nsw non-negative, negative) --> non-negative
1600 // (sub nsw negative, non-negative) --> negative
1601 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1602 if (Known2.isNonNegative() && Known3.isNegative())
1603 Known.makeNonNegative();
1604 else if (Known2.isNegative() && Known3.isNonNegative())
1605 Known.makeNegative();
1606 }
1607
1608 // (mul nsw non-negative, non-negative) --> non-negative
1609 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1610 Known3.isNonNegative())
1611 Known.makeNonNegative();
1612 }
1613
1614 break;
1615 }
1616 }
1617
1618 // Unreachable blocks may have zero-operand PHI nodes.
1619 if (P->getNumIncomingValues() == 0)
1620 break;
1621
1622 // Otherwise take the unions of the known bit sets of the operands,
1623 // taking conservative care to avoid excessive recursion.
1624 if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1625 // Skip if every incoming value refers back to the PHI itself.
1626 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1627 break;
1628
1629 Known.Zero.setAllBits();
1630 Known.One.setAllBits();
1631 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1632 Value *IncValue = P->getIncomingValue(u);
1633 // Skip direct self references.
1634 if (IncValue == P) continue;
1635
1636 // Change the context instruction to the "edge" that flows into the
1637 // phi. This is important because that is where the value is actually
1638 // "evaluated" even though it is used later somewhere else. (see also
1639 // D69571).
1640 Query RecQ = Q;
1641 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1642
1643 Known2 = KnownBits(BitWidth);
1644
1645 // Recurse, but cap the recursion to one level, because we don't
1646 // want to waste time spinning around in loops.
1647 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1648
1649 // If this failed, see if we can use a conditional branch into the phi
1650 // to help us determine the range of the value.
1651 if (Known2.isUnknown()) {
1652 ICmpInst::Predicate Pred;
1653 const APInt *RHSC;
1654 BasicBlock *TrueSucc, *FalseSucc;
1655 // TODO: Use RHS Value and compute range from its known bits.
1656 if (match(RecQ.CxtI,
1657 m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)),
1658 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
1659 // Check for cases of duplicate successors.
1660 if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) {
1661 // If we're using the false successor, invert the predicate.
1662 if (FalseSucc == P->getParent())
1663 Pred = CmpInst::getInversePredicate(Pred);
1664
1665 switch (Pred) {
1666 case CmpInst::Predicate::ICMP_EQ:
1667 Known2 = KnownBits::makeConstant(*RHSC);
1668 break;
1669 case CmpInst::Predicate::ICMP_ULE:
1670 Known2.Zero.setHighBits(RHSC->countl_zero());
1671 break;
1672 case CmpInst::Predicate::ICMP_ULT:
1673 Known2.Zero.setHighBits((*RHSC - 1).countl_zero());
1674 break;
1675 default:
1676 // TODO - add additional integer predicate handling.
1677 break;
1678 }
1679 }
1680 }
1681 }
1682
1683 Known = KnownBits::commonBits(Known, Known2);
1684 // If all bits have been ruled out, there's no need to check
1685 // more operands.
1686 if (Known.isUnknown())
1687 break;
1688 }
1689 }
1690 break;
1691 }
1692 case Instruction::Call:
1693 case Instruction::Invoke:
1694 // If range metadata is attached to this call, set known bits from that,
1695 // and then intersect with known bits based on other properties of the
1696 // function.
1697 if (MDNode *MD =
1698 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1699 computeKnownBitsFromRangeMetadata(*MD, Known);
1700 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1701 computeKnownBits(RV, Known2, Depth + 1, Q);
1702 Known.Zero |= Known2.Zero;
1703 Known.One |= Known2.One;
1704 }
1705 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1706 switch (II->getIntrinsicID()) {
1707 default: break;
1708 case Intrinsic::abs: {
1709 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1710 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1711 Known = Known2.abs(IntMinIsPoison);
1712 break;
1713 }
1714 case Intrinsic::bitreverse:
1715 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1716 Known.Zero |= Known2.Zero.reverseBits();
1717 Known.One |= Known2.One.reverseBits();
1718 break;
1719 case Intrinsic::bswap:
1720 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1721 Known.Zero |= Known2.Zero.byteSwap();
1722 Known.One |= Known2.One.byteSwap();
1723 break;
1724 case Intrinsic::ctlz: {
1725 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1726 // If we have a known 1, its position is our upper bound.
1727 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1728 // If this call is poison for 0 input, the result will be less than 2^n.
1729 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1730 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1731 unsigned LowBits = llvm::bit_width(PossibleLZ);
1732 Known.Zero.setBitsFrom(LowBits);
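// Worked example (hypothetical i32 input): if bit 5 is known one,
// PossibleLZ = 26 and bit_width(26) = 5, so the count fits in 5 bits
// and bits 5 and above of the result are known zero.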
1733 break;
1734 }
1735 case Intrinsic::cttz: {
1736 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1737 // If we have a known 1, its position is our upper bound.
1738 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1739 // If this call is poison for 0 input, the result will be less than 2^n.
1740 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1741 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1742 unsigned LowBits = llvm::bit_width(PossibleTZ);
1743 Known.Zero.setBitsFrom(LowBits);
1744 break;
1745 }
1746 case Intrinsic::ctpop: {
1747 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1748 // We can bound the space the count needs. Also, bits known to be zero
1749 // can't contribute to the population.
1750 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1751 unsigned LowBits = llvm::bit_width(BitsPossiblySet);
1752 Known.Zero.setBitsFrom(LowBits);
1753 // TODO: we could bound KnownOne using the lower bound on the number
1754 // of bits which might be set provided by popcnt KnownOne2.
1755 break;
1756 }
1757 case Intrinsic::fshr:
1758 case Intrinsic::fshl: {
1759 const APInt *SA;
1760 if (!match(I->getOperand(2), m_APInt(SA)))
1761 break;
1762
1763 // Normalize to funnel shift left.
1764 uint64_t ShiftAmt = SA->urem(BitWidth);
1765 if (II->getIntrinsicID() == Intrinsic::fshr)
1766 ShiftAmt = BitWidth - ShiftAmt;
1767
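// E.g. for i8 fshr(%a, %b, 3), ShiftAmt becomes 8 - 3 = 5, since
// fshr(a, b, s) == fshl(a, b, BitWidth - s); the result then combines
// (a << 5) with (b >> 3), mirroring the computation below.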
1768 KnownBits Known3(BitWidth);
1769 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1770 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1771
1772 Known.Zero =
1773 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1774 Known.One =
1775 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1776 break;
1777 }
1778 case Intrinsic::uadd_sat:
1779 case Intrinsic::usub_sat: {
1780 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1781 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1782 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1783
1784 // Add: Leading ones of either operand are preserved.
1785 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1786 // as leading zeros in the result.
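// Sketch of why (hypothetical i8 values): uadd.sat(a, b) >= max(a, b),
// so if a >= 0xF0 the saturated sum keeps the top four ones; usub.sat
// with b >= 0xF0 yields a result <= 0x0F, keeping top four zeros.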
1787 unsigned LeadingKnown;
1788 if (IsAdd)
1789 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1790 Known2.countMinLeadingOnes());
1791 else
1792 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1793 Known2.countMinLeadingOnes());
1794
1795 Known = KnownBits::computeForAddSub(
1796 IsAdd, /* NSW */ false, Known, Known2);
1797
1798 // We select between the operation result and all-ones/zero
1799 // respectively, so we can preserve known ones/zeros.
1800 if (IsAdd) {
1801 Known.One.setHighBits(LeadingKnown);
1802 Known.Zero.clearAllBits();
1803 } else {
1804 Known.Zero.setHighBits(LeadingKnown);
1805 Known.One.clearAllBits();
1806 }
1807 break;
1808 }
1809 case Intrinsic::umin:
1810 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1811 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1812 Known = KnownBits::umin(Known, Known2);
1813 break;
1814 case Intrinsic::umax:
1815 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1816 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1817 Known = KnownBits::umax(Known, Known2);
1818 break;
1819 case Intrinsic::smin:
1820 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1821 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1822 Known = KnownBits::smin(Known, Known2);
1823 break;
1824 case Intrinsic::smax:
1825 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1826 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1827 Known = KnownBits::smax(Known, Known2);
1828 break;
1829 case Intrinsic::x86_sse42_crc32_64_64:
1830 Known.Zero.setBitsFrom(32);
1831 break;
1832 case Intrinsic::riscv_vsetvli:
1833 case Intrinsic::riscv_vsetvlimax:
1834 // Assume that VL output is <= 65536.
1835 // TODO: Take SEW and LMUL into account.
1836 if (BitWidth > 17)
1837 Known.Zero.setBitsFrom(17);
1838 break;
1839 case Intrinsic::vscale: {
1840 if (!II->getParent() || !II->getFunction())
1841 break;
1842
1843 Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits();
1844 break;
1845 }
1846 }
1847 }
1848 break;
1849 case Instruction::ShuffleVector: {
1850 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1851 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1852 if (!Shuf) {
1853 Known.resetAll();
1854 return;
1855 }
1856 // For undef elements, we don't know anything about the common state of
1857 // the shuffle result.
1858 APInt DemandedLHS, DemandedRHS;
1859 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1860 Known.resetAll();
1861 return;
1862 }
1863 Known.One.setAllBits();
1864 Known.Zero.setAllBits();
1865 if (!!DemandedLHS) {
1866 const Value *LHS = Shuf->getOperand(0);
1867 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1868 // If we don't know any bits, early out.
1869 if (Known.isUnknown())
1870 break;
1871 }
1872 if (!!DemandedRHS) {
1873 const Value *RHS = Shuf->getOperand(1);
1874 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1875 Known = KnownBits::commonBits(Known, Known2);
1876 }
1877 break;
1878 }
1879 case Instruction::InsertElement: {
1880 if (isa<ScalableVectorType>(I->getType())) {
1881 Known.resetAll();
1882 return;
1883 }
1884 const Value *Vec = I->getOperand(0);
1885 const Value *Elt = I->getOperand(1);
1886 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1887 // Early out if the index is non-constant or out-of-range.
1888 unsigned NumElts = DemandedElts.getBitWidth();
1889 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1890 Known.resetAll();
1891 return;
1892 }
1893 Known.One.setAllBits();
1894 Known.Zero.setAllBits();
1895 unsigned EltIdx = CIdx->getZExtValue();
1896 // Do we demand the inserted element?
1897 if (DemandedElts[EltIdx]) {
1898 computeKnownBits(Elt, Known, Depth + 1, Q);
1899 // If we don't know any bits, early out.
1900 if (Known.isUnknown())
1901 break;
1902 }
1903 // We don't need the base vector element that has been inserted.
1904 APInt DemandedVecElts = DemandedElts;
1905 DemandedVecElts.clearBit(EltIdx);
1906 if (!!DemandedVecElts) {
1907 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1908 Known = KnownBits::commonBits(Known, Known2);
1909 }
1910 break;
1911 }
1912 case Instruction::ExtractElement: {
1913 // Look through extract element. If the index is non-constant or
1914 // out-of-range demand all elements, otherwise just the extracted element.
1915 const Value *Vec = I->getOperand(0);
1916 const Value *Idx = I->getOperand(1);
1917 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1918 if (isa<ScalableVectorType>(Vec->getType())) {
1919 // FIXME: there's probably *something* we can do with scalable vectors
1920 Known.resetAll();
1921 break;
1922 }
1923 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1924 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1925 if (CIdx && CIdx->getValue().ult(NumElts))
1926 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1927 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1928 break;
1929 }
1930 case Instruction::ExtractValue:
1931 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1932 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1933 if (EVI->getNumIndices() != 1) break;
1934 if (EVI->getIndices()[0] == 0) {
1935 switch (II->getIntrinsicID()) {
1936 default: break;
1937 case Intrinsic::uadd_with_overflow:
1938 case Intrinsic::sadd_with_overflow:
1939 computeKnownBitsAddSub(true, II->getArgOperand(0),
1940 II->getArgOperand(1), false, DemandedElts,
1941 Known, Known2, Depth, Q);
1942 break;
1943 case Intrinsic::usub_with_overflow:
1944 case Intrinsic::ssub_with_overflow:
1945 computeKnownBitsAddSub(false, II->getArgOperand(0),
1946 II->getArgOperand(1), false, DemandedElts,
1947 Known, Known2, Depth, Q);
1948 break;
1949 case Intrinsic::umul_with_overflow:
1950 case Intrinsic::smul_with_overflow:
1951 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1952 DemandedElts, Known, Known2, Depth, Q);
1953 break;
1954 }
1955 }
1956 }
1957 break;
1958 case Instruction::Freeze:
1959 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1960 Depth + 1))
1961 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1962 break;
1963 }
1964}
1965
1966/// Determine which bits of V are known to be either zero or one and return
1967/// them.
1968KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1969 unsigned Depth, const Query &Q) {
1970 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1971 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1972 return Known;
1973}
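// Usage sketch (hypothetical caller): for %v = and i8 %x, -16 the
// returned KnownBits has Zero = 0x0F, i.e. the low four bits of %v are
// known zero regardless of %x.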
1974
1975/// Determine which bits of V are known to be either zero or one and return
1976/// them.
1977KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1978 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1979 computeKnownBits(V, Known, Depth, Q);
1980 return Known;
1981}
1982
1983/// Determine which bits of V are known to be either zero or one and return
1984/// them in the Known bit set.
1985///
1986/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1987/// we cannot optimize based on the assumption that it is zero without changing
1988/// it to be an explicit zero. If we don't change it to zero, other code could
1989 /// be optimized based on the contradictory assumption that it is non-zero.
1990/// Because instcombine aggressively folds operations with undef args anyway,
1991/// this won't lose us code quality.
1992///
1993/// This function is defined on values with integer type, values with pointer
1994/// type, and vectors of integers. In the case
1995 /// where V is a vector, the known-zero and known-one values are the
1996 /// same width as the vector element, and a bit is set only if it is true
1997/// for all of the demanded elements in the vector specified by DemandedElts.
1998void computeKnownBits(const Value *V, const APInt &DemandedElts,
1999 KnownBits &Known, unsigned Depth, const Query &Q) {
2000 if (!DemandedElts) {
2001 // No demanded elts, better to assume we don't know anything.
2002 Known.resetAll();
2003 return;
2004 }
2005
2006 assert(V && "No Value?");
2007 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2008
2009#ifndef NDEBUG
2010 Type *Ty = V->getType();
2011 unsigned BitWidth = Known.getBitWidth();
2012
2013 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
2014 "Not integer or pointer type!");
2015
2016 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2017 assert(
2018 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2019 "DemandedElt width should equal the fixed vector number of elements");
2020 } else {
2021 assert(DemandedElts == APInt(1, 1) &&
2022 "DemandedElt width should be 1 for scalars or scalable vectors");
2023 }
2024
2025 Type *ScalarTy = Ty->getScalarType();
2026 if (ScalarTy->isPointerTy()) {
2027 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
2028 "V and Known should have same BitWidth");
2029 } else {
2030 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
2031 "V and Known should have same BitWidth");
2032 }
2033#endif
2034
2035 const APInt *C;
2036 if (match(V, m_APInt(C))) {
2037 // We know all of the bits for a scalar constant or a splat vector constant!
2038 Known = KnownBits::makeConstant(*C);
2039 return;
2040 }
2041 // Null and aggregate-zero are all-zeros.
2042 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
2043 Known.setAllZero();
2044 return;
2045 }
2046 // Handle a constant vector by taking the intersection of the known bits of
2047 // each element.
2048 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
2049 assert(!isa<ScalableVectorType>(V->getType()));
2050 // We know that CDV must be a vector of integers. Take the intersection of
2051 // each element.
2052 Known.Zero.setAllBits(); Known.One.setAllBits();
2053 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
2054 if (!DemandedElts[i])
2055 continue;
2056 APInt Elt = CDV->getElementAsAPInt(i);
2057 Known.Zero &= ~Elt;
2058 Known.One &= Elt;
2059 }
2060 return;
2061 }
2062
2063 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
2064 assert(!isa<ScalableVectorType>(V->getType()));
2065 // We know that CV must be a vector of integers. Take the intersection of
2066 // each element.
2067 Known.Zero.setAllBits(); Known.One.setAllBits();
2068 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
2069 if (!DemandedElts[i])
2070 continue;
2071 Constant *Element = CV->getAggregateElement(i);
2072 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
2073 if (!ElementCI) {
2074 Known.resetAll();
2075 return;
2076 }
2077 const APInt &Elt = ElementCI->getValue();
2078 Known.Zero &= ~Elt;
2079 Known.One &= Elt;
2080 }
2081 return;
2082 }
2083
2084 // Start out not knowing anything.
2085 Known.resetAll();
2086
2087 // We can't imply anything about undefs.
2088 if (isa<UndefValue>(V))
2089 return;
2090
2091 // There's no point in looking through other users of ConstantData for
2092 // assumptions. Confirm that we've handled them all.
2093 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
2094
2095 // All recursive calls that increase depth must come after this.
2096 if (Depth == MaxAnalysisRecursionDepth)
2097 return;
2098
2099 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2100 // the bits of its aliasee.
2101 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2102 if (!GA->isInterposable())
2103 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2104 return;
2105 }
2106
2107 if (const Operator *I = dyn_cast<Operator>(V))
2108 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2109
2110 // Aligned pointers have trailing zeros - refine Known.Zero set
2111 if (isa<PointerType>(V->getType())) {
2112 Align Alignment = V->getPointerAlignment(Q.DL);
2113 Known.Zero.setLowBits(Log2(Alignment));
2114 }
2115
2116 // computeKnownBitsFromAssume strictly refines Known.
2117 // Therefore, we run them after computeKnownBitsFromOperator.
2118
2119 // Check whether a nearby assume intrinsic can determine some known bits.
2120 computeKnownBitsFromAssume(V, Known, Depth, Q);
2121
2122 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2123}
2124
2125/// Try to detect a recurrence that the value of the induction variable is
2126/// always a power of two (or zero).
2127static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
2128 unsigned Depth, Query &Q) {
2129 BinaryOperator *BO = nullptr;
2130 Value *Start = nullptr, *Step = nullptr;
2131 if (!matchSimpleRecurrence(PN, BO, Start, Step))
2132 return false;
2133
2134 // Initial value must be a power of two.
2135 for (const Use &U : PN->operands()) {
2136 if (U.get() == Start) {
2137 // Initial value comes from a different BB, need to adjust context
2138 // instruction for analysis.
2139 Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
2140 if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q))
2141 return false;
2142 }
2143 }
2144
2145 // Except for Mul, the induction variable must be on the left side of the
2146 // increment expression, otherwise its value can be arbitrary.
2147 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step)
2148 return false;
2149
2150 Q.CxtI = BO->getParent()->getTerminator();
2151 switch (BO->getOpcode()) {
2152 case Instruction::Mul:
2153 // Power of two is closed under multiplication.
2154 return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
2155 Q.IIQ.hasNoSignedWrap(BO)) &&
2156 isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q);
2157 case Instruction::SDiv:
2158 // Start value must not be signmask for signed division, so simply being a
2159 // power of two is not sufficient, and it has to be a constant.
2160 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2161 return false;
2162 [[fallthrough]];
2163 case Instruction::UDiv:
2164 // Divisor must be a power of two.
2165 // If OrZero is false, we cannot guarantee the induction variable is
2166 // non-zero after the division (likewise for Shr) unless it is exact.
2167 return (OrZero || Q.IIQ.isExact(BO)) &&
2168 isKnownToBeAPowerOfTwo(Step, false, Depth, Q);
2169 case Instruction::Shl:
2170 return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
2171 case Instruction::AShr:
2172 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2173 return false;
2174 [[fallthrough]];
2175 case Instruction::LShr:
2176 return OrZero || Q.IIQ.isExact(BO);
2177 default:
2178 return false;
2179 }
2180}
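// Example recurrence this matches (hypothetical IR):
//   %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
//   %iv.next = shl nuw i32 %iv, 1
// The start value 1 is a power of two, and shl with nuw can never shift
// the single set bit out, so %iv stays a power of two.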
2181
2182/// Return true if the given value is known to have exactly one
2183/// bit set when defined. For vectors return true if every element is known to
2184/// be a power of two when defined. Supports values with integer or pointer
2185/// types and vectors of integers.
2186bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2187 const Query &Q) {
2188 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2189
2190 // Attempt to match against constants.
2191 if (OrZero && match(V, m_Power2OrZero()))
2192 return true;
2193 if (match(V, m_Power2()))
2194 return true;
2195
2196 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2197 // it is shifted off the end then the result is undefined.
2198 if (match(V, m_Shl(m_One(), m_Value())))
2199 return true;
2200
2201 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2202 // the bottom. If it is shifted off the bottom then the result is undefined.
2203 if (match(V, m_LShr(m_SignMask(), m_Value())))
2204 return true;
2205
2206 // The remaining tests are all recursive, so bail out if we hit the limit.
2207 if (Depth++ == MaxAnalysisRecursionDepth)
2208 return false;
2209
2210 Value *X = nullptr, *Y = nullptr;
2211 // A shift left or a logical shift right of a power of two is a power of two
2212 // or zero.
2213 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2214 match(V, m_LShr(m_Value(X), m_Value()))))
2215 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2216
2217 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2218 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2219
2220 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2221 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2222 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2223
2224 // Peek through min/max.
2225 if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
2226 return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
2227 isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
2228 }
2229
2230 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2231 // A power of two and'd with anything is a power of two or zero.
2232 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2233 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2234 return true;
2235 // X & (-X) is always a power of two or zero.
2236 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2237 return true;
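// (For example, X = 0b10100 gives X & -X = 0b00100: the lowest set
// bit, or zero when X itself is zero.)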
2238 return false;
2239 }
2240
2241 // Adding a power-of-two or zero to the same power-of-two or zero yields
2242 // either the original power-of-two, a larger power-of-two or zero.
2243 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2244 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2245 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2246 Q.IIQ.hasNoSignedWrap(VOBO)) {
2247 if (match(X, m_And(m_Specific(Y), m_Value())) ||
2248 match(X, m_And(m_Value(), m_Specific(Y))))
2249 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2250 return true;
2251 if (match(Y, m_And(m_Specific(X), m_Value())) ||
2252 match(Y, m_And(m_Value(), m_Specific(X))))
2253 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2254 return true;
2255
2256 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2257 KnownBits LHSBits(BitWidth);
2258 computeKnownBits(X, LHSBits, Depth, Q);
2259
2260 KnownBits RHSBits(BitWidth);
2261 computeKnownBits(Y, RHSBits, Depth, Q);
2262 // If i8 V is a power of two or zero:
2263 // ZeroBits: 1 1 1 0 1 1 1 1
2264 // ~ZeroBits: 0 0 0 1 0 0 0 0
2265 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2266 // If OrZero isn't set, we cannot give back a zero result.
2267 // Make sure either the LHS or RHS has a bit set.
2268 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2269 return true;
2270 }
2271 }
2272
2273 // A PHI node is power of two if all incoming values are power of two, or if
2274 // it is an induction variable where in each step its value is a power of two.
2275 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2276 Query RecQ = Q;
2277
2278 // Check if it is an induction variable and always power of two.
2279 if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
2280 return true;
2281
2282 // Recursively check all incoming values. Limit recursion to 2 levels, so
2283 // that search complexity is limited to number of operands^2.
2284 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2285 return llvm::all_of(PN->operands(), [&](const Use &U) {
2286 // Value is power of 2 if it is coming from PHI node itself by induction.
2287 if (U.get() == PN)
2288 return true;
2289
2290 // Change the context instruction to the incoming block where it is
2291 // evaluated.
2292 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2293 return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
2294 });
2295 }
2296
2297 // An exact divide or right shift can only shift off zero bits, so the result
2298 // is a power of two only if the first operand is a power of two and not
2299 // copying a sign bit (sdiv int_min, 2).
2300 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2301 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2302 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2303 Depth, Q);
2304 }
2305
2306 return false;
2307}
2308
2309/// Test whether a GEP's result is known to be non-null.
2310///
2311/// Uses properties inherent in a GEP to try to determine whether it is known
2312/// to be non-null.
2313///
2314/// Currently this routine does not support vector GEPs.
2315static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2316 const Query &Q) {
2317 const Function *F = nullptr;
2318 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2319 F = I->getFunction();
2320
2321 if (!GEP->isInBounds() ||
2322 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2323 return false;
2324
2325 // FIXME: Support vector-GEPs.
2326 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2327
2328 // If the base pointer is non-null, we cannot walk to a null address with an
2329 // inbounds GEP in address space zero.
2330 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2331 return true;
2332
2333 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2334 // If so, then the GEP cannot produce a null pointer, as doing so would
2335 // inherently violate the inbounds contract within address space zero.
2336 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2337 GTI != GTE; ++GTI) {
2338 // Struct types are easy -- they must always be indexed by a constant.
2339 if (StructType *STy = GTI.getStructTypeOrNull()) {
2340 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2341 unsigned ElementIdx = OpC->getZExtValue();
2342 const StructLayout *SL = Q.DL.getStructLayout(STy);
2343 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2344 if (ElementOffset > 0)
2345 return true;
2346 continue;
2347 }
2348
2349 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2350 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).isZero())
2351 continue;
2352
2353 // Fast path the constant operand case both for efficiency and so we don't
2354 // increment Depth when just zipping down an all-constant GEP.
2355 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2356 if (!OpC->isZero())
2357 return true;
2358 continue;
2359 }
2360
2361 // We post-increment Depth here because while isKnownNonZero increments it
2362 // as well, when we pop back up that increment won't persist. We don't want
2363 // to recurse 10k times just because we have 10k GEP operands. We don't
2364 // bail completely out because we want to handle constant GEPs regardless
2365 // of depth.
2366 if (Depth++ >= MaxAnalysisRecursionDepth)
2367 continue;
2368
2369 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2370 return true;
2371 }
2372
2373 return false;
2374}
2375
2376static bool isKnownNonNullFromDominatingCondition(const Value *V,
2377 const Instruction *CtxI,
2378 const DominatorTree *DT) {
2379 assert(!isa<Constant>(V) && "Called for constant?");
2380
2381 if (!CtxI || !DT)
2382 return false;
2383
2384 unsigned NumUsesExplored = 0;
2385 for (const auto *U : V->users()) {
2386 // Avoid massive lists
2387 if (NumUsesExplored >= DomConditionsMaxUses)
2388 break;
2389 NumUsesExplored++;
2390
2391 // If the value is used as an argument to a call or invoke, then argument
2392 // attributes may provide an answer about null-ness.
2393 if (const auto *CB = dyn_cast<CallBase>(U))
2394 if (auto *CalledFunc = CB->getCalledFunction())
2395 for (const Argument &Arg : CalledFunc->args())
2396 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2397 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2398 DT->dominates(CB, CtxI))
2399 return true;
2400
2401 // If the value is used as a load/store, then the pointer must be non null.
2402 if (V == getLoadStorePointerOperand(U)) {
2403 const Instruction *I = cast<Instruction>(U);
2404 if (!NullPointerIsDefined(I->getFunction(),
2405 V->getType()->getPointerAddressSpace()) &&
2406 DT->dominates(I, CtxI))
2407 return true;
2408 }
2409
2410 // Consider only compare instructions uniquely controlling a branch
2411 Value *RHS;
2412 CmpInst::Predicate Pred;
2413 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2414 continue;
2415
2416 bool NonNullIfTrue;
2417 if (cmpExcludesZero(Pred, RHS))
2418 NonNullIfTrue = true;
2419 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2420 NonNullIfTrue = false;
2421 else
2422 continue;
2423
2424 SmallVector<const User *, 4> WorkList;
2425 SmallPtrSet<const User *, 4> Visited;
2426 for (const auto *CmpU : U->users()) {
2427 assert(WorkList.empty() && "Should be!");
2428 if (Visited.insert(CmpU).second)
2429 WorkList.push_back(CmpU);
2430
2431 while (!WorkList.empty()) {
2432 auto *Curr = WorkList.pop_back_val();
2433
2434 // If a user is an AND, add all its users to the work list. We only
2435 // propagate "pred != null" condition through AND because it is only
2436 // correct to assume that all conditions of AND are met in true branch.
2437 // TODO: Support similar logic of OR and EQ predicate?
2438 if (NonNullIfTrue)
2439 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2440 for (const auto *CurrU : Curr->users())
2441 if (Visited.insert(CurrU).second)
2442 WorkList.push_back(CurrU);
2443 continue;
2444 }
2445
2446 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2447 assert(BI->isConditional() && "uses a comparison!");
2448
2449 BasicBlock *NonNullSuccessor =
2450 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2451 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2452 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2453 return true;
2454 } else if (NonNullIfTrue && isGuard(Curr) &&
2455 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2456 return true;
2457 }
2458 }
2459 }
2460 }
2461
2462 return false;
2463}
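// Typical shape this recognizes (hypothetical IR):
//   %c = icmp ne ptr %p, null
//   br i1 %c, label %nonnull_bb, label %exit
// When the edge into %nonnull_bb dominates the context instruction,
// %p is known non-null there.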
2464
2465 /// Does the 'Ranges' metadata (which must be a valid MD_range operand list)
2466 /// ensure that the value it's attached to is never equal to 'Value'?
2467 /// The comparison is done in the type of the value described by the ranges.
2468static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2469 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2470 assert(NumRanges >= 1);
2471 for (unsigned i = 0; i < NumRanges; ++i) {
2472 ConstantInt *Lower =
2473 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2474 ConstantInt *Upper =
2475 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2476 ConstantRange Range(Lower->getValue(), Upper->getValue());
2477 if (Range.contains(Value))
2478 return false;
2479 }
2480 return true;
2481}
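// E.g. !range !{i32 1, i32 256} describes [1, 256), which does not
// contain 0, so this returns true for Value == 0.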
2482
2483/// Try to detect a recurrence that monotonically increases/decreases from a
2484/// non-zero starting value. These are common as induction variables.
2485static bool isNonZeroRecurrence(const PHINode *PN) {
2486 BinaryOperator *BO = nullptr;
2487 Value *Start = nullptr, *Step = nullptr;
2488 const APInt *StartC, *StepC;
2489 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2490 !match(Start, m_APInt(StartC)) || StartC->isZero())
2491 return false;
2492
2493 switch (BO->getOpcode()) {
2494 case Instruction::Add:
2495 // Starting from non-zero and stepping away from zero can never wrap back
2496 // to zero.
2497 return BO->hasNoUnsignedWrap() ||
2498 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2499 StartC->isNegative() == StepC->isNegative());
2500 case Instruction::Mul:
2501 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2502 match(Step, m_APInt(StepC)) && !StepC->isZero();
2503 case Instruction::Shl:
2504 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2505 case Instruction::AShr:
2506 case Instruction::LShr:
2507 return BO->isExact();
2508 default:
2509 return false;
2510 }
2511}
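// Example (hypothetical IR):
//   %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nuw i32 %iv, 2
// Starting non-zero and stepping away from zero with nuw can never
// wrap back to zero.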
2512
2513static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
2514 const Query &Q, unsigned BitWidth, Value *X, Value *Y,
2515 bool NSW) {
2516 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2517 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2518
2519 // If X and Y are both non-negative (as signed values) then their sum is not
2520 // zero unless both X and Y are zero.
2521 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2522 if (isKnownNonZero(Y, DemandedElts, Depth, Q) ||
2523 isKnownNonZero(X, DemandedElts, Depth, Q))
2524 return true;
2525
2526 // If X and Y are both negative (as signed values) then their sum is not
2527 // zero unless both X and Y equal INT_MIN.
2528 if (XKnown.isNegative() && YKnown.isNegative()) {
2529 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2530 // The sign bit of X is set. If some other bit is set then X is not equal
2531 // to INT_MIN.
2532 if (XKnown.One.intersects(Mask))
2533 return true;
2534 // The sign bit of Y is set. If some other bit is set then Y is not equal
2535 // to INT_MIN.
2536 if (YKnown.One.intersects(Mask))
2537 return true;
2538 }
2539
2540 // The sum of a non-negative number and a power of two is not zero.
2541 if (XKnown.isNonNegative() &&
2542 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2543 return true;
2544 if (YKnown.isNonNegative() &&
2545 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2546 return true;
2547
2548 return KnownBits::computeForAddSub(/*Add*/ true, NSW, XKnown, YKnown)
2549 .isNonZero();
2550}
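
The both-negative case rests on a small modular-arithmetic fact: two negative i8 values sum to zero mod 2^8 only when both equal INT8_MIN (-128). A quick exhaustive check (illustrative only, plain C++):

#include <cassert>

int main() {
  for (int x = -128; x < 0; ++x)
    for (int y = -128; y < 0; ++y)
      if (((x + y) & 0xFF) == 0)        // the wrapping 8-bit sum is zero
        assert(x == -128 && y == -128); // only INT8_MIN + INT8_MIN wraps to 0
}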
2551
2552static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
2553 const Query &Q, unsigned BitWidth, Value *X,
2554 Value *Y) {
2555 if (auto *C = dyn_cast<Constant>(X))
2556 if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Depth, Q))
2557 return true;
2558
2559 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2560 if (XKnown.isUnknown())
2561 return false;
2562 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2564 // If X != Y, then X - Y is non-zero.
2564 std::optional<bool> ne = KnownBits::ne(XKnown, YKnown);
2566 // If we are unable to compute whether X != Y, computing the known bits of
2567 // the sub expression won't help either, so just return here.
2567 return ne && *ne;
2568}
2569
2570static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
2571 unsigned Depth, const Query &Q,
2572 const KnownBits &KnownVal) {
2573 auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
2574 switch (I->getOpcode()) {
2575 case Instruction::Shl:
2576 return Lhs.shl(Rhs);
2577 case Instruction::LShr:
2578 return Lhs.lshr(Rhs);
2579 case Instruction::AShr:
2580 return Lhs.ashr(Rhs);
2581 default:
2582 llvm_unreachable("Unknown Shift Opcode");
2583 }
2584 };
2585
2586 auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
2587 switch (I->getOpcode()) {
2588 case Instruction::Shl:
2589 return Lhs.lshr(Rhs);
2590 case Instruction::LShr:
2591 case Instruction::AShr:
2592 return Lhs.shl(Rhs);
2593 default:
2594 llvm_unreachable("Unknown Shift Opcode");
2595 }
2596 };
2597
2598 if (KnownVal.isUnknown())
2599 return false;
2600
2601 KnownBits KnownCnt =
2602 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
2603 APInt MaxShift = KnownCnt.getMaxValue();
2604 unsigned NumBits = KnownVal.getBitWidth();
2605 if (MaxShift.uge(NumBits))
2606 return false;
2607
2608 if (!ShiftOp(KnownVal.One, MaxShift).isZero())
2609 return true;
2610
2611 // If all of the bits shifted out are known to be zero, and Val is known
2612 // non-zero then at least one non-zero bit must remain.
2613 if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift)
2614 .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) &&
2615 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q))
2616 return true;
2617
2618 return false;
2619}
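
The second test above encodes: if every bit position that would be shifted out is known zero and the input is non-zero, some set bit must survive the shift. A brute-force 8-bit illustration of that claim for shl (plain C++, not LLVM code):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned v = 1; v < 256; ++v)              // all non-zero inputs
    for (unsigned sh = 1; sh < 8; ++sh) {
      uint8_t topMask = static_cast<uint8_t>(0xFFu << (8 - sh));
      if (v & topMask)                            // a set bit would be lost
        continue;
      assert(static_cast<uint8_t>(v << sh) != 0); // a set bit remains
    }
}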
2620
2621/// Return true if the given value is known to be non-zero when defined. For
2622/// vectors, return true if every demanded element is known to be non-zero when
2623/// defined. For pointers, if the context instruction and dominator tree are
2624/// specified, perform context-sensitive analysis and return true if the
2625/// pointer couldn't possibly be null at the specified instruction.
2626/// Supports values with integer or pointer type and vectors of integers.
2627bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2628 const Query &Q) {
2629
2630#ifndef NDEBUG
2631 Type *Ty = V->getType();
2632 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2633
2634 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2635 assert(
2636 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2637 "DemandedElt width should equal the fixed vector number of elements");
2638 } else {
2639 assert(DemandedElts == APInt(1, 1) &&
2640 "DemandedElt width should be 1 for scalars");
2641 }
2642#endif
2643
2644 if (auto *C = dyn_cast<Constant>(V)) {
2645 if (C->isNullValue())
2646 return false;
2647 if (isa<ConstantInt>(C))
2648 // Must be non-zero due to null test above.
2649 return true;
2650
2651 // For constant vectors, check that all elements are undefined or known
2652 // non-zero to determine that the whole vector is known non-zero.
2653 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2654 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2655 if (!DemandedElts[i])
2656 continue;
2657 Constant *Elt = C->getAggregateElement(i);
2658 if (!Elt || Elt->isNullValue())
2659 return false;
2660 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2661 return false;
2662 }
2663 return true;
2664 }
2665
2666 // A global variable in address space 0 is non-null unless extern weak
2667 // or an absolute symbol reference. Other address spaces may have null as a
2668 // valid address for a global, so we can't assume anything.
2669 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2670 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2671 GV->getType()->getAddressSpace() == 0)
2672 return true;
2673 }
2674
2675 // For constant expressions, fall through to the Operator code below.
2676 if (!isa<ConstantExpr>(V))
2677 return false;
2678 }
2679
2680 if (auto *I = dyn_cast<Instruction>(V)) {
2681 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2682 // If the possible ranges don't contain zero, then the value is
2683 // definitely non-zero.
2684 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2685 const APInt ZeroValue(Ty->getBitWidth(), 0);
2686 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2687 return true;
2688 }
2689 }
2690 }
2691
2692 if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q))
2693 return true;
2694
2695 // Some of the tests below are recursive, so bail out if we hit the limit.
2696 if (Depth++ >= MaxAnalysisRecursionDepth)
2697 return false;
2698
2699 // Check for pointer simplifications.
2700
2701 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2702 // Alloca never returns null, malloc might.
2703 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2704 return true;
2705
2706 // A byval or inalloca argument is never null unless null is a valid
2707 // pointer in its address space. A nonnull argument is assumed never 0.
2708 if (const Argument *A = dyn_cast<Argument>(V)) {
2709 if (((A->hasPassPointeeByValueCopyAttr() &&
2710 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2711 A->hasNonNullAttr()))
2712 return true;
2713 }
2714
2715 // A Load tagged with nonnull metadata is never null.
2716 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2717 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2718 return true;
2719
2720 if (const auto *Call = dyn_cast<CallBase>(V)) {
2721 if (Call->isReturnNonNull())
2722 return true;
2723 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2724 return isKnownNonZero(RP, Depth, Q);
2725 }
2726 }
2727
2728 if (!isa<Constant>(V) &&
2729 isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2730 return true;
2731
2732 const Operator *I = dyn_cast<Operator>(V);
2733 if (!I)
2734 return false;
2735
2736 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2737 switch (I->getOpcode()) {
2738 case Instruction::GetElementPtr:
2739 if (I->getType()->isPointerTy())
2740 return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q);
2741 break;
2742 case Instruction::BitCast: {
2743 // We need to be a bit careful here. We can only peek through the bitcast
2744 // if the scalar size of elements in the operand is smaller than, and
2745 // evenly divides, the scalar size they are cast to. Take three cases:
2746 //
2747 // 1) Unsafe:
2748 // bitcast <2 x i16> %NonZero to <4 x i8>
2749 //
2750 // %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a
2751 // <4 x i8> requires that all 4 i8 elements be non-zero, which isn't
2752 // guaranteed (imagine just the sign bit set in the 2 i16 elements).
2753 //
2754 // 2) Unsafe:
2755 // bitcast <4 x i3> %NonZero to <3 x i4>
2756 //
2757 // Even though the scalar size of the src (`i3`) is smaller than the
2758 // scalar size of the dst (`i4`), `i4` is not a multiple of `i3`, so
2759 // it's possible for the `3 x i4` elements to be zero because some
2760 // elements in the destination don't contain any full src
2761 // element.
2762 //
2763 // 3) Safe:
2764 // bitcast <4 x i8> %NonZero to <2 x i16>
2765 //
2766 // This is always safe, as non-zero in the 4 i8 elements implies
2767 // non-zero in the combination of any two adjacent ones. Since i16 is a
2768 // multiple of i8, each i16 is guaranteed to contain 2 full i8 elements.
2769 // This all implies the 2 i16 elements are non-zero.
2770 Type *FromTy = I->getOperand(0)->getType();
2771 if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) &&
2772 (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0)
2773 return isKnownNonZero(I->getOperand(0), Depth, Q);
2774 } break;
2775 case Instruction::IntToPtr:
2776 // Note that we have to take special care to avoid looking through
2777 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2778 // as casts that can alter the value, e.g., AddrSpaceCasts.
2779 if (!isa<ScalableVectorType>(I->getType()) &&
2780 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
2781 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
2782 return isKnownNonZero(I->getOperand(0), Depth, Q);
2783 break;
2784 case Instruction::PtrToInt:
2785 // Similar to int2ptr above, we can look through ptr2int here if the cast
2786 // is a no-op or an extend and not a truncate.
2787 if (!isa<ScalableVectorType>(I->getType()) &&
2788 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
2789 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
2790 return isKnownNonZero(I->getOperand(0), Depth, Q);
2791 break;
2792 case Instruction::Sub:
2793 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
2794 I->getOperand(1));
2795 case Instruction::Or:
2796 // X | Y != 0 if X != 0 or Y != 0.
2797 return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) ||
2798 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2799 case Instruction::SExt:
2800 case Instruction::ZExt:
2801 // ext X != 0 if X != 0.
2802 return isKnownNonZero(I->getOperand(0), Depth, Q);
2803
2804 case Instruction::Shl: {
2805 // shl nsw/nuw can't remove any non-zero bits.
2806 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2807 if (Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO))
2808 return isKnownNonZero(I->getOperand(0), Depth, Q);
2809
2810 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2811 // if the lowest bit is shifted off the end.
2812 KnownBits Known(BitWidth);
2813 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q);
2814 if (Known.One[0])
2815 return true;
2816
2817 return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
2818 }
2819 case Instruction::LShr:
2820 case Instruction::AShr: {
2821 // shr exact can only shift out zero bits.
2822 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2823 if (BO->isExact())
2824 return isKnownNonZero(I->getOperand(0), Depth, Q);
2825
2826 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2827 // defined if the sign bit is shifted off the end.
2828 KnownBits Known =
2829 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2830 if (Known.isNegative())
2831 return true;
2832
2833 return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
2834 }
2835 case Instruction::UDiv:
2836 case Instruction::SDiv:
2837 // X / Y
2838 // div exact can only produce a zero if the dividend is zero.
2839 if (cast<PossiblyExactOperator>(I)->isExact())
2840 return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2841 if (I->getOpcode() == Instruction::UDiv) {
2842 std::optional<bool> XUgeY;
2843 KnownBits XKnown =
2844 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2845 if (!XKnown.isUnknown()) {
2846 KnownBits YKnown =
2847 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
2848 // If X u>= Y then div is non zero (0/0 is UB).
2849 XUgeY = KnownBits::uge(XKnown, YKnown);
2850 }
2851 // If X is totally unknown or X u< Y, we won't be able to prove non-zero
2852 // with computeKnownBits, so just return early.
2853 return XUgeY && *XUgeY;
2854 }
2855 break;
2856 case Instruction::Add: {
2857 // X + Y.
2858
2859 // If the Add has the nuw flag, then the result is non-zero if either X or
2860 // Y is non-zero.
2861 auto *BO = cast<OverflowingBinaryOperator>(V);
2862 if (Q.IIQ.hasNoUnsignedWrap(BO))
2863 return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) ||
2864 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2865
2866 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
2867 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO));
2868 }
2869 case Instruction::Mul: {
2870 // If X and Y are non-zero then so is X * Y as long as the multiplication
2871 // does not overflow.
2872 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2873 if (Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO))
2874 return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q) &&
2875 isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q);
2876
2877 // If either X or Y is odd, then if the other is non-zero the result can't
2878 // be zero.
2879 KnownBits XKnown =
2880 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2881 if (XKnown.One[0])
2882 return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q);
2883
2884 KnownBits YKnown =
2885 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
2886 if (YKnown.One[0])
2887 return XKnown.isNonZero() ||
2888 isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2889
2890 return KnownBits::mul(XKnown, YKnown).isNonZero();
2891 }
2892 case Instruction::Select:
2893 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2894 if (isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) &&
2895 isKnownNonZero(I->getOperand(2), DemandedElts, Depth, Q))
2896 return true;
2897 break;
2898 case Instruction::PHI: {
2899 auto *PN = cast<PHINode>(I);
2900 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2901 return true;
2902
2903 // Check if all incoming values are non-zero using recursion.
2904 Query RecQ = Q;
2905 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2906 return llvm::all_of(PN->operands(), [&](const Use &U) {
2907 if (U.get() == PN)
2908 return true;
2909 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2910 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2911 });
2912 }
2913 case Instruction::ExtractElement:
2914 if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2915 const Value *Vec = EEI->getVectorOperand();
2916 const Value *Idx = EEI->getIndexOperand();
2917 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2918 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2919 unsigned NumElts = VecTy->getNumElements();
2920 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2921 if (CIdx && CIdx->getValue().ult(NumElts))
2922 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2923 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2924 }
2925 }
2926 break;
2927 case Instruction::Freeze:
2928 return isKnownNonZero(I->getOperand(0), Depth, Q) &&
2929 isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
2930 Depth);
2931 case Instruction::Call:
2932 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2933 switch (II->getIntrinsicID()) {
2934 case Intrinsic::sshl_sat:
2935 case Intrinsic::ushl_sat:
2936 case Intrinsic::abs:
2937 case Intrinsic::bitreverse:
2938 case Intrinsic::bswap:
2939 case Intrinsic::ctpop:
2940 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q);
2941 case Intrinsic::ssub_sat:
2942 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth,
2943 II->getArgOperand(0), II->getArgOperand(1));
2944 case Intrinsic::sadd_sat:
2945 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth,
2946 II->getArgOperand(0), II->getArgOperand(1),
2947 /*NSW*/ true);
2948 case Intrinsic::umax:
2949 case Intrinsic::uadd_sat:
2950 return isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q) ||
2951 isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q);
2952 case Intrinsic::smin:
2953 case Intrinsic::smax: {
2954 auto KnownOpImpliesNonZero = [&](const KnownBits &K) {
2955 return II->getIntrinsicID() == Intrinsic::smin
2956 ? K.isNegative()
2957 : K.isStrictlyPositive();
2958 };
2959 KnownBits XKnown =
2960 computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q);
2961 if (KnownOpImpliesNonZero(XKnown))
2962 return true;
2963 KnownBits YKnown =
2964 computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q);
2965 if (KnownOpImpliesNonZero(YKnown))
2966 return true;
2967
2968 if (XKnown.isNonZero() && YKnown.isNonZero())
2969 return true;
2970 }
2971 [[fallthrough]];
2972 case Intrinsic::umin:
2973 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q) &&
2974 isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q);
2975 case Intrinsic::cttz:
2976 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
2977 .Zero[0];
2978 case Intrinsic::ctlz:
2979 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
2980 .isNonNegative();
2981 case Intrinsic::fshr:
2982 case Intrinsic::fshl:
2983 // If Op0 == Op1, this is a rotate. rotate(x, y) != 0 iff x != 0.
2984 if (II->getArgOperand(0) == II->getArgOperand(1))
2985 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q);
2986 break;
2987 case Intrinsic::vscale:
2988 return true;
2989 default:
2990 break;
2991 }
2992 }
2993 break;
2994 }
2995
2996 KnownBits Known(BitWidth);
2997 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2998 return Known.One != 0;
2999}
3000
3001bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
3002 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
3003 APInt DemandedElts =
3004 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
3005 return isKnownNonZero(V, DemandedElts, Depth, Q);
3006}
3007
3008/// If the pair of operators are the same invertible function, return the
3009/// operands of the function corresponding to each input. Otherwise,
3010/// return std::nullopt. An invertible function is one that is 1-to-1 and maps
3011/// every input value to exactly one output value. This is equivalent to
3012/// saying that Op1 and Op2 are equal exactly when the specified pair of
3013/// operands are equal (except that Op1 and Op2 may be poison more often).
3014static std::optional<std::pair<Value*, Value*>>
3015getInvertibleOperands(const Operator *Op1,
3016 const Operator *Op2) {
3017 if (Op1->getOpcode() != Op2->getOpcode())
3018 return std::nullopt;
3019
3020 auto getOperands = [&](unsigned OpNum) -> auto {
3021 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
3022 };
3023
3024 switch (Op1->getOpcode()) {
3025 default:
3026 break;
3027 case Instruction::Add:
3028 case Instruction::Sub:
3029 if (Op1->getOperand(0) == Op2->getOperand(0))
3030 return getOperands(1);
3031 if (Op1->getOperand(1) == Op2->getOperand(1))
3032 return getOperands(0);
3033 break;
3034 case Instruction::Mul: {
3035 // invertible if A * B == (A * B) mod 2^N where A and B are integers
3036 // and N is the bitwidth. The nsw case is non-obvious, but proven by
3037 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
3038 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3039 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3040 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3041 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3042 break;
3043
3044 // Assume operand order has been canonicalized
3045 if (Op1->getOperand(1) == Op2->getOperand(1) &&
3046 isa<ConstantInt>(Op1->getOperand(1)) &&
3047 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
3048 return getOperands(0);
3049 break;
3050 }
3051 case Instruction::Shl: {
3052 // Same as multiplies, with the difference that we don't need to check
3053 // for a non-zero multiply. Shifts always multiply by non-zero.
3054 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3055 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3056 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3057 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3058 break;
3059
3060 if (Op1->getOperand(1) == Op2->getOperand(1))
3061 return getOperands(0);
3062 break;
3063 }
3064 case Instruction::AShr:
3065 case Instruction::LShr: {
3066 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
3067 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
3068 if (!PEO1->isExact() || !PEO2->isExact())
3069 break;
3070
3071 if (Op1->getOperand(1) == Op2->getOperand(1))
3072 return getOperands(0);
3073 break;
3074 }
3075 case Instruction::SExt:
3076 case Instruction::ZExt:
3077 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
3078 return getOperands(0);
3079 break;
3080 case Instruction::PHI: {
3081 const PHINode *PN1 = cast<PHINode>(Op1);
3082 const PHINode *PN2 = cast<PHINode>(Op2);
3083
3084 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
3085 // are a single invertible function of the start values? Note that repeated
3086 // application of an invertible function is also invertible.
3087 BinaryOperator *BO1 = nullptr;
3088 Value *Start1 = nullptr, *Step1 = nullptr;
3089 BinaryOperator *BO2 = nullptr;
3090 Value *Start2 = nullptr, *Step2 = nullptr;
3091 if (PN1->getParent() != PN2->getParent() ||
3092 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
3093 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
3094 break;
3095
3096 auto Values = getInvertibleOperands(cast<Operator>(BO1),
3097 cast<Operator>(BO2));
3098 if (!Values)
3099 break;
3100
3101 // We have to be careful of mutually defined recurrences here. Ex:
3102 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
3103 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
3104 // The invertibility of these is complicated, and not worth reasoning
3105 // about (yet?).
3106 if (Values->first != PN1 || Values->second != PN2)
3107 break;
3108
3109 return std::make_pair(Start1, Start2);
3110 }
3111 }
3112 return std::nullopt;
3113}
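
The Mul case can likewise be checked exhaustively at 8 bits: when neither multiply wraps unsigned (the nuw condition) and the shared constant is non-zero, equal products force equal multiplicands. An illustrative sweep (not from this file):

#include <cassert>

int main() {
  for (unsigned c = 1; c < 256; ++c)      // shared non-zero constant
    for (unsigned x = 0; x < 256; ++x)
      for (unsigned y = 0; y < 256; ++y)
        if (x * c < 256 && y * c < 256 && // both multiplies satisfy nuw
            x * c == y * c)
          assert(x == y);                 // equal products => equal inputs
}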
3114
3115/// Return true if V2 == V1 + X, where X is known non-zero.
3116static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
3117 const Query &Q) {
3118 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
3119 if (!BO || BO->getOpcode() != Instruction::Add)
3120 return false;
3121 Value *Op = nullptr;
3122 if (V2 == BO->getOperand(0))
3123 Op = BO->getOperand(1);
3124 else if (V2 == BO->getOperand(1))
3125 Op = BO->getOperand(0);
3126 else
3127 return false;
3128 return isKnownNonZero(Op, Depth + 1, Q);
3129}
3130
3131/// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
3132/// the multiplication is nuw or nsw.
3133static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
3134 const Query &Q) {
3135 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3136 const APInt *C;
3137 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
3138 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3139 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
3140 }
3141 return false;
3142}
3143
3144/// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
3145/// the shift is nuw or nsw.
3146static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
3147 const Query &Q) {
3148 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3149 const APInt *C;
3150 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
3151 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3152 !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
3153 }
3154 return false;
3155}
3156
3157static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
3158 unsigned Depth, const Query &Q) {
3159 // Check that the two PHIs are in the same block.
3160 if (PN1->getParent() != PN2->getParent())
3161 return false;
3162
3163 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
3164 bool UsedFullRecursion = false;
3165 for (const BasicBlock *IncomBB : PN1->blocks()) {
3166 if (!VisitedBBs.insert(IncomBB).second)
3167 continue; // Don't reprocess blocks that we have dealt with already.
3168 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
3169 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
3170 const APInt *C1, *C2;
3171 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
3172 continue;
3173
3174 // Only one pair of phi operands is allowed for full recursion.
3175 if (UsedFullRecursion)
3176 return false;
3177
3178 Query RecQ = Q;
3179 RecQ.CxtI = IncomBB->getTerminator();
3180 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
3181 return false;
3182 UsedFullRecursion = true;
3183 }
3184 return true;
3185}
3186
3187/// Return true if it is known that V1 != V2.
3188static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
3189 const Query &Q) {
3190 if (V1 == V2)
3191 return false;
3192 if (V1->getType() != V2->getType())
3193 // We can't look through casts yet.
3194 return false;
3195
3196 if (Depth >= MaxAnalysisRecursionDepth)
3197 return false;
3198
3199 // See if we can recurse through (exactly one of) our operands. This
3200 // requires that our operation be 1-to-1 and map every input value to
3201 // exactly one output value. Such an operation is invertible.
3202 auto *O1 = dyn_cast<Operator>(V1);
3203 auto *O2 = dyn_cast<Operator>(V2);
3204 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
3205 if (auto Values = getInvertibleOperands(O1, O2))
3206 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
3207
3208 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
3209 const PHINode *PN2 = cast<PHINode>(V2);
3210 // FIXME: This is missing a generalization to handle the case where one is
3211 // a PHI and another one isn't.
3212 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
3213 return true;
3214 }
3215 }
3216
3217 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
3218 return true;
3219
3220 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
3221 return true;
3222
3223 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
3224 return true;
3225
3226 if (V1->getType()->isIntOrIntVectorTy()) {
3227 // Are any known bits in V1 contradictory to known bits in V2? If V1
3228 // has a known zero where V2 has a known one, they must not be equal.
3229 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
3230 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
3231
3232 if (Known1.Zero.intersects(Known2.One) ||
3233 Known2.Zero.intersects(Known1.One))
3234 return true;
3235 }
3236 return false;
3237}
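
That final step is the classic known-bits contradiction: a bit known zero in V1 but known one in V2 means the two values must differ. A mask-level sketch with hypothetical known-bit masks (plain integers standing in for KnownBits):

#include <cassert>
#include <cstdint>

int main() {
  // Suppose analysis proved: V1 has bit 3 clear, V2 has bit 3 set.
  uint8_t Known1Zero = 0b00001000, Known1One = 0b00000001;
  uint8_t Known2Zero = 0b00000000, Known2One = 0b00001000;
  // Known1.Zero intersects Known2.One at bit 3, so V1 != V2.
  bool knownNonEqual = (Known1Zero & Known2One) != 0 ||
                       (Known2Zero & Known1One) != 0;
  assert(knownNonEqual);
}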
3238
3239/// Return true if 'V & Mask' is known to be zero. We use this predicate to
3240/// simplify operations downstream. It holds when every bit set in Mask is a
3241/// bit that V is known not to have set.
3242///
3243/// This function is defined on values with integer type, values with pointer
3244/// type, and vectors of integers. In the case
3245/// where V is a vector, the mask, known zero, and known one values are the
3246/// same width as the vector element, and the bit is set only if it is true
3247/// for all of the elements in the vector.
3248bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
3249 const Query &Q) {
3250 KnownBits Known(Mask.getBitWidth());
3251 computeKnownBits(V, Known, Depth, Q);
3252 return Mask.isSubsetOf(Known.Zero);
3253}
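
Mask.isSubsetOf(Known.Zero) is equivalent to (Mask & ~KnownZero) == 0: every bit set in Mask must be a known-zero bit of V. A plain-integer sketch of the predicate (hypothetical helper name, not the LLVM API):

#include <cassert>
#include <cstdint>

// True iff every bit set in Mask is also set in KnownZero.
static bool maskedValueIsZero(uint8_t Mask, uint8_t KnownZero) {
  return (Mask & ~KnownZero) == 0;
}

int main() {
  assert(maskedValueIsZero(0x03, 0x0F));  // low nibble known zero covers 0x03
  assert(!maskedValueIsZero(0x10, 0x0F)); // bit 4 is not known zero
}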
3254
3255// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
3256// Returns the input and lower/upper bounds.
3257static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
3258 const APInt *&CLow, const APInt *&CHigh) {
3259 assert(isa<Operator>(Select) &&
3260 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
3261 "Input should be a Select!");
3262
3263 const Value *LHS = nullptr, *RHS = nullptr;
3264 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
3265 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
3266 return false;
3267
3268 if (!match(RHS, m_APInt(CLow)))
3269 return false;
3270
3271 const Value *LHS2 = nullptr, *RHS2 = nullptr;
3272 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
3273 if (getInverseMinMaxFlavor(SPF) != SPF2)
3274 return false;
3275
3276 if (!match(RHS2, m_APInt(CHigh)))
3277 return false;
3278
3279 if (SPF == SPF_SMIN)
3280 std::swap(CLow, CHigh);
3281
3282 In = LHS2;
3283 return CLow->sle(*CHigh);
3284}
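
The payoff of recognizing a clamp comes in ComputeNumSignBits below: a value clamped into [CLow, CHigh] has at least min(signbits(CLow), signbits(CHigh)) sign bits. An 8-bit brute-force illustration with one assumed bound pair (the helper is hand-rolled, not LLVM's):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Copies of the sign bit at the top of an int8_t, the sign bit included.
static unsigned numSignBits(int8_t v) {
  unsigned n = 1;
  for (int bit = 6; bit >= 0 && ((v >> bit) & 1) == ((v >> 7) & 1); --bit)
    ++n;
  return n;
}

int main() {
  const int CLow = -16, CHigh = 15; // e.g. smax(smin(x, 15), -16)
  unsigned bound = std::min(numSignBits(CLow), numSignBits(CHigh));
  for (int x = -128; x <= 127; ++x) {
    int8_t clamped = static_cast<int8_t>(std::clamp(x, CLow, CHigh));
    assert(numSignBits(clamped) >= bound); // at least 4 sign bits here
  }
}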
3285
3286static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
3287 const APInt *&CLow,
3288 const APInt *&CHigh) {
3289 assert((II->getIntrinsicID() == Intrinsic::smin ||
3290 II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
3291
3292 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
3293 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
3294 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
3295 !match(II->getArgOperand(1), m_APInt(CLow)) ||
3296 !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
3297 return false;
3298
3299 if (II->getIntrinsicID() == Intrinsic::smin)
3300 std::swap(CLow, CHigh);
3301 return CLow->sle(*CHigh);
3302}
3303
3304/// For vector constants, loop over the elements and find the constant with the
3305/// minimum number of sign bits. Return 0 if the value is not a vector constant
3306/// or if any element was not analyzed; otherwise, return the count for the
3307/// element with the minimum number of sign bits.
3308static unsigned computeNumSignBitsVectorConstant(const Value *V,
3309 const APInt &DemandedElts,
3310 unsigned TyBits) {
3311 const auto *CV = dyn_cast<Constant>(V);
3312 if (!CV || !isa<FixedVectorType>(CV->getType()))
3313 return 0;
3314
3315 unsigned MinSignBits = TyBits;
3316 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
3317 for (unsigned i = 0; i != NumElts; ++i) {
3318 if (!DemandedElts[i])
3319 continue;
3320 // If we find a non-ConstantInt, bail out.
3321 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
3322 if (!Elt)
3323 return 0;
3324
3325 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
3326 }
3327
3328 return MinSignBits;
3329}
3330
3331static unsigned ComputeNumSignBitsImpl(const Value *V,
3332 const APInt &DemandedElts,
3333 unsigned Depth, const Query &Q);
3334
3335static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
3336 unsigned Depth, const Query &Q) {
3337 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
3338 assert(Result > 0 && "At least one sign bit needs to be present!");
3339 return Result;
3340}
3341
3342/// Return the number of times the sign bit of the register is replicated into
3343/// the other bits. We know that at least 1 bit is always equal to the sign bit
3344/// (itself), but other cases can give us information. For example, immediately
3345/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
3346/// other, so we return 3. For vectors, return the number of sign bits for the
3347 /// demanded vector element (as specified by DemandedElts) that has the
3348 /// minimum number of known sign bits.
3349static unsigned ComputeNumSignBitsImpl(const Value *V,
3350 const APInt &DemandedElts,
3351 unsigned Depth, const Query &Q) {
3352 Type *Ty = V->getType();
3353#ifndef NDEBUG
3354 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3355
3356 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3357 assert(
3358 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3359 "DemandedElt width should equal the fixed vector number of elements");
3360 } else {
3361 assert(DemandedElts == APInt(1, 1) &&
3362 "DemandedElt width should be 1 for scalars");
3364#endif
3365
3366 // We return the minimum number of sign bits that are guaranteed to be present
3367 // in V, so for undef we have to conservatively return 1. We don't have the
3368 // same behavior for poison though -- that's a FIXME today.
3369
3370 Type *ScalarTy = Ty->getScalarType();
3371 unsigned TyBits = ScalarTy->isPointerTy() ?
3372 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
3373 Q.DL.getTypeSizeInBits(ScalarTy);
3374
3375 unsigned Tmp, Tmp2;
3376 unsigned FirstAnswer = 1;
3377
3378 // Note that ConstantInt is handled by the general computeKnownBits case
3379 // below.
3380
3381 if (Depth == MaxAnalysisRecursionDepth)
3382 return 1;
3383
3384 if (auto *U = dyn_cast<Operator>(V)) {
3385 switch (Operator::getOpcode(V)) {
3386 default: break;
3387 case Instruction::SExt:
3388 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3389 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
3390
3391 case Instruction::SDiv: {
3392 const APInt *Denominator;
3393 // sdiv X, C -> adds log(C) sign bits.
3394 if (match(U->getOperand(1), m_APInt(Denominator))) {
3395
3396 // Ignore non-positive denominator.
3397 if (!Denominator->isStrictlyPositive())
3398 break;
3399
3400 // Calculate the incoming numerator bits.
3401 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3402
3403 // Add floor(log(C)) bits to the numerator bits.
3404 return std::min(TyBits, NumBits + Denominator->logBase2());
3405 }
3406 break;
3407 }
3408
3409 case Instruction::SRem: {
3410 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3411
3412 const APInt *Denominator;
3413 // srem X, C -> we know that the result is within [-C+1,C) when C is a
3414 // positive constant. This lets us put a lower bound on the number of sign
3415 // bits.
3416 if (match(U->getOperand(1), m_APInt(Denominator))) {
3417
3418 // Ignore non-positive denominator.
3419 if (Denominator->isStrictlyPositive()) {
3420 // Calculate the leading sign bit constraints by examining the
3421 // denominator. Given that the denominator is positive, there are two
3422 // cases:
3423 //
3424 // 1. The numerator is positive. The result range is [0,C) and
3425 // [0,C) u< (1 << ceilLogBase2(C)).
3426 //
3427 // 2. The numerator is negative. Then the result range is (-C,0] and
3428 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3429 //
3430 // Thus a lower bound on the number of sign bits is `TyBits -
3431 // ceilLogBase2(C)`.
3432
3433 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3434 Tmp = std::max(Tmp, ResBits);
3435 }
3436 }
3437 return Tmp;
3438 }
3439
3440 case Instruction::AShr: {
3441 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3442 // ashr X, C -> adds C sign bits. Vectors too.
3443 const APInt *ShAmt;
3444 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3445 if (ShAmt->uge(TyBits))
3446 break; // Bad shift.
3447 unsigned ShAmtLimited = ShAmt->getZExtValue();
3448 Tmp += ShAmtLimited;
3449 if (Tmp > TyBits) Tmp = TyBits;
3450 }
3451 return Tmp;
3452 }
3453 case Instruction::Shl: {
3454 const APInt *ShAmt;
3455 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3456 // shl destroys sign bits.
3457 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3458 if (ShAmt->uge(TyBits) || // Bad shift.
3459 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3460 Tmp2 = ShAmt->getZExtValue();
3461 return Tmp - Tmp2;
3462 }
3463 break;
3464 }
3465 case Instruction::And:
3466 case Instruction::Or:
3467 case Instruction::Xor: // NOT is handled here.
3468 // Logical binary ops preserve the number of sign bits at the worst.
3469 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3470 if (Tmp != 1) {
3471 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3472 FirstAnswer = std::min(Tmp, Tmp2);
3473 // We computed what we know about the sign bits as our first
3474 // answer. Now proceed to the generic code that uses
3475 // computeKnownBits, and pick whichever answer is better.
3476 }
3477 break;
3478
3479 case Instruction::Select: {
3480 // If we have a clamp pattern, we know that the number of sign bits will
3481 // be the minimum of the clamp min/max range.
3482 const Value *X;
3483 const APInt *CLow, *CHigh;
3484 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3485 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3486
3487 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3488 if (Tmp == 1) break;
3489 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3490 return std::min(Tmp, Tmp2);
3491 }
3492
3493 case Instruction::Add:
3494 // Add can have at most one carry bit. Thus we know that the output
3495 // is, at worst, one more bit than the inputs.
3496 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3497 if (Tmp == 1) break;
3498
3499 // Special case decrementing a value (ADD X, -1):
3500 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3501 if (CRHS->isAllOnesValue()) {
3502 KnownBits Known(TyBits);
3503 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3504
3505 // If the input is known to be 0 or 1, the output is 0/-1, which is
3506 // all sign bits set.
3507 if ((Known.Zero | 1).isAllOnes())
3508 return TyBits;
3509
3510 // If we are subtracting one from a positive number, there is no carry
3511 // out of the result.
3512 if (Known.isNonNegative())
3513 return Tmp;
3514 }
3515
3516 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3517 if (Tmp2 == 1) break;
3518 return std::min(Tmp, Tmp2) - 1;
3519
3520 case Instruction::Sub:
3521 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3522 if (Tmp2 == 1) break;
3523
3524 // Handle NEG.
3525 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3526 if (CLHS->isNullValue()) {
3527 KnownBits Known(TyBits);
3528 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3529 // If the input is known to be 0 or 1, the output is 0/-1, which is
3530 // all sign bits set.
3531 if ((Known.Zero | 1).isAllOnes())
3532 return TyBits;
3533
3534 // If the input is known to be positive (the sign bit is known clear),
3535 // the output of the NEG has the same number of sign bits as the
3536 // input.
3537 if (Known.isNonNegative())
3538 return Tmp2;
3539
3540 // Otherwise, we treat this like a SUB.
3541 }
3542
3543 // Sub can have at most one carry bit. Thus we know that the output
3544 // is, at worst, one more bit than the inputs.
3545 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3546 if (Tmp == 1) break;
3547 return std::min(Tmp, Tmp2) - 1;
3548
3549 case Instruction::Mul: {
3550 // The output of the Mul can be at most twice the valid bits in the
3551 // inputs.
3552 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3553 if (SignBitsOp0 == 1) break;
3554 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3555 if (SignBitsOp1 == 1) break;
3556 unsigned OutValidBits =
3557 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3558 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3559 }
3560
3561 case Instruction::PHI: {
3562 const PHINode *PN = cast<PHINode>(U);
3563 unsigned NumIncomingValues = PN->getNumIncomingValues();
3564 // Don't analyze large in-degree PHIs.
3565 if (NumIncomingValues > 4) break;
3566 // Unreachable blocks may have zero-operand PHI nodes.
3567 if (NumIncomingValues == 0) break;
3568
3569 // Take the minimum of all incoming values. This can't infinitely loop
3570 // because of our depth threshold.
3571 Query RecQ = Q;
3572 Tmp = TyBits;
3573 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3574 if (Tmp == 1) return Tmp;
3575 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3576 Tmp = std::min(
3577 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3578 }
3579 return Tmp;
3580 }
3581
3582 case Instruction::Trunc: {
3583 // If the input contained enough sign bits that some remain after the
3584 // truncation, then we can make use of that. Otherwise we don't know
3585 // anything.
3586 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3587 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
3588 if (Tmp > (OperandTyBits - TyBits))
3589 return Tmp - (OperandTyBits - TyBits);
3590
3591 return 1;
3592 }
3593
3594 case Instruction::ExtractElement:
3595 // Look through extract element. At the moment we keep this simple and
3596 // skip tracking the specific element. But at least we might find
3597 // information valid for all elements of the vector (for example, if the
3598 // vector is sign extended, shifted, etc.).
3599 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3600
3601 case Instruction::ShuffleVector: {
3602 // Collect the minimum number of sign bits that are shared by every vector
3603 // element referenced by the shuffle.
3604 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3605 if (!Shuf) {
3606 // FIXME: Add support for shufflevector constant expressions.
3607 return 1;
3608 }
3609 APInt DemandedLHS, DemandedRHS;
3610 // For undef elements, we don't know anything about the common state of
3611 // the shuffle result.
3612 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3613 return 1;
3614 Tmp = std::numeric_limits<unsigned>::max();
3615 if (!!DemandedLHS) {
3616 const Value *LHS = Shuf->getOperand(0);
3617 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3618 }
3619 // If we don't know anything, early out and try computeKnownBits
3620 // fall-back.
3621 if (Tmp == 1)
3622 break;
3623 if (!!DemandedRHS) {
3624 const Value *RHS = Shuf->getOperand(1);
3625 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3626 Tmp = std::min(Tmp, Tmp2);
3627 }
3628 // If we don't know anything, early out and try computeKnownBits
3629 // fall-back.
3630 if (Tmp == 1)
3631 break;
3632 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3633 return Tmp;
3634 }
3635 case Instruction::Call: {
3636 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3637 switch (II->getIntrinsicID()) {
3638 default: break;
3639 case Intrinsic::abs:
3640 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3641 if (Tmp == 1) break;
3642
3643 // Absolute value reduces number of sign bits by at most 1.
3644 return Tmp - 1;
3645 case Intrinsic::smin:
3646 case Intrinsic::smax: {
3647 const APInt *CLow, *CHigh;
3648 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
3649 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3650 }
3651 }
3652 }
3653 }
3654 }
3655 }
3656
3657 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3658 // use this information.
3659
3660 // If we can examine all elements of a vector constant successfully, we're
3661 // done (we can't do any better than that). If not, keep trying.
3662 if (unsigned VecSignBits =
3663 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3664 return VecSignBits;
3665
3666 KnownBits Known(TyBits);
3667 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3668
3669 // If we know that the sign bit is either zero or one, determine the number of
3670 // identical bits in the top of the input value.
3671 return std::max(FirstAnswer, Known.countMinSignBits());
3672}
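
As a concrete instance of the AShr rule in the switch above (each arithmetic right shift adds that many copies of the sign bit, capped at the bit width), an exhaustive 8-bit check (illustrative only):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Copies of the sign bit at the top of an int8_t, the sign bit included.
static unsigned numSignBits(int8_t v) {
  unsigned n = 1;
  for (int bit = 6; bit >= 0 && ((v >> bit) & 1) == ((v >> 7) & 1); --bit)
    ++n;
  return n;
}

int main() {
  for (int x = -128; x <= 127; ++x)
    for (unsigned sh = 0; sh < 8; ++sh) {
      int8_t y = static_cast<int8_t>(x >> sh); // arithmetic shift on int
      assert(numSignBits(y) >= std::min(8u, numSignBits((int8_t)x) + sh));
    }
}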
3673
3674Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3675 const TargetLibraryInfo *TLI) {
3676 const Function *F = CB.getCalledFunction();
3677 if (!F)
3678 return Intrinsic::not_intrinsic;
3679
3680 if (F->isIntrinsic())
3681 return F->getIntrinsicID();
3682
3683 // We are going to infer semantics of a library function based on mapping it
3684 // to an LLVM intrinsic. Check that the library function is available from
3685 // this call site and in this environment.
3686 LibFunc Func;
3687 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3688 !CB.onlyReadsMemory())
3689 return Intrinsic::not_intrinsic;
3690
3691 switch (Func) {
3692 default:
3693 break;
3694 case LibFunc_sin:
3695 case LibFunc_sinf:
3696 case LibFunc_sinl:
3697 return Intrinsic::sin;
3698 case LibFunc_cos:
3699 case LibFunc_cosf:
3700 case LibFunc_cosl:
3701 return Intrinsic::cos;
3702 case LibFunc_exp:
3703 case LibFunc_expf:
3704 case LibFunc_expl:
3705 return Intrinsic::exp;
3706 case LibFunc_exp2:
3707 case LibFunc_exp2f:
3708 case LibFunc_exp2l:
3709 return Intrinsic::exp2;
3710 case LibFunc_log:
3711 case LibFunc_logf:
3712 case LibFunc_logl:
3713 return Intrinsic::log;
3714 case LibFunc_log10:
3715 case LibFunc_log10f:
3716 case LibFunc_log10l:
3717 return Intrinsic::log10;
3718 case LibFunc_log2:
3719 case LibFunc_log2f:
3720 case LibFunc_log2l:
3721 return Intrinsic::log2;
3722 case LibFunc_fabs:
3723 case LibFunc_fabsf:
3724 case LibFunc_fabsl:
3725 return Intrinsic::fabs;
3726 case LibFunc_fmin:
3727 case LibFunc_fminf:
3728 case LibFunc_fminl:
3729 return Intrinsic::minnum;
3730 case LibFunc_fmax:
3731 case LibFunc_fmaxf:
3732 case LibFunc_fmaxl:
3733 return Intrinsic::maxnum;
3734 case LibFunc_copysign:
3735 case LibFunc_copysignf:
3736 case LibFunc_copysignl:
3737 return Intrinsic::copysign;
3738 case LibFunc_floor:
3739 case LibFunc_floorf:
3740 case LibFunc_floorl:
3741 return Intrinsic::floor;
3742 case LibFunc_ceil:
3743 case LibFunc_ceilf:
3744 case LibFunc_ceill:
3745 return Intrinsic::ceil;
3746 case LibFunc_trunc:
3747 case LibFunc_truncf:
3748 case LibFunc_truncl:
3749 return Intrinsic::trunc;
3750 case LibFunc_rint:
3751 case LibFunc_rintf:
3752 case LibFunc_rintl:
3753 return Intrinsic::rint;
3754 case LibFunc_nearbyint:
3755 case LibFunc_nearbyintf:
3756 case LibFunc_nearbyintl:
3757 return Intrinsic::nearbyint;
3758 case LibFunc_round:
3759 case LibFunc_roundf:
3760 case LibFunc_roundl:
3761 return Intrinsic::round;
3762 case LibFunc_roundeven:
3763 case LibFunc_roundevenf:
3764 case LibFunc_roundevenl:
3765 return Intrinsic::roundeven;
3766 case LibFunc_pow:
3767 case LibFunc_powf:
3768 case LibFunc_powl:
3769 return Intrinsic::pow;
3770 case LibFunc_sqrt:
3771 case LibFunc_sqrtf:
3772 case LibFunc_sqrtl:
3773 return Intrinsic::sqrt;
3774 }
3775
3776 return Intrinsic::not_intrinsic;
3777}
3778
3779/// Return true if we can prove that the specified FP value is never equal to
3780/// -0.0.
3781/// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3782/// that a value is not -0.0. It only guarantees that -0.0 may be treated
3783/// the same as +0.0 in floating-point ops.
3784bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3785 unsigned Depth) {
3786 if (auto *CFP = dyn_cast<ConstantFP>(V))
3787 return !CFP->getValueAPF().isNegZero();
3788
3789 if (Depth == MaxAnalysisRecursionDepth)
3790 return false;
3791
3792 auto *Op = dyn_cast<Operator>(V);
3793 if (!Op)
3794 return false;
3795
3796 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3797 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3798 return true;
3799
3800 // sitofp and uitofp turn into +0.0 for zero.
3801 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3802 return true;
3803
3804 if (auto *Call = dyn_cast<CallInst>(Op)) {
3805 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3806 switch (IID) {
3807 default:
3808 break;
3809 // sqrt(-0.0) = -0.0, no other negative results are possible.
3810 case Intrinsic::sqrt:
3811 case Intrinsic::canonicalize:
3812 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3813 case Intrinsic::experimental_constrained_sqrt: {
3814 // NOTE: This rounding mode restriction may be too strict.
3815 const auto *CI = cast<ConstrainedFPIntrinsic>(Call);
3816 if (CI->getRoundingMode() == RoundingMode::NearestTiesToEven)
3817 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3818 else
3819 return false;
3820 }
3821 // fabs(x) != -0.0
3822 case Intrinsic::fabs:
3823 return true;
3824 // sitofp and uitofp turn into +0.0 for zero.
3825 case Intrinsic::experimental_constrained_sitofp:
3826 case Intrinsic::experimental_constrained_uitofp:
3827 return true;
3828 }
3829 }
3830
3831 return false;
3832}
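
The fadd special case above follows from IEEE-754 round-to-nearest: (-0.0) + (+0.0) is +0.0, so adding a positive-zero constant scrubs a negative zero. A two-assert check in plain C++:

#include <cassert>
#include <cmath>

int main() {
  double negZero = -0.0;
  assert(std::signbit(negZero));        // the input really is -0.0
  assert(!std::signbit(negZero + 0.0)); // fadd x, +0.0 yields +0.0
}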
3833
3834/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3835/// standard ordered compare. E.g., make -0.0 olt 0.0 be true because of the
3836/// sign bit, despite the two comparing equal.
3837static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3838 const TargetLibraryInfo *TLI,
3839 bool SignBitOnly,
3840 unsigned Depth) {
3841 // TODO: This function does not do the right thing when SignBitOnly is true
3842 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3843 // which flips the sign bits of NaNs. See
3844 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3845
3846 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3847 return !CFP->getValueAPF().isNegative() ||
3848 (!SignBitOnly && CFP->getValueAPF().isZero());
3849 }
3850
3851 // Handle vector of constants.
3852 if (auto *CV = dyn_cast<Constant>(V)) {
3853 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3854 unsigned NumElts = CVFVTy->getNumElements();
3855 for (unsigned i = 0; i != NumElts; ++i) {
3856 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3857 if (!CFP)
3858 return false;
3859 if (CFP->getValueAPF().isNegative() &&
3860 (SignBitOnly || !CFP->getValueAPF().isZero()))
3861 return false;
3862 }
3863
3864 // All non-negative ConstantFPs.
3865 return true;
3866 }
3867 }
3868
3869 if (Depth == MaxAnalysisRecursionDepth)
3870 return false;
3871
3872 const Operator *I = dyn_cast<Operator>(V);
3873 if (!I)
3874 return false;
3875
3876 switch (I->getOpcode()) {
3877 default:
3878 break;
3879 // Unsigned integers are always nonnegative.
3880 case Instruction::UIToFP:
3881 return true;
3882 case Instruction::FDiv:
3883 // X / X is always exactly 1.0 or a NaN.
3884 if (I->getOperand(0) == I->getOperand(1) &&
3885 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3886 return true;
3887
3888 // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN).
3889 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3890 Depth + 1) &&
3891 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3892 /*SignBitOnly*/ true, Depth + 1);
3893 case Instruction::FMul:
3894 // X * X is always non-negative or a NaN.
3895 if (I->getOperand(0) == I->getOperand(1) &&
3896 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3897 return true;
3898
3899 [[fallthrough]];
3900 case Instruction::FAdd:
3901 case Instruction::FRem:
3902 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3903 Depth + 1) &&
3904 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3905 Depth + 1);
3906 case Instruction::Select:
3907 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3908 Depth + 1) &&
3909 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3910 Depth + 1);
3911 case Instruction::FPExt:
3912 case Instruction::FPTrunc:
3913 // Widening/narrowing never change sign.
3914 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3915 Depth + 1);
3916 case Instruction::ExtractElement:
3917 // Look through extract element. At the moment we keep this simple and skip
3918 // tracking the specific element. But at least we might find information
3919 // valid for all elements of the vector.
3920 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3921 Depth + 1);
3922 case Instruction::Call:
3923 const auto *CI = cast<CallInst>(I);
3924 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3925 switch (IID) {
3926 default:
3927 break;
3928 case Intrinsic::canonicalize:
3929 case Intrinsic::arithmetic_fence:
3930 case Intrinsic::floor:
3931 case Intrinsic::ceil:
3932 case Intrinsic::trunc:
3933 case Intrinsic::rint:
3934 case Intrinsic::nearbyint:
3935 case Intrinsic::round:
3936 case Intrinsic::roundeven:
3937 case Intrinsic::fptrunc_round:
3938 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, Depth + 1);
3939 case Intrinsic::maxnum: {
3940 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3941 auto isPositiveNum = [&](Value *V) {
3942 if (SignBitOnly) {
3943 // With SignBitOnly, this is tricky because the result of
3944 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3945 // a constant strictly greater than 0.0.
3946 const APFloat *C;
3947 return match(V, m_APFloat(C)) &&
3948 *C > APFloat::getZero(C->getSemantics());
3949 }
3950
3951 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3952 // maxnum can't be ordered-less-than-zero.
3953 return isKnownNeverNaN(V, TLI) &&
3954 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3955 };
3956
3957 // TODO: This could be improved. We could also check that neither operand
3958 // has its sign bit set (and at least 1 is not-NAN?).
3959 return isPositiveNum(V0) || isPositiveNum(V1);
3960 }
3961
3962 case Intrinsic::maximum:
3963 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3964 Depth + 1) ||
3965 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3966 Depth + 1);
3967 case Intrinsic::minnum:
3968 case Intrinsic::minimum:
3969 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3970 Depth + 1) &&
3971 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3972 Depth + 1);
3973 case Intrinsic::exp:
3974 case Intrinsic::exp2:
3975 case Intrinsic::fabs:
3976 return true;
3977 case Intrinsic::copysign:
3978 // Only the sign operand matters.
3979 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, true,
3980 Depth + 1);
3981 case Intrinsic::sqrt:
3982 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3983 if (!SignBitOnly)
3984 return true;
3985 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3986 CannotBeNegativeZero(CI->getOperand(0), TLI));
3987
3988 case Intrinsic::powi:
3989 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3990 // powi(x,n) is non-negative if n is even.
3991 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3992 return true;
3993 }
3994 // TODO: This is not correct. Given that exp is an integer, here are the
3995 // ways that pow can return a negative value:
3996 //
3997 // pow(x, exp) --> negative if exp is odd and x is negative.
3998 // pow(-0, exp) --> -inf if exp is negative odd.
3999 // pow(-0, exp) --> -0 if exp is positive odd.
4000 // pow(-inf, exp) --> -0 if exp is negative odd.
4001 // pow(-inf, exp) --> -inf if exp is positive odd.
4002 //
4003 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
4004 // but we must return false if x == -0. Unfortunately we do not currently
4005 // have a way of expressing this constraint. See details in
4006 // https://llvm.org/bugs/show_bug.cgi?id=31702.
4007 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
4008 Depth + 1);
4009
4010 case Intrinsic::fma:
4011 case Intrinsic::fmuladd:
4012 // x*x+y is non-negative if y is non-negative.
4013 return I->getOperand(0) == I->getOperand(1) &&
4014 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
4015 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
4016 Depth + 1);
4017 }
4018 break;
4019 }
4020 return false;
4021}
4022
4023bool llvm::CannotBeOrderedLessThanZero(const Value *V,
4024 const TargetLibraryInfo *TLI) {
4025 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
4026}
4027
4028bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
4029 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
4030}
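
Editor's note: a hedged usage sketch (demo name hypothetical) of the two wrappers above; -0.0 is exactly the input on which they deliberately disagree:

#include "llvm/ADT/APFloat.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

static void demoOrderedVsSignBit() {
  llvm::LLVMContext Ctx;
  llvm::Constant *NegZero = llvm::ConstantFP::get(Ctx, llvm::APFloat(-0.0));
  // -0.0 compares equal to 0.0, so it is not ordered-less-than-zero, yet its
  // sign bit is set; this is the SignBitOnly distinction documented above.
  assert(llvm::CannotBeOrderedLessThanZero(NegZero, /*TLI=*/nullptr));
  assert(!llvm::SignBitMustBeZero(NegZero, /*TLI=*/nullptr));
}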
4031
4032bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
4033 unsigned Depth) {
4034 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
4035
4036 // If we're told that infinities won't happen, assume they won't.
4037 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
4038 if (FPMathOp->hasNoInfs())
4039 return true;
4040
4041 if (const auto *Arg = dyn_cast<Argument>(V)) {
4042 if ((Arg->getNoFPClass() & fcInf) == fcInf)
4043 return true;
4044 }
4045
4046 // TODO: Use fpclass like API for isKnown queries and distinguish +inf from
4047 // -inf.
4048 if (const auto *CB = dyn_cast<CallBase>(V)) {
4049 if ((CB->getRetNoFPClass() & fcInf) == fcInf)
4050 return true;
4051 }
4052
4053 // Handle scalar constants.
4054 if (auto *CFP = dyn_cast<ConstantFP>(V))
4055 return !CFP->isInfinity();
4056
4057 if (Depth == MaxAnalysisRecursionDepth)
4058 return false;
4059
4060 if (auto *Inst = dyn_cast<Instruction>(V)) {
4061 switch (Inst->getOpcode()) {
4062 case Instruction::Select: {
4063 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
4064 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
4065 }
4066 case Instruction::SIToFP:
4067 case Instruction::UIToFP: {
4068 // Get width of largest magnitude integer (remove a bit if signed).
4069 // This still works for a signed minimum value because the largest FP
4070 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
4071 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
4072 if (Inst->getOpcode() == Instruction::SIToFP)
4073 --IntSize;
4074
4075 // If the exponent of the largest finite FP value can hold the largest
4076 // integer, the result of the cast must be finite.
4077 Type *FPTy = Inst->getType()->getScalarType();
4078 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
4079 }
4080 case Instruction::FNeg:
4081 case Instruction::FPExt: {
4082 // Peek through to source op. If it is not infinity, this is not infinity.
4083 return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1);
4084 }
4085 case Instruction::FPTrunc: {
4086 // Need a range check.
4087 return false;
4088 }
4089 default:
4090 break;
4091 }
4092
4093 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
4094 switch (II->getIntrinsicID()) {
4095 case Intrinsic::sin:
4096 case Intrinsic::cos:
4097 // Return NaN on infinite inputs.
4098 return true;
4099 case Intrinsic::fabs:
4100 case Intrinsic::sqrt:
4101 case Intrinsic::canonicalize:
4102 case Intrinsic::copysign:
4103 case Intrinsic::arithmetic_fence:
4104 case Intrinsic::trunc:
4105 return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1);
4106 case Intrinsic::floor:
4107 case Intrinsic::ceil:
4108 case Intrinsic::rint:
4109 case Intrinsic::nearbyint:
4110 case Intrinsic::round:
4111 case Intrinsic::roundeven:
4112 // PPC_FP128 is a special case.
4113 if (V->getType()->isMultiUnitFPType())
4114 return false;
4115 return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1);
4116 case Intrinsic::fptrunc_round:
4117 // Requires knowing the value range.
4118 return false;
4119 case Intrinsic::minnum:
4120 case Intrinsic::maxnum:
4121 case Intrinsic::minimum:
4122 case Intrinsic::maximum:
4123 return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
4124 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
4125 case Intrinsic::log:
4126 case Intrinsic::log10:
4127 case Intrinsic::log2:
4128 // log(+inf) -> +inf
4129 // log([+-]0.0) -> -inf
4130 // log(-inf) -> nan
4131 // log(-x) -> nan
4132 // TODO: We lack API to check the == 0 case.
4133 return false;
4134 case Intrinsic::exp:
4135 case Intrinsic::exp2:
4136 case Intrinsic::pow:
4137 case Intrinsic::powi:
4138 case Intrinsic::fma:
4139 case Intrinsic::fmuladd:
4140 // These can return infinities on overflow cases, so it's hard to prove
4141 // anything about it.
4142 return false;
4143 default:
4144 break;
4145 }
4146 }
4147 }
4148
4149 // Try to handle fixed width vector constants.
4150 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4151 if (VFVTy && isa<Constant>(V)) {
4152 // For vectors, verify that each element is not infinity.
4153 unsigned NumElts = VFVTy->getNumElements();
4154 for (unsigned i = 0; i != NumElts; ++i) {
4155 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
4156 if (!Elt)
4157 return false;
4158 if (isa<UndefValue>(Elt))
4159 continue;
4160 auto *CElt = dyn_cast<ConstantFP>(Elt);
4161 if (!CElt || CElt->isInfinity())
4162 return false;
4163 }
4164 // All elements were confirmed non-infinity or undefined.
4165 return true;
4166 }
4167
4168 // Was not able to prove that V never contains infinity.
4169 return false;
4170}
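
Editor's note: the SIToFP/UIToFP case above reduces to integer arithmetic on the exponent; here is a minimal sketch of that check outside the analysis (demo name hypothetical, only LLVM's APFloat header assumed):

#include "llvm/ADT/APFloat.h"
#include <cassert>

static void demoSIToFPWidthCheck() {
  // sitofp i32 -> float: 31 magnitude bits remain after dropping the sign bit.
  int IntSize = 32 - 1;
  // The largest finite float has ilogb == 127, so every i32 converts finitely.
  const llvm::fltSemantics &Sem = llvm::APFloat::IEEEsingle();
  assert(llvm::ilogb(llvm::APFloat::getLargest(Sem)) >= IntSize);
  // An i130 source (IntSize == 129) would fail this check, since the cast
  // could then overflow to infinity.
}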
4171
4172bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
4173 unsigned Depth) {
4174 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
4175
4176 // If we're told that NaNs won't happen, assume they won't.
4177 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
4178 if (FPMathOp->hasNoNaNs())
4179 return true;
4180
4181 if (const auto *Arg = dyn_cast<Argument>(V)) {
4182 if ((Arg->getNoFPClass() & fcNan) == fcNan)
4183 return true;
4184 }
4185
4186 // TODO: Use fpclass like API for isKnown queries and distinguish snan from
4187 // qnan.
4188 if (const auto *CB = dyn_cast<CallBase>(V)) {
4189 FPClassTest Mask = CB->getRetNoFPClass();
4190 if ((Mask & fcNan) == fcNan)
4191 return true;
4192 }
4193
4194 // Handle scalar constants.
4195 if (auto *CFP = dyn_cast<ConstantFP>(V))
4196 return !CFP->isNaN();
4197
4198 if (Depth == MaxAnalysisRecursionDepth)
4199 return false;
4200
4201 if (auto *Inst = dyn_cast<Instruction>(V)) {
4202 switch (Inst->getOpcode()) {
4203 case Instruction::FAdd:
4204 case Instruction::FSub:
4205 // Adding positive and negative infinity produces NaN.
4206 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
4207 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
4208 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
4209 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
4210
4211 case Instruction::FMul:
4212 // Zero multiplied with infinity produces NaN.
4213 // FIXME: If neither side can be zero, fmul never produces NaN.
4214 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
4215 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
4216 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
4217 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
4218
4219 case Instruction::FDiv:
4220 case Instruction::FRem:
4221 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
4222 return false;
4223
4224 case Instruction::Select: {
4225 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
4226 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
4227 }
4228 case Instruction::SIToFP:
4229 case Instruction::UIToFP:
4230 return true;
4231 case Instruction::FPTrunc:
4232 case Instruction::FPExt:
4233 case Instruction::FNeg:
4234 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
4235 default:
4236 break;
4237 }
4238 }
4239
4240 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
4241 switch (II->getIntrinsicID()) {
4242 case Intrinsic::canonicalize:
4243 case Intrinsic::fabs:
4244 case Intrinsic::copysign:
4245 case Intrinsic::exp:
4246 case Intrinsic::exp2:
4247 case Intrinsic::floor:
4248 case Intrinsic::ceil:
4249 case Intrinsic::trunc:
4250 case Intrinsic::rint:
4251 case Intrinsic::nearbyint:
4252 case Intrinsic::round:
4253 case Intrinsic::roundeven:
4254 case Intrinsic::arithmetic_fence:
4255 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
4256 case Intrinsic::sqrt:
4257 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
4258 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
4259 case Intrinsic::minnum:
4260 case Intrinsic::maxnum:
4261 // If either operand is not NaN, the result is not NaN.
4262 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
4263 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
4264 default:
4265 return false;
4266 }
4267 }
4268
4269 // Try to handle fixed width vector constants
4270 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4271 if (VFVTy && isa<Constant>(V)) {
4272 // For vectors, verify that each element is not NaN.
4273 unsigned NumElts = VFVTy->getNumElements();
4274 for (unsigned i = 0; i != NumElts; ++i) {
4275 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
4276 if (!Elt)
4277 return false;
4278 if (isa<UndefValue>(Elt))
4279 continue;
4280 auto *CElt = dyn_cast<ConstantFP>(Elt);
4281 if (!CElt || CElt->isNaN())
4282 return false;
4283 }
4284 // All elements were confirmed not-NaN or undefined.
4285 return true;
4286 }
4287
4288 // Was not able to prove that V never contains NaN
4289 return false;
4290}
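
Editor's note: a hedged sketch (demo name hypothetical) of the scalar-constant fast path of isKnownNeverNaN:

#include "llvm/ADT/APFloat.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>

static void demoNeverNaN() {
  llvm::LLVMContext Ctx;
  llvm::Constant *One = llvm::ConstantFP::get(Ctx, llvm::APFloat(1.0));
  llvm::Constant *QNaN = llvm::ConstantFP::getQNaN(llvm::Type::getDoubleTy(Ctx));
  // Scalar constants take the early dyn_cast<ConstantFP> path above.
  assert(llvm::isKnownNeverNaN(One, /*TLI=*/nullptr));
  assert(!llvm::isKnownNeverNaN(QNaN, /*TLI=*/nullptr));
}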
4291
4292/// Return true if it's possible to assume IEEE treatment of input denormals in
4293/// \p F for a value of the given type \p Ty.
4294static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {
4295 Ty = Ty->getScalarType();
4296 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;
4297}
4298
4299bool KnownFPClass::isKnownNeverLogicalZero(const Function &F, Type *Ty) const {
4300 return isKnownNeverZero() &&
4301 (isKnownNeverSubnormal() || inputDenormalIsIEEE(F, Ty));
4302}
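
Editor's note: inputDenormalIsIEEE keys off the function's denormal mode, which is parsed from the "denormal-fp-math" attribute string; a small sketch of that parsing (demo name hypothetical, LLVM 17 headers assumed):

#include "llvm/ADT/FloatingPointMode.h"
#include <cassert>

static void demoDenormalModeParse() {
  // Under "preserve-sign" inputs, a subnormal operand may be treated as zero,
  // which is why isKnownNeverLogicalZero also demands never-subnormal.
  llvm::DenormalMode Mode =
      llvm::parseDenormalFPAttribute("preserve-sign,preserve-sign");
  assert(Mode.Input == llvm::DenormalMode::PreserveSign);
  assert(Mode != llvm::DenormalMode::getIEEE());
}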
4303
4304/// Returns a pair of values which, if passed to llvm.is.fpclass, gives the
4305/// same result as an fcmp with the given operands.
4306std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred,
4307 const Function &F,
4308 Value *LHS, Value *RHS,
4309 bool LookThroughSrc) {
4310 const APFloat *ConstRHS;
4311 if (!match(RHS, m_APFloat(ConstRHS)))
4312 return {nullptr, fcNone};
4313
4314 if (ConstRHS->isZero()) {
4315 // A compare against a zero constant maps exactly to fcZero only if input
4316 // denormals are not flushed.
4317 // TODO: Handle DAZ by expanding masks to cover subnormal cases.
4318 if (Pred != FCmpInst::FCMP_ORD && Pred != FCmpInst::FCMP_UNO &&
4319 !inputDenormalIsIEEE(F, LHS->getType()))
4320 return {nullptr, fcNone};
4321
4322 switch (Pred) {
4323 case FCmpInst::FCMP_OEQ: // Match x == 0.0
4324 return {LHS, fcZero};
4325 case FCmpInst::FCMP_UEQ: // Match isnan(x) || (x == 0.0)
4326 return {LHS, fcZero | fcNan};
4327 case FCmpInst::FCMP_UNE: // Match (x != 0.0)
4328 return {LHS, ~fcZero};
4329 case FCmpInst::FCMP_ONE: // Match !isnan(x) && x != 0.0
4330 return {LHS, ~fcNan & ~fcZero};
4331 case FCmpInst::FCMP_ORD:
4332 // Canonical form of ord/uno is with a zero. We could also handle
4333 // non-canonical other non-NaN constants or LHS == RHS.
4334 return {LHS, ~fcNan};
4335 case FCmpInst::FCMP_UNO:
4336 return {LHS, fcNan};
4337 case FCmpInst::FCMP_OGT: // x > 0
4338 return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf};
4339 case FCmpInst::FCMP_UGT: // isnan(x) || x > 0
4340 return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf | fcNan};
4341 case FCmpInst::FCMP_OGE: // x >= 0
4342 return {LHS, fcPositive | fcNegZero};
4343 case FCmpInst::FCMP_UGE: // isnan(x) || x >= 0
4344 return {LHS, fcPositive | fcNegZero | fcNan};
4345 case FCmpInst::FCMP_OLT: // x < 0
4346 return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf};
4347 case FCmpInst::FCMP_ULT: // isnan(x) || x < 0
4348 return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf | fcNan};
4349 case FCmpInst::FCMP_OLE: // x <= 0
4350 return {LHS, fcNegative | fcPosZero};
4351 case FCmpInst::FCMP_ULE: // isnan(x) || x <= 0
4352 return {LHS, fcNegative | fcPosZero | fcNan};
4353 default:
4354 break;
4355 }
4356
4357 return {nullptr, fcNone};
4358 }
4359
4360 Value *Src = LHS;
4361 const bool IsFabs = LookThroughSrc && match(LHS, m_FAbs(m_Value(Src)));
4362
4363 // Compute the test mask that would return true for the ordered comparisons.
4364 FPClassTest Mask;
4365
4366 if (ConstRHS->isInfinity()) {
4367 switch (Pred) {
4368 case FCmpInst::FCMP_OEQ:
4369 case FCmpInst::FCMP_UNE: {
4370 // Match __builtin_isinf patterns
4371 //
4372 // fcmp oeq x, +inf -> is_fpclass x, fcPosInf
4373 // fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf
4374 // fcmp oeq x, -inf -> is_fpclass x, fcNegInf
4375 // fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false
4376 //
4377 // fcmp une x, +inf -> is_fpclass x, ~fcPosInf
4378 // fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf
4379 // fcmp une x, -inf -> is_fpclass x, ~fcNegInf
4380 // fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true
4381
4382 if (ConstRHS->isNegative()) {
4383 Mask = fcNegInf;
4384 if (IsFabs)
4385 Mask = fcNone;
4386 } else {
4387 Mask = fcPosInf;
4388 if (IsFabs)
4389 Mask |= fcNegInf;
4390 }
4391
4392 break;
4393 }
4394 case FCmpInst::FCMP_ONE:
4395 case FCmpInst::FCMP_UEQ: {
4396 // Match __builtin_isinf patterns
4397 // fcmp one x, -inf -> is_fpclass x, ~fcNegInf & ~fcNan
4398 // fcmp one fabs(x), -inf -> is_fpclass x, ~fcNan
4399 // fcmp one x, +inf -> is_fpclass x, ~fcPosInf & ~fcNan
4400 // fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & ~fcNan
4401 //
4402 // fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan
4403 // fcmp ueq (fabs x), +inf -> is_fpclass x, fcInf|fcNan
4404 // fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan
4405 // fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan
4406 if (ConstRHS->isNegative()) {
4407 Mask = ~fcNegInf & ~fcNan;
4408 if (IsFabs)
4409 Mask = ~fcNan;
4410 } else {
4411 Mask = ~fcPosInf & ~fcNan;
4412 if (IsFabs)
4413 Mask &= ~fcNegInf;
4414 }
4415
4416 break;
4417 }
4418 case FCmpInst::FCMP_OLT:
4419 case FCmpInst::FCMP_UGE: {
4420 if (ConstRHS->isNegative()) // TODO
4421 return {nullptr, fcNone};
4422
4423 // fcmp olt fabs(x), +inf -> fcFinite
4424 // fcmp uge fabs(x), +inf -> ~fcFinite
4425 // fcmp olt x, +inf -> fcFinite|fcNegInf
4426 // fcmp uge x, +inf -> ~(fcFinite|fcNegInf)
4427 Mask = fcFinite;
4428 if (!IsFabs)
4429 Mask |= fcNegInf;
4430 break;
4431 }
4432 case FCmpInst::FCMP_OGE:
4433 case FCmpInst::FCMP_ULT: {
4434 if (ConstRHS->isNegative()) // TODO
4435 return {nullptr, fcNone};
4436
4437 // fcmp oge fabs(x), +inf -> fcInf
4438 // fcmp oge x, +inf -> fcPosInf
4439 // fcmp ult fabs(x), +inf -> ~fcInf
4440 // fcmp ult x, +inf -> ~fcPosInf
4441 Mask = fcPosInf;
4442 if (IsFabs)
4443 Mask |= fcNegInf;
4444 break;
4445 }
4446 default:
4447 return {nullptr, fcNone};
4448 }
4449 } else if (ConstRHS->isSmallestNormalized() && !ConstRHS->isNegative()) {
4450 // Match pattern that's used in __builtin_isnormal.
4451 switch (Pred) {
4452 case FCmpInst::FCMP_OLT:
4453 case FCmpInst::FCMP_UGE: {
4454 // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero
4455 // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero
4456 // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf
4457 // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero)
4458 Mask = fcZero | fcSubnormal;
4459 if (!IsFabs)
4460 Mask |= fcNegNormal | fcNegInf;
4461
4462 break;
4463 }
4464 case FCmpInst::FCMP_OGE:
4465 case FCmpInst::FCMP_ULT: {
4466 // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf
4467 // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal
4468 // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf)
4469 // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal)
4470 Mask = fcPosInf | fcPosNormal;
4471 if (IsFabs)
4472 Mask |= fcNegInf | fcNegNormal;
4473 break;
4474 }
4475 default:
4476 return {nullptr, fcNone};
4477 }
4478 } else
4479 return {nullptr, fcNone};
4480
4481 // Invert the comparison for the unordered cases.
4482 if (FCmpInst::isUnordered(Pred))
4483 Mask = ~Mask;
4484
4485 return {Src, Mask};
4486}
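
Editor's note: a hedged end-to-end sketch of fcmpToClassTest on the x == 0.0 pattern. The throwaway module and function exist only because the helper consults the function's denormal mode, which defaults to IEEE when no attribute is set; all demo names are hypothetical:

#include "llvm/ADT/APFloat.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include <cassert>

static void demoFcmpToClassTest() {
  llvm::LLVMContext Ctx;
  llvm::Module M("demo", Ctx);
  auto *FltTy = llvm::Type::getFloatTy(Ctx);
  auto *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), {FltTy},
                                      /*isVarArg=*/false);
  llvm::Function *F =
      llvm::Function::Create(FTy, llvm::Function::ExternalLinkage, "f", M);
  llvm::Value *X = F->getArg(0);
  llvm::Value *Zero = llvm::ConstantFP::get(Ctx, llvm::APFloat(0.0f));
  // With default (IEEE) input denormals, "x == 0.0" is is_fpclass(x, fcZero).
  auto [Src, Mask] = llvm::fcmpToClassTest(llvm::FCmpInst::FCMP_OEQ, *F, X,
                                           Zero, /*LookThroughSrc=*/true);
  assert(Src == X && Mask == llvm::fcZero);
}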
4487
4488static FPClassTest computeKnownFPClassFromAssumes(const Value *V,
4489 const Query &Q) {
4490 FPClassTest KnownFromAssume = fcAllFlags;
4491
4492 // Try to restrict the floating-point classes based on information from
4493 // assumptions.
4494 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
4495 if (!AssumeVH)
4496 continue;
4497 CallInst *I = cast<CallInst>(AssumeVH);
4498 const Function *F = I->getFunction();
4499
4500 assert(F == Q.CxtI->getParent()->getParent() &&
4501        "Got assumption for the wrong function!");
4502 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
4503        "must be an assume intrinsic");
4504
4505 if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
4506 continue;
4507
4508 CmpInst::Predicate Pred;
4509 Value *LHS, *RHS;
4510 uint64_t ClassVal = 0;
4511 if (match(I->getArgOperand(0), m_FCmp(Pred, m_Value(LHS), m_Value(RHS)))) {
4512 auto [TestedValue, TestedMask] =
4513 fcmpToClassTest(Pred, *F, LHS, RHS, true);
4514 // First see if we can fold fabs/fneg into the test.
4515 if (TestedValue == V)
4516 KnownFromAssume &= TestedMask;
4517 else {
4518 // Try again without the lookthrough if we found a different source
4519 // value.
4520 auto [TestedValue, TestedMask] =
4521 fcmpToClassTest(Pred, *F, LHS, RHS, false);
4522 if (TestedValue == V)
4523 KnownFromAssume &= TestedMask;
4524 }
4525 } else if (match(I->getArgOperand(0),
4526 m_Intrinsic<Intrinsic::is_fpclass>(
4527 m_Value(LHS), m_ConstantInt(ClassVal)))) {
4528 KnownFromAssume &= static_cast<FPClassTest>(ClassVal);
4529 }
4530 }
4531
4532 return KnownFromAssume;
4533}
4534
4535void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4536 FPClassTest InterestedClasses, KnownFPClass &Known,
4537 unsigned Depth, const Query &Q,
4538 const TargetLibraryInfo *TLI);
4539
4540static void computeKnownFPClass(const Value *V, KnownFPClass &Known,
4541 FPClassTest InterestedClasses, unsigned Depth,
4542 const Query &Q, const TargetLibraryInfo *TLI) {
4543 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
4544 APInt DemandedElts =
4545 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
4546 computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q, TLI);
4547}
4548
4549static void computeKnownFPClassForFPTrunc(const Operator *Op,
4550 const APInt &DemandedElts,
4551 FPClassTest InterestedClasses,
4552 KnownFPClass &Known, unsigned Depth,
4553 const Query &Q,
4554 const TargetLibraryInfo *TLI) {
4555 if ((InterestedClasses & fcNan) == fcNone)
4556 return;
4557
4558 KnownFPClass KnownSrc;
4559 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
4560 KnownSrc, Depth + 1, Q, TLI);
4561 if (KnownSrc.isKnownNeverNaN())
4562 Known.knownNot(fcNan);
4563
4564 // Infinity needs a range check.
4565 // TODO: Sign bit should be preserved
4566}
4567
4568// TODO: Merge implementations of isKnownNeverNaN, isKnownNeverInfinity,
4569// CannotBeNegativeZero, cannotBeOrderedLessThanZero into here.
4570
4571void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4572 FPClassTest InterestedClasses, KnownFPClass &Known,
4573 unsigned Depth, const Query &Q,
4574 const TargetLibraryInfo *TLI) {
4575 assert(Known.isUnknown() && "should not be called with known information");
4576
4577 if (!DemandedElts) {
4578 // No demanded elts, better to assume we don't know anything.
4579 Known.resetAll();
4580 return;
4581 }
4582
4583 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
4584
4585 if (auto *CFP = dyn_cast_or_null<ConstantFP>(V)) {
4586 Known.KnownFPClasses = CFP->getValueAPF().classify();
4587 Known.SignBit = CFP->isNegative();
4588 return;
4589 }
4590
4591 // Try to handle fixed width vector constants
4592 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4593 const Constant *CV = dyn_cast<Constant>(V);
4594 if (VFVTy && CV) {
4595 Known.KnownFPClasses = fcNone;
4596
4597 // For vectors, merge the known FP class of every constant element.
4598 unsigned NumElts = VFVTy->getNumElements();
4599 for (unsigned i = 0; i != NumElts; ++i) {
4600 Constant *Elt = CV->getAggregateElement(i);
4601 if (!Elt) {
4602 Known = KnownFPClass();
4603 return;
4604 }
4605 if (isa<UndefValue>(Elt))
4606 continue;
4607 auto *CElt = dyn_cast<ConstantFP>(Elt);
4608 if (!CElt) {
4609 Known = KnownFPClass();
4610 return;
4611 }
4612
4613 KnownFPClass KnownElt{CElt->getValueAPF().classify(), CElt->isNegative()};
4614 Known |= KnownElt;
4615 }
4616
4617 return;
4618 }
4619
4620 FPClassTest KnownNotFromFlags = fcNone;
4621 if (const auto *CB = dyn_cast<CallBase>(V))
4622 KnownNotFromFlags |= CB->getRetNoFPClass();
4623 else if (const auto *Arg = dyn_cast<Argument>(V))
4624 KnownNotFromFlags |= Arg->getNoFPClass();
4625
4626 const Operator *Op = dyn_cast<Operator>(V);
4627 if (const FPMathOperator *FPOp = dyn_cast_or_null<FPMathOperator>(Op)) {
4628 if (FPOp->hasNoNaNs())
4629 KnownNotFromFlags |= fcNan;
4630 if (FPOp->hasNoInfs())
4631 KnownNotFromFlags |= fcInf;
4632 }
4633
4634 if (Q.AC) {
4635 FPClassTest AssumedClasses = computeKnownFPClassFromAssumes(V, Q);
4636 KnownNotFromFlags |= ~AssumedClasses;
4637 }
4638
4639 // We no longer need to find out about these bits from inputs if we can
4640 // assume this from flags/attributes.
4641 InterestedClasses &= ~KnownNotFromFlags;
4642
4643 auto ClearClassesFromFlags = make_scope_exit([=, &Known] {
4644 Known.knownNot(KnownNotFromFlags);
4645 });
4646
4647 if (!Op)
4648 return;
4649
4650 // All recursive calls that increase depth must come after this.
4651 if (Depth == MaxAnalysisRecursionDepth)
4652 return;
4653
4654 const unsigned Opc = Op->getOpcode();
4655 switch (Opc) {
4656 case Instruction::FNeg: {
4657 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
4658 Known, Depth + 1, Q, TLI);
4659 Known.fneg();
4660 break;
4661 }
4662 case Instruction::Select: {
4663 KnownFPClass Known2;
4664 computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedClasses,
4665 Known, Depth + 1, Q, TLI);
4666 computeKnownFPClass(Op->getOperand(2), DemandedElts, InterestedClasses,
4667 Known2, Depth + 1, Q, TLI);
4668 Known |= Known2;
4669 break;
4670 }
4671 case Instruction::Call: {
4672 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op)) {
4673 const Intrinsic::ID IID = II->getIntrinsicID();
4674 switch (IID) {
4675 case Intrinsic::fabs: {
4676 if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) {
4677 // If we only care about the sign bit we don't need to inspect the
4678 // operand.
4679 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4680 InterestedClasses, Known, Depth + 1, Q, TLI);
4681 }
4682
4683 Known.fabs();
4684 break;
4685 }
4686 case Intrinsic::copysign: {
4687 KnownFPClass KnownSign;
4688
4689 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4690 InterestedClasses, Known, Depth + 1, Q, TLI);
4691 computeKnownFPClass(II->getArgOperand(1), DemandedElts,
4692 InterestedClasses, KnownSign, Depth + 1, Q, TLI);
4693 Known.copysign(KnownSign);
4694 break;
4695 }
4696 case Intrinsic::fma:
4697 case Intrinsic::fmuladd: {
4698 if ((InterestedClasses & fcNegative) == fcNone)
4699 break;
4700
4701 if (II->getArgOperand(0) != II->getArgOperand(1))
4702 break;
4703
4704 // The multiply cannot be -0 and therefore the add can't be -0
4705 Known.knownNot(fcNegZero);
4706
4707 // x * x + y is non-negative if y is non-negative.
4708 KnownFPClass KnownAddend;
4709 computeKnownFPClass(II->getArgOperand(2), DemandedElts,
4710 InterestedClasses, KnownAddend, Depth + 1, Q, TLI);
4711
4712 // TODO: Known sign bit with no nans
4713 if (KnownAddend.cannotBeOrderedLessThanZero())
4714 Known.knownNot(fcNegative);
4715 break;
4716 }
4717 case Intrinsic::sin:
4718 case Intrinsic::cos: {
4719 // Return NaN on infinite inputs.
4720 KnownFPClass KnownSrc;
4721 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4722 InterestedClasses, KnownSrc, Depth + 1, Q, TLI);
4723 Known.knownNot(fcInf);
4724 if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
4725 Known.knownNot(fcNan);
4726 break;
4727 }
4728
4729 case Intrinsic::maxnum:
4730 case Intrinsic::minnum:
4731 case Intrinsic::minimum:
4732 case Intrinsic::maximum: {
4733 KnownFPClass KnownLHS, KnownRHS;
4734 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4735 InterestedClasses, KnownLHS, Depth + 1, Q, TLI);
4736 computeKnownFPClass(II->getArgOperand(1), DemandedElts,
4737 InterestedClasses, KnownRHS, Depth + 1, Q, TLI);
4738
4739 bool NeverNaN =
4740 KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
4741 Known = KnownLHS | KnownRHS;
4742
4743 // If either operand is not NaN, the result is not NaN.
4744 if (NeverNaN && (IID == Intrinsic::minnum || IID == Intrinsic::maxnum))
4745 Known.knownNot(fcNan);
4746
4747 if (IID == Intrinsic::maxnum) {
4748 // If at least one operand is known to be positive, the result must be
4749 // positive.
4750 if ((KnownLHS.cannotBeOrderedLessThanZero() &&
4751 KnownLHS.isKnownNeverNaN()) ||
4752 (KnownRHS.cannotBeOrderedLessThanZero() &&
4753 KnownRHS.isKnownNeverNaN()))
4754 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4755 } else if (IID == Intrinsic::maximum) {
4756 // If at least one operand is known to be positive, the result must be
4757 // positive.
4758 if (KnownLHS.cannotBeOrderedLessThanZero() ||
4759 KnownRHS.cannotBeOrderedLessThanZero())
4760 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4761 } else if (IID == Intrinsic::minnum) {
4762 // If at least one operand is known to be negative, the result must be
4763 // negative.
4764 if ((KnownLHS.cannotBeOrderedGreaterThanZero() &&
4765 KnownLHS.isKnownNeverNaN()) ||
4766 (KnownRHS.cannotBeOrderedGreaterThanZero() &&
4767 KnownRHS.isKnownNeverNaN()))
4768 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
4769 } else {
4770 // If at least one operand is known to be negative, the result must be
4771 // negative.
4772 if (KnownLHS.cannotBeOrderedGreaterThanZero() ||
4773 KnownRHS.cannotBeOrderedGreaterThanZero())
4774 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
4775 }
4776
4777 // Fixup zero handling if denormals could be returned as a zero.
4778 //
4779 // As there's no spec for denormal flushing, be conservative with the
4780 // treatment of denormals that could be flushed to zero. For older
4781 // subtargets on AMDGPU the min/max instructions would not flush the
4782 // output and return the original value.
4783 //
4784 // TODO: This could be refined based on the sign
4785 if ((Known.KnownFPClasses & fcZero) != fcNone &&
4786 !Known.isKnownNeverSubnormal()) {
4787 const Function *Parent = II->getFunction();
4788 DenormalMode Mode = Parent->getDenormalMode(
4789 II->getType()->getScalarType()->getFltSemantics());
4790 if (Mode != DenormalMode::getIEEE())
4791 Known.KnownFPClasses |= fcZero;
4792 }
4793
4794 break;
4795 }
4796 case Intrinsic::canonicalize: {
4797 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4798 InterestedClasses, Known, Depth + 1, Q, TLI);
4799 // Canonicalize is guaranteed to quiet signaling nans.
4800 Known.knownNot(fcSNan);
4801
4802 // If the parent function flushes denormals, the canonical output cannot
4803 // be a denormal.
4804 const fltSemantics &FPType = II->getType()->getScalarType()->getFltSemantics();
4805 DenormalMode DenormMode = II->getFunction()->getDenormalMode(FPType);
4806 if (DenormMode.inputsAreZero() || DenormMode.outputsAreZero())
4807 Known.knownNot(fcSubnormal);
4808
4809 if (DenormMode.Input == DenormalMode::PositiveZero ||
4810 (DenormMode.Output == DenormalMode::PositiveZero &&
4811 DenormMode.Input == DenormalMode::IEEE))
4812 Known.knownNot(fcNegZero);
4813
4814 break;
4815 }
4816 case Intrinsic::trunc: {
4817 KnownFPClass KnownSrc;
4818
4819 FPClassTest InterestedSrcs = InterestedClasses;
4820 if (InterestedClasses & fcZero)
4821 InterestedSrcs |= fcNormal | fcSubnormal;
4822
4823 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
4824 KnownSrc, Depth + 1, Q, TLI);
4825
4826 // Integer results cannot be subnormal.
4827 Known.knownNot(fcSubnormal);
4828
4829 // trunc passes through infinities.
4830 if (KnownSrc.isKnownNeverPosInfinity())
4831 Known.knownNot(fcPosInf);
4832 if (KnownSrc.isKnownNeverNegInfinity())
4833 Known.knownNot(fcNegInf);
4834
4835 // Non-constrained intrinsics do not guarantee signaling nan quieting.
4836 if (KnownSrc.isKnownNeverNaN())
4837 Known.knownNot(fcNan);
4838
4839 if (KnownSrc.isKnownNever(fcPosNormal))
4840 Known.knownNot(fcPosNormal);
4841
4842 if (KnownSrc.isKnownNever(fcNegNormal))
4843 Known.knownNot(fcNegNormal);
4844
4845 if (KnownSrc.isKnownNever(fcPosZero | fcPosSubnormal | fcPosNormal))
4846 Known.knownNot(fcPosZero);
4847
4848 if (KnownSrc.isKnownNever(fcNegZero | fcNegSubnormal | fcNegNormal))
4849 Known.knownNot(fcNegZero);
4850
4851 // Sign should be preserved
4852 Known.SignBit = KnownSrc.SignBit;
4853 break;
4854 }
4855 case Intrinsic::exp:
4856 case Intrinsic::exp2: {
4857 Known.knownNot(fcNegative);
4858 if ((InterestedClasses & fcNan) == fcNone)
4859 break;
4860
4861 KnownFPClass KnownSrc;
4862 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4863 InterestedClasses, KnownSrc, Depth + 1, Q, TLI);
4864 if (KnownSrc.isKnownNeverNaN()) {
4865 Known.knownNot(fcNan);
4866 Known.SignBit = false;
4867 }
4868
4869 break;
4870 }
4871 case Intrinsic::fptrunc_round: {
4872 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses,
4873 Known, Depth, Q, TLI);
4874 break;
4875 }
4876 case Intrinsic::log:
4877 case Intrinsic::log10:
4878 case Intrinsic::log2:
4879 case Intrinsic::experimental_constrained_log:
4880 case Intrinsic::experimental_constrained_log10:
4881 case Intrinsic::experimental_constrained_log2: {
4882 // log(+inf) -> +inf
4883 // log([+-]0.0) -> -inf
4884 // log(-inf) -> nan
4885 // log(-x) -> nan
4886 if ((InterestedClasses & (fcNan | fcInf)) == fcNone)
4887 break;
4888
4889 FPClassTest InterestedSrcs = InterestedClasses;
4890 if ((InterestedClasses & fcNegInf) != fcNone)
4891 InterestedSrcs |= fcZero | fcSubnormal;
4892 if ((InterestedClasses & fcNan) != fcNone)
4893 InterestedSrcs |= fcNan | (fcNegative & ~fcNan);
4894
4895 KnownFPClass KnownSrc;
4896 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
4897 KnownSrc, Depth + 1, Q, TLI);
4898
4899 if (KnownSrc.isKnownNeverPosInfinity())
4900 Known.knownNot(fcPosInf);
4901
4902 if (KnownSrc.isKnownNeverNaN() &&
4903 KnownSrc.cannotBeOrderedLessThanZero())
4904 Known.knownNot(fcNan);
4905
4906 if (KnownSrc.isKnownNeverLogicalZero(*II->getFunction(), II->getType()))
4907 Known.knownNot(fcNegInf);
4908
4909 break;
4910 }
4911 case Intrinsic::powi: {
4912 if ((InterestedClasses & fcNegative) == fcNone)
4913 break;
4914
4915 const Value *Exp = II->getArgOperand(1);
4916 unsigned BitWidth =
4917 Exp->getType()->getScalarType()->getIntegerBitWidth();
4918 KnownBits ExponentKnownBits(BitWidth);
4919 computeKnownBits(Exp, DemandedElts, ExponentKnownBits, Depth + 1, Q);
4920
4921 if (ExponentKnownBits.Zero[0]) { // Is even
4922 Known.knownNot(fcNegative);
4923 break;
4924 }
4925
4926 // Given that exp is an integer, here are the
4927 // ways that pow can return a negative value:
4928 //
4929 // pow(x, exp) --> negative if exp is odd and x is negative.
4930 // pow(-0, exp) --> -inf if exp is negative odd.
4931 // pow(-0, exp) --> -0 if exp is positive odd.
4932 // pow(-inf, exp) --> -0 if exp is negative odd.
4933 // pow(-inf, exp) --> -inf if exp is positive odd.
4934 KnownFPClass KnownSrc;
4935 computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative,
4936 KnownSrc, Depth + 1, Q, TLI);
4937 if (KnownSrc.isKnownNever(fcNegative))
4938 Known.knownNot(fcNegative);
4939 break;
4940 }
4941 case Intrinsic::arithmetic_fence: {
4942 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
4943 InterestedClasses, Known, Depth + 1, Q, TLI);
4944 break;
4945 }
4946 case Intrinsic::experimental_constrained_sitofp:
4947 case Intrinsic::experimental_constrained_uitofp:
4948 // Cannot produce nan
4949 Known.knownNot(fcNan);
4950
4951 // sitofp and uitofp turn into +0.0 for zero.
4952 Known.knownNot(fcNegZero);
4953
4954 // Integers cannot be subnormal
4955 Known.knownNot(fcSubnormal);
4956
4957 if (IID == Intrinsic::experimental_constrained_uitofp)
4958 Known.signBitMustBeZero();
4959
4960 // TODO: Copy inf handling from instructions
4961 break;
4962 default:
4963 break;
4964 }
4965 }
4966
4967 break;
4968 }
4969 case Instruction::FAdd:
4970 case Instruction::FSub: {
4971 KnownFPClass KnownLHS, KnownRHS;
4972 computeKnownFPClass(Op->getOperand(1), DemandedElts, fcNan | fcInf,
4973 KnownRHS, Depth + 1, Q, TLI);
4974 if (KnownRHS.isKnownNeverNaN()) {
4975 // RHS is canonically cheaper to compute. Skip inspecting the LHS if
4976 // there's no point.
4977 computeKnownFPClass(Op->getOperand(0), DemandedElts, fcNan | fcInf,
4978 KnownLHS, Depth + 1, Q, TLI);
4979 // Adding positive and negative infinity produces NaN.
4980 // TODO: Check sign of infinities.
4981 if (KnownLHS.isKnownNeverNaN() &&
4982 (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity()))
4983 Known.knownNot(fcNan);
4984 }
4985
4986 break;
4987 }
4988 case Instruction::FMul: {
4989 // X * X is always non-negative or a NaN.
4990 if (Op->getOperand(0) == Op->getOperand(1))
4991 Known.knownNot(fcNegative);
4992
4993 if ((InterestedClasses & fcNan) != fcNan)
4994 break;
4995
4996 KnownFPClass KnownLHS, KnownRHS;
4997 computeKnownFPClass(Op->getOperand(1), DemandedElts,
4998 fcNan | fcInf | fcZero | fcSubnormal, KnownRHS,
4999 Depth + 1, Q, TLI);
5000 if (KnownRHS.isKnownNeverNaN() &&
5001 (KnownRHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverZero())) {
5002 computeKnownFPClass(Op->getOperand(0), DemandedElts,
5003 fcNan | fcInf | fcZero, KnownLHS, Depth + 1, Q, TLI);
5004 if (!KnownLHS.isKnownNeverNaN())
5005 break;
5006
5007 const Function *F = cast<Instruction>(Op)->getFunction();
5008
5009 // If neither side can be zero (or nan) fmul never produces NaN.
5010 // TODO: Check operand combinations.
5011 // e.g. fmul nofpclass(inf nan zero), nofpclass(nan) -> nofpclass(nan)
5012 if ((KnownLHS.isKnownNeverInfinity() ||
5013 KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) &&
5014 (KnownRHS.isKnownNeverInfinity() ||
5015 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))
5016 Known.knownNot(fcNan);
5017 }
5018
5019 break;
5020 }
5021 case Instruction::FDiv:
5022 case Instruction::FRem: {
5023 if (Op->getOperand(0) == Op->getOperand(1)) {
5024 // TODO: Could filter out snan if we inspect the operand
5025 if (Op->getOpcode() == Instruction::FDiv) {
5026 // X / X is always exactly 1.0 or a NaN.
5027 Known.KnownFPClasses = fcNan | fcPosNormal;
5028 } else {
5029 // X % X is always exactly [+-]0.0 or a NaN.
5030 Known.KnownFPClasses = fcNan | fcZero;
5031 }
5032
5033 break;
5034 }
5035
5036 const bool WantNan = (InterestedClasses & fcNan) != fcNone;
5037 const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
5038 const bool WantPositive =
5039 Opc == Instruction::FRem && (InterestedClasses & fcPositive) != fcNone;
5040 if (!WantNan && !WantNegative && !WantPositive)
5041 break;
5042
5043 KnownFPClass KnownLHS, KnownRHS;
5044
5045 computeKnownFPClass(Op->getOperand(1), DemandedElts,
5046 fcNan | fcInf | fcZero | fcNegative, KnownRHS,
5047 Depth + 1, Q, TLI);
5048
5049 bool KnowSomethingUseful =
5050 KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);
5051
5052 if (KnowSomethingUseful || WantPositive) {
5053 const FPClassTest InterestedLHS =
5054 WantPositive ? fcAllFlags
5055 : fcNan | fcInf | fcZero | fcSubnormal | fcNegative;
5056
5057 computeKnownFPClass(Op->getOperand(0), DemandedElts,
5058 InterestedClasses & InterestedLHS, KnownLHS,
5059 Depth + 1, Q, TLI);
5060 }
5061
5062 const Function *F = cast<Instruction>(Op)->getFunction();
5063
5064 if (Op->getOpcode() == Instruction::FDiv) {
5065 // Only 0/0, Inf/Inf produce NaN.
5066 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5067 (KnownLHS.isKnownNeverInfinity() ||
5068 KnownRHS.isKnownNeverInfinity()) &&
5069 (KnownLHS.isKnownNeverLogicalZero(*F, Op->getType()) ||
5070 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType()))) {
5071 Known.knownNot(fcNan);
5072 }
5073
5074 // X / -0.0 is -Inf (or NaN).
5075 // If both operands cannot be negative, the quotient cannot be negative.
5076 if (KnownLHS.isKnownNever(fcNegative) && KnownRHS.isKnownNever(fcNegative))
5077 Known.knownNot(fcNegative);
5078 } else {
5079 // Inf REM x and x REM 0 produce NaN.
5080 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5081 KnownLHS.isKnownNeverInfinity() &&
5082 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())) {
5083 Known.knownNot(fcNan);
5084 }
5085
5086 // The sign for frem is the same as the first operand.
5087 if (KnownLHS.isKnownNever(fcNegative))
5088 Known.knownNot(fcNegative);
5089 if (KnownLHS.isKnownNever(fcPositive))
5090 Known.knownNot(fcPositive);
5091 }
5092
5093 break;
5094 }
5095 case Instruction::FPExt: {
5096 // Infinity, nan and zero propagate from source.
5097 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
5098 Known, Depth + 1, Q, TLI);
5099
5100 const fltSemantics &DstTy =
5101 Op->getType()->getScalarType()->getFltSemantics();
5102 const fltSemantics &SrcTy =
5103 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5104
5105 // All subnormal inputs should be in the normal range in the result type.
5106 if (APFloat::isRepresentableAsNormalIn(SrcTy, DstTy))
5107 Known.knownNot(fcSubnormal);
5108
5109 // Sign bit of a nan isn't guaranteed.
5110 if (!Known.isKnownNeverNaN())
5111 Known.SignBit = std::nullopt;
5112 break;
5113 }
5114 case Instruction::FPTrunc: {
5115 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
5116 Depth, Q, TLI);
5117 break;
5118 }
5119 case Instruction::SIToFP:
5120 case Instruction::UIToFP: {
5121 // Cannot produce nan
5122 Known.knownNot(fcNan);
5123
5124 // Integers cannot be subnormal
5125 Known.knownNot(fcSubnormal);
5126
5127 // sitofp and uitofp turn into +0.0 for zero.
5128 Known.knownNot(fcNegZero);
5129 if (Op->getOpcode() == Instruction::UIToFP)
5130 Known.signBitMustBeZero();
5131
5132 if (InterestedClasses & fcInf) {
5133 // Get width of largest magnitude integer (remove a bit if signed).
5134 // This still works for a signed minimum value because the largest FP
5135 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
5136 int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits();
5137 if (Op->getOpcode() == Instruction::SIToFP)
5138 --IntSize;
5139
5140 // If the exponent of the largest finite FP value can hold the largest
5141 // integer, the result of the cast must be finite.
5142 Type *FPTy = Op->getType()->getScalarType();
5143 if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize)
5144 Known.knownNot(fcInf);
5145 }
5146
5147 break;
5148 }
5149 case Instruction::ExtractElement: {
5150 // Look through extract element. If the index is non-constant or
5151 // out-of-range demand all elements, otherwise just the extracted element.
5152 const Value *Vec = Op->getOperand(0);
5153 const Value *Idx = Op->getOperand(1);
5154 auto *CIdx = dyn_cast<ConstantInt>(Idx);
5155
5156 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
5157 unsigned NumElts = VecTy->getNumElements();
5158 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
5159 if (CIdx && CIdx->getValue().ult(NumElts))
5160 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
5161 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
5162 Depth + 1, Q, TLI);
5163 }
5164
5165 break;
5166 }
5167 case Instruction::InsertElement: {
5168 if (isa<ScalableVectorType>(Op->getType()))
5169 return;
5170
5171 const Value *Vec = Op->getOperand(0);
5172 const Value *Elt = Op->getOperand(1);
5173 auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2));
5174 // Early out if the index is non-constant or out-of-range.
5175 unsigned NumElts = DemandedElts.getBitWidth();
5176 if (!CIdx || CIdx->getValue().uge(NumElts))
5177 return;
5178
5179 unsigned EltIdx = CIdx->getZExtValue();
5180 // Do we demand the inserted element?
5181 if (DemandedElts[EltIdx]) {
5182 computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q, TLI);
5183 // If we don't know any bits, early out.
5184 if (Known.isUnknown())
5185 break;
5186 } else {
5187 Known.KnownFPClasses = fcNone;
5188 }
5189
5190 // We don't need the base vector element that has been inserted.
5191 APInt DemandedVecElts = DemandedElts;
5192 DemandedVecElts.clearBit(EltIdx);
5193 if (!!DemandedVecElts) {
5194 KnownFPClass Known2;
5195 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
5196 Depth + 1, Q, TLI);
5197 Known |= Known2;
5198 }
5199
5200 break;
5201 }
5202 case Instruction::ShuffleVector: {
5203 // For undef elements, we don't know anything about the common state of
5204 // the shuffle result.
5205 APInt DemandedLHS, DemandedRHS;
5206 auto *Shuf = dyn_cast<ShuffleVectorInst>(Op);
5207 if (!Shuf || !getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
5208 return;
5209
5210 if (!!DemandedLHS) {
5211 const Value *LHS = Shuf->getOperand(0);
5212 computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
5213 Depth + 1, Q, TLI);
5214
5215 // If we don't know any bits, early out.
5216 if (Known.isUnknown())
5217 break;
5218 } else {
5219 Known.KnownFPClasses = fcNone;
5220 }
5221
5222 if (!!DemandedRHS) {
5223 KnownFPClass Known2;
5224 const Value *RHS = Shuf->getOperand(1);
5225 computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
5226 Depth + 1, Q, TLI);
5227 Known |= Known2;
5228 }
5229
5230 break;
5231 }
5232 case Instruction::ExtractValue: {
5233 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
5234 Known, Depth + 1, Q, TLI);
5235 break;
5236 }
5237 default:
5238 break;
5239 }
5240}
5241
5242KnownFPClass llvm::computeKnownFPClass(
5243 const Value *V, const APInt &DemandedElts, const DataLayout &DL,
5244 FPClassTest InterestedClasses, unsigned Depth, const TargetLibraryInfo *TLI,
5245 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
5246 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
5247 KnownFPClass KnownClasses;
5248 ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth,
5249 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE),
5250 TLI);
5251 return KnownClasses;
5252}
5253
5254KnownFPClass
5255llvm::computeKnownFPClass(const Value *V, const DataLayout &DL,
5256 FPClassTest InterestedClasses, unsigned Depth,
5257 const TargetLibraryInfo *TLI, AssumptionCache *AC,
5258 const Instruction *CxtI, const DominatorTree *DT,
5259 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
5260 KnownFPClass Known;
5261 ::computeKnownFPClass(V, Known, InterestedClasses, Depth,
5262 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE),
5263 TLI);
5264 return Known;
5265}
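
Editor's note: a hedged usage sketch of the DataLayout-based wrapper above; the demo name and the empty datalayout string are illustrative only, the analysis contexts (AC, CxtI, DT, ORE) may all stay at their null defaults:

#include "llvm/ADT/APFloat.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

static void demoKnownFPClass() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("");
  llvm::Constant *NegOne = llvm::ConstantFP::get(Ctx, llvm::APFloat(-1.0));
  llvm::KnownFPClass Known = llvm::computeKnownFPClass(
      NegOne, DL, llvm::fcAllFlags, /*Depth=*/0, /*TLI=*/nullptr);
  // A scalar ConstantFP classifies exactly: -1.0 is a negative normal value.
  assert(Known.KnownFPClasses == llvm::fcNegNormal);
  assert(Known.SignBit && *Known.SignBit);
}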
5266
5267Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
5268
5269 // All byte-wide stores are splatable, even of arbitrary variables.
5270 if (V->getType()->isIntegerTy(8))
5271 return V;
5272
5273 LLVMContext &Ctx = V->getContext();
5274
5275 // Undefs don't care.
5276 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
5277 if (isa<UndefValue>(V))
5278 return UndefInt8;
5279
5280 // Return Undef for zero-sized type.
5281 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
5282 return UndefInt8;
5283
5284 Constant *C = dyn_cast<Constant>(V);
5285 if (!C) {
5286 // Conceptually, we could handle things like:
5287 // %a = zext i8 %X to i16
5288 // %b = shl i16 %a, 8
5289 // %c = or i16 %a, %b
5290 // but until there is an example that actually needs this, it doesn't seem
5291 // worth worrying about.
5292 return nullptr;
5293 }
5294
5295 // Handle 'null' ConstantAggregateZero etc.
5296 if (C->isNullValue())
5297 return Constant::getNullValue(Type::getInt8Ty(Ctx));
5298
5299 // Constant floating-point values can be handled as integer values if the
5300 // corresponding integer value is "byteable". An important case is 0.0.
5301 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
5302 Type *Ty = nullptr;
5303 if (CFP->getType()->isHalfTy())
5304 Ty = Type::getInt16Ty(Ctx);
5305 else if (CFP->getType()->isFloatTy())
5306 Ty = Type::getInt32Ty(Ctx);
5307 else if (CFP->getType()->isDoubleTy())
5308 Ty = Type::getInt64Ty(Ctx);
5309 // Don't handle long double formats, which have strange constraints.
5310 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
5311 : nullptr;
5312 }
5313
5314 // We can handle constant integers whose width is a multiple of 8 bits.
5315 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
5316 if (CI->getBitWidth() % 8 == 0) {
5317 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
5318 if (!CI->getValue().isSplat(8))
5319 return nullptr;
5320 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
5321 }
5322 }
5323
5324 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
5325 if (CE->getOpcode() == Instruction::IntToPtr) {
5326 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
5327 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
5328 return isBytewiseValue(
5329 ConstantExpr::getIntegerCast(CE->getOperand(0),
5330 Type::getIntNTy(Ctx, BitWidth), false),
5331 DL);
5332 }
5333 }
5334 }
5335
5336 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
5337 if (LHS == RHS)
5338 return LHS;
5339 if (!LHS || !RHS)
5340 return nullptr;
5341 if (LHS == UndefInt8)
5342 return RHS;
5343 if (RHS == UndefInt8)
5344 return LHS;
5345 return nullptr;
5346 };
5347
5348 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
5349 Value *Val = UndefInt8;
5350 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
5351 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
5352 return nullptr;
5353 return Val;
5354 }
5355
5356 if (isa<ConstantAggregate>(C)) {
5357 Value *Val = UndefInt8;
5358 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
5359 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
5360 return nullptr;
5361 return Val;
5362 }
5363
5364 // Don't try to handle the handful of other constants.
5365 return nullptr;
5366}
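
Editor's note: a hedged sketch (demo name hypothetical) of the integer byte-splat rule in isBytewiseValue:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Casting.h"
#include <cassert>

static void demoBytewiseValue() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("");
  auto *I32 = llvm::Type::getInt32Ty(Ctx);
  // 0xAAAAAAAA is the byte 0xAA repeated four times; 0x12345678 is not
  // byte-uniform, so only the first constant yields a splat byte.
  llvm::Value *Byte =
      llvm::isBytewiseValue(llvm::ConstantInt::get(I32, 0xAAAAAAAA), DL);
  assert(Byte && llvm::cast<llvm::ConstantInt>(Byte)->getZExtValue() == 0xAA);
  assert(!llvm::isBytewiseValue(llvm::ConstantInt::get(I32, 0x12345678), DL));
}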
5367
5368 // This is the recursive version of BuildSubAggregate. Idxs is the index path
5369 // within the nested struct From that we are currently looking at (the type it
5370 // indexes is IndexedType). IdxSkip is the number of leading indices from Idxs
5371 // that should be left out when inserting into the resulting struct. To is the
5372 // result struct built so far, which each new insertvalue instruction builds
5373 // on.
5374static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
5375 SmallVectorImpl<unsigned> &Idxs,
5376 unsigned IdxSkip,
5377 Instruction *InsertBefore) {
5378 StructType *STy = dyn_cast<StructType>(IndexedType);
5379 if (STy) {
5380 // Save the original To argument so we can modify it
5381 Value *OrigTo = To;
5382 // General case, the type indexed by Idxs is a struct
5383 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5384 // Process each struct element recursively
5385 Idxs.push_back(i);
5386 Value *PrevTo = To;
5387 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
5388 InsertBefore);
5389 Idxs.pop_back();
5390 if (!To) {
5391 // Couldn't find any inserted value for this index? Clean up.
5392 while (PrevTo != OrigTo) {
5393 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
5394 PrevTo = Del->getAggregateOperand();
5395 Del->eraseFromParent();
5396 }
5397 // Stop processing elements
5398 break;
5399 }
5400 }
5401 // If we successfully found a value for each of our subaggregates
5402 if (To)
5403 return To;
5404 }
5405 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
5406 // the struct's elements had a value that was inserted directly. In the latter
5407 // case, perhaps we can't determine each of the subelements individually, but
5408 // we might be able to find the complete struct somewhere.
5409
5410 // Find the value that is at that particular spot
5411 Value *V = FindInsertedValue(From, Idxs);
5412
5413 if (!V)
5414 return nullptr;
5415
5416 // Insert the value in the new (sub) aggregate
5417 return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp",
5418 InsertBefore);
5419}
5420
5421// This helper takes a nested struct and extracts a part of it (which is again a
5422// struct) into a new value. For example, given the struct:
5423// { a, { b, { c, d }, e } }
5424// and the indices "1, 1" this returns
5425// { c, d }.
5426//
5427// It does this by inserting an insertvalue for each element in the resulting
5428// struct, as opposed to just inserting a single struct. This will only work if
5429 // each of the elements of the substruct is known (i.e., inserted into From by
5430 // an insertvalue instruction somewhere).
5431//
5432// All inserted insertvalue instructions are inserted before InsertBefore
5433static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
5434 Instruction *InsertBefore) {
5435 assert(InsertBefore && "Must have someplace to insert!");
5436 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
5437 idx_range);
5438 Value *To = PoisonValue::get(IndexedType);
5439 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
5440 unsigned IdxSkip = Idxs.size();
5441
5442 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
5443}
5444
5445/// Given an aggregate and a sequence of indices, see if the scalar value
5446/// indexed is already around as a register, for example if it was inserted
5447/// directly into the aggregate.
5448///
5449/// If InsertBefore is not null, this function will duplicate (modified)
5450/// insertvalues when a part of a nested struct is extracted.
5451Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
5452 Instruction *InsertBefore) {
5453 // Nothing to index? Just return V then (this is useful at the end of our
5454 // recursion).
5455 if (idx_range.empty())
5456 return V;
5457 // We have indices, so V should have an indexable type.
5458 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
5459        "Not looking at a struct or array?");
5460 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
5461        "Invalid indices for type?");
5462
5463 if (Constant *C = dyn_cast<Constant>(V)) {
5464 C = C->getAggregateElement(idx_range[0]);
5465 if (!C) return nullptr;
5466 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
5467 }
5468
5469 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
5470 // Loop the indices for the insertvalue instruction in parallel with the
5471 // requested indices
5472 const unsigned *req_idx = idx_range.begin();
5473 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
5474 i != e; ++i, ++req_idx) {
5475 if (req_idx == idx_range.end()) {
5476 // We can't handle this without inserting insertvalues
5477 if (!InsertBefore)
5478 return nullptr;
5479
5480 // The requested index identifies a part of a nested aggregate. Handle
5481 // this specially. For example,
5482 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
5483 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
5484 // %C = extractvalue {i32, { i32, i32 } } %B, 1
5485 // This can be changed into
5486 // %A = insertvalue {i32, i32 } undef, i32 10, 0
5487 // %C = insertvalue {i32, i32 } %A, i32 11, 1
5488 // which allows the unused 0,0 element from the nested struct to be
5489 // removed.
5490 return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx),
5491 InsertBefore);
5492 }
5493
5494 // This insertvalue inserts something other than what we are looking for.
5495 // See if the (aggregate) value inserted into has the value we are
5496 // looking for, then.
5497 if (*req_idx != *i)
5498 return FindInsertedValue(I->getAggregateOperand(), idx_range,
5499 InsertBefore);
5500 }
5501 // If we end up here, the indices of the insertvalue match with those
5502 // requested (though possibly only partially). Now we recursively look at
5503 // the inserted value, passing any remaining indices.
5504 return FindInsertedValue(I->getInsertedValueOperand(),
5505 ArrayRef(req_idx, idx_range.end()), InsertBefore);
5506 }
5507
5508 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
5509 // If we're extracting a value from an aggregate that was extracted from
5510 // something else, we can extract from that something else directly instead.
5511 // However, we will need to chain I's indices with the requested indices.
5512
5513 // Calculate the number of indices required
5514 unsigned size = I->getNumIndices() + idx_range.size();
5515 // Allocate some space to put the new indices in
5516 SmallVector<unsigned, 5> Idxs;
5517 Idxs.reserve(size);
5518 // Add indices from the extract value instruction
5519 Idxs.append(I->idx_begin(), I->idx_end());
5520
5521 // Add requested indices
5522 Idxs.append(idx_range.begin(), idx_range.end());
5523
5524 assert(Idxs.size() == size &&
5525        "Number of indices added not correct?");
5526
5527 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
5528 }
5529 // Otherwise, we don't know (e.g., when extracting from a function return
5530 // value or a load instruction).
5531 return nullptr;
5532}
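The contract documented above is typically consumed from an InstCombine-style simplification. The sketch below is illustrative only: the helper name is invented, and it relies solely on the signature shown above, passing a null InsertBefore so that the query never creates new instructions.

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Hypothetical helper: if the scalar selected by an extractvalue is
  // already available as a register, return it so the caller can RAUW.
  static Value *reuseInsertedScalar(ExtractValueInst &EV) {
    // InsertBefore == nullptr: only reuse, never synthesize insertvalues.
    return FindInsertedValue(EV.getAggregateOperand(), EV.getIndices(),
                             /*InsertBefore=*/nullptr);
  }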
5533
5534bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
5535 unsigned CharSize) {
5536 // Make sure the GEP has exactly three arguments.
5537 if (GEP->getNumOperands() != 3)
5538 return false;
5539
5540 // Make sure the index-ee is a pointer to an array of \p CharSize
5541 // integers.
5542 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
5543 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
5544 return false;
5545
5546 // Check to make sure that the first operand of the GEP is an integer and
5547 // has value 0 so that we are sure we're indexing into the initializer.
5548 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
5549 if (!FirstIdx || !FirstIdx->isZero())
5550 return false;
5551
5552 return true;
5553}
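In C terms, the shape this predicate accepts is &str[idx], where str is a constant array of CharSize-bit integers and the first GEP index is the constant 0. A minimal caller sketch (the wrapper name is hypothetical):

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Operator.h"
  using namespace llvm;

  // Hypothetical check used when folding string library calls: does GEP
  // index into an [N x i8] object at a constant-zero first index?
  static bool looksLikeByteStringIndex(const GEPOperator *GEP) {
    return isGEPBasedOnPointerToString(GEP, /*CharSize=*/8);
  }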
5554
5555// If V refers to an initialized global constant, set Slice either to
5556// its initializer if the size of its elements equals ElementSize, or,
5557// for ElementSize == 8, to its representation as an array of unsigned
5558// char. Return true on success.
5559// Offset is in units of ElementSize-sized elements.
5560bool llvm::getConstantDataArrayInfo(const Value *V,
5561 ConstantDataArraySlice &Slice,
5562 unsigned ElementSize, uint64_t Offset) {
5563 assert(V && "V should not be null.");
    [2] Assuming 'V' is non-null
    [3] '?' condition is true
5564 assert((ElementSize % 8) == 0 &&
5565        "ElementSize expected to be a multiple of the size of a byte.");
    [4] '?' condition is true
5566 unsigned ElementSizeInBytes = ElementSize / 8;
5567
5568 // Drill down into the pointer expression V, ignoring any intervening
5569 // casts, and determine the identity of the object it references along
5570 // with the cumulative byte offset into it.
5571 const GlobalVariable *GV =
5572 dyn_cast<GlobalVariable>(getUnderlyingObject(V));
    [5] Assuming the object is a 'CastReturnType'
5573 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    [5.1] 'GV' is non-null
    [6] Assuming the condition is false
    [7] Taking false branch
5574 // Fail if V is not based on a constant global object.
5575 return false;
5576
5577 const DataLayout &DL = GV->getParent()->getDataLayout();
5578 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0);
5579
5580 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off,
5581                                                /*AllowNonInbounds*/ true))
    [8] Assuming the condition is false
    [9] Taking false branch
5582 // Fail if a constant offset could not be determined.
5583 return false;
5584
5585 uint64_t StartIdx = Off.getLimitedValue();
5586 if (StartIdx == UINT64_MAX)
    [10] Assuming 'StartIdx' is not equal to UINT64_MAX
    [11] Taking false branch
5587 // Fail if the constant offset is excessive.
5588 return false;
5589
5590 // Off/StartIdx is in units of bytes, so we need to convert it to a number
5591 // of elements. Simply bail out if that isn't possible.
5592 if ((StartIdx % ElementSizeInBytes) != 0)
    [12] Taking false branch
5593 return false;
5594
5595 Offset += StartIdx / ElementSizeInBytes;
5596 ConstantDataArray *Array = nullptr;
5597 ArrayType *ArrayTy = nullptr;
5598
5599 if (GV->getInitializer()->isNullValue()) {
    [13] Assuming the condition is false
    [14] Taking false branch
5600 Type *GVTy = GV->getValueType();
5601 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue();
5602 uint64_t Length = SizeInBytes / ElementSizeInBytes;
5603
5604 Slice.Array = nullptr;
5605 Slice.Offset = 0;
5606 // Return an empty Slice for undersized constants to let callers
5607 // transform even undefined library calls into simpler, well-defined
5608 // expressions. This is preferable to making the calls, although it
5609 // prevents sanitizers from detecting them.
5610 Slice.Length = Length < Offset ? 0 : Length - Offset;
5611 return true;
5612 }
5613
5614 auto *Init = const_cast<Constant *>(GV->getInitializer());
5615 if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
    [15] Assuming 'Init' is not a 'CastReturnType'
    [15.1] 'ArrayInit' is null
    [16] Taking false branch
5616 Type *InitElTy = ArrayInit->getElementType();
5617 if (InitElTy->isIntegerTy(ElementSize)) {
5618 // If Init is an initializer for an array of the expected type
5619 // and size, use it as is.
5620 Array = ArrayInit;
5621 ArrayTy = ArrayInit->getType();
5622 }
5623 }
5624
5625 if (!Array) {
    [16.1] 'Array' is null
    [17] Taking true branch
5626 if (ElementSize != 8)
    [17.1] 'ElementSize' is equal to 8
    [18] Taking false branch
5627 // TODO: Handle conversions to larger integral types.
5628 return false;
5629
5630 // Otherwise extract the portion of the initializer starting
5631 // at Offset as an array of bytes, and reset Offset.
5632 Init = ReadByteArrayFromGlobal(GV, Offset);
5633 if (!Init)
    [19] Assuming 'Init' is non-null
    [20] Taking false branch
5634 return false;
5635
5636 Offset = 0;
5637 Array = dyn_cast<ConstantDataArray>(Init);
    [21] Assuming 'Init' is a 'CastReturnType'
5638 ArrayTy = dyn_cast<ArrayType>(Init->getType());
    [22] Assuming the object is not a 'CastReturnType'
    [23] Null pointer value stored to 'ArrayTy'
5639 }
5640
5641 uint64_t NumElts = ArrayTy->getArrayNumElements();
    [24] Called C++ object pointer is null
5642 if (Offset > NumElts)
5643 return false;
5644
5645 Slice.Array = Array;
5646 Slice.Offset = Offset;
5647 Slice.Length = NumElts - Offset;
5648 return true;
5649}
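This is the function carrying the reported defect: at line 5641 ArrayTy is dereferenced, and on the analyzer's path (steps [15] through [24]) it was just assigned null. In practice, ReadByteArrayFromGlobal returns either null or a ConstantDataArray, whose type is always an ArrayType, so steps [21] and [22] cannot both hold and the report is likely a false positive; but that invariant is invisible to the analyzer. Below is a hedged sketch of a defensive tail for the function that makes the invariant explicit. It is one possible guard, not necessarily the fix adopted upstream:

  // Sketch: re-check both dyn_cast results before the line-5641
  // dereference. Under the ReadByteArrayFromGlobal invariant this check
  // never fires; it only makes the null-safety explicit.
  if (!Array || !ArrayTy)
    return false;

  uint64_t NumElts = ArrayTy->getArrayNumElements();
  if (Offset > NumElts)
    return false;

  Slice.Array = Array;
  Slice.Offset = Offset;
  Slice.Length = NumElts - Offset;
  return true;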
5650
5651/// Extract bytes from the initializer of the constant array V, which need
5652/// not be a nul-terminated string. On success, store the bytes in Str and
5653/// return true. When TrimAtNul is set, Str will contain only the bytes up
5654/// to but not including the first nul. Return false on failure.
5655bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
5656 bool TrimAtNul) {
5657 ConstantDataArraySlice Slice;
5658 if (!getConstantDataArrayInfo(V, Slice, 8))
    [1] Calling 'getConstantDataArrayInfo'
5659 return false;
5660
5661 if (Slice.Array == nullptr) {
5662 if (TrimAtNul) {
5663 // Return a nul-terminated string even for an empty Slice. This is
5664 // safe because all existing SimplifyLibcalls callers require string
5665 // arguments and the behavior of the functions they fold is undefined
5666 // otherwise. Folding the calls this way is preferable to making
5667 // the undefined library calls, even though it prevents sanitizers
5668 // from reporting such calls.
5669 Str = StringRef();
5670 return true;
5671 }
5672 if (Slice.Length == 1) {
5673 Str = StringRef("", 1);
5674 return true;
5675 }
5676 // We cannot instantiate a StringRef as we do not have an appropriate string
5677 // of 0s at hand.
5678 return false;
5679 }
5680
5681 // Start out with the entire array in the StringRef.
5682 Str = Slice.Array->getAsString();
5683 // Skip over 'offset' bytes.
5684 Str = Str.substr(Slice.Offset);
5685
5686 if (TrimAtNul) {
5687 // Trim off the \0 and anything after it. If the array is not nul
5688 // terminated, we just return the whole remainder of the string. The client
5689 // may know some other way that the string is length-bound.
5690 Str = Str.substr(0, Str.find('\0'));
5691 }
5692 return true;
5693}
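A typical libcall-folding use of this API compares a call argument against a known literal. The helper below is a hypothetical sketch using only the signature shown above:

  #include "llvm/ADT/StringRef.h"
  #include "llvm/Analysis/ValueTracking.h"
  using namespace llvm;

  // Hypothetical helper: is Arg a pointer to the constant string "r"?
  static bool isModeStringR(const Value *Arg) {
    StringRef S;
    return getConstantStringInfo(Arg, S, /*TrimAtNul=*/true) && S == "r";
  }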
5694
5695// These next two are very similar to the above, but also look through PHI
5696// nodes.
5697// TODO: See if we can integrate these two.
5698
5699/// If we can compute the length of the string pointed to by
5700/// the specified pointer, return 'len+1'. If we can't, return 0.
5701static uint64_t GetStringLengthH(const Value *V,
5702 SmallPtrSetImpl<const PHINode*> &PHIs,
5703 unsigned CharSize) {
5704 // Look through noop bitcast instructions.
5705 V = V->stripPointerCasts();
5706
5707 // If this is a PHI node, there are two cases: either we have already seen it
5708 // or we haven't.
5709 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
5710 if (!PHIs.insert(PN).second)
5711 return ~0ULL; // already in the set.
5712
5713 // If it was new, see if all the input strings are the same length.
5714 uint64_t LenSoFar = ~0ULL;
5715 for (Value *IncValue : PN->incoming_values()) {
5716 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
5717 if (Len == 0) return 0; // Unknown length -> unknown.
5718
5719 if (Len == ~0ULL) continue;
5720
5721 if (Len != LenSoFar && LenSoFar != ~0ULL)
5722 return 0; // Disagree -> unknown.
5723 LenSoFar = Len;
5724 }
5725
5726 // Success, all agree.
5727 return LenSoFar;
5728 }
5729
5730 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
5731 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
5732 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
5733 if (Len1 == 0) return 0;
5734 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
5735 if (Len2 == 0) return 0;
5736 if (Len1 == ~0ULL) return Len2;
5737 if (Len2 == ~0ULL) return Len1;
5738 if (Len1 != Len2) return 0;
5739 return Len1;
5740 }
5741
5742 // Otherwise, see if we can read the string.
5743 ConstantDataArraySlice Slice;
5744 if (!getConstantDataArrayInfo(V, Slice, CharSize))
5745 return 0;
5746
5747 if (Slice.Array == nullptr)
5748 // Zeroinitializer (including an empty one).
5749 return 1;
5750
5751 // Search for the first nul character. Return a conservative result even
5752 // when there is no nul. This is safe, since otherwise the string function
5753 // being folded (such as strlen) is undefined, and folding is preferable to
5754 // making the undefined library call.
5755 unsigned NullIndex = 0;
5756 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
5757 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
5758 break;
5759 }
5760
5761 return NullIndex + 1;
5762}
5763
5764/// If we can compute the length of the string pointed to by
5765/// the specified pointer, return 'len+1'. If we can't, return 0.
5766uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
5767 if (!V->getType()->isPointerTy())
5768 return 0;
5769
5770 SmallPtrSet<const PHINode*, 32> PHIs;
5771 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
5772 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
5773 // an empty string as a length.
5774 return Len == ~0ULL ? 1 : Len;
5775}
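Note the off-by-one convention: on success the function returns len+1, counting the terminating nul, and reserves 0 for failure. A hedged sketch of a caller that recovers the plain strlen value (the helper name is invented):

  #include "llvm/Analysis/ValueTracking.h"
  using namespace llvm;

  // Hypothetical helper: compute strlen(Ptr) as a compile-time constant.
  static bool tryConstantStrlen(const Value *Ptr, uint64_t &Len) {
    uint64_t LenPlusNul = GetStringLength(Ptr, /*CharSize=*/8);
    if (LenPlusNul == 0)
      return false;            // unknown length
    Len = LenPlusNul - 1;      // strip the nul that GetStringLength counts
    return true;
  }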
5776
5777const Value *
5778llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
5779 bool MustPreserveNullness) {
5780 assert(Call &&
5781        "getArgumentAliasingToReturnedPointer only works on nonnull calls");
5782 if (const Value *RV = Call->getReturnedArgOperand())
5783 return RV;
5784 // This can be used only as an aliasing property.
5785 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
5786 Call, MustPreserveNullness))
5787 return Call->getArgOperand(0);
5788 return nullptr;
5789}
5790
5791bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
5792 const CallBase *Call, bool MustPreserveNullness) {
5793 switch (Call->getIntrinsicID()) {
5794 case Intrinsic::launder_invariant_group:
5795 case Intrinsic::strip_invariant_group:
5796 case Intrinsic::aarch64_irg:
5797 case Intrinsic::aarch64_tagp:
5798 return true;
5799 case Intrinsic::ptrmask:
5800 return !MustPreserveNullness;
5801 default:
5802 return false;
5803 }
5804}
5805
5806/// \p PN defines a loop-variant pointer to an object. Check if the
5807/// previous iteration of the loop was referring to the same object as \p PN.
5808static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
5809 const LoopInfo *LI) {
5810 // Find the loop-defined value.
5811 Loop *L = LI->getLoopFor(PN->getParent());
5812 if (PN->getNumIncomingValues() != 2)
5813 return true;
5814
5815 // Find the value from previous iteration.
5816 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
5817 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
5818 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
5819 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
5820 return true;
5821
5822 // If a new pointer is loaded in the loop, the pointer references a different
5823 // object in every iteration. E.g.:
5824 // for (i)
5825 // int *p = a[i];
5826 // ...
5827 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
5828 if (!L->isLoopInvariant(Load->getPointerOperand()))
5829 return false;
5830 return true;
5831}
5832
5833const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
5834 if (!V->getType()->isPointerTy())
5835 return V;
5836 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
5837 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
5838 V = GEP->getPointerOperand();
5839 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
5840 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
5841 V = cast<Operator>(V)->getOperand(0);
5842 if (!V->getType()->isPointerTy())
5843 return V;
5844 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
5845 if (GA->isInterposable())
5846 return V;
5847 V = GA->getAliasee();
5848 } else {
5849 if (auto *PHI = dyn_cast<PHINode>(V)) {
5850 // Look through single-arg phi nodes created by LCSSA.
5851 if (PHI->getNumIncomingValues() == 1) {
5852 V = PHI->getIncomingValue(0);
5853 continue;
5854 }
5855 } else if (auto *Call = dyn_cast<CallBase>(V)) {
5856 // CaptureTracking knows about the special capturing properties of some
5857 // intrinsics, such as launder.invariant.group, which can't be expressed
5858 // with attributes but which return a pointer aliasing their argument.
5859 // Some analyses may assume that a nocapture pointer is never returned
5860 // from such an intrinsic (since the function would otherwise have to be
5861 // marked with the returned attribute), so it is crucial to use this
5862 // function and stay in sync with CaptureTracking. Not doing so may cause
5863 // weird miscompilations where two aliasing pointers are assumed to be
5864 // noalias.
5865 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
5866 V = RP;
5867 continue;
5868 }
5869 }
5870
5871 return V;
5872 }
5873 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
5874 }
5875 return V;
5876}
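A classic consumer of this walk is a cheap no-alias check: two pointers whose underlying objects are distinct identified objects (allocas, globals, noalias calls, and so on) cannot alias. The sketch below is a simplified illustration, and it assumes isIdentifiedObject is available from this tree's alias-analysis headers:

  #include "llvm/Analysis/AliasAnalysis.h"
  #include "llvm/Analysis/ValueTracking.h"
  using namespace llvm;

  // Simplified BasicAA-style rule, shown for illustration only.
  static bool provablyDistinctObjects(const Value *P, const Value *Q) {
    const Value *O1 = getUnderlyingObject(P);
    const Value *O2 = getUnderlyingObject(Q);
    return O1 != O2 && isIdentifiedObject(O1) && isIdentifiedObject(O2);
  }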
5877
5878void llvm::getUnderlyingObjects(const Value *V,
5879 SmallVectorImpl<const Value *> &Objects,
5880 LoopInfo *LI, unsigned MaxLookup) {
5881 SmallPtrSet<const Value *, 4> Visited;
5882 SmallVector<const Value *, 4> Worklist;
5883 Worklist.push_back(V);
5884 do {
5885 const Value *P = Worklist.pop_back_val();
5886 P = getUnderlyingObject(P, MaxLookup);
5887
5888 if (!Visited.insert(P).second)
5889 continue;
5890
5891 if (auto *SI = dyn_cast<SelectInst>(P)) {
5892 Worklist.push_back(SI->getTrueValue());
5893 Worklist.push_back(SI->getFalseValue());
5894 continue;
5895 }
5896
5897 if (auto *PN = dyn_cast<PHINode>(P)) {
5898 // If this PHI changes the underlying object in every iteration of the
5899 // loop, don't look through it. Consider:
5900 // int **A;
5901 // for (i) {
5902 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
5903 // Curr = A[i];
5904 // *Prev, *Curr;
5905 //
5906 // Prev is tracking Curr one iteration behind so they refer to different
5907 // underlying objects.
5908 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
5909 isSameUnderlyingObjectInLoop(PN, LI))
5910 append_range(Worklist, PN->incoming_values());
5911 continue;
5912 }
5913
5914 Objects.push_back(P);
5915 } while (!Worklist.empty());
5916}
5917
5918/// This is the function that does the work of looking through basic
5919/// ptrtoint+arithmetic+inttoptr sequences.
5920static const Value *getUnderlyingObjectFromInt(const Value *V) {
5921 do {
5922 if (const Operator *U = dyn_cast<Operator>(V)) {
5923 // If we find a ptrtoint, we can transfer control back to the
5924 // regular getUnderlyingObjectFromInt.
5925 if (U->getOpcode() == Instruction::PtrToInt)
5926 return U->getOperand(0);
5927 // If we find an add of a constant, a multiplied value, or a phi, it's
5928 // likely that the other operand will lead us to the base
5929 // object. We don't have to worry about the case where the
5930 // object address is somehow being computed by the multiply,
5931 // because our callers only care when the result is an
5932 // identifiable object.
5933 if (U->getOpcode() != Instruction::Add ||
5934 (!isa<ConstantInt>(U->getOperand(1)) &&
5935 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
5936 !isa<PHINode>(U->getOperand(1))))
5937 return V;
5938 V = U->getOperand(0);
5939 } else {
5940 return V;
5941 }
5942 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
5943 } while (true);
5944}
5945
5946/// This is a wrapper around getUnderlyingObjects and adds support for basic
5947/// ptrtoint+arithmetic+inttoptr sequences.
5948/// It returns false if an unidentified object is found by getUnderlyingObjects.
5949bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
5950 SmallVectorImpl<Value *> &Objects) {
5951 SmallPtrSet<const Value *, 16> Visited;
5952 SmallVector<const Value *, 4> Working(1, V);
5953 do {
5954 V = Working.pop_back_val();
5955
5956 SmallVector<const Value *, 4> Objs;
5957 getUnderlyingObjects(V, Objs);
5958
5959 for (const Value *V : Objs) {
5960 if (!Visited.insert(V).second)
5961 continue;
5962 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
5963 const Value *O =
5964 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
5965 if (O->getType()->isPointerTy()) {
5966 Working.push_back(O);
5967 continue;
5968 }
5969 }
5970 // If getUnderlyingObjects fails to find an identifiable object,
5971 // getUnderlyingObjectsForCodeGen also fails for safety.
5972 if (!isIdentifiedObject(V)) {
5973 Objects.clear();
5974 return false;
5975 }
5976 Objects.push_back(const_cast<Value *>(V));
5977 }
5978 } while (!Working.empty());
5979 return true;
5980}
5981
5982AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
5983 AllocaInst *Result = nullptr;
5984 SmallPtrSet<Value *, 4> Visited;
5985 SmallVector<Value *, 4> Worklist;
5986
5987 auto AddWork = [&](Value *V) {
5988 if (Visited.insert(V).second)
5989 Worklist.push_back(V);
5990 };
5991
5992 AddWork(V);
5993 do {
5994 V = Worklist.pop_back_val();
5995 assert(Visited.count(V));
5996
5997 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
5998 if (Result && Result != AI)
5999 return nullptr;
6000 Result = AI;
6001 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
6002 AddWork(CI->getOperand(0));
6003 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
6004 for (Value *IncValue : PN->incoming_values())
6005 AddWork(IncValue);
6006 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
6007 AddWork(SI->getTrueValue());
6008 AddWork(SI->getFalseValue());
6009 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
6010 if (OffsetZero && !GEP->hasAllZeroIndices())
6011 return nullptr;
6012 AddWork(GEP->getPointerOperand());
6013 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
6014 Value *Returned = CB->getReturnedArgOperand();
6015 if (Returned)
6016 AddWork(Returned);
6017 else
6018 return nullptr;
6019 } else {
6020 return nullptr;
6021 }
6022 } while (!Worklist.empty());
6023
6024 return Result;
6025}
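One common client is lifetime-intrinsic rewriting, where the pointer operand of llvm.lifetime.start/end must resolve to a single alloca. A minimal hypothetical sketch (operand 0 of the intrinsic is the size, operand 1 the pointer):

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  // Hypothetical helper: the unique alloca behind a lifetime marker, or
  // null if the pointer does not resolve to exactly one alloca.
  static AllocaInst *allocaForLifetimeMarker(IntrinsicInst *II) {
    return findAllocaForValue(II->getArgOperand(1), /*OffsetZero=*/false);
  }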
6026
6027static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6028 const Value *V, bool AllowLifetime, bool AllowDroppable) {
6029 for (const User *U : V->users()) {
6030 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
6031 if (!II)
6032 return false;
6033
6034 if (AllowLifetime && II->isLifetimeStartOrEnd())
6035 continue;
6036
6037 if (AllowDroppable && II->isDroppable())
6038 continue;
6039
6040 return false;
6041 }
6042 return true;
6043}
6044
6045bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
6046 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6047 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
6048}
6049bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
6050 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6051 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
6052}
6053
6054bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
6055 if (!LI.isUnordered())
6056 return true;
6057 const Function &F = *LI.getFunction();
6058 // Speculative load may create a race that did not exist in the source.
6059 return F.hasFnAttribute(Attribute::SanitizeThread) ||
6060 // Speculative load may load data from dirty regions.
6061 F.hasFnAttribute(Attribute::SanitizeAddress) ||
6062 F.hasFnAttribute(Attribute::SanitizeHWAddress);
6063}
6064
6065bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
6066 const Instruction *CtxI,
6067 AssumptionCache *AC,
6068 const DominatorTree *DT,
6069 const TargetLibraryInfo *TLI) {
6070 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
6071 AC, DT, TLI);
6072}
6073
6074bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
6075 unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
6076 AssumptionCache *AC, const DominatorTree *DT,
6077 const TargetLibraryInfo *TLI) {
6078#ifndef NDEBUG
6079 if (Inst->getOpcode() != Opcode) {
6080 // Check that the operands are actually compatible with the Opcode override.
6081 auto hasEqualReturnAndLeadingOperandTypes =
6082 [](const Instruction *Inst, unsigned NumLeadingOperands) {
6083 if (Inst->getNumOperands() < NumLeadingOperands)
6084 return false;
6085 const Type *ExpectedType = Inst->getType();
6086 for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
6087 if (Inst->getOperand(ItOp)->getType() != ExpectedType)
6088 return false;
6089 return true;
6090 };
6091 assert(!Instruction::isBinaryOp(Opcode) ||
6092        hasEqualReturnAndLeadingOperandTypes(Inst, 2));
6093 assert(!Instruction::isUnaryOp(Opcode) ||
6094        hasEqualReturnAndLeadingOperandTypes(Inst, 1));
6095 }
6096#endif
6097
6098 switch (Opcode) {
6099 default:
6100 return true;
6101 case Instruction::UDiv:
6102 case Instruction::URem: {
6103 // x / y is undefined if y == 0.
6104 const APInt *V;
6105 if (match(Inst->getOperand(1), m_APInt(V)))
6106 return *V != 0;
6107 return false;
6108 }
6109 case Instruction::SDiv:
6110 case Instruction::SRem: {
6111 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
6112 const APInt *Numerator, *Denominator;
6113 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
6114 return false;
6115 // We cannot hoist this division if the denominator is 0.
6116 if (*Denominator == 0)
6117 return false;
6118 // It's safe to hoist if the denominator is not 0 or -1.
6119 if (!Denominator->isAllOnes())
6120 return true;
6121 // At this point we know that the denominator is -1. It is safe to hoist as
6122 // long as we know that the numerator is not INT_MIN.
6123 if (match(Inst->getOperand(0), m_APInt(Numerator)))
6124 return !Numerator->isMinSignedValue();
6125 // The numerator *might* be MinSignedValue.
6126 return false;
6127 }
6128 case Instruction::Load: {
6129 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
6130 if (!LI)
6131 return false;
6132 if (mustSuppressSpeculation(*LI))
6133 return false;
6134 const DataLayout &DL = LI->getModule()->getDataLayout();
6135 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
6136 LI->getType(), LI->getAlign(), DL,
6137 CtxI, AC, DT, TLI);
6138 }
6139 case Instruction::Call: {
6140 auto *CI = dyn_cast<const CallInst>(Inst);
6141 if (!CI)
6142 return false;
6143 const Function *Callee = CI->getCalledFunction();
6144
6145 // The called function could have undefined behavior or side-effects, even
6146 // if marked readnone nounwind.
6147 return Callee && Callee->isSpeculatable();
6148 }
6149 case Instruction::VAArg:
6150 case Instruction::Alloca:
6151 case Instruction::Invoke:
6152 case Instruction::CallBr:
6153 case Instruction::PHI:
6154 case Instruction::Store:
6155 case Instruction::Ret:
6156 case Instruction::Br:
6157 case Instruction::IndirectBr:
6158 case Instruction::Switch:
6159 case Instruction::Unreachable:
6160 case Instruction::Fence:
6161 case Instruction::AtomicRMW:
6162 case Instruction::AtomicCmpXchg:
6163 case Instruction::LandingPad:
6164 case Instruction::Resume:
6165 case Instruction::CatchSwitch:
6166 case Instruction::CatchPad:
6167 case Instruction::CatchRet:
6168 case Instruction::CleanupPad:
6169 case Instruction::CleanupRet:
6170 return false; // Misc instructions which have effects
6171 }
6172}
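As a worked example of the SDiv/SRem case above: sdiv i8 %x, -1 is not speculatable, since %x might be -128 (INT_MIN for i8) and the true quotient 128 does not fit in i8, whereas sdiv i8 %x, 3 is speculatable because the divisor is a nonzero constant other than -1. A hedged caller sketch in the style of a hoisting pass (names are illustrative):

  #include "llvm/Analysis/ValueTracking.h"
  using namespace llvm;

  // Hypothetical legality check before hoisting I to the position of CtxI.
  static bool canHoistTo(Instruction *I, Instruction *CtxI,
                         AssumptionCache *AC, const DominatorTree *DT) {
    return isSafeToSpeculativelyExecute(I, CtxI, AC, DT);
  }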
6173
6174bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {
6175 if (I.mayReadOrWriteMemory())
6176 // Memory dependency possible
6177 return true;
6178 if (!isSafeToSpeculativelyExecute(&I))
6179 // Can't move above a may-throw call or an infinite loop, or, for an
6180 // inalloca alloca, above a stacksave call.
6181 return true;
6182 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6183 // 1) Can't reorder two inf-loop calls, even if readonly
6184 // 2) Also can't reorder an inf-loop call below an instruction which isn't
6185 //    safe to speculatively execute. (Inverse of above.)
6186 return true;
6187 return false;
6188}
6189
6190/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
6191static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
6192 switch (OR) {
6193 case ConstantRange::OverflowResult::MayOverflow:
6194 return OverflowResult::MayOverflow;
6195 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
6196 return OverflowResult::AlwaysOverflowsLow;
6197 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
6198 return OverflowResult::AlwaysOverflowsHigh;
6199 case ConstantRange::OverflowResult::NeverOverflows:
6200 return OverflowResult::NeverOverflows;
6201 }
6202 llvm_unreachable("Unknown OverflowResult")::llvm::llvm_unreachable_internal("Unknown OverflowResult", "llvm/lib/Analysis/ValueTracking.cpp"
, 6202)
;
6203}
6204
6205/// Combine constant ranges from computeConstantRange() and computeKnownBits().
6206static ConstantRange computeConstantRangeIncludingKnownBits(
6207 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
6208 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
6209 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
6210 KnownBits Known = computeKnownBits(
6211 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
6212 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
6213 ConstantRange CR2 = computeConstantRange(V, ForSigned, UseInstrInfo);
6214 ConstantRange::PreferredRangeType RangeType =
6215 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
6216 return CR1.intersectWith(CR2, RangeType);
6217}
6218
6219OverflowResult llvm::computeOverflowForUnsignedMul(
6220 const Value *LHS, const Value *RHS, const DataLayout &DL,
6221 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
6222 bool UseInstrInfo) {
6223 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
6224 nullptr, UseInstrInfo);
6225 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
6226 nullptr, UseInstrInfo);
6227 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
6228 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
6229 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
6230}
6231
6232OverflowResult
6233llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
6234 const DataLayout &DL, AssumptionCache *AC,
6235 const Instruction *CxtI,
6236 const DominatorTree *DT, bool UseInstrInfo) {
6237 // Multiplying a value with n significant bits by a value with m significant
6238 // bits yields a result with at most n + m significant bits. If that total
6239 // does not exceed the result bit width (minus 1), there is no overflow.
6240 // This means if we have enough leading sign bits in the operands
6241 // we can guarantee that the result does not overflow.
6242 // Ref: "Hacker's Delight" by Henry Warren
6243 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
6244
6245 // Note that underestimating the number of sign bits gives a more
6246 // conservative answer.
6247 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
6248 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
6249
6250 // First handle the easy case: if we have enough sign bits there's
6251 // definitely no overflow.
6252 if (SignBits > BitWidth + 1)
6253 return OverflowResult::NeverOverflows;
6254
6255 // There are two ambiguous cases where there can be no overflow:
6256 // SignBits == BitWidth + 1 and
6257 // SignBits == BitWidth
6258 // The second case is difficult to check; therefore, we handle only the
6259 // first case.
6260 if (SignBits == BitWidth + 1) {
6261 // It overflows only when both arguments are negative and the true
6262 // product is exactly the minimum negative number.
6263 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
6264 // For simplicity we just check if at least one side is not negative.
6265 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
6266 nullptr, UseInstrInfo);
6267 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
6268 nullptr, UseInstrInfo);
6269 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
6270 return OverflowResult::NeverOverflows;
6271 }
6272 return OverflowResult::MayOverflow;
6273}
6274
6275OverflowResult llvm::computeOverflowForUnsignedAdd(
6276 const Value *LHS, const Value *RHS, const DataLayout &DL,
6277 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
6278 bool UseInstrInfo) {
6279 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
6280 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
6281 nullptr, UseInstrInfo);
6282 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
6283 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
6284 nullptr, UseInstrInfo);
6285 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
6286}
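These OverflowResult queries usually pay off by strengthening IR flags. A hedged, InstCombine-flavored sketch that adds nuw to an add proven never to wrap (the helper and its parameters are hypothetical):

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/InstrTypes.h"
  using namespace llvm;

  // Hypothetical strengthening step; DL/AC/DT come from the caller.
  static void maybeSetNUW(BinaryOperator *Add, const DataLayout &DL,
                          AssumptionCache *AC, const DominatorTree *DT) {
    if (Add->getOpcode() != Instruction::Add || Add->hasNoUnsignedWrap())
      return;
    if (computeOverflowForUnsignedAdd(Add->getOperand(0), Add->getOperand(1),
                                      DL, AC, Add, DT) ==
        OverflowResult::NeverOverflows)
      Add->setHasNoUnsignedWrap(true);
  }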
6287
6288static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
6289 const Value *RHS,
6290 const AddOperator *Add,
6291 const DataLayout &DL,
6292 AssumptionCache *AC,
6293 const Instruction *CxtI,
6294 const DominatorTree *DT) {
6295 if (Add && Add->hasNoSignedWrap()) {
6296 return OverflowResult::NeverOverflows;
6297 }
6298
6299 // If LHS and RHS each have at least two sign bits, the addition will look
6300 // like
6301 //
6302 // XX..... +
6303 // YY.....
6304 //
6305 // If the carry into the most significant position is 0, X and Y can't both
6306 // be 1 and therefore the carry out of the addition is also 0.
6307 //
6308 // If the carry into the most significant position is 1, X and Y can't both
6309 // be 0 and therefore the carry out of the addition is also 1.
6310 //
6311 // Since the carry into the most significant position is always equal to
6312 // the carry out of the addition, there is no signed overflow.
6313 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
6314 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
6315 return OverflowResult::NeverOverflows;
6316
6317 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
6318 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
6319 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
6320 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
6321 OverflowResult OR =
6322 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
6323 if (OR != OverflowResult::MayOverflow)
6324 return OR;
6325
6326 // The remaining code needs Add to be available. Return early if it is not.
6327 if (!Add)
6328 return OverflowResult::MayOverflow;
6329
6330 // If the sign of Add is the same as at least one of the operands, this add
6331 // CANNOT overflow. If this can be determined from the known bits of the
6332 // operands the above signedAddMayOverflow() check will have already done so.
6333 // The only other way to improve on the known bits is from an assumption, so
6334 // call computeKnownBitsFromAssume() directly.
6335 bool LHSOrRHSKnownNonNegative =
6336 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
6337 bool LHSOrRHSKnownNegative =
6338 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
6339 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
6340 KnownBits AddKnown(LHSRange.getBitWidth());
6341 computeKnownBitsFromAssume(
6342 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
6343 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
6344 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
6345 return OverflowResult::NeverOverflows;
6346 }
6347
6348 return OverflowResult::MayOverflow;
6349}
6350
6351OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
6352 const Value *RHS,
6353 const DataLayout &DL,
6354 AssumptionCache *AC,
6355 const Instruction *CxtI,
6356 const DominatorTree *DT) {
6357 // X - (X % ?)
6358 // The remainder of a value can't have greater magnitude than itself,
6359 // so the subtraction can't overflow.
6360
6361 // X - (X -nuw ?)
6362 // In the minimal case, this would simplify to "?", so there's no subtract
6363 // at all. But if this analysis is used to peek through casts, for example,
6364 // then determining no-overflow may allow other transforms.
6365
6366 // TODO: There are other patterns like this.
6367 // See simplifyICmpWithBinOpOnLHS() for candidates.
6368 if (match(RHS, m_URem(m_Specific(LHS), m_Value())) ||
6369 match(RHS, m_NUWSub(m_Specific(LHS), m_Value())))
6370 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
6371 return OverflowResult::NeverOverflows;
6372
6373 // Checking for conditions implied by dominating conditions may be expensive.
6374 // Limit it to usub_with_overflow calls for now.
6375 if (match(CxtI,
6376 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
6377 if (auto C =
6378 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
6379 if (*C)
6380 return OverflowResult::NeverOverflows;
6381 return OverflowResult::AlwaysOverflowsLow;
6382 }
6383 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
6384 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
6385 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
6386 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
6387 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
6388}
6389
6390OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
6391 const Value *RHS,
6392 const DataLayout &DL,
6393 AssumptionCache *AC,
6394 const Instruction *CxtI,
6395 const DominatorTree *DT) {
6396 // X - (X % ?)
6397 // The remainder of a value can't have greater magnitude than itself,
6398 // so the subtraction can't overflow.
6399
6400 // X - (X -nsw ?)
6401 // In the minimal case, this would simplify to "?", so there's no subtract
6402 // at all. But if this analysis is used to peek through casts, for example,
6403 // then determining no-overflow may allow other transforms.
6404 if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) ||
6405 match(RHS, m_NSWSub(m_Specific(LHS), m_Value())))
6406 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
6407 return OverflowResult::NeverOverflows;
6408
6409 // If LHS and RHS each have at least two sign bits, the subtraction
6410 // cannot overflow.
6411 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
6412 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
6413 return OverflowResult::NeverOverflows;
6414
6415 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
6416 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
6417 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
6418 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
6419 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
6420}
6421
6422bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
6423 const DominatorTree &DT) {
6424 SmallVector<const BranchInst *, 2> GuardingBranches;
6425 SmallVector<const ExtractValueInst *, 2> Results;
6426
6427 for (const User *U : WO->users()) {
6428 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
6429 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
6430
6431 if (EVI->getIndices()[0] == 0)
6432 Results.push_back(EVI);
6433 else {
6434 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
6435
6436 for (const auto *U : EVI->users())
6437 if (const auto *B = dyn_cast<BranchInst>(U)) {
6438 assert(B->isConditional() && "How else is it using an i1?");
6439 GuardingBranches.push_back(B);
6440 }
6441 }
6442 } else {
6443 // We are using the aggregate directly in a way we don't want to analyze
6444 // here (storing it to a global, say).
6445 return false;
6446 }
6447 }
6448
6449 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
6450 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
6451 if (!NoWrapEdge.isSingleEdge())
6452 return false;
6453
6454 // Check if all users of the add are provably no-wrap.
6455 for (const auto *Result : Results) {
6456 // If the extractvalue itself is not executed on overflow, then we don't
6457 // need to check each use separately, since domination is transitive.
6458 if (DT.dominates(NoWrapEdge, Result->getParent()))
6459 continue;
6460
6461 for (const auto &RU : Result->uses())
6462 if (!DT.dominates(NoWrapEdge, RU))
6463 return false;
6464 }
6465
6466 return true;
6467 };
6468
6469 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
6470}
6471
6472/// Return true if the shift amount is known to be less than the bitwidth.
6473static bool shiftAmountKnownInRange(const Value *ShiftAmount) {
6474 auto *C = dyn_cast<Constant>(ShiftAmount);
6475 if (!C)
6476 return false;
6477
6478 // Shifts return poison if the shift amount is not less than the bitwidth.
6479 SmallVector<const Constant *, 4> ShiftAmounts;
6480 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
6481 unsigned NumElts = FVTy->getNumElements();
6482 for (unsigned i = 0; i < NumElts; ++i)
6483 ShiftAmounts.push_back(C->getAggregateElement(i));
6484 } else if (isa<ScalableVectorType>(C->getType()))
6485 return false; // Can't tell, just return false to be safe
6486 else
6487 ShiftAmounts.push_back(C);
6488
6489 bool Safe = llvm::all_of(ShiftAmounts, [](const Constant *C) {
6490 auto *CI = dyn_cast_or_null<ConstantInt>(C);
6491 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
6492 });
6493
6494 return Safe;
6495}
6496
6497static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
6498 bool ConsiderFlagsAndMetadata) {
6499
6500 if (ConsiderFlagsAndMetadata && Op->hasPoisonGeneratingFlagsOrMetadata())
6501 return true;
6502
6503 unsigned Opcode = Op->getOpcode();
6504
6505 // Check whether opcode is a poison/undef-generating operation
6506 switch (Opcode) {
6507 case Instruction::Shl:
6508 case Instruction::AShr:
6509 case Instruction::LShr:
6510 return !shiftAmountKnownInRange(Op->getOperand(1));
6511 case Instruction::FPToSI:
6512 case Instruction::FPToUI:
6513 // fptosi/ui yields poison if the resulting value does not fit in the
6514 // destination type.
6515 return true;
6516 case Instruction::Call:
6517 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
6518 switch (II->getIntrinsicID()) {
6519 // TODO: Add more intrinsics.
6520 case Intrinsic::ctlz:
6521 case Intrinsic::cttz:
6522 case Intrinsic::abs:
6523 if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue())
6524 return false;
6525 break;
6526 case Intrinsic::ctpop:
6527 case Intrinsic::bswap:
6528 case Intrinsic::bitreverse:
6529 case Intrinsic::fshl:
6530 case Intrinsic::fshr:
6531 case Intrinsic::smax:
6532 case Intrinsic::smin:
6533 case Intrinsic::umax:
6534 case Intrinsic::umin:
6535 case Intrinsic::ptrmask:
6536 case Intrinsic::fptoui_sat:
6537 case Intrinsic::fptosi_sat:
6538 case Intrinsic::sadd_with_overflow:
6539 case Intrinsic::ssub_with_overflow:
6540 case Intrinsic::smul_with_overflow:
6541 case Intrinsic::uadd_with_overflow:
6542 case Intrinsic::usub_with_overflow:
6543 case Intrinsic::umul_with_overflow:
6544 case Intrinsic::sadd_sat:
6545 case Intrinsic::uadd_sat:
6546 case Intrinsic::ssub_sat:
6547 case Intrinsic::usub_sat:
6548 return false;
6549 case Intrinsic::sshl_sat:
6550 case Intrinsic::ushl_sat:
6551 return !shiftAmountKnownInRange(II->getArgOperand(1));
6552 case Intrinsic::fma:
6553 case Intrinsic::fmuladd:
6554 case Intrinsic::sqrt:
6555 case Intrinsic::powi:
6556 case Intrinsic::sin:
6557 case Intrinsic::cos:
6558 case Intrinsic::pow:
6559 case Intrinsic::log:
6560 case Intrinsic::log10:
6561 case Intrinsic::log2:
6562 case Intrinsic::exp:
6563 case Intrinsic::exp2:
6564 case Intrinsic::fabs:
6565 case Intrinsic::copysign:
6566 case Intrinsic::floor:
6567 case Intrinsic::ceil:
6568 case Intrinsic::trunc:
6569 case Intrinsic::rint:
6570 case Intrinsic::nearbyint:
6571 case Intrinsic::round:
6572 case Intrinsic::roundeven:
6573 case Intrinsic::fptrunc_round:
6574 case Intrinsic::canonicalize:
6575 case Intrinsic::arithmetic_fence:
6576 case Intrinsic::minnum:
6577 case Intrinsic::maxnum:
6578 case Intrinsic::minimum:
6579 case Intrinsic::maximum:
6580 case Intrinsic::is_fpclass:
6581 return false;
6582 case Intrinsic::lround:
6583 case Intrinsic::llround:
6584 case Intrinsic::lrint:
6585 case Intrinsic::llrint:
6586 // If the value doesn't fit, an unspecified value is returned (but this
6587 // is not poison).
6588 return false;
6589 }
6590 }
6591 [[fallthrough]];
6592 case Instruction::CallBr:
6593 case Instruction::Invoke: {
6594 const auto *CB = cast<CallBase>(Op);
6595 return !CB->hasRetAttr(Attribute::NoUndef);
6596 }
6597 case Instruction::InsertElement:
6598 case Instruction::ExtractElement: {
6599 // If index exceeds the length of the vector, it returns poison
6600 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
6601 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
6602 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
6603 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
6604 return true;
6605 return false;
6606 }
6607 case Instruction::ShuffleVector: {
6608 // shufflevector may return undef.
6609 if (PoisonOnly)
6610 return false;
6611 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
6612 ? cast<ConstantExpr>(Op)->getShuffleMask()
6613 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
6614 return is_contained(Mask, PoisonMaskElem);
6615 }
6616 case Instruction::FNeg:
6617 case Instruction::PHI:
6618 case Instruction::Select:
6619 case Instruction::URem:
6620 case Instruction::SRem:
6621 case Instruction::ExtractValue:
6622 case Instruction::InsertValue:
6623 case Instruction::Freeze:
6624 case Instruction::ICmp:
6625 case Instruction::FCmp:
6626 case Instruction::FAdd:
6627 case Instruction::FSub:
6628 case Instruction::FMul:
6629 case Instruction::FDiv:
6630 case Instruction::FRem:
6631 return false;
6632 case Instruction::GetElementPtr:
6633 // inbounds is handled above
6634 // TODO: what about inrange on constexpr?
6635 return false;
6636 default: {
6637 const auto *CE = dyn_cast<ConstantExpr>(Op);
6638 if (isa<CastInst>(Op) || (CE && CE->isCast()))
6639 return false;
6640 else if (Instruction::isBinaryOp(Opcode))
6641 return false;
6642 // Be conservative and return true.
6643 return true;
6644 }
6645 }
6646}
6647
6648bool llvm::canCreateUndefOrPoison(const Operator *Op,
6649 bool ConsiderFlagsAndMetadata) {
6650 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false,
6651 ConsiderFlagsAndMetadata);
6652}
6653
6654bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) {
6655 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true,
6656 ConsiderFlagsAndMetadata);
6657}
6658
6659static bool directlyImpliesPoison(const Value *ValAssumedPoison,
6660 const Value *V, unsigned Depth) {
6661 if (ValAssumedPoison == V)
6662 return true;
6663
6664 const unsigned MaxDepth = 2;
6665 if (Depth >= MaxDepth)
6666 return false;
6667
6668 if (const auto *I = dyn_cast<Instruction>(V)) {
6669 if (any_of(I->operands(), [=](const Use &Op) {
6670 return propagatesPoison(Op) &&
6671 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
6672 }))
6673 return true;
6674
6675 // V = extractvalue V0, idx
6676 // V2 = extractvalue V0, idx2
6677 // V0's elements are either all poison or none are (e.g., add_with_overflow).
6678 const WithOverflowInst *II;
6679 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
6680 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
6681 llvm::is_contained(II->args(), ValAssumedPoison)))
6682 return true;
6683 }
6684 return false;
6685}
6686
6687static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
6688 unsigned Depth) {
6689 if (isGuaranteedNotToBePoison(ValAssumedPoison))
6690 return true;
6691
6692 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
6693 return true;
6694
6695 const unsigned MaxDepth = 2;
6696 if (Depth >= MaxDepth)
6697 return false;
6698
6699 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
6700 if (I && !canCreatePoison(cast<Operator>(I))) {
6701 return all_of(I->operands(), [=](const Value *Op) {
6702 return impliesPoison(Op, V, Depth + 1);
6703 });
6704 }
6705 return false;
6706}
6707
6708bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
6709 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
6710}
6711
6712static bool programUndefinedIfUndefOrPoison(const Value *V,
6713 bool PoisonOnly);
6714
6715static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
6716 AssumptionCache *AC,
6717 const Instruction *CtxI,
6718 const DominatorTree *DT,
6719 unsigned Depth, bool PoisonOnly) {
6720 if (Depth >= MaxAnalysisRecursionDepth)
6721 return false;
6722
6723 if (isa<MetadataAsValue>(V))
6724 return false;
6725
6726 if (const auto *A = dyn_cast<Argument>(V)) {
6727 if (A->hasAttribute(Attribute::NoUndef) ||
6728 A->hasAttribute(Attribute::Dereferenceable) ||
6729 A->hasAttribute(Attribute::DereferenceableOrNull))
6730 return true;
6731 }
6732
6733 if (auto *C = dyn_cast<Constant>(V)) {
6734 if (isa<UndefValue>(C))
6735 return PoisonOnly && !isa<PoisonValue>(C);
6736
6737 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
6738 isa<ConstantPointerNull>(C) || isa<Function>(C))
6739 return true;
6740
6741 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
6742 return (PoisonOnly ? !C->containsPoisonElement()
6743 : !C->containsUndefOrPoisonElement()) &&
6744 !C->containsConstantExpression();
6745 }
6746
6747 // Strip cast operations from a pointer value.
6748 // Note that stripPointerCastsSameRepresentation can strip off getelementptr
6749 // inbounds with zero offset. To guarantee that the result isn't poison, the
6750 // stripped pointer is checked: it has to point into an allocated object or
6751 // be null to ensure that `inbounds` getelementptrs with a zero offset could
6752 // not produce poison.
6753 // It can also strip off addrspacecasts that do not change the bit
6754 // representation; we believe such an addrspacecast is equivalent to a no-op.
6755 auto *StrippedV = V->stripPointerCastsSameRepresentation();
6756 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
6757 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
6758 return true;
6759
6760 auto OpCheck = [&](const Value *V) {
6761 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
6762 PoisonOnly);
6763 };
6764
6765 if (auto *Opr = dyn_cast<Operator>(V)) {
6766 // If the value is a freeze instruction, then it can never
6767 // be undef or poison.
6768 if (isa<FreezeInst>(V))
6769 return true;
6770
6771 if (const auto *CB = dyn_cast<CallBase>(V)) {
6772 if (CB->hasRetAttr(Attribute::NoUndef))
6773 return true;
6774 }
6775
6776 if (const auto *PN = dyn_cast<PHINode>(V)) {
6777 unsigned Num = PN->getNumIncomingValues();
6778 bool IsWellDefined = true;
6779 for (unsigned i = 0; i < Num; ++i) {
6780 auto *TI = PN->getIncomingBlock(i)->getTerminator();
6781 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
6782 DT, Depth + 1, PoisonOnly)) {
6783 IsWellDefined = false;
6784 break;
6785 }
6786 }
6787 if (IsWellDefined)
6788 return true;
6789 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
6790 return true;
6791 }
6792
6793 if (auto *I = dyn_cast<LoadInst>(V))
6794 if (I->hasMetadata(LLVMContext::MD_noundef) ||
6795 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
6796 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
6797 return true;
6798
6799 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
6800 return true;
6801
6802 // CxtI may be null or a cloned instruction.
6803 if (!CtxI || !CtxI->getParent() || !DT)
6804 return false;
6805
6806 auto *DNode = DT->getNode(CtxI->getParent());
6807 if (!DNode)
6808 // Unreachable block
6809 return false;
6810
6811 // If V is used as a branch condition before reaching CtxI, V cannot be
6812 // undef or poison.
6813 // br V, BB1, BB2
6814 // BB1:
6815 // CtxI ; V cannot be undef or poison here
6816 auto *Dominator = DNode->getIDom();
6817 while (Dominator) {
6818 auto *TI = Dominator->getBlock()->getTerminator();
6819
6820 Value *Cond = nullptr;
6821 if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
6822 if (BI->isConditional())
6823 Cond = BI->getCondition();
6824 } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
6825 Cond = SI->getCondition();
6826 }
6827
6828 if (Cond) {
6829 if (Cond == V)
6830 return true;
6831 else if (PoisonOnly && isa<Operator>(Cond)) {
6832 // For poison, we can analyze further
6833 auto *Opr = cast<Operator>(Cond);
6834 if (any_of(Opr->operands(),
6835 [V](const Use &U) { return V == U && propagatesPoison(U); }))
6836 return true;
6837 }
6838 }
6839
6840 Dominator = Dominator->getIDom();
6841 }
6842
6843 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
6844 return true;
6845
6846 return false;
6847}
6848
6849bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
6850 const Instruction *CtxI,
6851 const DominatorTree *DT,
6852 unsigned Depth) {
6853 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
6854}
6855
6856bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
6857 const Instruction *CtxI,
6858 const DominatorTree *DT, unsigned Depth) {
6859 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
6860}
6861
6862/// Return true if undefined behavior would provably be executed on the path to
6863 /// OnPathTo if Root produced a poison result. Note that this doesn't say
6864/// anything about whether OnPathTo is actually executed or whether Root is
6865/// actually poison. This can be used to assess whether a new use of Root can
6866/// be added at a location which is control equivalent with OnPathTo (such as
6867/// immediately before it) without introducing UB which didn't previously
6868/// exist. Note that a false result conveys no information.
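/// For example (an illustrative sketch, not taken from the source), given
///   %add = add i32 %x, 1          ; Root
///   %div = udiv i32 %y, %add      ; immediate UB if %add is poison
///   call void @use(i32 %div)      ; OnPathTo
/// this returns true: the udiv consumes Root as its divisor, which must not
/// be poison, and the udiv dominates OnPathTo.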
6869bool llvm::mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
6870 Instruction *OnPathTo,
6871 DominatorTree *DT) {
6872 // The basic approach is to assume Root is poison, propagate poison forward
6873 // through all users we can easily track, and then check whether any of those
6874 // users provably trigger UB and must execute before our exiting block might
6875 // exit.
6876
6877 // The set of all recursive users we've visited (which are assumed to all be
6878 // poison because of said visit)
6879 SmallSet<const Value *, 16> KnownPoison;
6880 SmallVector<const Instruction*, 16> Worklist;
6881 Worklist.push_back(Root);
6882 while (!Worklist.empty()) {
6883 const Instruction *I = Worklist.pop_back_val();
6884
6885 // If we know this must trigger UB on a path leading to our target, we're done.
6886 if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo))
6887 return true;
6888
6889 // If we can't analyze propagation through this instruction, just skip it
6890 // and its transitive users. This is safe, as false is a conservative result.
6891 if (I != Root && !any_of(I->operands(), [&KnownPoison](const Use &U) {
6892 return KnownPoison.contains(U) && propagatesPoison(U);
6893 }))
6894 continue;
6895
6896 if (KnownPoison.insert(I).second)
6897 for (const User *User : I->users())
6898 Worklist.push_back(cast<Instruction>(User));
6899 }
6900
6901 // Might be non-UB, or might have a path we couldn't prove must execute on
6902 // way to exiting bb.
6903 return false;
6904}
6905
6906OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
6907 const DataLayout &DL,
6908 AssumptionCache *AC,
6909 const Instruction *CxtI,
6910 const DominatorTree *DT) {
6911 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
6912 Add, DL, AC, CxtI, DT);
6913}
6914
6915OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
6916 const Value *RHS,
6917 const DataLayout &DL,
6918 AssumptionCache *AC,
6919 const Instruction *CxtI,
6920 const DominatorTree *DT) {
6921 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
6922}
6923
6924bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
6925 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
6926 // of time because it's possible for another thread to interfere with it for an
6927 // arbitrary length of time, but programs aren't allowed to rely on that.
6928
6929 // If there is no successor, then execution can't transfer to it.
6930 if (isa<ReturnInst>(I))
6931 return false;
6932 if (isa<UnreachableInst>(I))
6933 return false;
6934
6935 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
6936 // Instruction::willReturn.
6937 //
6938 // FIXME: Move this check into Instruction::willReturn.
6939 if (isa<CatchPadInst>(I)) {
6940 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
6941 default:
6942 // A catchpad may invoke exception object constructors and such, which
6943 // in some languages can be arbitrary code, so be conservative by default.
6944 return false;
6945 case EHPersonality::CoreCLR:
6946 // For CoreCLR, it just involves a type test.
6947 return true;
6948 }
6949 }
6950
6951 // An instruction that returns without throwing must transfer control flow
6952 // to a successor.
6953 return !I->mayThrow() && I->willReturn();
6954}
6955
6956bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
6957 // TODO: This is slightly conservative for invoke instructions, since exiting
6958 // via an exception *is* normal control flow for them.
6959 for (const Instruction &I : *BB)
6960 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6961 return false;
6962 return true;
6963}
6964
6965bool llvm::isGuaranteedToTransferExecutionToSuccessor(
6966 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
6967 unsigned ScanLimit) {
6968 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
6969 ScanLimit);
6970}
6971
6972bool llvm::isGuaranteedToTransferExecutionToSuccessor(
6973 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
6974 assert(ScanLimit && "scan limit must be non-zero");
6975 for (const Instruction &I : Range) {
6976 if (isa<DbgInfoIntrinsic>(I))
6977 continue;
6978 if (--ScanLimit == 0)
6979 return false;
6980 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6981 return false;
6982 }
6983 return true;
6984}
6985
6986bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
6987 const Loop *L) {
6988 // The loop header is guaranteed to be executed for every iteration.
6989 //
6990 // FIXME: Relax this constraint to cover all basic blocks that are
6991 // guaranteed to be executed at every iteration.
6992 if (I->getParent() != L->getHeader()) return false;
6993
6994 for (const Instruction &LI : *L->getHeader()) {
6995 if (&LI == I) return true;
6996 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
6997 }
6998 llvm_unreachable("Instruction not contained in its own parent basic block.")::llvm::llvm_unreachable_internal("Instruction not contained in its own parent basic block."
, "llvm/lib/Analysis/ValueTracking.cpp", 6998)
;
6999}
7000
7001bool llvm::propagatesPoison(const Use &PoisonOp) {
7002 const Operator *I = cast<Operator>(PoisonOp.getUser());
7003 switch (I->getOpcode()) {
7004 case Instruction::Freeze:
7005 case Instruction::PHI:
7006 case Instruction::Invoke:
7007 return false;
7008 case Instruction::Select:
7009 return PoisonOp.getOperandNo() == 0;
7010 case Instruction::Call:
7011 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
7012 switch (II->getIntrinsicID()) {
7013 // TODO: Add more intrinsics.
7014 case Intrinsic::sadd_with_overflow:
7015 case Intrinsic::ssub_with_overflow:
7016 case Intrinsic::smul_with_overflow:
7017 case Intrinsic::uadd_with_overflow:
7018 case Intrinsic::usub_with_overflow:
7019 case Intrinsic::umul_with_overflow:
7020 // If an input is a vector containing a poison element, then the
7021 // corresponding lanes of both output vectors (the calculated
7022 // results and the overflow bits) are poison.
7023 return true;
7024 case Intrinsic::ctpop:
7025 return true;
7026 }
7027 }
7028 return false;
7029 case Instruction::ICmp:
7030 case Instruction::FCmp:
7031 case Instruction::GetElementPtr:
7032 return true;
7033 default:
7034 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
7035 return true;
7036
7037 // Be conservative and return false.
7038 return false;
7039 }
7040}
7041
7042void llvm::getGuaranteedWellDefinedOps(
7043 const Instruction *I, SmallVectorImpl<const Value *> &Operands) {
7044 switch (I->getOpcode()) {
7045 case Instruction::Store:
7046 Operands.push_back(cast<StoreInst>(I)->getPointerOperand());
7047 break;
7048
7049 case Instruction::Load:
7050 Operands.push_back(cast<LoadInst>(I)->getPointerOperand());
7051 break;
7052
7053 // Since the dereferenceable attribute implies noundef, atomic operations
7054 // implicitly have noundef pointers as well.
7055 case Instruction::AtomicCmpXchg:
7056 Operands.push_back(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
7057 break;
7058
7059 case Instruction::AtomicRMW:
7060 Operands.push_back(cast<AtomicRMWInst>(I)->getPointerOperand());
7061 break;
7062
7063 case Instruction::Call:
7064 case Instruction::Invoke: {
7065 const CallBase *CB = cast<CallBase>(I);
7066 if (CB->isIndirectCall())
7067 Operands.push_back(CB->getCalledOperand());
7068 for (unsigned i = 0; i < CB->arg_size(); ++i) {
7069 if (CB->paramHasAttr(i, Attribute::NoUndef) ||
7070 CB->paramHasAttr(i, Attribute::Dereferenceable))
7071 Operands.push_back(CB->getArgOperand(i));
7072 }
7073 break;
7074 }
7075 case Instruction::Ret:
7076 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
7077 Operands.push_back(I->getOperand(0));
7078 break;
7079 case Instruction::Switch:
7080 Operands.push_back(cast<SwitchInst>(I)->getCondition());
7081 break;
7082 case Instruction::Br: {
7083 auto *BR = cast<BranchInst>(I);
7084 if (BR->isConditional())
7085 Operands.push_back(BR->getCondition());
7086 break;
7087 }
7088 default:
7089 break;
7090 }
7091}
7092
7093void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
7094 SmallVectorImpl<const Value *> &Operands) {
7095 getGuaranteedWellDefinedOps(I, Operands);
7096 switch (I->getOpcode()) {
7097 // Divisors of these operations are allowed to be partially undef.
7098 case Instruction::UDiv:
7099 case Instruction::SDiv:
7100 case Instruction::URem:
7101 case Instruction::SRem:
7102 Operands.push_back(I->getOperand(1));
7103 break;
7104 default:
7105 break;
7106 }
7107}
7108
7109bool llvm::mustTriggerUB(const Instruction *I,
7110 const SmallPtrSetImpl<const Value *> &KnownPoison) {
7111 SmallVector<const Value *, 4> NonPoisonOps;
7112 getGuaranteedNonPoisonOps(I, NonPoisonOps);
7113
7114 for (const auto *V : NonPoisonOps)
7115 if (KnownPoison.count(V))
7116 return true;
7117
7118 return false;
7119}
7120
7121static bool programUndefinedIfUndefOrPoison(const Value *V,
7122 bool PoisonOnly) {
7123 // We currently only look for uses of values within the same basic
7124 // block, as that makes it easier to guarantee that the uses will be
7125 // executed given that Inst is executed.
7126 //
7127 // FIXME: Expand this to consider uses beyond the same basic block. To do
7128 // this, look out for the distinction between post-dominance and strong
7129 // post-dominance.
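// For example (an illustrative sketch): if V is followed in its block by
//   store i32 0, ptr %v
// where %v is V, then V is the store's pointer operand, which must be well
// defined, so the program is undefined if V is undef or poison.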
7130 const BasicBlock *BB = nullptr;
7131 BasicBlock::const_iterator Begin;
7132 if (const auto *Inst = dyn_cast<Instruction>(V)) {
7133 BB = Inst->getParent();
7134 Begin = Inst->getIterator();
7135 Begin++;
7136 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
7137 BB = &Arg->getParent()->getEntryBlock();
7138 Begin = BB->begin();
7139 } else {
7140 return false;
7141 }
7142
7143 // Limit the number of instructions we look at, to avoid scanning through large
7144 // blocks. The current limit is chosen arbitrarily.
7145 unsigned ScanLimit = 32;
7146 BasicBlock::const_iterator End = BB->end();
7147
7148 if (!PoisonOnly) {
7149 // Since undef does not propagate eagerly, be conservative and just check
7150 // whether a value is directly passed to an instruction that must take
7151 // well-defined operands.
7152
7153 for (const auto &I : make_range(Begin, End)) {
7154 if (isa<DbgInfoIntrinsic>(I))
7155 continue;
7156 if (--ScanLimit == 0)
7157 break;
7158
7159 SmallVector<const Value *, 4> WellDefinedOps;
7160 getGuaranteedWellDefinedOps(&I, WellDefinedOps);
7161 if (is_contained(WellDefinedOps, V))
7162 return true;
7163
7164 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7165 break;
7166 }
7167 return false;
7168 }
7169
7170 // Set of instructions that we have proved will yield poison if Inst
7171 // does.
7172 SmallSet<const Value *, 16> YieldsPoison;
7173 SmallSet<const BasicBlock *, 4> Visited;
7174
7175 YieldsPoison.insert(V);
7176 Visited.insert(BB);
7177
7178 while (true) {
7179 for (const auto &I : make_range(Begin, End)) {
7180 if (isa<DbgInfoIntrinsic>(I))
7181 continue;
7182 if (--ScanLimit == 0)
7183 return false;
7184 if (mustTriggerUB(&I, YieldsPoison))
7185 return true;
7186 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7187 return false;
7188
7189 // If an operand is poison and propagates it, mark I as yielding poison.
7190 for (const Use &Op : I.operands()) {
7191 if (YieldsPoison.count(Op) && propagatesPoison(Op)) {
7192 YieldsPoison.insert(&I);
7193 break;
7194 }
7195 }
7196
7197 // Special handling for select, which returns poison if its operand 0 is
7198 // poison (handled in the loop above) *or* if both its true/false operands
7199 // are poison (handled here).
7200 if (I.getOpcode() == Instruction::Select &&
7201 YieldsPoison.count(I.getOperand(1)) &&
7202 YieldsPoison.count(I.getOperand(2))) {
7203 YieldsPoison.insert(&I);
7204 }
7205 }
7206
7207 BB = BB->getSingleSuccessor();
7208 if (!BB || !Visited.insert(BB).second)
7209 break;
7210
7211 Begin = BB->getFirstNonPHI()->getIterator();
7212 End = BB->end();
7213 }
7214 return false;
7215}
7216
7217bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
7218 return ::programUndefinedIfUndefOrPoison(Inst, false);
7219}
7220
7221bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
7222 return ::programUndefinedIfUndefOrPoison(Inst, true);
7223}
7224
7225static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
7226 if (FMF.noNaNs())
7227 return true;
7228
7229 if (auto *C = dyn_cast<ConstantFP>(V))
7230 return !C->isNaN();
7231
7232 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
7233 if (!C->getElementType()->isFloatingPointTy())
7234 return false;
7235 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
7236 if (C->getElementAsAPFloat(I).isNaN())
7237 return false;
7238 }
7239 return true;
7240 }
7241
7242 if (isa<ConstantAggregateZero>(V))
7243 return true;
7244
7245 return false;
7246}
7247
7248static bool isKnownNonZero(const Value *V) {
7249 if (auto *C = dyn_cast<ConstantFP>(V))
7250 return !C->isZero();
7251
7252 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
7253 if (!C->getElementType()->isFloatingPointTy())
7254 return false;
7255 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
7256 if (C->getElementAsAPFloat(I).isZero())
7257 return false;
7258 }
7259 return true;
7260 }
7261
7262 return false;
7263}
7264
7265 /// Match the clamp pattern for float types, without caring about NaNs or
7266 /// signed zeros. Given the non-min/max outer cmp/select from the clamp
7267 /// pattern, this function recognizes whether it can be substituted by a
7268 /// "canonical" min/max pattern.
7269static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
7270 Value *CmpLHS, Value *CmpRHS,
7271 Value *TrueVal, Value *FalseVal,
7272 Value *&LHS, Value *&RHS) {
7273 // Try to match
7274 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
7275 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
7276 // and return description of the outer Max/Min.
7277
7278 // First, check if select has inverse order:
7279 if (CmpRHS == FalseVal) {
7280 std::swap(TrueVal, FalseVal);
7281 Pred = CmpInst::getInversePredicate(Pred);
7282 }
7283
7284 // Assume success now. If there's no match, callers should not use these anyway.
7285 LHS = TrueVal;
7286 RHS = FalseVal;
7287
7288 const APFloat *FC1;
7289 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
7290 return {SPF_UNKNOWN, SPNB_NA, false};
7291
7292 const APFloat *FC2;
7293 switch (Pred) {
7294 case CmpInst::FCMP_OLT:
7295 case CmpInst::FCMP_OLE:
7296 case CmpInst::FCMP_ULT:
7297 case CmpInst::FCMP_ULE:
7298 if (match(FalseVal,
7299 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
7300 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
7301 *FC1 < *FC2)
7302 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
7303 break;
7304 case CmpInst::FCMP_OGT:
7305 case CmpInst::FCMP_OGE:
7306 case CmpInst::FCMP_UGT:
7307 case CmpInst::FCMP_UGE:
7308 if (match(FalseVal,
7309 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
7310 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
7311 *FC1 > *FC2)
7312 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
7313 break;
7314 default:
7315 break;
7316 }
7317
7318 return {SPF_UNKNOWN, SPNB_NA, false};
7319}
7320
7321/// Recognize variations of:
7322/// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
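/// For example (an illustrative sketch), clamping %x to the range [5, 10]:
///   %c1  = icmp slt i32 %x, 10
///   %min = select i1 %c1, i32 %x, i32 10   ; smin(%x, 10)
///   %c2  = icmp slt i32 %x, 5
///   %r   = select i1 %c2, i32 5, i32 %min
/// matches the "(X <s C1) ? C1 : SMIN(X, C2)" form below with C1 = 5 and
/// C2 = 10 and is recognized as SPF_SMAX.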
7323static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
7324 Value *CmpLHS, Value *CmpRHS,
7325 Value *TrueVal, Value *FalseVal) {
7326 // Swap the select operands and predicate to match the patterns below.
7327 if (CmpRHS != TrueVal) {
7328 Pred = ICmpInst::getSwappedPredicate(Pred);
7329 std::swap(TrueVal, FalseVal);
7330 }
7331 const APInt *C1;
7332 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
7333 const APInt *C2;
7334 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
7335 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
7336 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
7337 return {SPF_SMAX, SPNB_NA, false};
7338
7339 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
7340 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
7341 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
7342 return {SPF_SMIN, SPNB_NA, false};
7343
7344 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
7345 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
7346 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
7347 return {SPF_UMAX, SPNB_NA, false};
7348
7349 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
7350 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
7351 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
7352 return {SPF_UMIN, SPNB_NA, false};
7353 }
7354 return {SPF_UNKNOWN, SPNB_NA, false};
7355}
7356
7357/// Recognize variations of:
7358/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
7359static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
7360 Value *CmpLHS, Value *CmpRHS,
7361 Value *TVal, Value *FVal,
7362 unsigned Depth) {
7363 // TODO: Allow FP min/max with nnan/nsz.
7364 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
7365
7366 Value *A = nullptr, *B = nullptr;
7367 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
7368 if (!SelectPatternResult::isMinOrMax(L.Flavor))
7369 return {SPF_UNKNOWN, SPNB_NA, false};
7370
7371 Value *C = nullptr, *D = nullptr;
7372 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
7373 if (L.Flavor != R.Flavor)
7374 return {SPF_UNKNOWN, SPNB_NA, false};
7375
7376 // We have something like: x Pred y ? min(a, b) : min(c, d).
7377 // Try to match the compare to the min/max operations of the select operands.
7378 // First, make sure we have the right compare predicate.
7379 switch (L.Flavor) {
7380 case SPF_SMIN:
7381 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
7382 Pred = ICmpInst::getSwappedPredicate(Pred);
7383 std::swap(CmpLHS, CmpRHS);
7384 }
7385 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
7386 break;
7387 return {SPF_UNKNOWN, SPNB_NA, false};
7388 case SPF_SMAX:
7389 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
7390 Pred = ICmpInst::getSwappedPredicate(Pred);
7391 std::swap(CmpLHS, CmpRHS);
7392 }
7393 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
7394 break;
7395 return {SPF_UNKNOWN, SPNB_NA, false};
7396 case SPF_UMIN:
7397 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
7398 Pred = ICmpInst::getSwappedPredicate(Pred);
7399 std::swap(CmpLHS, CmpRHS);
7400 }
7401 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
7402 break;
7403 return {SPF_UNKNOWN, SPNB_NA, false};
7404 case SPF_UMAX:
7405 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
7406 Pred = ICmpInst::getSwappedPredicate(Pred);
7407 std::swap(CmpLHS, CmpRHS);
7408 }
7409 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
7410 break;
7411 return {SPF_UNKNOWN, SPNB_NA, false};
7412 default:
7413 return {SPF_UNKNOWN, SPNB_NA, false};
7414 }
7415
7416 // If there is a common operand in the already matched min/max and the other
7417 // min/max operands match the compare operands (either directly or inverted),
7418 // then this is min/max of the same flavor.
7419
7420 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
7421 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
7422 if (D == B) {
7423 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
7424 match(A, m_Not(m_Specific(CmpRHS)))))
7425 return {L.Flavor, SPNB_NA, false};
7426 }
7427 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
7428 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
7429 if (C == B) {
7430 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
7431 match(A, m_Not(m_Specific(CmpRHS)))))
7432 return {L.Flavor, SPNB_NA, false};
7433 }
7434 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
7435 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
7436 if (D == A) {
7437 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
7438 match(B, m_Not(m_Specific(CmpRHS)))))
7439 return {L.Flavor, SPNB_NA, false};
7440 }
7441 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
7442 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
7443 if (C == A) {
7444 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
7445 match(B, m_Not(m_Specific(CmpRHS)))))
7446 return {L.Flavor, SPNB_NA, false};
7447 }
7448
7449 return {SPF_UNKNOWN, SPNB_NA, false};
7450}
7451
7452/// If the input value is the result of a 'not' op, constant integer, or vector
7453/// splat of a constant integer, return the bitwise-not source value.
7454/// TODO: This could be extended to handle non-splat vector integer constants.
7455static Value *getNotValue(Value *V) {
7456 Value *NotV;
7457 if (match(V, m_Not(m_Value(NotV))))
7458 return NotV;
7459
7460 const APInt *C;
7461 if (match(V, m_APInt(C)))
7462 return ConstantInt::get(V->getType(), ~(*C));
7463
7464 return nullptr;
7465}
7466
7467/// Match non-obvious integer minimum and maximum sequences.
7468static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
7469 Value *CmpLHS, Value *CmpRHS,
7470 Value *TrueVal, Value *FalseVal,
7471 Value *&LHS, Value *&RHS,
7472 unsigned Depth) {
7473 // Assume success. If there's no match, callers should not use these anyway.
7474 LHS = TrueVal;
7475 RHS = FalseVal;
7476
7477 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
7478 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
7479 return SPR;
7480
7481 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
7482 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
7483 return SPR;
7484
7485 // Look through 'not' ops to find disguised min/max.
7486 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
7487 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
7488 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
7489 switch (Pred) {
7490 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
7491 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
7492 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
7493 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
7494 default: break;
7495 }
7496 }
7497
7498 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
7499 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
7500 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
7501 switch (Pred) {
7502 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
7503 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
7504 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
7505 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
7506 default: break;
7507 }
7508 }
7509
7510 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
7511 return {SPF_UNKNOWN, SPNB_NA, false};
7512
7513 const APInt *C1;
7514 if (!match(CmpRHS, m_APInt(C1)))
7515 return {SPF_UNKNOWN, SPNB_NA, false};
7516
7517 // An unsigned min/max can be written with a signed compare.
7518 const APInt *C2;
7519 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
7520 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
7521 // Is the sign bit set?
7522 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
7523 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
7524 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
7525 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
7526
7527 // Is the sign bit clear?
7528 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
7529 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
7530 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
7531 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
7532 }
7533
7534 return {SPF_UNKNOWN, SPNB_NA, false};
7535}
7536
7537bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
7538 assert(X && Y && "Invalid operand");
7539
7540 // X = sub (0, Y) || X = sub nsw (0, Y)
7541 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
7542 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
7543 return true;
7544
7545 // Y = sub (0, X) || Y = sub nsw (0, X)
7546 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
7547 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
7548 return true;
7549
7550 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
7551 Value *A, *B;
7552 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
7553 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
7554 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
7555 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
7556}
7557
7558static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
7559 FastMathFlags FMF,
7560 Value *CmpLHS, Value *CmpRHS,
7561 Value *TrueVal, Value *FalseVal,
7562 Value *&LHS, Value *&RHS,
7563 unsigned Depth) {
7564 bool HasMismatchedZeros = false;
7565 if (CmpInst::isFPPredicate(Pred)) {
7566 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
7567 // 0.0 operand, set the compare's 0.0 operands to that same value for the
7568 // purpose of identifying min/max. Disregard vector constants with undefined
7569 // elements because those cannot be back-propagated for analysis.
7570 Value *OutputZeroVal = nullptr;
7571 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
7572 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
7573 OutputZeroVal = TrueVal;
7574 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
7575 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
7576 OutputZeroVal = FalseVal;
7577
7578 if (OutputZeroVal) {
7579 if (match(CmpLHS, m_AnyZeroFP()) && CmpLHS != OutputZeroVal) {
7580 HasMismatchedZeros = true;
7581 CmpLHS = OutputZeroVal;
7582 }
7583 if (match(CmpRHS, m_AnyZeroFP()) && CmpRHS != OutputZeroVal) {
7584 HasMismatchedZeros = true;
7585 CmpRHS = OutputZeroVal;
7586 }
7587 }
7588 }
7589
7590 LHS = CmpLHS;
7591 RHS = CmpRHS;
7592
7593 // Signed zero may return inconsistent results between implementations.
7594 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
7595 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
7596 // Therefore, we behave conservatively and only proceed if at least one of the
7597 // operands is known to not be zero or if we don't care about signed zero.
7598 switch (Pred) {
7599 default: break;
7600 case CmpInst::FCMP_OGT: case CmpInst::FCMP_OLT:
7601 case CmpInst::FCMP_UGT: case CmpInst::FCMP_ULT:
7602 if (!HasMismatchedZeros)
7603 break;
7604 [[fallthrough]];
7605 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
7606 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
7607 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
7608 !isKnownNonZero(CmpRHS))
7609 return {SPF_UNKNOWN, SPNB_NA, false};
7610 }
7611
7612 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
7613 bool Ordered = false;
7614
7615 // When given one NaN and one non-NaN input:
7616 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
7617 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
7618 // ordered comparison fails), which could be NaN or non-NaN.
7619 // so here we discover exactly what NaN behavior is required/accepted.
7620 if (CmpInst::isFPPredicate(Pred)) {
7621 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
7622 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
7623
7624 if (LHSSafe && RHSSafe) {
7625 // Both operands are known non-NaN.
7626 NaNBehavior = SPNB_RETURNS_ANY;
7627 } else if (CmpInst::isOrdered(Pred)) {
7628 // An ordered comparison will return false when given a NaN, so it
7629 // returns the RHS.
7630 Ordered = true;
7631 if (LHSSafe)
7632 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
7633 NaNBehavior = SPNB_RETURNS_NAN;
7634 else if (RHSSafe)
7635 NaNBehavior = SPNB_RETURNS_OTHER;
7636 else
7637 // Completely unsafe.
7638 return {SPF_UNKNOWN, SPNB_NA, false};
7639 } else {
7640 Ordered = false;
7641 // An unordered comparison will return true when given a NaN, so it
7642 // returns the LHS.
7643 if (LHSSafe)
7644 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
7645 NaNBehavior = SPNB_RETURNS_OTHER;
7646 else if (RHSSafe)
7647 NaNBehavior = SPNB_RETURNS_NAN;
7648 else
7649 // Completely unsafe.
7650 return {SPF_UNKNOWN, SPNB_NA, false};
7651 }
7652 }
7653
7654 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
7655 std::swap(CmpLHS, CmpRHS);
7656 Pred = CmpInst::getSwappedPredicate(Pred);
7657 if (NaNBehavior == SPNB_RETURNS_NAN)
7658 NaNBehavior = SPNB_RETURNS_OTHER;
7659 else if (NaNBehavior == SPNB_RETURNS_OTHER)
7660 NaNBehavior = SPNB_RETURNS_NAN;
7661 Ordered = !Ordered;
7662 }
7663
7664 // ([if]cmp X, Y) ? X : Y
7665 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
7666 switch (Pred) {
7667 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
7668 case ICmpInst::ICMP_UGT:
7669 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
7670 case ICmpInst::ICMP_SGT:
7671 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
7672 case ICmpInst::ICMP_ULT:
7673 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
7674 case ICmpInst::ICMP_SLT:
7675 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
7676 case FCmpInst::FCMP_UGT:
7677 case FCmpInst::FCMP_UGE:
7678 case FCmpInst::FCMP_OGT:
7679 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
7680 case FCmpInst::FCMP_ULT:
7681 case FCmpInst::FCMP_ULE:
7682 case FCmpInst::FCMP_OLT:
7683 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
7684 }
7685 }
7686
7687 if (isKnownNegation(TrueVal, FalseVal)) {
7688 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
7689 // match against either LHS or sext(LHS).
7690 auto MaybeSExtCmpLHS =
7691 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
7692 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
7693 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
7694 if (match(TrueVal, MaybeSExtCmpLHS)) {
7695 // Set the return values. If the compare uses the negated value (-X >s 0),
7696 // swap the return values because the negated value is always 'RHS'.
7697 LHS = TrueVal;
7698 RHS = FalseVal;
7699 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
7700 std::swap(LHS, RHS);
7701
7702 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
7703 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
7704 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
7705 return {SPF_ABS, SPNB_NA, false};
7706
7707 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
7708 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
7709 return {SPF_ABS, SPNB_NA, false};
7710
7711 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
7712 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
7713 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
7714 return {SPF_NABS, SPNB_NA, false};
7715 }
7716 else if (match(FalseVal, MaybeSExtCmpLHS)) {
7717 // Set the return values. If the compare uses the negated value (-X >s 0),
7718 // swap the return values because the negated value is always 'RHS'.
7719 LHS = FalseVal;
7720 RHS = TrueVal;
7721 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
7722 std::swap(LHS, RHS);
7723
7724 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
7725 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
7726 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
7727 return {SPF_NABS, SPNB_NA, false};
7728
7729 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
7730 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
7731 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
7732 return {SPF_ABS, SPNB_NA, false};
7733 }
7734 }
7735
7736 if (CmpInst::isIntPredicate(Pred))
7737 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
7738
7739 // According to IEEE 754-2008 5.3.1, minNum(0.0, -0.0) and similar
7740 // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
7741 // semantics than minNum. Be conservative in such cases.
7742 if (NaNBehavior != SPNB_RETURNS_ANY ||
7743 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
7744 !isKnownNonZero(CmpRHS)))
7745 return {SPF_UNKNOWN, SPNB_NA, false};
7746
7747 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
7748}
7749
7750/// Helps to match a select pattern in case of a type mismatch.
7751///
7752 /// The function handles the case when the types of the true and false values
7753 /// of a select instruction differ from the types of the cmp instruction's
7754 /// operands because of a cast instruction. It checks whether it is legal to
7755 /// move the cast operation after the "select". If so, it returns the new
7756 /// second value of the "select" (with the assumption that the cast is moved):
7757/// 1. As operand of cast instruction when both values of "select" are same cast
7758/// instructions.
7759/// 2. As restored constant (by applying reverse cast operation) when the first
7760/// value of the "select" is a cast operation and the second value is a
7761/// constant.
7762/// NOTE: We return only the new second value because the first value could be
7763 /// accessed as the operand of the cast instruction.
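/// For example (an illustrative sketch), given
///   %cond = icmp slt i16 %x, %y
///   %xe   = sext i16 %x to i32
///   %ye   = sext i16 %y to i32
///   %sel  = select i1 %cond, i32 %xe, i32 %ye
/// calling this with V1 = %xe and V2 = %ye returns %y and sets *CastOp to
/// SExt, since the sext can legally be performed after the select.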
7764static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
7765 Instruction::CastOps *CastOp) {
7766 auto *Cast1 = dyn_cast<CastInst>(V1);
7767 if (!Cast1)
7768 return nullptr;
7769
7770 *CastOp = Cast1->getOpcode();
7771 Type *SrcTy = Cast1->getSrcTy();
7772 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
7773 // If V1 and V2 are both the same cast from the same type, look through V1.
7774 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
7775 return Cast2->getOperand(0);
7776 return nullptr;
7777 }
7778
7779 auto *C = dyn_cast<Constant>(V2);
7780 if (!C)
7781 return nullptr;
7782
7783 Constant *CastedTo = nullptr;
7784 switch (*CastOp) {
7785 case Instruction::ZExt:
7786 if (CmpI->isUnsigned())
7787 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
7788 break;
7789 case Instruction::SExt:
7790 if (CmpI->isSigned())
7791 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
7792 break;
7793 case Instruction::Trunc:
7794 Constant *CmpConst;
7795 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
7796 CmpConst->getType() == SrcTy) {
7797 // Here we have the following case:
7798 //
7799 // %cond = cmp iN %x, CmpConst
7800 // %tr = trunc iN %x to iK
7801 // %narrowsel = select i1 %cond, iK %tr, iK C
7802 //
7803 // We can always move trunc after select operation:
7804 //
7805 // %cond = cmp iN %x, CmpConst
7806 // %widesel = select i1 %cond, iN %x, iN CmpConst
7807 // %tr = trunc iN %widesel to iK
7808 //
7809 // Note that C could be extended in any way because we don't care about
7810 // upper bits after truncation. It can't be abs pattern, because it would
7811 // look like:
7812 //
7813 // select i1 %cond, x, -x.
7814 //
7815 // So only the min/max pattern could be matched. Such a match requires the
7816 // widened C == CmpConst. That is why we set the widened C = CmpConst; the
7817 // condition trunc(CmpConst) == C is checked below.
7818 CastedTo = CmpConst;
7819 } else {
7820 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
7821 }
7822 break;
7823 case Instruction::FPTrunc:
7824 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
7825 break;
7826 case Instruction::FPExt:
7827 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
7828 break;
7829 case Instruction::FPToUI:
7830 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
7831 break;
7832 case Instruction::FPToSI:
7833 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
7834 break;
7835 case Instruction::UIToFP:
7836 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
7837 break;
7838 case Instruction::SIToFP:
7839 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
7840 break;
7841 default:
7842 break;
7843 }
7844
7845 if (!CastedTo)
7846 return nullptr;
7847
7848 // Make sure the cast doesn't lose any information.
7849 Constant *CastedBack =
7850 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
7851 if (CastedBack != C)
7852 return nullptr;
7853
7854 return CastedTo;
7855}
7856
7857SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
7858 Instruction::CastOps *CastOp,
7859 unsigned Depth) {
7860 if (Depth >= MaxAnalysisRecursionDepth)
7861 return {SPF_UNKNOWN, SPNB_NA, false};
7862
7863 SelectInst *SI = dyn_cast<SelectInst>(V);
7864 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
7865
7866 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
7867 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
7868
7869 Value *TrueVal = SI->getTrueValue();
7870 Value *FalseVal = SI->getFalseValue();
7871
7872 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
7873 CastOp, Depth);
7874}
7875
7876SelectPatternResult llvm::matchDecomposedSelectPattern(
7877 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
7878 Instruction::CastOps *CastOp, unsigned Depth) {
7879 CmpInst::Predicate Pred = CmpI->getPredicate();
7880 Value *CmpLHS = CmpI->getOperand(0);
7881 Value *CmpRHS = CmpI->getOperand(1);
7882 FastMathFlags FMF;
7883 if (isa<FPMathOperator>(CmpI))
7884 FMF = CmpI->getFastMathFlags();
7885
7886 // Bail out early.
7887 if (CmpI->isEquality())
7888 return {SPF_UNKNOWN, SPNB_NA, false};
7889
7890 // Deal with type mismatches.
7891 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
7892 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
7893 // If this is a potential fmin/fmax with a cast to integer, then ignore
7894 // -0.0 because there is no corresponding integer value.
7895 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
7896 FMF.setNoSignedZeros();
7897 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
7898 cast<CastInst>(TrueVal)->getOperand(0), C,
7899 LHS, RHS, Depth);
7900 }
7901 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
7902 // If this is a potential fmin/fmax with a cast to integer, then ignore
7903 // -0.0 because there is no corresponding integer value.
7904 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
7905 FMF.setNoSignedZeros();
7906 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
7907 C, cast<CastInst>(FalseVal)->getOperand(0),
7908 LHS, RHS, Depth);
7909 }
7910 }
7911 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
7912 LHS, RHS, Depth);
7913}
7914
7915CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
7916 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
7917 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
7918 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
7919 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
7920 if (SPF == SPF_FMINNUM)
7921 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
7922 if (SPF == SPF_FMAXNUM)
7923 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
7924 llvm_unreachable("unhandled!")::llvm::llvm_unreachable_internal("unhandled!", "llvm/lib/Analysis/ValueTracking.cpp"
, 7924)
;
7925}
7926
7927SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
7928 if (SPF == SPF_SMIN) return SPF_SMAX;
7929 if (SPF == SPF_UMIN) return SPF_UMAX;
7930 if (SPF == SPF_SMAX) return SPF_SMIN;
7931 if (SPF == SPF_UMAX) return SPF_UMIN;
7932 llvm_unreachable("unhandled!")::llvm::llvm_unreachable_internal("unhandled!", "llvm/lib/Analysis/ValueTracking.cpp"
, 7932)
;
7933}
7934
7935Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
7936 switch (MinMaxID) {
7937 case Intrinsic::smax: return Intrinsic::smin;
7938 case Intrinsic::smin: return Intrinsic::smax;
7939 case Intrinsic::umax: return Intrinsic::umin;
7940 case Intrinsic::umin: return Intrinsic::umax;
7941 // Note that the next four intrinsics may produce the same result for the
7942 // original and inverted cases even if X != Y, because NaN is handled specially.
7943 case Intrinsic::maximum: return Intrinsic::minimum;
7944 case Intrinsic::minimum: return Intrinsic::maximum;
7945 case Intrinsic::maxnum: return Intrinsic::minnum;
7946 case Intrinsic::minnum: return Intrinsic::maxnum;
7947 default: llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "llvm/lib/Analysis/ValueTracking.cpp"
, 7947)
;
7948 }
7949}
7950
7951APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
7952 switch (SPF) {
7953 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
7954 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
7955 case SPF_UMAX: return APInt::getMaxValue(BitWidth);
7956 case SPF_UMIN: return APInt::getMinValue(BitWidth);
7957 default: llvm_unreachable("Unexpected flavor")::llvm::llvm_unreachable_internal("Unexpected flavor", "llvm/lib/Analysis/ValueTracking.cpp"
, 7957)
;
7958 }
7959}
7960
7961std::pair<Intrinsic::ID, bool>
7962llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
7963 // Check if VL contains select instructions that can be folded into a min/max
7964 // vector intrinsic and return the intrinsic if it is possible.
7965 // TODO: Support floating point min/max.
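// For example (an illustrative sketch): if every value in VL is a select of
// the form "(%a <s %b) ? %a : %b" (an smin), this returns
// {Intrinsic::smin, AllCmpSingleUse}, where AllCmpSingleUse is true only if
// each compare has a single use.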
7966 bool AllCmpSingleUse = true;
7967 SelectPatternResult SelectPattern;
7968 SelectPattern.Flavor = SPF_UNKNOWN;
7969 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
7970 Value *LHS, *RHS;
7971 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
7972 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
7973 CurrentPattern.Flavor == SPF_FMINNUM ||
7974 CurrentPattern.Flavor == SPF_FMAXNUM ||
7975 !I->getType()->isIntOrIntVectorTy())
7976 return false;
7977 if (SelectPattern.Flavor != SPF_UNKNOWN &&
7978 SelectPattern.Flavor != CurrentPattern.Flavor)
7979 return false;
7980 SelectPattern = CurrentPattern;
7981 AllCmpSingleUse &=
7982 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
7983 return true;
7984 })) {
7985 switch (SelectPattern.Flavor) {
7986 case SPF_SMIN:
7987 return {Intrinsic::smin, AllCmpSingleUse};
7988 case SPF_UMIN:
7989 return {Intrinsic::umin, AllCmpSingleUse};
7990 case SPF_SMAX:
7991 return {Intrinsic::smax, AllCmpSingleUse};
7992 case SPF_UMAX:
7993 return {Intrinsic::umax, AllCmpSingleUse};
7994 default:
7995 llvm_unreachable("unexpected select pattern flavor")::llvm::llvm_unreachable_internal("unexpected select pattern flavor"
, "llvm/lib/Analysis/ValueTracking.cpp", 7995)
;
7996 }
7997 }
7998 return {Intrinsic::not_intrinsic, false};
7999}
8000
8001bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
8002 Value *&Start, Value *&Step) {
8003 // Handle the case of a simple two-predecessor recurrence PHI.
8004 // There's a lot more that could theoretically be done here, but
8005 // this is sufficient to catch some interesting cases.
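// For example (an illustrative sketch), for the canonical counting loop
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
// this sets BO = %iv.next, Start = 0, and Step = 1.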
8006 if (P->getNumIncomingValues() != 2)
8007 return false;
8008
8009 for (unsigned i = 0; i != 2; ++i) {
8010 Value *L = P->getIncomingValue(i);
8011 Value *R = P->getIncomingValue(!i);
8012 Operator *LU = dyn_cast<Operator>(L);
8013 if (!LU)
8014 continue;
8015 unsigned Opcode = LU->getOpcode();
8016
8017 switch (Opcode) {
8018 default:
8019 continue;
8020 // TODO: Expand list -- xor, div, gep, uaddo, etc..
8021 case Instruction::LShr:
8022 case Instruction::AShr:
8023 case Instruction::Shl:
8024 case Instruction::Add:
8025 case Instruction::Sub:
8026 case Instruction::And:
8027 case Instruction::Or:
8028 case Instruction::Mul:
8029 case Instruction::FMul: {
8030 Value *LL = LU->getOperand(0);
8031 Value *LR = LU->getOperand(1);
8032 // Find a recurrence.
8033 if (LL == P)
8034 L = LR;
8035 else if (LR == P)
8036 L = LL;
8037 else
8038 continue; // Check for recurrence with L and R flipped.
8039
8040 break; // Match!
8041 }
8042 };
8043
8044 // We have matched a recurrence of the form:
8045 // %iv = [R, %entry], [%iv.next, %backedge]
8046 // %iv.next = binop %iv, L
8047 // OR
8048 // %iv = [R, %entry], [%iv.next, %backedge]
8049 // %iv.next = binop L, %iv
8050 BO = cast<BinaryOperator>(LU);
8051 Start = R;
8052 Step = L;
8053 return true;
8054 }
8055 return false;
8056}
8057
8058bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
8059 Value *&Start, Value *&Step) {
8060 BinaryOperator *BO = nullptr;
8061 P = dyn_cast<PHINode>(I->getOperand(0));
8062 if (!P)
8063 P = dyn_cast<PHINode>(I->getOperand(1));
8064 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
8065}
8066
8067/// Return true if "icmp Pred LHS RHS" is always true.
8068static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
8069 const Value *RHS, const DataLayout &DL,
8070 unsigned Depth) {
8071 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
8072 return true;
8073
8074 switch (Pred) {
8075 default:
8076 return false;
8077
8078 case CmpInst::ICMP_SLE: {
8079 const APInt *C;
8080
8081 // LHS s<= LHS +_{nsw} C if C >= 0
8082 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
8083 return !C->isNegative();
8084 return false;
8085 }
8086
8087 case CmpInst::ICMP_ULE: {
8088 const APInt *C;
8089
8090 // LHS u<= LHS +_{nuw} C for any C
8091 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
8092 return true;
8093
8094 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
8095 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
8096 const Value *&X,
8097 const APInt *&CA, const APInt *&CB) {
8098 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
8099 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
8100 return true;
8101
8102 // If X & C == 0 then (X | C) == X +_{nuw} C
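// For example (illustrative): if the low two bits of X are known to be
// zero, then (X | 3) == X + 3 and the addition cannot wrap.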
8103 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
8104 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
8105 KnownBits Known(CA->getBitWidth());
8106 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
8107 /*CxtI*/ nullptr, /*DT*/ nullptr);
8108 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
8109 return true;
8110 }
8111
8112 return false;
8113 };
8114
8115 const Value *X;
8116 const APInt *CLHS, *CRHS;
8117 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
8118 return CLHS->ule(*CRHS);
8119
8120 return false;
8121 }
8122 }
8123}
8124
8125/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
8126/// ALHS ARHS" is true. Otherwise, return std::nullopt.
8127static std::optional<bool>
8128isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
8129 const Value *ARHS, const Value *BLHS, const Value *BRHS,
8130 const DataLayout &DL, unsigned Depth) {
8131 switch (Pred) {
8132 default:
8133 return std::nullopt;
8134
8135 case CmpInst::ICMP_SLT:
8136 case CmpInst::ICMP_SLE:
8137 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
8138 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
8139 return true;
8140 return std::nullopt;
8141
8142 case CmpInst::ICMP_SGT:
8143 case CmpInst::ICMP_SGE:
8144 if (isTruePredicate(CmpInst::ICMP_SLE, ALHS, BLHS, DL, Depth) &&
8145 isTruePredicate(CmpInst::ICMP_SLE, BRHS, ARHS, DL, Depth))
8146 return true;
8147 return std::nullopt;
8148
8149 case CmpInst::ICMP_ULT:
8150 case CmpInst::ICMP_ULE:
8151 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
8152 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
8153 return true;
8154 return std::nullopt;
8155
8156 case CmpInst::ICMP_UGT:
8157 case CmpInst::ICMP_UGE:
8158 if (isTruePredicate(CmpInst::ICMP_ULE, ALHS, BLHS, DL, Depth) &&
8159 isTruePredicate(CmpInst::ICMP_ULE, BRHS, ARHS, DL, Depth))
8160 return true;
8161 return std::nullopt;
8162 }
8163}
8164
8165/// Return true if the operands of two compares (expanded as "L0 pred L1" and
8166/// "R0 pred R1") match. IsSwappedOps is true when the operands match, but are
8167/// swapped.
8168static bool areMatchingOperands(const Value *L0, const Value *L1, const Value *R0,
8169 const Value *R1, bool &AreSwappedOps) {
8170 bool AreMatchingOps = (L0 == R0 && L1 == R1);
8171 AreSwappedOps = (L0 == R1 && L1 == R0);
8172 return AreMatchingOps || AreSwappedOps;
8173}
8174
8175/// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
8176/// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false.
8177/// Otherwise, return std::nullopt if we can't infer anything.
8178static std::optional<bool>
8179isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
8180 CmpInst::Predicate RPred, bool AreSwappedOps) {
8181 // Canonicalize the predicate as if the operands were not commuted.
8182 if (AreSwappedOps)
8183 RPred = ICmpInst::getSwappedPredicate(RPred);
8184
8185 if (CmpInst::isImpliedTrueByMatchingCmp(LPred, RPred))
8186 return true;
8187 if (CmpInst::isImpliedFalseByMatchingCmp(LPred, RPred))
8188 return false;
8189
8190 return std::nullopt;
8191}
8192
8193/// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
8194/// Return false if "icmp LPred X, LC" implies "icmp RPred X, RC" is false.
8195/// Otherwise, return std::nullopt if we can't infer anything.
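/// For example (illustrative): "icmp ult X, 5" implies "icmp ult X, 10" is
/// true, since [0, 5) is a subset of [0, 10); and it implies "icmp ugt X, 20"
/// is false, since [0, 5) and [21, UINT_MAX] do not intersect.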
8196static std::optional<bool> isImpliedCondCommonOperandWithConstants(
8197 CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred,
8198 const APInt &RC) {
8199 ConstantRange DomCR = ConstantRange::makeExactICmpRegion(LPred, LC);
8200 ConstantRange CR = ConstantRange::makeExactICmpRegion(RPred, RC);
8201 ConstantRange Intersection = DomCR.intersectWith(CR);
8202 ConstantRange Difference = DomCR.difference(CR);
8203 if (Intersection.isEmptySet())
8204 return false;
8205 if (Difference.isEmptySet())
8206 return true;
8207 return std::nullopt;
8208}
8209
8210/// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
8211/// is true. Return false if LHS implies RHS is false. Otherwise, return
8212/// std::nullopt if we can't infer anything.
8213static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
8214 CmpInst::Predicate RPred,
8215 const Value *R0, const Value *R1,
8216 const DataLayout &DL,
8217 bool LHSIsTrue, unsigned Depth) {
8218 Value *L0 = LHS->getOperand(0);
8219 Value *L1 = LHS->getOperand(1);
8220
8221 // The rest of the logic assumes the LHS condition is true. If that's not the
8222 // case, invert the predicate to make it so.
8223 CmpInst::Predicate LPred =
8224 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
8225
8226 // Can we infer anything when the two compares have matching operands?
8227 bool AreSwappedOps;
8228 if (areMatchingOperands(L0, L1, R0, R1, AreSwappedOps))
8229 return isImpliedCondMatchingOperands(LPred, RPred, AreSwappedOps);
8230
8231 // Can we infer anything when the 0-operands match and the 1-operands are
8232 // constants (not necessarily matching)?
8233 const APInt *LC, *RC;
8234 if (L0 == R0 && match(L1, m_APInt(LC)) && match(R1, m_APInt(RC)))
8235 return isImpliedCondCommonOperandWithConstants(LPred, *LC, RPred, *RC);
8236
8237 if (LPred == RPred)
8238 return isImpliedCondOperands(LPred, L0, L1, R0, R1, DL, Depth);
8239
8240 return std::nullopt;
8241}
8242
8243/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
8244/// false. Otherwise, return std::nullopt if we can't infer anything. We
8245/// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select'
8246/// instruction.
8247static std::optional<bool>
8248isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
8249 const Value *RHSOp0, const Value *RHSOp1,
8250 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
8251 // The LHS must be an 'or', 'and', or a 'select' instruction.
8252 assert((LHS->getOpcode() == Instruction::And ||
8253 LHS->getOpcode() == Instruction::Or ||
8254 LHS->getOpcode() == Instruction::Select) &&
8255 "Expected LHS to be 'and', 'or', or 'select'.");
8257 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
8258
8259 // If the result of an 'or' is false, then we know both legs of the 'or' are
8260 // false. Similarly, if the result of an 'and' is true, then we know both
8261 // legs of the 'and' are true.
8262 const Value *ALHS, *ARHS;
8263 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
8264 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
8265 // FIXME: Make this non-recursive.
8266 if (std::optional<bool> Implication = isImpliedCondition(
8267 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
8268 return Implication;
8269 if (std::optional<bool> Implication = isImpliedCondition(
8270 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
8271 return Implication;
8272 return std::nullopt;
8273 }
8274 return std::nullopt;
8275}
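// Illustrative example (hypothetical i1 values): if "%c = and i1 %a, %b" is
// known true, both %a and %b must be true, so the recursion above may prove
// the RHS icmp from either leg; dually, a false "or i1 %a, %b" makes both
// legs false. No conclusion is drawn for a true 'or' or a false 'and'.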
8276
8277std::optional<bool>
8278llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
8279 const Value *RHSOp0, const Value *RHSOp1,
8280 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
8281 // Bail out when we hit the limit.
8282 if (Depth == MaxAnalysisRecursionDepth)
8283 return std::nullopt;
8284
8285 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
8286 // example.
8287 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
8288 return std::nullopt;
8289
8290 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
8291 "Expected integer type only!");
8292
8293 // Both LHS and RHS are icmps.
8294 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
8295 if (LHSCmp)
8296 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
8297 Depth);
8298
8299 // The LHS should be an 'or', 'and', or a 'select' instruction. We expect
8300 // the RHS to be an icmp.
8301 // FIXME: Add support for and/or/select on the RHS.
8302 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
8303 if ((LHSI->getOpcode() == Instruction::And ||
8304 LHSI->getOpcode() == Instruction::Or ||
8305 LHSI->getOpcode() == Instruction::Select))
8306 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
8307 Depth);
8308 }
8309 return std::nullopt;
8310}
8311
8312std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
8313 const DataLayout &DL,
8314 bool LHSIsTrue, unsigned Depth) {
8315 // LHS ==> RHS by definition
8316 if (LHS == RHS)
8317 return LHSIsTrue;
8318
8319 if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS))
8320 return isImpliedCondition(LHS, RHSCmp->getPredicate(),
8321 RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
8322 LHSIsTrue, Depth);
8323
8324 if (Depth == MaxAnalysisRecursionDepth)
8325 return std::nullopt;
8326
8327 // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
8328 // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
8329 const Value *RHS1, *RHS2;
8330 if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
8331 if (std::optional<bool> Imp =
8332 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
8333 if (*Imp == true)
8334 return true;
8335 if (std::optional<bool> Imp =
8336 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
8337 if (*Imp == true)
8338 return true;
8339 }
8340 if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
8341 if (std::optional<bool> Imp =
8342 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
8343 if (*Imp == false)
8344 return false;
8345 if (std::optional<bool> Imp =
8346 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
8347 if (*Imp == false)
8348 return false;
8349 }
8350
8351 return std::nullopt;
8352}
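// A minimal usage sketch of the Value-to-Value overload (names here are
// hypothetical; Cond1 and Cond2 are i1 values from one function, DL its
// DataLayout):
//   if (std::optional<bool> Imp = isImpliedCondition(Cond1, Cond2, DL))
//     // *Imp == true : Cond1 being true forces Cond2 to be true.
//     // *Imp == false: Cond1 being true forces Cond2 to be false.
//     ;
// A std::nullopt result means no implication could be established.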
8353
8354// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
8355// condition dominating ContextI, or nullptr if no condition is found.
8356static std::pair<Value *, bool>
8357getDomPredecessorCondition(const Instruction *ContextI) {
8358 if (!ContextI || !ContextI->getParent())
8359 return {nullptr, false};
8360
8361 // TODO: This is a poor/cheap way to determine dominance. Should we use a
8362 // dominator tree (eg, from a SimplifyQuery) instead?
8363 const BasicBlock *ContextBB = ContextI->getParent();
8364 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
8365 if (!PredBB)
8366 return {nullptr, false};
8367
8368 // We need a conditional branch in the predecessor.
8369 Value *PredCond;
8370 BasicBlock *TrueBB, *FalseBB;
8371 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
8372 return {nullptr, false};
8373
8374 // The branch should get simplified. Don't bother simplifying this condition.
8375 if (TrueBB == FalseBB)
8376 return {nullptr, false};
8377
8378 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
8379 "Predecessor block does not point to successor?");
8380
8381 // Is this condition implied by the predecessor condition?
8382 return {PredCond, TrueBB == ContextBB};
8383}
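// Illustrative CFG (hypothetical labels): given
//   pred: br i1 %cond, label %ctx, label %other
//   ctx:  ... ContextI ...
// the function returns {%cond, true}; if ContextI instead lived in %other,
// it would return {%cond, false}.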
8384
8385std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
8386 const Instruction *ContextI,
8387 const DataLayout &DL) {
8388 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
8389 auto PredCond = getDomPredecessorCondition(ContextI);
8390 if (PredCond.first)
8391 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
8392 return std::nullopt;
8393}
8394
8395std::optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
8396 const Value *LHS,
8397 const Value *RHS,
8398 const Instruction *ContextI,
8399 const DataLayout &DL) {
8400 auto PredCond = getDomPredecessorCondition(ContextI);
8401 if (PredCond.first)
8402 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
8403 PredCond.second);
8404 return std::nullopt;
8405}
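// A minimal usage sketch (hypothetical names): to ask whether "%x u< %n"
// must already hold at instruction I, given the single branch dominating it:
//   std::optional<bool> Known =
//       isImpliedByDomCondition(ICmpInst::ICMP_ULT, X, N, I, DL);
// std::nullopt only means nothing could be inferred by this cheap,
// single-predecessor walk.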
8406
8407static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
8408 APInt &Upper, const InstrInfoQuery &IIQ,
8409 bool PreferSignedRange) {
8410 unsigned Width = Lower.getBitWidth();
8411 const APInt *C;
8412 switch (BO.getOpcode()) {
8413 case Instruction::Add:
8414 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
8415 bool HasNSW = IIQ.hasNoSignedWrap(&BO);
8416 bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
8417
8418 // If the caller expects a signed compare, then try to use a signed range.
8419 // Otherwise if both no-wraps are set, use the unsigned range because it
8420 // is never larger than the signed range. Example:
8421 // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
8422 if (PreferSignedRange && HasNSW && HasNUW)
8423 HasNUW = false;
8424
8425 if (HasNUW) {
8426 // 'add nuw x, C' produces [C, UINT_MAX].
8427 Lower = *C;
8428 } else if (HasNSW) {
8429 if (C->isNegative()) {
8430 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
8431 Lower = APInt::getSignedMinValue(Width);
8432 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
8433 } else {
8434 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
8435 Lower = APInt::getSignedMinValue(Width) + *C;
8436 Upper = APInt::getSignedMaxValue(Width) + 1;
8437 }
8438 }
8439 }
8440 break;
8441
8442 case Instruction::And:
8443 if (match(BO.getOperand(1), m_APInt(C)))
8444 // 'and x, C' produces [0, C].
8445 Upper = *C + 1;
8446 break;
8447
8448 case Instruction::Or:
8449 if (match(BO.getOperand(1), m_APInt(C)))
8450 // 'or x, C' produces [C, UINT_MAX].
8451 Lower = *C;
8452 break;
8453
8454 case Instruction::AShr:
8455 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
8456 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
8457 Lower = APInt::getSignedMinValue(Width).ashr(*C);
8458 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
8459 } else if (match(BO.getOperand(0), m_APInt(C))) {
8460 unsigned ShiftAmount = Width - 1;
8461 if (!C->isZero() && IIQ.isExact(&BO))
8462 ShiftAmount = C->countr_zero();
8463 if (C->isNegative()) {
8464 // 'ashr C, x' produces [C, C >> (Width-1)]
8465 Lower = *C;
8466 Upper = C->ashr(ShiftAmount) + 1;
8467 } else {
8468 // 'ashr C, x' produces [C >> (Width-1), C]
8469 Lower = C->ashr(ShiftAmount);
8470 Upper = *C + 1;
8471 }
8472 }
8473 break;
8474
8475 case Instruction::LShr:
8476 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
8477 // 'lshr x, C' produces [0, UINT_MAX >> C].
8478 Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
8479 } else if (match(BO.getOperand(0), m_APInt(C))) {
8480 // 'lshr C, x' produces [C >> (Width-1), C].
8481 unsigned ShiftAmount = Width - 1;
8482 if (!C->isZero() && IIQ.isExact(&BO))
8483 ShiftAmount = C->countr_zero();
8484 Lower = C->lshr(ShiftAmount);
8485 Upper = *C + 1;
8486 }
8487 break;
8488
8489 case Instruction::Shl:
8490 if (match(BO.getOperand(0), m_APInt(C))) {
8491 if (IIQ.hasNoUnsignedWrap(&BO)) {
8492 // 'shl nuw C, x' produces [C, C << CLZ(C)]
8493 Lower = *C;
8494 Upper = Lower.shl(Lower.countl_zero()) + 1;
8495 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
8496 if (C->isNegative()) {
8497 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
8498 unsigned ShiftAmount = C->countl_one() - 1;
8499 Lower = C->shl(ShiftAmount);
8500 Upper = *C + 1;
8501 } else {
8502 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
8503 unsigned ShiftAmount = C->countl_zero() - 1;
8504 Lower = *C;
8505 Upper = C->shl(ShiftAmount) + 1;
8506 }
8507 }
8508 }
8509 break;
8510
8511 case Instruction::SDiv:
8512 if (match(BO.getOperand(1), m_APInt(C))) {
8513 APInt IntMin = APInt::getSignedMinValue(Width);
8514 APInt IntMax = APInt::getSignedMaxValue(Width);
8515 if (C->isAllOnes()) {
8516 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
8517 // (INT_MIN is excluded because INT_MIN sdiv -1 overflows).
8518 Lower = IntMin + 1;
8519 Upper = IntMax + 1;
8520 } else if (C->countl_zero() < Width - 1) {
8521 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
8522 // where C != -1 and C != 0 and C != 1
8523 Lower = IntMin.sdiv(*C);
8524 Upper = IntMax.sdiv(*C);
8525 if (Lower.sgt(Upper))
8526 std::swap(Lower, Upper);
8527 Upper = Upper + 1;
8528 assert(Upper != Lower && "Upper part of range has wrapped!");
8529 }
8530 } else if (match(BO.getOperand(0), m_APInt(C))) {
8531 if (C->isMinSignedValue()) {
8532 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
8533 Lower = *C;
8534 Upper = Lower.lshr(1) + 1;
8535 } else {
8536 // 'sdiv C, x' produces [-|C|, |C|].
8537 Upper = C->abs() + 1;
8538 Lower = (-Upper) + 1;
8539 }
8540 }
8541 break;
8542
8543 case Instruction::UDiv:
8544 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
8545 // 'udiv x, C' produces [0, UINT_MAX / C].
8546 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
8547 } else if (match(BO.getOperand(0), m_APInt(C))) {
8548 // 'udiv C, x' produces [0, C].
8549 Upper = *C + 1;
8550 }
8551 break;
8552
8553 case Instruction::SRem:
8554 if (match(BO.getOperand(1), m_APInt(C))) {
8555 // 'srem x, C' produces (-|C|, |C|).
8556 Upper = C->abs();
8557 Lower = (-Upper) + 1;
8558 }
8559 break;
8560
8561 case Instruction::URem:
8562 if (match(BO.getOperand(1), m_APInt(C)))
8563 // 'urem x, C' produces [0, C).
8564 Upper = *C;
8565 break;
8566
8567 default:
8568 break;
8569 }
8570}
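// Worked example (hypothetical i8 instruction): for "add nuw nsw i8 %x, -2"
// with PreferSignedRange=false, the nuw branch above keeps the unsigned
// range [254, UINT_MAX] = [254, 255]; with PreferSignedRange=true the nsw
// branch yields [SINT_MIN, SINT_MAX - 2] = [-128, 125], as in the comment on
// the Add case.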
8571
8572static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II) {
8573 unsigned Width = II.getType()->getScalarSizeInBits();
8574 const APInt *C;
8575 switch (II.getIntrinsicID()) {
8576 case Intrinsic::ctpop:
8577 case Intrinsic::ctlz:
8578 case Intrinsic::cttz:
8579 // Maximum of set/clear bits is the bit width.
8580 return ConstantRange(APInt::getZero(Width), APInt(Width, Width + 1));
8581 case Intrinsic::uadd_sat:
8582 // uadd.sat(x, C) produces [C, UINT_MAX].
8583 if (match(II.getOperand(0), m_APInt(C)) ||
8584 match(II.getOperand(1), m_APInt(C)))
8585 return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
8586 break;
8587 case Intrinsic::sadd_sat:
8588 if (match(II.getOperand(0), m_APInt(C)) ||
8589 match(II.getOperand(1), m_APInt(C))) {
8590 if (C->isNegative())
8591 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
8592 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
8593 APInt::getSignedMaxValue(Width) + *C +
8594 1);
8595
8596 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
8597 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) + *C,
8598 APInt::getSignedMaxValue(Width) + 1);
8599 }
8600 break;
8601 case Intrinsic::usub_sat:
8602 // usub.sat(C, x) produces [0, C].
8603 if (match(II.getOperand(0), m_APInt(C)))
8604 return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
8605
8606 // usub.sat(x, C) produces [0, UINT_MAX - C].
8607 if (match(II.getOperand(1), m_APInt(C)))
8608 return ConstantRange::getNonEmpty(APInt::getZero(Width),
8609 APInt::getMaxValue(Width) - *C + 1);
8610 break;
8611 case Intrinsic::ssub_sat:
8612 if (match(II.getOperand(0), m_APInt(C))) {
8613 if (C->isNegative())
8614 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
8615 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
8616 *C - APInt::getSignedMinValue(Width) +
8617 1);
8618
8619 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
8620 return ConstantRange::getNonEmpty(*C - APInt::getSignedMaxValue(Width),
8621 APInt::getSignedMaxValue(Width) + 1);
8622 } else if (match(II.getOperand(1), m_APInt(C))) {
8623 if (C->isNegative())
8624 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX].
8625 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) - *C,
8626 APInt::getSignedMaxValue(Width) + 1);
8627
8628 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
8629 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
8630 APInt::getSignedMaxValue(Width) - *C +
8631 1);
8632 }
8633 break;
8634 case Intrinsic::umin:
8635 case Intrinsic::umax:
8636 case Intrinsic::smin:
8637 case Intrinsic::smax:
8638 if (!match(II.getOperand(0), m_APInt(C)) &&
8639 !match(II.getOperand(1), m_APInt(C)))
8640 break;
8641
8642 switch (II.getIntrinsicID()) {
8643 case Intrinsic::umin:
8644 return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
8645 case Intrinsic::umax:
8646 return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
8647 case Intrinsic::smin:
8648 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
8649 *C + 1);
8650 case Intrinsic::smax:
8651 return ConstantRange::getNonEmpty(*C,
8652 APInt::getSignedMaxValue(Width) + 1);
8653 default:
8654 llvm_unreachable("Must be min/max intrinsic");
8655 }
8656 break;
8657 case Intrinsic::abs:
8658 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
8659 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
8660 if (match(II.getOperand(1), m_One()))
8661 return ConstantRange(APInt::getZero(Width),
8662 APInt::getSignedMaxValue(Width) + 1);
8663
8664 return ConstantRange(APInt::getZero(Width),
8665 APInt::getSignedMinValue(Width) + 1);
8666 case Intrinsic::vscale:
8667 if (!II.getParent() || !II.getFunction())
8668 break;
8669 return getVScaleRange(II.getFunction(), Width);
8670 default:
8671 break;
8672 }
8673
8674 return ConstantRange::getFull(Width);
8675}
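// Illustrative examples (hypothetical i8 calls): "llvm.ctpop.i8(%x)" yields
// ConstantRange(0, 9), i.e. [0, 8]; "llvm.uadd.sat.i8(%x, 16)" hits the
// uadd_sat case and yields getNonEmpty(16, 0), the wrapped range [16, 255].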
8676
8677static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
8678 APInt &Upper, const InstrInfoQuery &IIQ) {
8679 const Value *LHS = nullptr, *RHS = nullptr;
8680 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
8681 if (R.Flavor == SPF_UNKNOWN)
8682 return;
8683
8684 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
8685
8686 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
8687 // If the negation part of the abs (in RHS) has the NSW flag,
8688 // then the result of abs(X) is [0..SIGNED_MAX],
8689 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
8690 Lower = APInt::getZero(BitWidth);
8691 if (match(RHS, m_Neg(m_Specific(LHS))) &&
8692 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
8693 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
8694 else
8695 Upper = APInt::getSignedMinValue(BitWidth) + 1;
8696 return;
8697 }
8698
8699 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
8700 // The result of -abs(X) is <= 0.
8701 Lower = APInt::getSignedMinValue(BitWidth);
8702 Upper = APInt(BitWidth, 1);
8703 return;
8704 }
8705
8706 const APInt *C;
8707 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
8708 return;
8709
8710 switch (R.Flavor) {
8711 case SPF_UMIN:
8712 Upper = *C + 1;
8713 break;
8714 case SPF_UMAX:
8715 Lower = *C;
8716 break;
8717 case SPF_SMIN:
8718 Lower = APInt::getSignedMinValue(BitWidth);
8719 Upper = *C + 1;
8720 break;
8721 case SPF_SMAX:
8722 Lower = *C;
8723 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
8724 break;
8725 default:
8726 break;
8727 }
8728}
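// Illustrative example (hypothetical IR): for
//   %m = select i1 (icmp slt i32 %x, 42), i32 %x, i32 42
// matchSelectPattern reports SPF_SMIN with the constant 42, so the switch
// above sets the range to [SINT_MIN, 43), i.e. all values <= 42.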
8729
8730static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
8731 // The maximum representable value of a half is 65504. For floats the maximum
8732 // value is 3.4e38, which requires roughly 129 bits.
8733 unsigned BitWidth = I->getType()->getScalarSizeInBits();
8734 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
8735 return;
8736 if (isa<FPToSIInst>(I) && BitWidth >= 17) {
8737 Lower = APInt(BitWidth, -65504);
8738 Upper = APInt(BitWidth, 65505);
8739 }
8740
8741 if (isa<FPToUIInst>(I) && BitWidth >= 16) {
8742 // For a fptoui the lower limit is left as 0.
8743 Upper = APInt(BitWidth, 65505);
8744 }
8745}
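// Worked example (hypothetical): "%i = fptosi half %h to i32" has
// BitWidth = 32 >= 17, so the limits become [-65504, 65505), exactly the
// finite half range; for an i16 result the signed bound 65504 would not be
// representable, so the limits are deliberately left untouched.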
8746
8747ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
8748 bool UseInstrInfo, AssumptionCache *AC,
8749 const Instruction *CtxI,
8750 const DominatorTree *DT,
8751 unsigned Depth) {
8752 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
8753
8754 if (Depth == MaxAnalysisRecursionDepth)
8755 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
8756
8757 const APInt *C;
8758 if (match(V, m_APInt(C)))
8759 return ConstantRange(*C);
8760
8761 InstrInfoQuery IIQ(UseInstrInfo);
8762 unsigned BitWidth = V->getType()->getScalarSizeInBits();
8763 ConstantRange CR = ConstantRange::getFull(BitWidth);
8764 if (auto *BO = dyn_cast<BinaryOperator>(V)) {
8765 APInt Lower = APInt(BitWidth, 0);
8766 APInt Upper = APInt(BitWidth, 0);
8767 // TODO: Return ConstantRange.
8768 setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
8769 CR = ConstantRange::getNonEmpty(Lower, Upper);
8770 } else if (auto *II = dyn_cast<IntrinsicInst>(V))
8771 CR = getRangeForIntrinsic(*II);
8772 else if (auto *SI = dyn_cast<SelectInst>(V)) {
8773 APInt Lower = APInt(BitWidth, 0);
8774 APInt Upper = APInt(BitWidth, 0);
8775 // TODO: Return ConstantRange.
8776 setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
8777 CR = ConstantRange::getNonEmpty(Lower, Upper);
8778 } else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) {
8779 APInt Lower = APInt(BitWidth, 0);
8780 APInt Upper = APInt(BitWidth, 0);
8781 // TODO: Return ConstantRange.
8782 setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
8783 CR = ConstantRange::getNonEmpty(Lower, Upper);
8784 }
8785
8786 if (auto *I = dyn_cast<Instruction>(V))
8787 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
8788 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
8789
8790 if (CtxI && AC) {
8791 // Try to restrict the range based on information from assumptions.
8792 for (auto &AssumeVH : AC->assumptionsFor(V)) {
8793 if (!AssumeVH)
8794 continue;
8795 CallInst *I = cast<CallInst>(AssumeVH);
8796 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
8797 "Got assumption for the wrong function!");
8798 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
8799 "must be an assume intrinsic");
8800
8801 if (!isValidAssumeForContext(I, CtxI, DT))
8802 continue;
8803 Value *Arg = I->getArgOperand(0);
8804 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
8805 // Currently we just use information from comparisons.
8806 if (!Cmp || Cmp->getOperand(0) != V)
8807 continue;
8808 // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
8809 ConstantRange RHS =
8810 computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
8811 UseInstrInfo, AC, I, DT, Depth + 1);
8812 CR = CR.intersectWith(
8813 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
8814 }
8815 }
8816
8817 return CR;
8818}
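// A minimal usage sketch (hypothetical names; V is an integer-typed Value):
//   ConstantRange CR = computeConstantRange(V, /*ForSigned=*/false);
//   if (!CR.contains(APInt::getZero(CR.getBitWidth())))
//     ;  // V can be treated as known non-zero under this analysis.
// Passing an AssumptionCache and a context instruction additionally narrows
// the result using dominating llvm.assume conditions, as above.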