Bug Summary

File: llvm/lib/Analysis/ValueTracking.cpp
Warning: line 213, column 31
Called C++ object pointer is null
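This diagnostic ("Called C++ object pointer is null", from the core analyzer checks) fires when a member function is invoked through a pointer that is null along some feasible path. A minimal self-contained sketch of the pattern, using hypothetical Shape/Circle types rather than LLVM code:

#include <cstdio>

struct Shape { virtual ~Shape() = default; };
struct Circle : Shape { double radius() const { return 2.0; } };

// dyn_cast-style query: yields nullptr when the cast fails, much like
// llvm::dyn_cast<> can for values in the code below.
static Circle *asCircle(Shape *S) { return dynamic_cast<Circle *>(S); }

static double area(Shape *S) {
  Circle *C = asCircle(S);      // C may be null here
  return 3.14159 * C->radius(); // warning: called C++ object pointer is null
}

int main() {
  Shape S;                       // not a Circle, so asCircle(&S) returns null
  std::printf("%f\n", area(&S));
  return 0;
}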

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ValueTracking.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Analysis -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Analysis -I include -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-19-134126-35450-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Analysis/ValueTracking.cpp

/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Analysis/ValueTracking.cpp

1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains routines that help analyze properties that chains of
10// computations have.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/ValueTracking.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/None.h"
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/Analysis/AliasAnalysis.h"
27#include "llvm/Analysis/AssumeBundleQueries.h"
28#include "llvm/Analysis/AssumptionCache.h"
29#include "llvm/Analysis/EHPersonalities.h"
30#include "llvm/Analysis/GuardUtils.h"
31#include "llvm/Analysis/InstructionSimplify.h"
32#include "llvm/Analysis/Loads.h"
33#include "llvm/Analysis/LoopInfo.h"
34#include "llvm/Analysis/OptimizationRemarkEmitter.h"
35#include "llvm/Analysis/TargetLibraryInfo.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/Attributes.h"
38#include "llvm/IR/BasicBlock.h"
39#include "llvm/IR/Constant.h"
40#include "llvm/IR/ConstantRange.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DerivedTypes.h"
43#include "llvm/IR/DiagnosticInfo.h"
44#include "llvm/IR/Dominators.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/GetElementPtrTypeIterator.h"
47#include "llvm/IR/GlobalAlias.h"
48#include "llvm/IR/GlobalValue.h"
49#include "llvm/IR/GlobalVariable.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instruction.h"
52#include "llvm/IR/Instructions.h"
53#include "llvm/IR/IntrinsicInst.h"
54#include "llvm/IR/Intrinsics.h"
55#include "llvm/IR/IntrinsicsAArch64.h"
56#include "llvm/IR/IntrinsicsRISCV.h"
57#include "llvm/IR/IntrinsicsX86.h"
58#include "llvm/IR/LLVMContext.h"
59#include "llvm/IR/Metadata.h"
60#include "llvm/IR/Module.h"
61#include "llvm/IR/Operator.h"
62#include "llvm/IR/PatternMatch.h"
63#include "llvm/IR/Type.h"
64#include "llvm/IR/User.h"
65#include "llvm/IR/Value.h"
66#include "llvm/Support/Casting.h"
67#include "llvm/Support/CommandLine.h"
68#include "llvm/Support/Compiler.h"
69#include "llvm/Support/ErrorHandling.h"
70#include "llvm/Support/KnownBits.h"
71#include "llvm/Support/MathExtras.h"
72#include <algorithm>
73#include <array>
74#include <cassert>
75#include <cstdint>
76#include <iterator>
77#include <utility>
78
79using namespace llvm;
80using namespace llvm::PatternMatch;
81
82// Controls the number of uses of the value searched for possible
83// dominating comparisons.
84static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
85 cl::Hidden, cl::init(20));
86
87// According to the LangRef, branching on a poison condition is absolutely
88// immediate full UB. However, historically we haven't implemented that
89// consistently as we have an important transformation (non-trivial unswitch)
90// which introduces instances of branch on poison/undef to otherwise well
91// defined programs. This flag exists to let us test the optimization benefit
92// of exploiting the specified behavior (in combination with enabling the
93// unswitch fix).
94static cl::opt<bool> BranchOnPoisonAsUB("branch-on-poison-as-ub",
95 cl::Hidden, cl::init(false));
96
97
98/// Returns the bitwidth of the given scalar or pointer type. For vector types,
99/// returns the element type's bitwidth.
100static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
101 if (unsigned BitWidth = Ty->getScalarSizeInBits())
102 return BitWidth;
103
104 return DL.getPointerTypeSizeInBits(Ty);
105}
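A quick worked illustration of the contract above, assuming a typical 64-bit DataLayout (a sketch):

// getBitWidth(i32, DL)       == 32   (scalar width)
// getBitWidth(<4 x i16>, DL) == 16   (element width, per the doc comment)
// getBitWidth(i8*, DL)       == 64   (getScalarSizeInBits() returns 0 for
//                                     pointers, so DL's pointer size is used)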
106
107namespace {
108
109// Simplifying using an assume can only be done in a particular control-flow
110// context (the context instruction provides that context). If an assume and
111// the context instruction are not in the same block then the DT helps in
112// figuring out if we can use it.
113struct Query {
114 const DataLayout &DL;
115 AssumptionCache *AC;
116 const Instruction *CxtI;
117 const DominatorTree *DT;
118
119 // Unlike the other analyses, this may be a nullptr because not all clients
120 // provide it currently.
121 OptimizationRemarkEmitter *ORE;
122
123 /// If true, it is safe to use metadata during simplification.
124 InstrInfoQuery IIQ;
125
126 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
127 const DominatorTree *DT, bool UseInstrInfo,
128 OptimizationRemarkEmitter *ORE = nullptr)
129 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
130};
131
132} // end anonymous namespace
133
134// Given the provided Value and, potentially, a context instruction, return
135// the preferred context instruction (if any).
136static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
137 // If we've been provided with a context instruction, then use that (provided
138 // it has been inserted).
139 if (CxtI && CxtI->getParent())
140 return CxtI;
141
142 // If the value is really an already-inserted instruction, then use that.
143 CxtI = dyn_cast<Instruction>(V);
144 if (CxtI && CxtI->getParent())
145 return CxtI;
146
147 return nullptr;
148}
149
150static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
151 // If we've been provided with a context instruction, then use that (provided
152 // it has been inserted).
153 if (CxtI && CxtI->getParent())
154 return CxtI;
155
156 // If the value is really an already-inserted instruction, then use that.
157 CxtI = dyn_cast<Instruction>(V1);
158 if (CxtI && CxtI->getParent())
159 return CxtI;
160
161 CxtI = dyn_cast<Instruction>(V2);
162 if (CxtI && CxtI->getParent())
163 return CxtI;
164
165 return nullptr;
166}
167
168static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
169 const APInt &DemandedElts,
170 APInt &DemandedLHS, APInt &DemandedRHS) {
171 // The length of scalable vectors is unknown at compile time, thus we
172 // cannot check their values
173 if (isa<ScalableVectorType>(Shuf->getType()))
174 return false;
175
176 int NumElts =
177 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
178 int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
179 DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
180 if (DemandedElts.isZero())
181 return true;
182 // Simple case of a shuffle with zeroinitializer.
183 if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
184 DemandedLHS.setBit(0);
185 return true;
186 }
187 for (int i = 0; i != NumMaskElts; ++i) {
188 if (!DemandedElts[i])
189 continue;
190 int M = Shuf->getMaskValue(i);
191 assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
192
193 // For undef elements, we don't know anything about the common state of
194 // the shuffle result.
195 if (M == -1)
196 return false;
197 if (M < NumElts)
198 DemandedLHS.setBit(M % NumElts);
199 else
200 DemandedRHS.setBit(M % NumElts);
201 }
202
203 return true;
204}
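A worked pass through the loop above, assuming two <4 x i32> operands, mask <4, 0, 5, 1>, and DemandedElts = 0b0011 (a sketch):

// i = 0: DemandedElts[0] set, M = 4 >= NumElts -> DemandedRHS.setBit(4 % 4) = bit 0
// i = 1: DemandedElts[1] set, M = 0 <  NumElts -> DemandedLHS.setBit(0)
// i = 2, 3: DemandedElts[i] clear -> skipped
// Result: DemandedLHS = 0b0001, DemandedRHS = 0b0001, return true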
205
206static void computeKnownBits(const Value *V, const APInt &DemandedElts,
207 KnownBits &Known, unsigned Depth, const Query &Q);
208
209static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
210 const Query &Q) {
211 // FIXME: We currently have no way to represent the DemandedElts of a scalable
212 // vector
213 if (isa<ScalableVectorType>(V->getType())) {
Step 25: Called C++ object pointer is null
214 Known.resetAll();
215 return;
216 }
217
218 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
219 APInt DemandedElts =
220 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
221 computeKnownBits(V, DemandedElts, Known, Depth, Q);
222}
223
224void llvm::computeKnownBits(const Value *V, KnownBits &Known,
225 const DataLayout &DL, unsigned Depth,
226 AssumptionCache *AC, const Instruction *CxtI,
227 const DominatorTree *DT,
228 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
229 ::computeKnownBits(V, Known, Depth,
230 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
231}
232
233void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
234 KnownBits &Known, const DataLayout &DL,
235 unsigned Depth, AssumptionCache *AC,
236 const Instruction *CxtI, const DominatorTree *DT,
237 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
238 ::computeKnownBits(V, DemandedElts, Known, Depth,
239 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
240}
241
242static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
243 unsigned Depth, const Query &Q);
244
245static KnownBits computeKnownBits(const Value *V, unsigned Depth,
246 const Query &Q);
247
248KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
249 unsigned Depth, AssumptionCache *AC,
250 const Instruction *CxtI,
251 const DominatorTree *DT,
252 OptimizationRemarkEmitter *ORE,
253 bool UseInstrInfo) {
254 return ::computeKnownBits(
255 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
256}
257
258KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
259 const DataLayout &DL, unsigned Depth,
260 AssumptionCache *AC, const Instruction *CxtI,
261 const DominatorTree *DT,
262 OptimizationRemarkEmitter *ORE,
263 bool UseInstrInfo) {
264 return ::computeKnownBits(
265 V, DemandedElts, Depth,
266 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
267}
268
269bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
270 const DataLayout &DL, AssumptionCache *AC,
271 const Instruction *CxtI, const DominatorTree *DT,
272 bool UseInstrInfo) {
273 assert(LHS->getType() == RHS->getType() &&
274 "LHS and RHS should have the same type");
275 assert(LHS->getType()->isIntOrIntVectorTy() &&
276 "LHS and RHS should be integers");
277 // Look for an inverted mask: (X & ~M) op (Y & M).
278 Value *M;
279 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
280 match(RHS, m_c_And(m_Specific(M), m_Value())))
281 return true;
282 if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
283 match(LHS, m_c_And(m_Specific(M), m_Value())))
284 return true;
285 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
286 KnownBits LHSKnown(IT->getBitWidth());
287 KnownBits RHSKnown(IT->getBitWidth());
288 computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
289 computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
290 return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
291}
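A worked instance of the inverted-mask early exit above, for i8 with M = 0b00001111 (a sketch):

// LHS = X & ~M can only have bits 7..4 set; RHS = Y & M only bits 3..0.
// No position can be one on both sides, so the values share no common bits
// and the function returns true without any known-bits computation.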
292
293bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
294 return !I->user_empty() && all_of(I->users(), [](const User *U) {
295 ICmpInst::Predicate P;
296 return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
297 });
298}
299
300static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
301 const Query &Q);
302
303bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
304 bool OrZero, unsigned Depth,
305 AssumptionCache *AC, const Instruction *CxtI,
306 const DominatorTree *DT, bool UseInstrInfo) {
307 return ::isKnownToBeAPowerOfTwo(
308 V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
309}
310
311static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
312 unsigned Depth, const Query &Q);
313
314static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
315
316bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
317 AssumptionCache *AC, const Instruction *CxtI,
318 const DominatorTree *DT, bool UseInstrInfo) {
319 return ::isKnownNonZero(V, Depth,
320 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
321}
322
323bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
324 unsigned Depth, AssumptionCache *AC,
325 const Instruction *CxtI, const DominatorTree *DT,
326 bool UseInstrInfo) {
327 KnownBits Known =
328 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
329 return Known.isNonNegative();
330}
331
332bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
333 AssumptionCache *AC, const Instruction *CxtI,
334 const DominatorTree *DT, bool UseInstrInfo) {
335 if (auto *CI = dyn_cast<ConstantInt>(V))
336 return CI->getValue().isStrictlyPositive();
337
338 // TODO: We're doing two recursive queries here. We should factor this such
339 // that only a single query is needed.
340 return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
341 isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
342}
343
344bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
345 AssumptionCache *AC, const Instruction *CxtI,
346 const DominatorTree *DT, bool UseInstrInfo) {
347 KnownBits Known =
348 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
349 return Known.isNegative();
350}
351
352static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
353 const Query &Q);
354
355bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
356 const DataLayout &DL, AssumptionCache *AC,
357 const Instruction *CxtI, const DominatorTree *DT,
358 bool UseInstrInfo) {
359 return ::isKnownNonEqual(V1, V2, 0,
360 Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
361 UseInstrInfo, /*ORE=*/nullptr));
362}
363
364static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
365 const Query &Q);
366
367bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
368 const DataLayout &DL, unsigned Depth,
369 AssumptionCache *AC, const Instruction *CxtI,
370 const DominatorTree *DT, bool UseInstrInfo) {
371 return ::MaskedValueIsZero(
372 V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
373}
374
375static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
376 unsigned Depth, const Query &Q);
377
378static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
379 const Query &Q) {
380 // FIXME: We currently have no way to represent the DemandedElts of a scalable
381 // vector
382 if (isa<ScalableVectorType>(V->getType()))
383 return 1;
384
385 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
386 APInt DemandedElts =
387 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
388 return ComputeNumSignBits(V, DemandedElts, Depth, Q);
389}
390
391unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
392 unsigned Depth, AssumptionCache *AC,
393 const Instruction *CxtI,
394 const DominatorTree *DT, bool UseInstrInfo) {
395 return ::ComputeNumSignBits(
396 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
397}
398
399unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
400 unsigned Depth, AssumptionCache *AC,
401 const Instruction *CxtI,
402 const DominatorTree *DT) {
403 unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
404 return V->getType()->getScalarSizeInBits() - SignBits + 1;
405}
406
407static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
408 bool NSW, const APInt &DemandedElts,
409 KnownBits &KnownOut, KnownBits &Known2,
410 unsigned Depth, const Query &Q) {
411 computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
412
413 // If one operand is unknown and we have no nowrap information,
414 // the result will be unknown independently of the second operand.
415 if (KnownOut.isUnknown() && !NSW)
416 return;
417
418 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
419 KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
420}
421
422static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
423 const APInt &DemandedElts, KnownBits &Known,
424 KnownBits &Known2, unsigned Depth,
425 const Query &Q) {
426 computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
427 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
428
429 bool isKnownNegative = false;
430 bool isKnownNonNegative = false;
431 // If the multiplication is known not to overflow, compute the sign bit.
432 if (NSW) {
433 if (Op0 == Op1) {
434 // The product of a number with itself is non-negative.
435 isKnownNonNegative = true;
436 } else {
437 bool isKnownNonNegativeOp1 = Known.isNonNegative();
438 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
439 bool isKnownNegativeOp1 = Known.isNegative();
440 bool isKnownNegativeOp0 = Known2.isNegative();
441 // The product of two numbers with the same sign is non-negative.
442 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
443 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
444 // The product of a negative number and a non-negative number is either
445 // negative or zero.
446 if (!isKnownNonNegative)
447 isKnownNegative =
448 (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
449 Known2.isNonZero()) ||
450 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
451 }
452 }
453
454 Known = KnownBits::mul(Known, Known2);
455
456 // Only make use of no-wrap flags if we failed to compute the sign bit
457 // directly. This matters if the multiplication always overflows, in
458 // which case we prefer to follow the result of the direct computation,
459 // though as the program is invoking undefined behaviour we can choose
460 // whatever we like here.
461 if (isKnownNonNegative && !Known.isNegative())
462 Known.makeNonNegative();
463 else if (isKnownNegative && !Known.isNonNegative())
464 Known.makeNegative();
465}
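A worked instance of the NSW sign reasoning above (a sketch):

// Suppose Op1 is known negative and Op0 is known non-negative and non-zero
// (so isKnownNegative is set). A negative times a positive cannot be
// non-negative under nsw, so if KnownBits::mul left the sign bit unknown,
// Known.makeNegative() pins it to one.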
466
467void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
468 KnownBits &Known) {
469 unsigned BitWidth = Known.getBitWidth();
470 unsigned NumRanges = Ranges.getNumOperands() / 2;
471 assert(NumRanges >= 1);
472
473 Known.Zero.setAllBits();
474 Known.One.setAllBits();
475
476 for (unsigned i = 0; i < NumRanges; ++i) {
477 ConstantInt *Lower =
478 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
479 ConstantInt *Upper =
480 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
481 ConstantRange Range(Lower->getValue(), Upper->getValue());
482
483 // The first CommonPrefixBits of all values in Range are equal.
484 unsigned CommonPrefixBits =
485 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
486 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
487 APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
488 Known.One &= UnsignedMax & Mask;
489 Known.Zero &= ~UnsignedMax & Mask;
490 }
491}
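A worked example of the common-prefix computation above, assuming !range metadata describing [32, 36) on an i8 load (a sketch):

// UnsignedMin = 32 = 0b00100000, UnsignedMax = 35 = 0b00100011
// UnsignedMax ^ UnsignedMin = 0b00000011 -> countLeadingZeros() = 6
// Mask = 0b11111100 (the 6 common prefix bits)
// Known.One  = UnsignedMax & Mask  = 0b00100000
// Known.Zero = ~UnsignedMax & Mask = 0b11011100
// i.e. the loaded value is known to match 001000?? (low two bits unknown).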
492
493static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
494 SmallVector<const Value *, 16> WorkSet(1, I);
495 SmallPtrSet<const Value *, 32> Visited;
496 SmallPtrSet<const Value *, 16> EphValues;
497
498 // The instruction defining an assumption's condition itself is always
499 // considered ephemeral to that assumption (even if it has other
500 // non-ephemeral users). See r246696's test case for an example.
501 if (is_contained(I->operands(), E))
502 return true;
503
504 while (!WorkSet.empty()) {
505 const Value *V = WorkSet.pop_back_val();
506 if (!Visited.insert(V).second)
507 continue;
508
509 // If all uses of this value are ephemeral, then so is this value.
510 if (llvm::all_of(V->users(), [&](const User *U) {
511 return EphValues.count(U);
512 })) {
513 if (V == E)
514 return true;
515
516 if (V == I || (isa<Instruction>(V) &&
517 !cast<Instruction>(V)->mayHaveSideEffects() &&
518 !cast<Instruction>(V)->isTerminator())) {
519 EphValues.insert(V);
520 if (const User *U = dyn_cast<User>(V))
521 append_range(WorkSet, U->operands());
522 }
523 }
524 }
525
526 return false;
527}
528
529// Is this an intrinsic that cannot be speculated but also cannot trap?
530bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
531 if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
532 return CI->isAssumeLikeIntrinsic();
533
534 return false;
535}
536
537bool llvm::isValidAssumeForContext(const Instruction *Inv,
538 const Instruction *CxtI,
539 const DominatorTree *DT) {
540 // There are two restrictions on the use of an assume:
541 // 1. The assume must dominate the context (or the control flow must
542 // reach the assume whenever it reaches the context).
543 // 2. The context must not be in the assume's set of ephemeral values
544 // (otherwise we will use the assume to prove that the condition
545 // feeding the assume is trivially true, thus causing the removal of
546 // the assume).
547
548 if (Inv->getParent() == CxtI->getParent()) {
549 // If Inv and CtxI are in the same block, check if the assume (Inv) is first
550 // in the BB.
551 if (Inv->comesBefore(CxtI))
552 return true;
553
554 // Don't let an assume affect itself - this would cause the problems
555 // `isEphemeralValueOf` is trying to prevent, and it would also make
556 // the loop below go out of bounds.
557 if (Inv == CxtI)
558 return false;
559
560 // The context comes first, but they're both in the same block.
561 // Make sure there is nothing in between that might interrupt
562 // the control flow, not even CxtI itself.
563 // We limit the scan distance between the assume and its context instruction
564 // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
565 // it can be adjusted if needed (could be turned into a cl::opt).
566 auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
567 if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
568 return false;
569
570 return !isEphemeralValueOf(Inv, CxtI);
571 }
572
573 // Inv and CxtI are in different blocks.
574 if (DT) {
575 if (DT->dominates(Inv, CxtI))
576 return true;
577 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
578 // We don't have a DT, but this trivially dominates.
579 return true;
580 }
581
582 return false;
583}
584
585static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
586 // v u> y implies v != 0.
587 if (Pred == ICmpInst::ICMP_UGT)
588 return true;
589
590 // Special-case v != 0 to also handle v != null.
591 if (Pred == ICmpInst::ICMP_NE)
592 return match(RHS, m_Zero());
593
594 // All other predicates - rely on generic ConstantRange handling.
595 const APInt *C;
596 if (!match(RHS, m_APInt(C)))
597 return false;
598
599 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
600 return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
601}
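Two worked cases of the ConstantRange fallback above, for i8 (a sketch):

// Pred = ICMP_SLT, C = 0: makeExactICmpRegion gives [-128, 0), which does
//   not contain 0, so the comparison proves v != 0 -> return true.
// Pred = ICMP_ULT, C = 8: the region is [0, 8), which contains 0 -> false.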
602
603static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
604 // Use of assumptions is context-sensitive. If we don't have a context, we
605 // cannot use them!
606 if (!Q.AC || !Q.CxtI)
607 return false;
608
609 if (Q.CxtI && V->getType()->isPointerTy()) {
610 SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
611 if (!NullPointerIsDefined(Q.CxtI->getFunction(),
612 V->getType()->getPointerAddressSpace()))
613 AttrKinds.push_back(Attribute::Dereferenceable);
614
615 if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
616 return true;
617 }
618
619 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
620 if (!AssumeVH)
621 continue;
622 CallInst *I = cast<CallInst>(AssumeVH);
623 assert(I->getFunction() == Q.CxtI->getFunction() &&
624 "Got assumption for the wrong function!");
625
626 // Warning: This loop can end up being somewhat performance sensitive.
627 // We're running this loop once for each value queried, resulting in a
628 // runtime of ~O(#assumes * #values).
629
630 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
631 "must be an assume intrinsic");
632
633 Value *RHS;
634 CmpInst::Predicate Pred;
635 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
636 if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
637 return false;
638
639 if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
640 return true;
641 }
642
643 return false;
644}
645
646static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
647 unsigned Depth, const Query &Q) {
648 // Use of assumptions is context-sensitive. If we don't have a context, we
649 // cannot use them!
650 if (!Q.AC || !Q.CxtI)
651 return;
652
653 unsigned BitWidth = Known.getBitWidth();
654
655 // Refine Known set if the pointer alignment is set by assume bundles.
656 if (V->getType()->isPointerTy()) {
657 if (RetainedKnowledge RK = getKnowledgeValidInContext(
658 V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
659 Known.Zero.setLowBits(Log2_64(RK.ArgValue));
660 }
661 }
662
663 // Note that the patterns below need to be kept in sync with the code
664 // in AssumptionCache::updateAffectedValues.
665
666 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
667 if (!AssumeVH)
668 continue;
669 CallInst *I = cast<CallInst>(AssumeVH);
670 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
671 "Got assumption for the wrong function!");
672
673 // Warning: This loop can end up being somewhat performance sensitive.
674 // We're running this loop once for each value queried, resulting in a
675 // runtime of ~O(#assumes * #values).
676
677 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
678 "must be an assume intrinsic");
679
680 Value *Arg = I->getArgOperand(0);
681
682 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
683 assert(BitWidth == 1 && "assume operand is not i1?");
684 Known.setAllOnes();
685 return;
686 }
687 if (match(Arg, m_Not(m_Specific(V))) &&
688 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
689 assert(BitWidth == 1 && "assume operand is not i1?");
690 Known.setAllZero();
691 return;
692 }
693
694 // The remaining tests are all recursive, so bail out if we hit the limit.
695 if (Depth == MaxAnalysisRecursionDepth)
696 continue;
697
698 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
699 if (!Cmp)
700 continue;
701
702 // We are attempting to compute known bits for the operands of an assume.
703 // Do not try to use other assumptions for those recursive calls because
704 // that can lead to mutual recursion and a compile-time explosion.
705 // An example of the mutual recursion: computeKnownBits can call
706 // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
707 // and so on.
708 Query QueryNoAC = Q;
709 QueryNoAC.AC = nullptr;
710
711 // Note that ptrtoint may change the bitwidth.
712 Value *A, *B;
713 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
714
715 CmpInst::Predicate Pred;
716 uint64_t C;
717 switch (Cmp->getPredicate()) {
718 default:
719 break;
720 case ICmpInst::ICMP_EQ:
721 // assume(v = a)
722 if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
723 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
724 KnownBits RHSKnown =
725 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
726 Known.Zero |= RHSKnown.Zero;
727 Known.One |= RHSKnown.One;
728 // assume(v & b = a)
729 } else if (match(Cmp,
730 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
731 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
732 KnownBits RHSKnown =
733 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
734 KnownBits MaskKnown =
735 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
736
737 // For those bits in the mask that are known to be one, we can propagate
738 // known bits from the RHS to V.
739 Known.Zero |= RHSKnown.Zero & MaskKnown.One;
740 Known.One |= RHSKnown.One & MaskKnown.One;
741 // assume(~(v & b) = a)
742 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
743 m_Value(A))) &&
744 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
745 KnownBits RHSKnown =
746 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
747 KnownBits MaskKnown =
748 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
749
750 // For those bits in the mask that are known to be one, we can propagate
751 // inverted known bits from the RHS to V.
752 Known.Zero |= RHSKnown.One & MaskKnown.One;
753 Known.One |= RHSKnown.Zero & MaskKnown.One;
754 // assume(v | b = a)
755 } else if (match(Cmp,
756 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
757 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
758 KnownBits RHSKnown =
759 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
760 KnownBits BKnown =
761 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
762
763 // For those bits in B that are known to be zero, we can propagate known
764 // bits from the RHS to V.
765 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
766 Known.One |= RHSKnown.One & BKnown.Zero;
767 // assume(~(v | b) = a)
768 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
769 m_Value(A))) &&
770 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
771 KnownBits RHSKnown =
772 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
773 KnownBits BKnown =
774 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
775
776 // For those bits in B that are known to be zero, we can propagate
777 // inverted known bits from the RHS to V.
778 Known.Zero |= RHSKnown.One & BKnown.Zero;
779 Known.One |= RHSKnown.Zero & BKnown.Zero;
780 // assume(v ^ b = a)
781 } else if (match(Cmp,
782 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
783 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
784 KnownBits RHSKnown =
785 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
786 KnownBits BKnown =
787 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
788
789 // For those bits in B that are known to be zero, we can propagate known
790 // bits from the RHS to V. For those bits in B that are known to be one,
791 // we can propagate inverted known bits from the RHS to V.
792 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
793 Known.One |= RHSKnown.One & BKnown.Zero;
794 Known.Zero |= RHSKnown.One & BKnown.One;
795 Known.One |= RHSKnown.Zero & BKnown.One;
796 // assume(~(v ^ b) = a)
797 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
798 m_Value(A))) &&
799 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
800 KnownBits RHSKnown =
801 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
802 KnownBits BKnown =
803 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
804
805 // For those bits in B that are known to be zero, we can propagate
806 // inverted known bits from the RHS to V. For those bits in B that are
807 // known to be one, we can propagate known bits from the RHS to V.
808 Known.Zero |= RHSKnown.One & BKnown.Zero;
809 Known.One |= RHSKnown.Zero & BKnown.Zero;
810 Known.Zero |= RHSKnown.Zero & BKnown.One;
811 Known.One |= RHSKnown.One & BKnown.One;
812 // assume(v << c = a)
813 } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
814 m_Value(A))) &&
815 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
816 KnownBits RHSKnown =
817 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
818
819 // For those bits in RHS that are known, we can propagate them to known
820 // bits in V shifted to the right by C.
821 RHSKnown.Zero.lshrInPlace(C);
822 Known.Zero |= RHSKnown.Zero;
823 RHSKnown.One.lshrInPlace(C);
824 Known.One |= RHSKnown.One;
825 // assume(~(v << c) = a)
826 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
827 m_Value(A))) &&
828 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
829 KnownBits RHSKnown =
830 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
831 // For those bits in RHS that are known, we can propagate them inverted
832 // to known bits in V shifted to the right by C.
833 RHSKnown.One.lshrInPlace(C);
834 Known.Zero |= RHSKnown.One;
835 RHSKnown.Zero.lshrInPlace(C);
836 Known.One |= RHSKnown.Zero;
837 // assume(v >> c = a)
838 } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
839 m_Value(A))) &&
840 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
841 KnownBits RHSKnown =
842 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
843 // For those bits in RHS that are known, we can propagate them to known
844 // bits in V shifted to the right by C.
845 Known.Zero |= RHSKnown.Zero << C;
846 Known.One |= RHSKnown.One << C;
847 // assume(~(v >> c) = a)
848 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
849 m_Value(A))) &&
850 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
851 KnownBits RHSKnown =
852 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
853 // For those bits in RHS that are known, we can propagate them inverted
854 // to known bits in V shifted to the right by C.
855 Known.Zero |= RHSKnown.One << C;
856 Known.One |= RHSKnown.Zero << C;
857 }
858 break;
859 case ICmpInst::ICMP_SGE:
860 // assume(v >=_s c) where c is non-negative
861 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
862 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
863 KnownBits RHSKnown =
864 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
865
866 if (RHSKnown.isNonNegative()) {
867 // We know that the sign bit is zero.
868 Known.makeNonNegative();
869 }
870 }
871 break;
872 case ICmpInst::ICMP_SGT:
873 // assume(v >_s c) where c is at least -1.
874 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
875 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
876 KnownBits RHSKnown =
877 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
878
879 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
880 // We know that the sign bit is zero.
881 Known.makeNonNegative();
882 }
883 }
884 break;
885 case ICmpInst::ICMP_SLE:
886 // assume(v <=_s c) where c is negative
887 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
888 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
889 KnownBits RHSKnown =
890 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
891
892 if (RHSKnown.isNegative()) {
893 // We know that the sign bit is one.
894 Known.makeNegative();
895 }
896 }
897 break;
898 case ICmpInst::ICMP_SLT:
899 // assume(v <_s c) where c is non-positive
900 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
901 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
902 KnownBits RHSKnown =
903 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
904
905 if (RHSKnown.isZero() || RHSKnown.isNegative()) {
906 // We know that the sign bit is one.
907 Known.makeNegative();
908 }
909 }
910 break;
911 case ICmpInst::ICMP_ULE:
912 // assume(v <=_u c)
913 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
914 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
915 KnownBits RHSKnown =
916 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
917
918 // Whatever high bits in c are zero are known to be zero.
919 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
920 }
921 break;
922 case ICmpInst::ICMP_ULT:
923 // assume(v <_u c)
924 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
925 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
926 KnownBits RHSKnown =
927 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
928
929 // If the RHS is known zero, then this assumption must be wrong (nothing
930 // is unsigned less than zero). Signal a conflict and get out of here.
931 if (RHSKnown.isZero()) {
932 Known.Zero.setAllBits();
933 Known.One.setAllBits();
934 break;
935 }
936
937 // Whatever high bits in c are zero are known to be zero (if c is a power
938 // of 2, then one more).
939 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
940 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
941 else
942 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
943 }
944 break;
945 }
946 }
947
948 // If assumptions conflict with each other or previous known bits, then we
949 // have a logical fallacy. It's possible that the assumption is not reachable,
950 // so this isn't a real bug. On the other hand, the program may have undefined
951 // behavior, or we might have a bug in the compiler. We can't assert/crash, so
952 // clear out the known bits, try to warn the user, and hope for the best.
953 if (Known.Zero.intersects(Known.One)) {
954 Known.resetAll();
955
956 if (Q.ORE)
957 Q.ORE->emit([&]() {
958 auto *CxtI = const_cast<Instruction *>(Q.CxtI);
959 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
960 CxtI)
961 << "Detected conflicting code assumptions. Program may "
962 "have undefined behavior, or compiler may have "
963 "internal error.";
964 });
965 }
966}
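A worked instance of the assume(v & b = a) case above, for a 4-bit width (a sketch):

// Suppose MaskKnown for b has One = 0b1100 (bits 3 and 2 known one) and
// RHSKnown for a is fully known as Zero = 0b1011, One = 0b0100. Then:
//   Known.Zero |= RHSKnown.Zero & MaskKnown.One = 0b1000  (bit 3 of v is 0)
//   Known.One  |= RHSKnown.One  & MaskKnown.One = 0b0100  (bit 2 of v is 1)
// Bits 1 and 0 of v remain unknown, since the mask does not pin them.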
967
968/// Compute known bits from a shift operator, including those with a
969/// non-constant shift amount. Known is the output of this function. Known2 is a
970/// pre-allocated temporary with the same bit width as Known and on return
971/// contains the known bits of the shift value source. KF is an
972/// operator-specific function that, given the known bits and a shift amount,
973/// computes the implied known bits of the shift operator's result
974/// for that shift amount. The results from calling KF are conservatively
975/// combined for all permitted shift amounts.
976static void computeKnownBitsFromShiftOperator(
977 const Operator *I, const APInt &DemandedElts, KnownBits &Known,
978 KnownBits &Known2, unsigned Depth, const Query &Q,
979 function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
980 unsigned BitWidth = Known.getBitWidth();
981 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
982 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
983
984 // Note: We cannot use Known.Zero.getLimitedValue() here, because if
985 // BitWidth > 64 and any upper bits are known, we'll end up returning the
986 // limit value (which implies all bits are known).
987 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
988 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
989 bool ShiftAmtIsConstant = Known.isConstant();
990 bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
991
992 if (ShiftAmtIsConstant) {
993 Known = KF(Known2, Known);
994
995 // If the known bits conflict, this must be an overflowing left shift, so
996 // the shift result is poison. We can return anything we want. Choose 0 for
997 // the best folding opportunity.
998 if (Known.hasConflict())
999 Known.setAllZero();
1000
1001 return;
1002 }
1003
1004 // If the shift amount could be greater than or equal to the bit-width of the
1005 // LHS, the value could be poison, but bail out because the check below is
1006 // expensive.
1007 // TODO: Should we just carry on?
1008 if (MaxShiftAmtIsOutOfRange) {
1009 Known.resetAll();
1010 return;
1011 }
1012
1013 // It would be more clearly correct to use the two temporaries for this
1014 // calculation. Reusing the APInts here to prevent unnecessary allocations.
1015 Known.resetAll();
1016
1017 // If we know the shifter operand is nonzero, we can sometimes infer more
1018 // known bits. However this is expensive to compute, so be lazy about it and
1019 // only compute it when absolutely necessary.
1020 Optional<bool> ShifterOperandIsNonZero;
1021
1022 // Early exit if we can't constrain any well-defined shift amount.
1023 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1024 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1025 ShifterOperandIsNonZero =
1026 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1027 if (!*ShifterOperandIsNonZero)
1028 return;
1029 }
1030
1031 Known.Zero.setAllBits();
1032 Known.One.setAllBits();
1033 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1034 // Combine the shifted known input bits only for those shift amounts
1035 // compatible with its known constraints.
1036 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1037 continue;
1038 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1039 continue;
1040 // If we know the shifter is nonzero, we may be able to infer more known
1041 // bits. This check is sunk down as far as possible to avoid the expensive
1042 // call to isKnownNonZero if the cheaper checks above fail.
1043 if (ShiftAmt == 0) {
1044 if (!ShifterOperandIsNonZero.hasValue())
1045 ShifterOperandIsNonZero =
1046 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1047 if (*ShifterOperandIsNonZero)
1048 continue;
1049 }
1050
1051 Known = KnownBits::commonBits(
1052 Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1053 }
1054
1055 // If the known bits conflict, the result is poison. Return a 0 and hope the
1056 // caller can further optimize that.
1057 if (Known.hasConflict())
1058 Known.setAllZero();
1059}
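A worked pass through the shift-amount loop above, assuming an 8-bit shift whose amount has bit 0 known zero (a sketch):

// ShiftAmtKZ has bit 0 set, so odd amounts fail (ShiftAmt & ~ShiftAmtKZ) !=
// ShiftAmt and are skipped. For the surviving amounts 0, 2, 4 and 6,
// KF(Known2, makeConstant(ShiftAmt)) is evaluated and the results are
// intersected via KnownBits::commonBits: only bits that agree across all
// four candidate shifts stay known.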
1060
1061static void computeKnownBitsFromOperator(const Operator *I,
1062 const APInt &DemandedElts,
1063 KnownBits &Known, unsigned Depth,
1064 const Query &Q) {
1065 unsigned BitWidth = Known.getBitWidth();
1066
1067 KnownBits Known2(BitWidth);
1068 switch (I->getOpcode()) {
Step 1: Calling 'Operator::getOpcode'
Step 6: Returning from 'Operator::getOpcode'
Step 7: Control jumps to 'case PHI:' at line 1408
1069 default: break;
1070 case Instruction::Load:
1071 if (MDNode *MD =
1072 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1073 computeKnownBitsFromRangeMetadata(*MD, Known);
1074 break;
1075 case Instruction::And: {
1076 // If either the LHS or the RHS are Zero, the result is zero.
1077 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1078 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1079
1080 Known &= Known2;
1081
1082 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1083 // here we handle the more general case of adding any odd number by
1085 // matching the form and(x, add(x, y)) where y is odd.
1085 // TODO: This could be generalized to clearing any bit set in y where the
1086 // following bit is known to be unset in y.
1087 Value *X = nullptr, *Y = nullptr;
1088 if (!Known.Zero[0] && !Known.One[0] &&
1089 match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1090 Known2.resetAll();
1091 computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1092 if (Known2.countMinTrailingOnes() > 0)
1093 Known.Zero.setBit(0);
1094 }
1095 break;
1096 }
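// A worked instance of the odd-increment idiom above (a sketch): take
// y = 3 (0b11, so countMinTrailingOnes() == 2 > 0) and I = x & (x + 3).
// If x is even, bit 0 of x is 0; if x is odd, x + 3 is even. Either way
// bit 0 of the 'and' is 0, which is exactly Known.Zero.setBit(0) above.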
1097 case Instruction::Or:
1098 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1099 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1100
1101 Known |= Known2;
1102 break;
1103 case Instruction::Xor:
1104 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1105 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1106
1107 Known ^= Known2;
1108 break;
1109 case Instruction::Mul: {
1110 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1111 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1112 Known, Known2, Depth, Q);
1113 break;
1114 }
1115 case Instruction::UDiv: {
1116 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1117 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1118 Known = KnownBits::udiv(Known, Known2);
1119 break;
1120 }
1121 case Instruction::Select: {
1122 const Value *LHS = nullptr, *RHS = nullptr;
1123 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1124 if (SelectPatternResult::isMinOrMax(SPF)) {
1125 computeKnownBits(RHS, Known, Depth + 1, Q);
1126 computeKnownBits(LHS, Known2, Depth + 1, Q);
1127 switch (SPF) {
1128 default:
1129 llvm_unreachable("Unhandled select pattern flavor!");
1130 case SPF_SMAX:
1131 Known = KnownBits::smax(Known, Known2);
1132 break;
1133 case SPF_SMIN:
1134 Known = KnownBits::smin(Known, Known2);
1135 break;
1136 case SPF_UMAX:
1137 Known = KnownBits::umax(Known, Known2);
1138 break;
1139 case SPF_UMIN:
1140 Known = KnownBits::umin(Known, Known2);
1141 break;
1142 }
1143 break;
1144 }
1145
1146 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1147 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1148
1149 // Only known if known in both the LHS and RHS.
1150 Known = KnownBits::commonBits(Known, Known2);
1151
1152 if (SPF == SPF_ABS) {
1153 // RHS from matchSelectPattern returns the negation part of abs pattern.
1154 // If the negate has an NSW flag we can assume the sign bit of the result
1155 // will be 0 because that makes abs(INT_MIN) undefined.
1156 if (match(RHS, m_Neg(m_Specific(LHS))) &&
1157 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
1158 Known.Zero.setSignBit();
1159 }
1160
1161 break;
1162 }
1163 case Instruction::FPTrunc:
1164 case Instruction::FPExt:
1165 case Instruction::FPToUI:
1166 case Instruction::FPToSI:
1167 case Instruction::SIToFP:
1168 case Instruction::UIToFP:
1169 break; // Can't work with floating point.
1170 case Instruction::PtrToInt:
1171 case Instruction::IntToPtr:
1172 // Fall through and handle them the same as zext/trunc.
1173 LLVM_FALLTHROUGH[[gnu::fallthrough]];
1174 case Instruction::ZExt:
1175 case Instruction::Trunc: {
1176 Type *SrcTy = I->getOperand(0)->getType();
1177
1178 unsigned SrcBitWidth;
1179 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1180 // which fall through here.
1181 Type *ScalarTy = SrcTy->getScalarType();
1182 SrcBitWidth = ScalarTy->isPointerTy() ?
1183 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1184 Q.DL.getTypeSizeInBits(ScalarTy);
1185
1186 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1187 Known = Known.anyextOrTrunc(SrcBitWidth);
1188 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1189 Known = Known.zextOrTrunc(BitWidth);
1190 break;
1191 }
1192 case Instruction::BitCast: {
1193 Type *SrcTy = I->getOperand(0)->getType();
1194 if (SrcTy->isIntOrPtrTy() &&
1195 // TODO: For now, not handling conversions like:
1196 // (bitcast i64 %x to <2 x i32>)
1197 !I->getType()->isVectorTy()) {
1198 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1199 break;
1200 }
1201
1202 // Handle cast from vector integer type to scalar or vector integer.
1203 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1204 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1205 !I->getType()->isIntOrIntVectorTy())
1206 break;
1207
1208 // Look through a cast from narrow vector elements to wider type.
1209 // Examples: v4i32 -> v2i64, v3i8 -> i24
1210 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1211 if (BitWidth % SubBitWidth == 0) {
1212 // Known bits are automatically intersected across demanded elements of a
1213 // vector. So for example, if a bit is computed as known zero, it must be
1214 // zero across all demanded elements of the vector.
1215 //
1216 // For this bitcast, each demanded element of the output is sub-divided
1217 // across a set of smaller vector elements in the source vector. To get
1218 // the known bits for an entire element of the output, compute the known
1219 // bits for each sub-element sequentially. This is done by shifting the
1220 // one-set-bit demanded elements parameter across the sub-elements for
1221 // consecutive calls to computeKnownBits. We are using the demanded
1222 // elements parameter as a mask operator.
1223 //
1224 // The known bits of each sub-element are then inserted into place
1225 // (dependent on endian) to form the full result of known bits.
1226 unsigned NumElts = DemandedElts.getBitWidth();
1227 unsigned SubScale = BitWidth / SubBitWidth;
1228 APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
1229 for (unsigned i = 0; i != NumElts; ++i) {
1230 if (DemandedElts[i])
1231 SubDemandedElts.setBit(i * SubScale);
1232 }
1233
1234 KnownBits KnownSrc(SubBitWidth);
1235 for (unsigned i = 0; i != SubScale; ++i) {
1236 computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
1237 Depth + 1, Q);
1238 unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
1239 Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
1240 }
1241 }
1242 break;
1243 }
1244 case Instruction::SExt: {
1245 // Compute the bits in the result that are not present in the input.
1246 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1247
1248 Known = Known.trunc(SrcBitWidth);
1249 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1250 // If the sign bit of the input is known set or clear, then we know the
1251 // top bits of the result.
1252 Known = Known.sext(BitWidth);
1253 break;
1254 }
1255 case Instruction::Shl: {
1256 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1257 auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1258 KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1259 // If this shift has "nsw" keyword, then the result is either a poison
1260 // value or has the same sign bit as the first operand.
1261 if (NSW) {
1262 if (KnownVal.Zero.isSignBitSet())
1263 Result.Zero.setSignBit();
1264 if (KnownVal.One.isSignBitSet())
1265 Result.One.setSignBit();
1266 }
1267 return Result;
1268 };
1269 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1270 KF);
1271 // Trailing zeros of a left-shifted constant never decrease.
1272 const APInt *C;
1273 if (match(I->getOperand(0), m_APInt(C)))
1274 Known.Zero.setLowBits(C->countTrailingZeros());
1275 break;
1276 }
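// Worked example (illustrative): if operand 0 is the constant 12 (0b1100),
// countTrailingZeros() == 2, so 'shl i8 12, %n' always has its two low bits
// clear no matter what is known about the shift amount %n.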
1277 case Instruction::LShr: {
1278 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1279 return KnownBits::lshr(KnownVal, KnownAmt);
1280 };
1281 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1282 KF);
1283 // Leading zeros of a right-shifted constant never decrease.
1284 const APInt *C;
1285 if (match(I->getOperand(0), m_APInt(C)))
1286 Known.Zero.setHighBits(C->countLeadingZeros());
1287 break;
1288 }
1289 case Instruction::AShr: {
1290 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1291 return KnownBits::ashr(KnownVal, KnownAmt);
1292 };
1293 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1294 KF);
1295 break;
1296 }
1297 case Instruction::Sub: {
1298 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1299 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1300 DemandedElts, Known, Known2, Depth, Q);
1301 break;
1302 }
1303 case Instruction::Add: {
1304 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1305 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1306 DemandedElts, Known, Known2, Depth, Q);
1307 break;
1308 }
1309 case Instruction::SRem:
1310 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1311 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1312 Known = KnownBits::srem(Known, Known2);
1313 break;
1314
1315 case Instruction::URem:
1316 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1317 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1318 Known = KnownBits::urem(Known, Known2);
1319 break;
1320 case Instruction::Alloca:
1321 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1322 break;
1323 case Instruction::GetElementPtr: {
1324 // Analyze all of the subscripts of this getelementptr instruction
1325 // to determine if we can prove known low zero bits.
1326 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1327 // Accumulate the constant indices in a separate variable
1328 // to minimize the number of calls to computeForAddSub.
1329 APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1330
1331 gep_type_iterator GTI = gep_type_begin(I);
1332 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1333 // The count of known low zero bits can only shrink; short-circuit once
1334 // nothing is known.
1334 if (Known.isUnknown())
1335 break;
1336
1337 Value *Index = I->getOperand(i);
1338
1339 // Handle case when index is zero.
1340 Constant *CIndex = dyn_cast<Constant>(Index);
1341 if (CIndex && CIndex->isZeroValue())
1342 continue;
1343
1344 if (StructType *STy = GTI.getStructTypeOrNull()) {
1345 // Handle struct member offset arithmetic.
1346
1347 assert(CIndex &&
1348 "Access to structure field must be known at compile time");
1349
1350 if (CIndex->getType()->isVectorTy())
1351 Index = CIndex->getSplatValue();
1352
1353 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1354 const StructLayout *SL = Q.DL.getStructLayout(STy);
1355 uint64_t Offset = SL->getElementOffset(Idx);
1356 AccConstIndices += Offset;
1357 continue;
1358 }
1359
1360 // Handle array index arithmetic.
1361 Type *IndexedTy = GTI.getIndexedType();
1362 if (!IndexedTy->isSized()) {
1363 Known.resetAll();
1364 break;
1365 }
1366
1367 unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1368 KnownBits IndexBits(IndexBitWidth);
1369 computeKnownBits(Index, IndexBits, Depth + 1, Q);
1370 TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1371 uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1372 KnownBits ScalingFactor(IndexBitWidth);
1373 // Multiply by current sizeof type.
1374 // &A[i] == A + i * sizeof(*A[i]).
1375 if (IndexTypeSize.isScalable()) {
1376 // For scalable types the only thing we know about sizeof is
1377 // that this is a multiple of the minimum size.
1378 ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1379 } else if (IndexBits.isConstant()) {
1380 APInt IndexConst = IndexBits.getConstant();
1381 APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1382 IndexConst *= ScalingFactor;
1383 AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1384 continue;
1385 } else {
1386 ScalingFactor =
1387 KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1388 }
1389 IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
1390
1391 // If the offsets have a different width from the pointer, according
1392 // to the language reference we need to sign-extend or truncate them
1393 // to the width of the pointer.
1394 IndexBits = IndexBits.sextOrTrunc(BitWidth);
1395
1396 // Note that inbounds does *not* guarantee nsw for the addition, as only
1397 // the offset is signed, while the base address is unsigned.
1398 Known = KnownBits::computeForAddSub(
1399 /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1400 }
1401 if (!Known.isUnknown() && !AccConstIndices.isZero()) {
1402 KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1403 Known = KnownBits::computeForAddSub(
1404 /*Add=*/true, /*NSW=*/false, Known, Index);
1405 }
1406 break;
1407 }
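// Worked example (illustrative): for 'getelementptr inbounds i64, i64* %p,
// i64 %i', the index is scaled by the alloc size 8, so IndexBits gains at
// least three trailing zeros; if %p is also known 8-byte aligned, the
// computeForAddSub call keeps those three low zero bits in the result.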
1408 case Instruction::PHI: {
1409 const PHINode *P = cast<PHINode>(I);
[8] 'I' is a 'PHINode'
1410 BinaryOperator *BO = nullptr;
1411 Value *R = nullptr, *L = nullptr;
1412 if (matchSimpleRecurrence(P, BO, R, L)) {
[9] Value assigned to 'R'
[10] Assuming the condition is true
[11] Taking true branch
1413 // Handle the case of a simple two-predecessor recurrence PHI.
1414 // There's a lot more that could theoretically be done here, but
1415 // this is sufficient to catch some interesting cases.
1416 unsigned Opcode = BO->getOpcode();
1417
1418 // If this is a shift recurrence, we know the bits being shifted in.
1419 // We can combine that with information about the start value of the
1420 // recurrence to conclude facts about the result.
1421 if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
[12] Assuming 'Opcode' is not equal to LShr
[13] Assuming 'Opcode' is not equal to AShr
1422 Opcode == Instruction::Shl) &&
[14] Assuming 'Opcode' is not equal to Shl
1423 BO->getOperand(0) == I) {
1424
1425 // We have matched a recurrence of the form:
1426 // %iv = phi [R, %entry], [%iv.next, %backedge]
1427 // %iv.next = shift_op %iv, L
1428
1429 // Recurse with the phi context to avoid concern about whether facts
1430 // inferred hold at original context instruction. TODO: It may be
1431 // correct to use the original context. If warranted, explore and
1432 // add sufficient tests to cover.
1433 Query RecQ = Q;
1434 RecQ.CxtI = P;
1435 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1436 switch (Opcode) {
1437 case Instruction::Shl:
1438 // A shl recurrence will only increase the trailing zeros
1439 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1440 break;
1441 case Instruction::LShr:
1442 // A lshr recurrence will preserve the leading zeros of the
1443 // start value
1444 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1445 break;
1446 case Instruction::AShr:
1447 // An ashr recurrence will extend the initial sign bit
1448 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1449 Known.One.setHighBits(Known2.countMinLeadingOnes());
1450 break;
1451 };
1452 }
1453
1454 // Check for operations that have the property that if
1455 // both their operands have low zero bits, the result
1456 // will have low zero bits.
1457 if (Opcode == Instruction::Add ||
[15] Assuming 'Opcode' is not equal to Add
[20] Taking true branch
1458 Opcode == Instruction::Sub ||
[16] Assuming 'Opcode' is not equal to Sub
1459 Opcode == Instruction::And ||
[17] Assuming 'Opcode' is not equal to And
1460 Opcode == Instruction::Or ||
[18] Assuming 'Opcode' is not equal to Or
1461 Opcode == Instruction::Mul) {
[19] Assuming 'Opcode' is equal to Mul
1462 // Change the context instruction to the "edge" that flows into the
1463 // phi. This is important because that is where the value is actually
1464 // "evaluated" even though it is used later somewhere else. (see also
1465 // D69571).
1466 Query RecQ = Q;
1467
1468 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
[21] Assuming pointer value is null
[22] '?' condition is true
1469 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1470 Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1471
1472 // Ok, we have a PHI of the form L op= R. Check for low
1473 // zero bits.
1474 RecQ.CxtI = RInst;
1475 computeKnownBits(R, Known2, Depth + 1, RecQ);
[23] Passing null pointer value via 1st parameter 'V'
[24] Calling 'computeKnownBits'
1476
1477 // We need to take the minimum number of known bits
1478 KnownBits Known3(BitWidth);
1479 RecQ.CxtI = LInst;
1480 computeKnownBits(L, Known3, Depth + 1, RecQ);
1481
1482 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1483 Known3.countMinTrailingZeros()));
1484
1485 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1486 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1487 // If initial value of recurrence is nonnegative, and we are adding
1488 // a nonnegative number with nsw, the result can only be nonnegative
1489 // or poison value regardless of the number of times we execute the
1490 // add in phi recurrence. If initial value is negative and we are
1491 // adding a negative number with nsw, the result can only be
1492 // negative or poison value. Similar arguments apply to sub and mul.
1493 //
1494 // (add non-negative, non-negative) --> non-negative
1495 // (add negative, negative) --> negative
1496 if (Opcode == Instruction::Add) {
1497 if (Known2.isNonNegative() && Known3.isNonNegative())
1498 Known.makeNonNegative();
1499 else if (Known2.isNegative() && Known3.isNegative())
1500 Known.makeNegative();
1501 }
1502
1503 // (sub nsw non-negative, negative) --> non-negative
1504 // (sub nsw negative, non-negative) --> negative
1505 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1506 if (Known2.isNonNegative() && Known3.isNegative())
1507 Known.makeNonNegative();
1508 else if (Known2.isNegative() && Known3.isNonNegative())
1509 Known.makeNegative();
1510 }
1511
1512 // (mul nsw non-negative, non-negative) --> non-negative
1513 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1514 Known3.isNonNegative())
1515 Known.makeNonNegative();
1516 }
1517
1518 break;
1519 }
1520 }
1521
1522 // Unreachable blocks may have zero-operand PHI nodes.
1523 if (P->getNumIncomingValues() == 0)
1524 break;
1525
1526 // Otherwise take the intersection of the known bit sets of the operands
1527 // (via commonBits), taking conservative care to avoid excessive recursion.
1528 if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1529 // Skip if every incoming value refers back to the PHI itself.
1530 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1531 break;
1532
1533 Known.Zero.setAllBits();
1534 Known.One.setAllBits();
1535 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1536 Value *IncValue = P->getIncomingValue(u);
1537 // Skip direct self references.
1538 if (IncValue == P) continue;
1539
1540 // Change the context instruction to the "edge" that flows into the
1541 // phi. This is important because that is where the value is actually
1542 // "evaluated" even though it is used later somewhere else. (see also
1543 // D69571).
1544 Query RecQ = Q;
1545 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1546
1547 Known2 = KnownBits(BitWidth);
1548 // Recurse, but cap the recursion to one level, because we don't
1549 // want to waste time spinning around in loops.
1550 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1551 Known = KnownBits::commonBits(Known, Known2);
1552 // If all bits have been ruled out, there's no need to check
1553 // more operands.
1554 if (Known.isUnknown())
1555 break;
1556 }
1557 }
1558 break;
1559 }
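// Worked example (illustrative): for the recurrence
//   %iv = phi i32 [ 8, %entry ], [ %iv.next, %loop ]
//   %iv.next = lshr i32 %iv, 1
// the start value 8 has 28 leading zeros in i32, and the lshr arm above lets
// the phi inherit those high zero bits even though the trip count is unknown.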
1560 case Instruction::Call:
1561 case Instruction::Invoke:
1562 // If range metadata is attached to this call, set known bits from that,
1563 // and then intersect with known bits based on other properties of the
1564 // function.
1565 if (MDNode *MD =
1566 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1567 computeKnownBitsFromRangeMetadata(*MD, Known);
1568 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1569 computeKnownBits(RV, Known2, Depth + 1, Q);
1570 Known.Zero |= Known2.Zero;
1571 Known.One |= Known2.One;
1572 }
1573 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1574 switch (II->getIntrinsicID()) {
1575 default: break;
1576 case Intrinsic::abs: {
1577 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1578 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1579 Known = Known2.abs(IntMinIsPoison);
1580 break;
1581 }
1582 case Intrinsic::bitreverse:
1583 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1584 Known.Zero |= Known2.Zero.reverseBits();
1585 Known.One |= Known2.One.reverseBits();
1586 break;
1587 case Intrinsic::bswap:
1588 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1589 Known.Zero |= Known2.Zero.byteSwap();
1590 Known.One |= Known2.One.byteSwap();
1591 break;
1592 case Intrinsic::ctlz: {
1593 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1594 // If we have a known 1, its position is our upper bound.
1595 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1596 // If this call is undefined for 0, the result will be less than 2^n.
1597 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1598 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1599 unsigned LowBits = Log2_32(PossibleLZ)+1;
1600 Known.Zero.setBitsFrom(LowBits);
1601 break;
1602 }
1603 case Intrinsic::cttz: {
1604 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1605 // If we have a known 1, its position is our upper bound.
1606 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1607 // If this call is undefined for 0, the result will be less than 2^n.
1608 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1609 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1610 unsigned LowBits = Log2_32(PossibleTZ)+1;
1611 Known.Zero.setBitsFrom(LowBits);
1612 break;
1613 }
1614 case Intrinsic::ctpop: {
1615 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1616 // We can bound the space the count needs. Also, bits known to be zero
1617 // can't contribute to the population.
1618 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1619 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1620 Known.Zero.setBitsFrom(LowBits);
1621 // TODO: we could bound KnownOne using the lower bound on the number
1622 // of bits which might be set provided by popcnt KnownOne2.
1623 break;
1624 }
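// Worked example (illustrative): if Known2 says at most 5 bits of the operand
// can be set, the population count is at most 5, so
// LowBits == Log2_32(5) + 1 == 3 and every result bit from position 3 upward
// is known zero.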
1625 case Intrinsic::fshr:
1626 case Intrinsic::fshl: {
1627 const APInt *SA;
1628 if (!match(I->getOperand(2), m_APInt(SA)))
1629 break;
1630
1631 // Normalize to funnel shift left.
1632 uint64_t ShiftAmt = SA->urem(BitWidth);
1633 if (II->getIntrinsicID() == Intrinsic::fshr)
1634 ShiftAmt = BitWidth - ShiftAmt;
1635
1636 KnownBits Known3(BitWidth);
1637 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1638 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1639
1640 Known.Zero =
1641 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1642 Known.One =
1643 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1644 break;
1645 }
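// Worked example (illustrative): fshr on i8 with a constant shift of 3 is
// normalized to a funnel-shift-left by 8 - 3 == 5, so the result combines
// operand 0 shifted up by 5 with the high bits of operand 1 shifted down by
// 3, matching the two OR'd terms computed above.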
1646 case Intrinsic::uadd_sat:
1647 case Intrinsic::usub_sat: {
1648 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1649 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1650 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1651
1652 // Add: Leading ones of either operand are preserved.
1653 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1654 // as leading zeros in the result.
1655 unsigned LeadingKnown;
1656 if (IsAdd)
1657 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1658 Known2.countMinLeadingOnes());
1659 else
1660 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1661 Known2.countMinLeadingOnes());
1662
1663 Known = KnownBits::computeForAddSub(
1664 IsAdd, /* NSW */ false, Known, Known2);
1665
1666 // We select between the operation result and all-ones/zero
1667 // respectively, so we can preserve known ones/zeros.
1668 if (IsAdd) {
1669 Known.One.setHighBits(LeadingKnown);
1670 Known.Zero.clearAllBits();
1671 } else {
1672 Known.Zero.setHighBits(LeadingKnown);
1673 Known.One.clearAllBits();
1674 }
1675 break;
1676 }
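// Illustrative note: for uadd.sat, if either operand has its top k bits known
// one, neither the true sum nor the saturated all-ones value can fall below
// that operand, so the result's top k bits stay known one; the dual argument
// yields known high zeros for usub.sat.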
1677 case Intrinsic::umin:
1678 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1679 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1680 Known = KnownBits::umin(Known, Known2);
1681 break;
1682 case Intrinsic::umax:
1683 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1684 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1685 Known = KnownBits::umax(Known, Known2);
1686 break;
1687 case Intrinsic::smin:
1688 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1689 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1690 Known = KnownBits::smin(Known, Known2);
1691 break;
1692 case Intrinsic::smax:
1693 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1694 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1695 Known = KnownBits::smax(Known, Known2);
1696 break;
1697 case Intrinsic::x86_sse42_crc32_64_64:
1698 Known.Zero.setBitsFrom(32);
1699 break;
1700 case Intrinsic::riscv_vsetvli:
1701 case Intrinsic::riscv_vsetvlimax:
1702 // Assume that VL output is positive and would fit in an int32_t.
1703 // TODO: VLEN might be capped at 16 bits in a future V spec update.
1704 if (BitWidth >= 32)
1705 Known.Zero.setBitsFrom(31);
1706 break;
1707 case Intrinsic::vscale: {
1708 if (!II->getParent() || !II->getFunction() ||
1709 !II->getFunction()->hasFnAttribute(Attribute::VScaleRange))
1710 break;
1711
1712 auto Attr = II->getFunction()->getFnAttribute(Attribute::VScaleRange);
1713 Optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
1714
1715 if (!VScaleMax)
1716 break;
1717
1718 unsigned VScaleMin = Attr.getVScaleRangeMin();
1719
1720 // If vscale min = max then we know the exact value at compile time
1721 // and hence we know the exact bits.
1722 if (VScaleMin == VScaleMax) {
1723 Known.One = VScaleMin;
1724 Known.Zero = VScaleMin;
1725 Known.Zero.flipAllBits();
1726 break;
1727 }
1728
1729 unsigned FirstZeroHighBit =
1730 32 - countLeadingZeros(VScaleMax.getValue());
1731 if (FirstZeroHighBit < BitWidth)
1732 Known.Zero.setBitsFrom(FirstZeroHighBit);
1733
1734 break;
1735 }
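// Worked example (illustrative): with the attribute vscale_range(2,2) the
// call returns exactly 2, so Known becomes that constant; with
// vscale_range(1,16) only the upper bound helps, and bits from position
// 32 - countLeadingZeros(16u) == 5 upward are set to known zero.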
1736 }
1737 }
1738 break;
1739 case Instruction::ShuffleVector: {
1740 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1741 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1742 if (!Shuf) {
1743 Known.resetAll();
1744 return;
1745 }
1746 // For undef elements, we don't know anything about the common state of
1747 // the shuffle result.
1748 APInt DemandedLHS, DemandedRHS;
1749 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1750 Known.resetAll();
1751 return;
1752 }
1753 Known.One.setAllBits();
1754 Known.Zero.setAllBits();
1755 if (!!DemandedLHS) {
1756 const Value *LHS = Shuf->getOperand(0);
1757 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1758 // If we don't know any bits, early out.
1759 if (Known.isUnknown())
1760 break;
1761 }
1762 if (!!DemandedRHS) {
1763 const Value *RHS = Shuf->getOperand(1);
1764 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1765 Known = KnownBits::commonBits(Known, Known2);
1766 }
1767 break;
1768 }
1769 case Instruction::InsertElement: {
1770 const Value *Vec = I->getOperand(0);
1771 const Value *Elt = I->getOperand(1);
1772 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1773 // Early out if the index is non-constant or out-of-range.
1774 unsigned NumElts = DemandedElts.getBitWidth();
1775 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1776 Known.resetAll();
1777 return;
1778 }
1779 Known.One.setAllBits();
1780 Known.Zero.setAllBits();
1781 unsigned EltIdx = CIdx->getZExtValue();
1782 // Do we demand the inserted element?
1783 if (DemandedElts[EltIdx]) {
1784 computeKnownBits(Elt, Known, Depth + 1, Q);
1785 // If we don't know any bits, early out.
1786 if (Known.isUnknown())
1787 break;
1788 }
1789 // We don't need the base vector element that has been inserted.
1790 APInt DemandedVecElts = DemandedElts;
1791 DemandedVecElts.clearBit(EltIdx);
1792 if (!!DemandedVecElts) {
1793 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1794 Known = KnownBits::commonBits(Known, Known2);
1795 }
1796 break;
1797 }
1798 case Instruction::ExtractElement: {
1799 // Look through extract element. If the index is non-constant or
1800 // out-of-range demand all elements, otherwise just the extracted element.
1801 const Value *Vec = I->getOperand(0);
1802 const Value *Idx = I->getOperand(1);
1803 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1804 if (isa<ScalableVectorType>(Vec->getType())) {
1805 // FIXME: there's probably *something* we can do with scalable vectors
1806 Known.resetAll();
1807 break;
1808 }
1809 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1810 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1811 if (CIdx && CIdx->getValue().ult(NumElts))
1812 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1813 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1814 break;
1815 }
1816 case Instruction::ExtractValue:
1817 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1818 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1819 if (EVI->getNumIndices() != 1) break;
1820 if (EVI->getIndices()[0] == 0) {
1821 switch (II->getIntrinsicID()) {
1822 default: break;
1823 case Intrinsic::uadd_with_overflow:
1824 case Intrinsic::sadd_with_overflow:
1825 computeKnownBitsAddSub(true, II->getArgOperand(0),
1826 II->getArgOperand(1), false, DemandedElts,
1827 Known, Known2, Depth, Q);
1828 break;
1829 case Intrinsic::usub_with_overflow:
1830 case Intrinsic::ssub_with_overflow:
1831 computeKnownBitsAddSub(false, II->getArgOperand(0),
1832 II->getArgOperand(1), false, DemandedElts,
1833 Known, Known2, Depth, Q);
1834 break;
1835 case Intrinsic::umul_with_overflow:
1836 case Intrinsic::smul_with_overflow:
1837 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1838 DemandedElts, Known, Known2, Depth, Q);
1839 break;
1840 }
1841 }
1842 }
1843 break;
1844 case Instruction::Freeze:
1845 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1846 Depth + 1))
1847 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1848 break;
1849 }
1850}
1851
1852/// Determine which bits of V are known to be either zero or one and return
1853/// them.
1854KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1855 unsigned Depth, const Query &Q) {
1856 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1857 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1858 return Known;
1859}
1860
1861/// Determine which bits of V are known to be either zero or one and return
1862/// them.
1863KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1864 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1865 computeKnownBits(V, Known, Depth, Q);
1866 return Known;
1867}
1868
1869/// Determine which bits of V are known to be either zero or one and return
1870/// them in the Known bit set.
1871///
1872/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1873/// we cannot optimize based on the assumption that it is zero without changing
1874/// it to be an explicit zero. If we don't change it to zero, other code could
1875 /// be optimized based on the contradictory assumption that it is non-zero.
1876/// Because instcombine aggressively folds operations with undef args anyway,
1877/// this won't lose us code quality.
1878///
1879/// This function is defined on values with integer type, values with pointer
1880/// type, and vectors of integers. In the case
1881 /// where V is a vector, the known zero and known one values are the
1882 /// same width as the vector element, and a bit is set only if it is true
1883/// for all of the demanded elements in the vector specified by DemandedElts.
1884void computeKnownBits(const Value *V, const APInt &DemandedElts,
1885 KnownBits &Known, unsigned Depth, const Query &Q) {
1886 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1887 // No demanded elts or V is a scalable vector, better to assume we don't
1888 // know anything.
1889 Known.resetAll();
1890 return;
1891 }
1892
1893 assert(V && "No Value?");
1894 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1895
1896#ifndef NDEBUG
1897 Type *Ty = V->getType();
1898 unsigned BitWidth = Known.getBitWidth();
1899
1900 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1901 "Not integer or pointer type!");
1902
1903 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1904 assert(
1905 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1906 "DemandedElt width should equal the fixed vector number of elements");
1907 } else {
1908 assert(DemandedElts == APInt(1, 1) &&
1909 "DemandedElt width should be 1 for scalars");
1910 }
1911
1912 Type *ScalarTy = Ty->getScalarType();
1913 if (ScalarTy->isPointerTy()) {
1914 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1915 "V and Known should have same BitWidth");
1916 } else {
1917 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1918 "V and Known should have same BitWidth");
1919 }
1920#endif
1921
1922 const APInt *C;
1923 if (match(V, m_APInt(C))) {
1924 // We know all of the bits for a scalar constant or a splat vector constant!
1925 Known = KnownBits::makeConstant(*C);
1926 return;
1927 }
1928 // Null and aggregate-zero are all-zeros.
1929 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1930 Known.setAllZero();
1931 return;
1932 }
1933 // Handle a constant vector by taking the intersection of the known bits of
1934 // each element.
1935 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1936 // We know that CDV must be a vector of integers. Take the intersection of
1937 // each element.
1938 Known.Zero.setAllBits(); Known.One.setAllBits();
1939 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1940 if (!DemandedElts[i])
1941 continue;
1942 APInt Elt = CDV->getElementAsAPInt(i);
1943 Known.Zero &= ~Elt;
1944 Known.One &= Elt;
1945 }
1946 return;
1947 }
1948
1949 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1950 // We know that CV must be a vector of integers. Take the intersection of
1951 // each element.
1952 Known.Zero.setAllBits(); Known.One.setAllBits();
1953 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1954 if (!DemandedElts[i])
1955 continue;
1956 Constant *Element = CV->getAggregateElement(i);
1957 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1958 if (!ElementCI) {
1959 Known.resetAll();
1960 return;
1961 }
1962 const APInt &Elt = ElementCI->getValue();
1963 Known.Zero &= ~Elt;
1964 Known.One &= Elt;
1965 }
1966 return;
1967 }
1968
1969 // Start out not knowing anything.
1970 Known.resetAll();
1971
1972 // We can't imply anything about undefs.
1973 if (isa<UndefValue>(V))
1974 return;
1975
1976 // There's no point in looking through other users of ConstantData for
1977 // assumptions. Confirm that we've handled them all.
1978 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1979
1980 // All recursive calls that increase depth must come after this.
1981 if (Depth == MaxAnalysisRecursionDepth)
1982 return;
1983
1984 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1985 // the bits of its aliasee.
1986 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1987 if (!GA->isInterposable())
1988 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1989 return;
1990 }
1991
1992 if (const Operator *I = dyn_cast<Operator>(V))
1993 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1994
1995 // Aligned pointers have trailing zeros - refine Known.Zero set
1996 if (isa<PointerType>(V->getType())) {
1997 Align Alignment = V->getPointerAlignment(Q.DL);
1998 Known.Zero.setLowBits(Log2(Alignment));
1999 }
2000
2001 // computeKnownBitsFromAssume strictly refines Known.
2002 // Therefore, we run it after computeKnownBitsFromOperator.
2003
2004 // Check whether a nearby assume intrinsic can determine some known bits.
2005 computeKnownBitsFromAssume(V, Known, Depth, Q);
2006
2007 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2008}
2009
2010/// Return true if the given value is known to have exactly one
2011/// bit set when defined. For vectors return true if every element is known to
2012/// be a power of two when defined. Supports values with integer or pointer
2013/// types and vectors of integers.
2014bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2015 const Query &Q) {
2016 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2017
2018 // Attempt to match against constants.
2019 if (OrZero && match(V, m_Power2OrZero()))
2020 return true;
2021 if (match(V, m_Power2()))
2022 return true;
2023
2024 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2025 // it is shifted off the end then the result is undefined.
2026 if (match(V, m_Shl(m_One(), m_Value())))
2027 return true;
2028
2029 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2030 // the bottom. If it is shifted off the bottom then the result is undefined.
2031 if (match(V, m_LShr(m_SignMask(), m_Value())))
2032 return true;
2033
2034 // The remaining tests are all recursive, so bail out if we hit the limit.
2035 if (Depth++ == MaxAnalysisRecursionDepth)
2036 return false;
2037
2038 Value *X = nullptr, *Y = nullptr;
2039 // A shift left or a logical shift right of a power of two is a power of two
2040 // or zero.
2041 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2042 match(V, m_LShr(m_Value(X), m_Value()))))
2043 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2044
2045 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2046 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2047
2048 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2049 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2050 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2051
2052 // Peek through min/max.
2053 if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
2054 return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
2055 isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
2056 }
2057
2058 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2059 // A power of two and'd with anything is a power of two or zero.
2060 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2061 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2062 return true;
2063 // X & (-X) is always a power of two or zero.
2064 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2065 return true;
2066 return false;
2067 }
2068
2069 // Adding a power-of-two or zero to the same power-of-two or zero yields
2070 // either the original power-of-two, a larger power-of-two or zero.
2071 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2072 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2073 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2074 Q.IIQ.hasNoSignedWrap(VOBO)) {
2075 if (match(X, m_And(m_Specific(Y), m_Value())) ||
2076 match(X, m_And(m_Value(), m_Specific(Y))))
2077 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2078 return true;
2079 if (match(Y, m_And(m_Specific(X), m_Value())) ||
2080 match(Y, m_And(m_Value(), m_Specific(X))))
2081 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2082 return true;
2083
2084 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2085 KnownBits LHSBits(BitWidth);
2086 computeKnownBits(X, LHSBits, Depth, Q);
2087
2088 KnownBits RHSBits(BitWidth);
2089 computeKnownBits(Y, RHSBits, Depth, Q);
2090 // If i8 V is a power of two or zero:
2091 // ZeroBits: 1 1 1 0 1 1 1 1
2092 // ~ZeroBits: 0 0 0 1 0 0 0 0
2093 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2094 // If OrZero isn't set, we cannot give back a zero result.
2095 // Make sure either the LHS or RHS has a bit set.
2096 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2097 return true;
2098 }
2099 }
2100
2101 // An exact divide or right shift can only shift off zero bits, so the result
2102 // is a power of two only if the first operand is a power of two and not
2103 // copying a sign bit (sdiv int_min, 2).
2104 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2105 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2106 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2107 Depth, Q);
2108 }
2109
2110 return false;
2111}
2112
2113/// Test whether a GEP's result is known to be non-null.
2114///
2115/// Uses properties inherent in a GEP to try to determine whether it is known
2116/// to be non-null.
2117///
2118/// Currently this routine does not support vector GEPs.
2119static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2120 const Query &Q) {
2121 const Function *F = nullptr;
2122 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2123 F = I->getFunction();
2124
2125 if (!GEP->isInBounds() ||
2126 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2127 return false;
2128
2129 // FIXME: Support vector-GEPs.
2130 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2131
2132 // If the base pointer is non-null, we cannot walk to a null address with an
2133 // inbounds GEP in address space zero.
2134 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2135 return true;
2136
2137 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2138 // If so, then the GEP cannot produce a null pointer, as doing so would
2139 // inherently violate the inbounds contract within address space zero.
2140 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2141 GTI != GTE; ++GTI) {
2142 // Struct types are easy -- they must always be indexed by a constant.
2143 if (StructType *STy = GTI.getStructTypeOrNull()) {
2144 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2145 unsigned ElementIdx = OpC->getZExtValue();
2146 const StructLayout *SL = Q.DL.getStructLayout(STy);
2147 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2148 if (ElementOffset > 0)
2149 return true;
2150 continue;
2151 }
2152
2153 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2154 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2155 continue;
2156
2157 // Fast path the constant operand case both for efficiency and so we don't
2158 // increment Depth when just zipping down an all-constant GEP.
2159 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2160 if (!OpC->isZero())
2161 return true;
2162 continue;
2163 }
2164
2165 // We post-increment Depth here because while isKnownNonZero increments it
2166 // as well, when we pop back up that increment won't persist. We don't want
2167 // to recurse 10k times just because we have 10k GEP operands. We don't
2168 // bail completely out because we want to handle constant GEPs regardless
2169 // of depth.
2170 if (Depth++ >= MaxAnalysisRecursionDepth)
2171 continue;
2172
2173 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2174 return true;
2175 }
2176
2177 return false;
2178}
2179
2180static bool isKnownNonNullFromDominatingCondition(const Value *V,
2181 const Instruction *CtxI,
2182 const DominatorTree *DT) {
2183 if (isa<Constant>(V))
2184 return false;
2185
2186 if (!CtxI || !DT)
2187 return false;
2188
2189 unsigned NumUsesExplored = 0;
2190 for (auto *U : V->users()) {
2191 // Avoid massive lists
2192 if (NumUsesExplored >= DomConditionsMaxUses)
2193 break;
2194 NumUsesExplored++;
2195
2196 // If the value is used as an argument to a call or invoke, then argument
2197 // attributes may provide an answer about null-ness.
2198 if (const auto *CB = dyn_cast<CallBase>(U))
2199 if (auto *CalledFunc = CB->getCalledFunction())
2200 for (const Argument &Arg : CalledFunc->args())
2201 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2202 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2203 DT->dominates(CB, CtxI))
2204 return true;
2205
2206 // If the value is used as a load/store, then the pointer must be non null.
2207 if (V == getLoadStorePointerOperand(U)) {
2208 const Instruction *I = cast<Instruction>(U);
2209 if (!NullPointerIsDefined(I->getFunction(),
2210 V->getType()->getPointerAddressSpace()) &&
2211 DT->dominates(I, CtxI))
2212 return true;
2213 }
2214
2215 // Consider only compare instructions uniquely controlling a branch
2216 Value *RHS;
2217 CmpInst::Predicate Pred;
2218 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2219 continue;
2220
2221 bool NonNullIfTrue;
2222 if (cmpExcludesZero(Pred, RHS))
2223 NonNullIfTrue = true;
2224 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2225 NonNullIfTrue = false;
2226 else
2227 continue;
2228
2229 SmallVector<const User *, 4> WorkList;
2230 SmallPtrSet<const User *, 4> Visited;
2231 for (auto *CmpU : U->users()) {
2232 assert(WorkList.empty() && "Should be!");
2233 if (Visited.insert(CmpU).second)
2234 WorkList.push_back(CmpU);
2235
2236 while (!WorkList.empty()) {
2237 auto *Curr = WorkList.pop_back_val();
2238
2239 // If a user is an AND, add all its users to the work list. We only
2240 // propagate "pred != null" condition through AND because it is only
2241 // correct to assume that all conditions of AND are met in true branch.
2242 // TODO: Support similar logic of OR and EQ predicate?
2243 if (NonNullIfTrue)
2244 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2245 for (auto *CurrU : Curr->users())
2246 if (Visited.insert(CurrU).second)
2247 WorkList.push_back(CurrU);
2248 continue;
2249 }
2250
2251 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2252 assert(BI->isConditional() && "uses a comparison!");
2253
2254 BasicBlock *NonNullSuccessor =
2255 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2256 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2257 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2258 return true;
2259 } else if (NonNullIfTrue && isGuard(Curr) &&
2260 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2261 return true;
2262 }
2263 }
2264 }
2265 }
2266
2267 return false;
2268}
2269
2270/// Does the 'Range' metadata (which must be a valid MD_range operand list)
2271/// ensure that the value it's attached to is never Value? 'RangeType' is
2272 /// the type of the value described by the range.
2273static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2274 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2275 assert(NumRanges >= 1);
2276 for (unsigned i = 0; i < NumRanges; ++i) {
2277 ConstantInt *Lower =
2278 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2279 ConstantInt *Upper =
2280 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2281 ConstantRange Range(Lower->getValue(), Upper->getValue());
2282 if (Range.contains(Value))
2283 return false;
2284 }
2285 return true;
2286}
2287
2288/// Try to detect a recurrence that monotonically increases/decreases from a
2289/// non-zero starting value. These are common as induction variables.
2290static bool isNonZeroRecurrence(const PHINode *PN) {
2291 BinaryOperator *BO = nullptr;
2292 Value *Start = nullptr, *Step = nullptr;
2293 const APInt *StartC, *StepC;
2294 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2295 !match(Start, m_APInt(StartC)) || StartC->isZero())
2296 return false;
2297
2298 switch (BO->getOpcode()) {
2299 case Instruction::Add:
2300 // Starting from non-zero and stepping away from zero can never wrap back
2301 // to zero.
2302 return BO->hasNoUnsignedWrap() ||
2303 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2304 StartC->isNegative() == StepC->isNegative());
2305 case Instruction::Mul:
2306 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2307 match(Step, m_APInt(StepC)) && !StepC->isZero();
2308 case Instruction::Shl:
2309 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2310 case Instruction::AShr:
2311 case Instruction::LShr:
2312 return BO->isExact();
2313 default:
2314 return false;
2315 }
2316}
2317
2318/// Return true if the given value is known to be non-zero when defined. For
2319/// vectors, return true if every demanded element is known to be non-zero when
2320/// defined. For pointers, if the context instruction and dominator tree are
2321/// specified, perform context-sensitive analysis and return true if the
2322/// pointer couldn't possibly be null at the specified instruction.
2323/// Supports values with integer or pointer type and vectors of integers.
2324bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2325 const Query &Q) {
2326 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2327 // vector
2328 if (isa<ScalableVectorType>(V->getType()))
2329 return false;
2330
2331 if (auto *C = dyn_cast<Constant>(V)) {
2332 if (C->isNullValue())
2333 return false;
2334 if (isa<ConstantInt>(C))
2335 // Must be non-zero due to null test above.
2336 return true;
2337
2338 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2339 // See the comment for IntToPtr/PtrToInt instructions below.
2340 if (CE->getOpcode() == Instruction::IntToPtr ||
2341 CE->getOpcode() == Instruction::PtrToInt)
2342 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2343 .getFixedSize() <=
2344 Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2345 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2346 }
2347
2348 // For constant vectors, check that all elements are undefined or known
2349 // non-zero to determine that the whole vector is known non-zero.
2350 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2351 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2352 if (!DemandedElts[i])
2353 continue;
2354 Constant *Elt = C->getAggregateElement(i);
2355 if (!Elt || Elt->isNullValue())
2356 return false;
2357 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2358 return false;
2359 }
2360 return true;
2361 }
2362
2363 // A global variable in address space 0 is non null unless extern weak
2364 // or an absolute symbol reference. Other address spaces may have null as a
2365 // valid address for a global, so we can't assume anything.
2366 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2367 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2368 GV->getType()->getAddressSpace() == 0)
2369 return true;
2370 } else
2371 return false;
2372 }
2373
2374 if (auto *I = dyn_cast<Instruction>(V)) {
2375 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2376 // If the possible ranges don't contain zero, then the value is
2377 // definitely non-zero.
2378 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2379 const APInt ZeroValue(Ty->getBitWidth(), 0);
2380 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2381 return true;
2382 }
2383 }
2384 }
2385
2386 if (isKnownNonZeroFromAssume(V, Q))
2387 return true;
2388
2389 // Some of the tests below are recursive, so bail out if we hit the limit.
2390 if (Depth++ >= MaxAnalysisRecursionDepth)
2391 return false;
2392
2393 // Check for pointer simplifications.
2394
2395 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2396 // Alloca never returns null, malloc might.
2397 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2398 return true;
2399
2400 // A byval or inalloca argument may not be null in a non-default address
2401 // space. A nonnull argument is assumed never 0.
2402 if (const Argument *A = dyn_cast<Argument>(V)) {
2403 if (((A->hasPassPointeeByValueCopyAttr() &&
2404 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2405 A->hasNonNullAttr()))
2406 return true;
2407 }
2408
2409 // A Load tagged with nonnull metadata is never null.
2410 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2411 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2412 return true;
2413
2414 if (const auto *Call = dyn_cast<CallBase>(V)) {
2415 if (Call->isReturnNonNull())
2416 return true;
2417 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2418 return isKnownNonZero(RP, Depth, Q);
2419 }
2420 }
2421
2422 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2423 return true;
2424
2425 // Check for recursive pointer simplifications.
2426 if (V->getType()->isPointerTy()) {
2427 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2428 // do not alter the value, or at least not the nullness property of the
2429 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2430 //
2431 // Note that we have to take special care to avoid looking through
2432 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2433 // as casts that can alter the value, e.g., AddrSpaceCasts.
2434 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2435 return isGEPKnownNonNull(GEP, Depth, Q);
2436
2437 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2438 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2439
2440 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2441 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2442 Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2443 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2444 }
2445
2446 // Similar to int2ptr above, we can look through ptr2int here if the cast
2447 // is a no-op or an extend and not a truncate.
2448 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2449 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2450 Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2451 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2452
2453 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2454
2455 // X | Y != 0 if X != 0 or Y != 0.
2456 Value *X = nullptr, *Y = nullptr;
2457 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2458 return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2459 isKnownNonZero(Y, DemandedElts, Depth, Q);
2460
2461 // ext X != 0 if X != 0.
2462 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2463 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2464
2465 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2466 // if the lowest bit is shifted off the end.
2467 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2468 // shl nuw can't remove any non-zero bits.
2469 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2470 if (Q.IIQ.hasNoUnsignedWrap(BO))
2471 return isKnownNonZero(X, Depth, Q);
2472
2473 KnownBits Known(BitWidth);
2474 computeKnownBits(X, DemandedElts, Known, Depth, Q);
2475 if (Known.One[0])
2476 return true;
2477 }
2478 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2479 // defined if the sign bit is shifted off the end.
2480 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2481 // shr exact can only shift out zero bits.
2482 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2483 if (BO->isExact())
2484 return isKnownNonZero(X, Depth, Q);
2485
2486 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2487 if (Known.isNegative())
2488 return true;
2489
2490 // If the shifter operand is a constant, and all of the bits shifted
2491 // out are known to be zero, and X is known non-zero then at least one
2492 // non-zero bit must remain.
2493 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2494 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2495 // Is there a known one in the portion not shifted out?
2496 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2497 return true;
2498 // Are all the bits to be shifted out known zero?
2499 if (Known.countMinTrailingZeros() >= ShiftVal)
2500 return isKnownNonZero(X, DemandedElts, Depth, Q);
2501 }
2502 }
2503 // div exact can only produce a zero if the dividend is zero.
2504 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2505 return isKnownNonZero(X, DemandedElts, Depth, Q);
2506 }
2507 // X + Y.
2508 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2509 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2510 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2511
2512 // If X and Y are both non-negative (as signed values) then their sum is not
2513 // zero unless both X and Y are zero.
2514 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2515 if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2516 isKnownNonZero(Y, DemandedElts, Depth, Q))
2517 return true;
2518
2519 // If X and Y are both negative (as signed values) then their sum is not
2520 // zero unless both X and Y equal INT_MIN.
2521 if (XKnown.isNegative() && YKnown.isNegative()) {
2522 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2523 // The sign bit of X is set. If some other bit is set then X is not equal
2524 // to INT_MIN.
2525 if (XKnown.One.intersects(Mask))
2526 return true;
2527 // The sign bit of Y is set. If some other bit is set then Y is not equal
2528 // to INT_MIN.
2529 if (YKnown.One.intersects(Mask))
2530 return true;
2531 }
2532
2533 // The sum of a non-negative number and a power of two is not zero.
2534 if (XKnown.isNonNegative() &&
2535 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2536 return true;
2537 if (YKnown.isNonNegative() &&
2538 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2539 return true;
2540 }
2541 // X * Y.
2542 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2543 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2544 // If X and Y are non-zero then so is X * Y as long as the multiplication
2545 // does not overflow.
2546 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2547 isKnownNonZero(X, DemandedElts, Depth, Q) &&
2548 isKnownNonZero(Y, DemandedElts, Depth, Q))
2549 return true;
2550 }
2551 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2552 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2553 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2554 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2555 return true;
2556 }
2557 // PHI
2558 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2559 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2560 return true;
2561
2562 // Check if all incoming values are non-zero using recursion.
2563 Query RecQ = Q;
2564 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
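    // Jumping to MaxAnalysisRecursionDepth - 1 gives each incoming value
    // only a shallow analysis, keeping the total cost bounded across the
    // PHI operands.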
2565 return llvm::all_of(PN->operands(), [&](const Use &U) {
2566 if (U.get() == PN)
2567 return true;
2568 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2569 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2570 });
2571 }
2572 // ExtractElement
2573 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2574 const Value *Vec = EEI->getVectorOperand();
2575 const Value *Idx = EEI->getIndexOperand();
2576 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2577 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2578 unsigned NumElts = VecTy->getNumElements();
2579 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2580 if (CIdx && CIdx->getValue().ult(NumElts))
2581 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2582 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2583 }
2584 }
2585 // Freeze
2586 else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2587 auto *Op = FI->getOperand(0);
2588 if (isKnownNonZero(Op, Depth, Q) &&
2589 isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2590 return true;
2591 }
2592
2593 KnownBits Known(BitWidth);
2594 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2595 return Known.One != 0;
2596}
2597
2598bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2599 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2600 // vector
2601 if (isa<ScalableVectorType>(V->getType()))
2602 return false;
2603
2604 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2605 APInt DemandedElts =
2606 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
2607 return isKnownNonZero(V, DemandedElts, Depth, Q);
2608}
2609
2610 /// If the pair of operators are the same invertible function, return
2611/// the operands of the function corresponding to each input. Otherwise,
2612/// return None. An invertible function is one that is 1-to-1 and maps
2613/// every input value to exactly one output value. This is equivalent to
2614/// saying that Op1 and Op2 are equal exactly when the specified pair of
2615 /// operands are equal (except that Op1 and Op2 may be poison more often).
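/// For example, given Op1 = add X, Z and Op2 = add Y, Z, this returns the
/// pair (X, Y): Op1 == Op2 exactly when X == Y.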
2616static Optional<std::pair<Value*, Value*>>
2617getInvertibleOperands(const Operator *Op1,
2618 const Operator *Op2) {
2619 if (Op1->getOpcode() != Op2->getOpcode())
2620 return None;
2621
2622 auto getOperands = [&](unsigned OpNum) -> auto {
2623 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2624 };
2625
2626 switch (Op1->getOpcode()) {
2627 default:
2628 break;
2629 case Instruction::Add:
2630 case Instruction::Sub:
2631 if (Op1->getOperand(0) == Op2->getOperand(0))
2632 return getOperands(1);
2633 if (Op1->getOperand(1) == Op2->getOperand(1))
2634 return getOperands(0);
2635 break;
2636 case Instruction::Mul: {
2637     // Invertible if A * B == (A * B) mod 2^N, where A and B are integers
2638     // and N is the bitwidth. The nsw case is non-obvious, but proven by
2639 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2640 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2641 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2642 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2643 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2644 break;
2645
2646 // Assume operand order has been canonicalized
2647 if (Op1->getOperand(1) == Op2->getOperand(1) &&
2648 isa<ConstantInt>(Op1->getOperand(1)) &&
2649 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2650 return getOperands(0);
2651 break;
2652 }
2653 case Instruction::Shl: {
2654 // Same as multiplies, with the difference that we don't need to check
2655 // for a non-zero multiply. Shifts always multiply by non-zero.
2656 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2657 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2658 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2659 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2660 break;
2661
2662 if (Op1->getOperand(1) == Op2->getOperand(1))
2663 return getOperands(0);
2664 break;
2665 }
2666 case Instruction::AShr:
2667 case Instruction::LShr: {
2668 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
2669 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
2670 if (!PEO1->isExact() || !PEO2->isExact())
2671 break;
2672
2673 if (Op1->getOperand(1) == Op2->getOperand(1))
2674 return getOperands(0);
2675 break;
2676 }
2677 case Instruction::SExt:
2678 case Instruction::ZExt:
2679 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
2680 return getOperands(0);
2681 break;
2682 case Instruction::PHI: {
2683 const PHINode *PN1 = cast<PHINode>(Op1);
2684 const PHINode *PN2 = cast<PHINode>(Op2);
2685
2686 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
2687 // are a single invertible function of the start values? Note that repeated
2688     // application of an invertible function is also invertible.
2689 BinaryOperator *BO1 = nullptr;
2690 Value *Start1 = nullptr, *Step1 = nullptr;
2691 BinaryOperator *BO2 = nullptr;
2692 Value *Start2 = nullptr, *Step2 = nullptr;
2693 if (PN1->getParent() != PN2->getParent() ||
2694 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
2695 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
2696 break;
2697
2698 auto Values = getInvertibleOperands(cast<Operator>(BO1),
2699 cast<Operator>(BO2));
2700 if (!Values)
2701 break;
2702
2703 // We have to be careful of mutually defined recurrences here. Ex:
2704 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
2705 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
2706 // The invertibility of these is complicated, and not worth reasoning
2707 // about (yet?).
2708 if (Values->first != PN1 || Values->second != PN2)
2709 break;
2710
2711 return std::make_pair(Start1, Start2);
2712 }
2713 }
2714 return None;
2715}
2716
2717/// Return true if V2 == V1 + X, where X is known non-zero.
2718static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2719 const Query &Q) {
2720 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2721 if (!BO || BO->getOpcode() != Instruction::Add)
2722 return false;
2723 Value *Op = nullptr;
2724 if (V2 == BO->getOperand(0))
2725 Op = BO->getOperand(1);
2726 else if (V2 == BO->getOperand(1))
2727 Op = BO->getOperand(0);
2728 else
2729 return false;
2730 return isKnownNonZero(Op, Depth + 1, Q);
2731}
2732
2733/// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2734/// the multiplication is nuw or nsw.
2735static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2736 const Query &Q) {
2737 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2738 const APInt *C;
2739 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2740 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2741 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
2742 }
2743 return false;
2744}
2745
2746/// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2747/// the shift is nuw or nsw.
2748static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2749 const Query &Q) {
2750 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2751 const APInt *C;
2752 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2753 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2754 !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
2755 }
2756 return false;
2757}
2758
2759static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2760 unsigned Depth, const Query &Q) {
2761   // Check that the two PHIs are in the same block.
2762 if (PN1->getParent() != PN2->getParent())
2763 return false;
2764
2765 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2766 bool UsedFullRecursion = false;
2767 for (const BasicBlock *IncomBB : PN1->blocks()) {
2768 if (!VisitedBBs.insert(IncomBB).second)
2769 continue; // Don't reprocess blocks that we have dealt with already.
2770 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2771 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2772 const APInt *C1, *C2;
2773 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2774 continue;
2775
2776 // Only one pair of phi operands is allowed for full recursion.
2777 if (UsedFullRecursion)
2778 return false;
2779
2780 Query RecQ = Q;
2781 RecQ.CxtI = IncomBB->getTerminator();
2782 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2783 return false;
2784 UsedFullRecursion = true;
2785 }
2786 return true;
2787}
2788
2789/// Return true if it is known that V1 != V2.
2790static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2791 const Query &Q) {
2792 if (V1 == V2)
2793 return false;
2794 if (V1->getType() != V2->getType())
2795 // We can't look through casts yet.
2796 return false;
2797
2798 if (Depth >= MaxAnalysisRecursionDepth)
2799 return false;
2800
2801 // See if we can recurse through (exactly one of) our operands. This
2802 // requires our operation be 1-to-1 and map every input value to exactly
2803 // one output value. Such an operation is invertible.
2804 auto *O1 = dyn_cast<Operator>(V1);
2805 auto *O2 = dyn_cast<Operator>(V2);
2806 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2807 if (auto Values = getInvertibleOperands(O1, O2))
2808 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2809
2810 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2811 const PHINode *PN2 = cast<PHINode>(V2);
2812 // FIXME: This is missing a generalization to handle the case where one is
2813 // a PHI and another one isn't.
2814 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2815 return true;
2816     }
2817 }
2818
2819 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2820 return true;
2821
2822 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2823 return true;
2824
2825 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2826 return true;
2827
2828 if (V1->getType()->isIntOrIntVectorTy()) {
2829 // Are any known bits in V1 contradictory to known bits in V2? If V1
2830 // has a known zero where V2 has a known one, they must not be equal.
2831 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2832 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2833
2834 if (Known1.Zero.intersects(Known2.One) ||
2835 Known2.Zero.intersects(Known1.One))
2836 return true;
2837 }
2838 return false;
2839}
2840
2841/// Return true if 'V & Mask' is known to be zero. We use this predicate to
2842/// simplify operations downstream. Mask is known to be zero for bits that V
2843/// cannot have.
2844///
2845/// This function is defined on values with integer type, values with pointer
2846/// type, and vectors of integers. In the case
2847/// where V is a vector, the mask, known zero, and known one values are the
2848/// same width as the vector element, and the bit is set only if it is true
2849/// for all of the elements in the vector.
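/// For example, MaskedValueIsZero(V, 0xFF000000) asks whether the top byte
/// of a 32-bit V is known to be zero.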
2850bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2851 const Query &Q) {
2852 KnownBits Known(Mask.getBitWidth());
2853 computeKnownBits(V, Known, Depth, Q);
2854 return Mask.isSubsetOf(Known.Zero);
2855}
2856
2857// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2858// Returns the input and lower/upper bounds.
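// For example, smax(smin(In, 255), 0) yields CLow == 0 and CHigh == 255.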
2859static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2860 const APInt *&CLow, const APInt *&CHigh) {
2861  assert(isa<Operator>(Select) &&
2862         cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2863         "Input should be a Select!");
2864
2865 const Value *LHS = nullptr, *RHS = nullptr;
2866 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2867 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2868 return false;
2869
2870 if (!match(RHS, m_APInt(CLow)))
2871 return false;
2872
2873 const Value *LHS2 = nullptr, *RHS2 = nullptr;
2874 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2875 if (getInverseMinMaxFlavor(SPF) != SPF2)
2876 return false;
2877
2878 if (!match(RHS2, m_APInt(CHigh)))
2879 return false;
2880
2881 if (SPF == SPF_SMIN)
2882 std::swap(CLow, CHigh);
2883
2884 In = LHS2;
2885 return CLow->sle(*CHigh);
2886}
2887
2888/// For vector constants, loop over the elements and find the constant with the
2889/// minimum number of sign bits. Return 0 if the value is not a vector constant
2890/// or if any element was not analyzed; otherwise, return the count for the
2891/// element with the minimum number of sign bits.
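/// For example, <i32 -1, i32 3> yields min(32, 30) == 30 sign bits.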
2892static unsigned computeNumSignBitsVectorConstant(const Value *V,
2893 const APInt &DemandedElts,
2894 unsigned TyBits) {
2895 const auto *CV = dyn_cast<Constant>(V);
2896 if (!CV || !isa<FixedVectorType>(CV->getType()))
2897 return 0;
2898
2899 unsigned MinSignBits = TyBits;
2900 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2901 for (unsigned i = 0; i != NumElts; ++i) {
2902 if (!DemandedElts[i])
2903 continue;
2904 // If we find a non-ConstantInt, bail out.
2905 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2906 if (!Elt)
2907 return 0;
2908
2909 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2910 }
2911
2912 return MinSignBits;
2913}
2914
2915static unsigned ComputeNumSignBitsImpl(const Value *V,
2916 const APInt &DemandedElts,
2917 unsigned Depth, const Query &Q);
2918
2919static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2920 unsigned Depth, const Query &Q) {
2921 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2922  assert(Result > 0 && "At least one sign bit needs to be present!");
2923 return Result;
2924}
2925
2926/// Return the number of times the sign bit of the register is replicated into
2927/// the other bits. We know that at least 1 bit is always equal to the sign bit
2928/// (itself), but other cases can give us information. For example, immediately
2929/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2930/// other, so we return 3. For vectors, return the number of sign bits for the
2931/// vector element with the minimum number of known sign bits of the demanded
2932/// elements in the vector specified by DemandedElts.
2933static unsigned ComputeNumSignBitsImpl(const Value *V,
2934 const APInt &DemandedElts,
2935 unsigned Depth, const Query &Q) {
2936 Type *Ty = V->getType();
2937
2938 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2939 // vector
2940 if (isa<ScalableVectorType>(Ty))
2941 return 1;
2942
2943#ifndef NDEBUG
2944  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2945
2946 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2947    assert(
2948        FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2949        "DemandedElt width should equal the fixed vector number of elements");
2950 } else {
2951    assert(DemandedElts == APInt(1, 1) &&
2952           "DemandedElt width should be 1 for scalars");
2953 }
2954#endif
2955
2956 // We return the minimum number of sign bits that are guaranteed to be present
2957 // in V, so for undef we have to conservatively return 1. We don't have the
2958 // same behavior for poison though -- that's a FIXME today.
2959
2960 Type *ScalarTy = Ty->getScalarType();
2961 unsigned TyBits = ScalarTy->isPointerTy() ?
2962 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2963 Q.DL.getTypeSizeInBits(ScalarTy);
2964
2965 unsigned Tmp, Tmp2;
2966 unsigned FirstAnswer = 1;
2967
2968 // Note that ConstantInt is handled by the general computeKnownBits case
2969 // below.
2970
2971 if (Depth == MaxAnalysisRecursionDepth)
2972 return 1;
2973
2974 if (auto *U = dyn_cast<Operator>(V)) {
2975 switch (Operator::getOpcode(V)) {
2976 default: break;
2977 case Instruction::SExt:
2978 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2979 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2980
2981 case Instruction::SDiv: {
2982 const APInt *Denominator;
2983 // sdiv X, C -> adds log(C) sign bits.
2984 if (match(U->getOperand(1), m_APInt(Denominator))) {
2985
2986 // Ignore non-positive denominator.
2987 if (!Denominator->isStrictlyPositive())
2988 break;
2989
2990 // Calculate the incoming numerator bits.
2991 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2992
2993 // Add floor(log(C)) bits to the numerator bits.
2994 return std::min(TyBits, NumBits + Denominator->logBase2());
2995 }
2996 break;
2997 }
2998
2999 case Instruction::SRem: {
3000 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3001
3002 const APInt *Denominator;
3003 // srem X, C -> we know that the result is within [-C+1,C) when C is a
3004     // positive constant. This lets us put a lower bound on the number of sign
3005 // bits.
3006 if (match(U->getOperand(1), m_APInt(Denominator))) {
3007
3008 // Ignore non-positive denominator.
3009 if (Denominator->isStrictlyPositive()) {
3010 // Calculate the leading sign bit constraints by examining the
3011 // denominator. Given that the denominator is positive, there are two
3012 // cases:
3013 //
3014 // 1. The numerator is positive. The result range is [0,C) and
3015 // [0,C) u< (1 << ceilLogBase2(C)).
3016 //
3017 // 2. The numerator is negative. Then the result range is (-C,0] and
3018 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3019 //
3020 // Thus a lower bound on the number of sign bits is `TyBits -
3021 // ceilLogBase2(C)`.
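        // For example, srem i32 X, 5: results lie in (-5, 5),
        // ceilLogBase2(5) == 3, so at least 32 - 3 == 29 sign bits.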
3022
3023 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3024 Tmp = std::max(Tmp, ResBits);
3025 }
3026 }
3027 return Tmp;
3028 }
3029
3030 case Instruction::AShr: {
3031 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3032 // ashr X, C -> adds C sign bits. Vectors too.
3033 const APInt *ShAmt;
3034 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3035 if (ShAmt->uge(TyBits))
3036 break; // Bad shift.
3037 unsigned ShAmtLimited = ShAmt->getZExtValue();
3038 Tmp += ShAmtLimited;
3039 if (Tmp > TyBits) Tmp = TyBits;
3040 }
3041 return Tmp;
3042 }
3043 case Instruction::Shl: {
3044 const APInt *ShAmt;
3045 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3046 // shl destroys sign bits.
3047 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3048 if (ShAmt->uge(TyBits) || // Bad shift.
3049 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3050 Tmp2 = ShAmt->getZExtValue();
3051 return Tmp - Tmp2;
3052 }
3053 break;
3054 }
3055 case Instruction::And:
3056 case Instruction::Or:
3057 case Instruction::Xor: // NOT is handled here.
3058 // Logical binary ops preserve the number of sign bits at the worst.
3059 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3060 if (Tmp != 1) {
3061 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3062 FirstAnswer = std::min(Tmp, Tmp2);
3063 // We computed what we know about the sign bits as our first
3064 // answer. Now proceed to the generic code that uses
3065 // computeKnownBits, and pick whichever answer is better.
3066 }
3067 break;
3068
3069 case Instruction::Select: {
3070 // If we have a clamp pattern, we know that the number of sign bits will
3071 // be the minimum of the clamp min/max range.
3072 const Value *X;
3073 const APInt *CLow, *CHigh;
3074 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3075 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3076
3077 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3078 if (Tmp == 1) break;
3079 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3080 return std::min(Tmp, Tmp2);
3081 }
3082
3083 case Instruction::Add:
3084 // Add can have at most one carry bit. Thus we know that the output
3085 // is, at worst, one more bit than the inputs.
3086 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3087 if (Tmp == 1) break;
3088
3089 // Special case decrementing a value (ADD X, -1):
3090 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3091 if (CRHS->isAllOnesValue()) {
3092 KnownBits Known(TyBits);
3093 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3094
3095 // If the input is known to be 0 or 1, the output is 0/-1, which is
3096 // all sign bits set.
3097 if ((Known.Zero | 1).isAllOnes())
3098 return TyBits;
3099
3100 // If we are subtracting one from a positive number, there is no carry
3101 // out of the result.
3102 if (Known.isNonNegative())
3103 return Tmp;
3104 }
3105
3106 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3107 if (Tmp2 == 1) break;
3108 return std::min(Tmp, Tmp2) - 1;
3109
3110 case Instruction::Sub:
3111 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3112 if (Tmp2 == 1) break;
3113
3114 // Handle NEG.
3115 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3116 if (CLHS->isNullValue()) {
3117 KnownBits Known(TyBits);
3118 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3119 // If the input is known to be 0 or 1, the output is 0/-1, which is
3120 // all sign bits set.
3121 if ((Known.Zero | 1).isAllOnes())
3122 return TyBits;
3123
3124 // If the input is known to be positive (the sign bit is known clear),
3125 // the output of the NEG has the same number of sign bits as the
3126 // input.
3127 if (Known.isNonNegative())
3128 return Tmp2;
3129
3130 // Otherwise, we treat this like a SUB.
3131 }
3132
3133 // Sub can have at most one carry bit. Thus we know that the output
3134 // is, at worst, one more bit than the inputs.
3135 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3136 if (Tmp == 1) break;
3137 return std::min(Tmp, Tmp2) - 1;
3138
3139 case Instruction::Mul: {
3140 // The output of the Mul can be at most twice the valid bits in the
3141 // inputs.
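      // For example, with TyBits == 32 and 20/18 sign bits on the operands,
      // the valid bits are 13 + 15 == 28, leaving 32 - 28 + 1 == 5 sign bits.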
3142 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3143 if (SignBitsOp0 == 1) break;
3144 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3145 if (SignBitsOp1 == 1) break;
3146 unsigned OutValidBits =
3147 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3148 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3149 }
3150
3151 case Instruction::PHI: {
3152 const PHINode *PN = cast<PHINode>(U);
3153 unsigned NumIncomingValues = PN->getNumIncomingValues();
3154 // Don't analyze large in-degree PHIs.
3155 if (NumIncomingValues > 4) break;
3156 // Unreachable blocks may have zero-operand PHI nodes.
3157 if (NumIncomingValues == 0) break;
3158
3159 // Take the minimum of all incoming values. This can't infinitely loop
3160 // because of our depth threshold.
3161 Query RecQ = Q;
3162 Tmp = TyBits;
3163 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3164 if (Tmp == 1) return Tmp;
3165 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3166 Tmp = std::min(
3167 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3168 }
3169 return Tmp;
3170 }
3171
3172 case Instruction::Trunc:
3173 // FIXME: it's tricky to do anything useful for this, but it is an
3174 // important case for targets like X86.
3175 break;
3176
3177 case Instruction::ExtractElement:
3178 // Look through extract element. At the moment we keep this simple and
3179 // skip tracking the specific element. But at least we might find
3180 // information valid for all elements of the vector (for example if vector
3181 // is sign extended, shifted, etc).
3182 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3183
3184 case Instruction::ShuffleVector: {
3185 // Collect the minimum number of sign bits that are shared by every vector
3186 // element referenced by the shuffle.
3187 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3188 if (!Shuf) {
3189 // FIXME: Add support for shufflevector constant expressions.
3190 return 1;
3191 }
3192 APInt DemandedLHS, DemandedRHS;
3193 // For undef elements, we don't know anything about the common state of
3194 // the shuffle result.
3195 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3196 return 1;
3197 Tmp = std::numeric_limits<unsigned>::max();
3198 if (!!DemandedLHS) {
3199 const Value *LHS = Shuf->getOperand(0);
3200 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3201 }
3202 // If we don't know anything, early out and try computeKnownBits
3203 // fall-back.
3204 if (Tmp == 1)
3205 break;
3206 if (!!DemandedRHS) {
3207 const Value *RHS = Shuf->getOperand(1);
3208 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3209 Tmp = std::min(Tmp, Tmp2);
3210 }
3211 // If we don't know anything, early out and try computeKnownBits
3212 // fall-back.
3213 if (Tmp == 1)
3214 break;
3215      assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3216 return Tmp;
3217 }
3218 case Instruction::Call: {
3219 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3220 switch (II->getIntrinsicID()) {
3221 default: break;
3222 case Intrinsic::abs:
3223 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3224 if (Tmp == 1) break;
3225
3226 // Absolute value reduces number of sign bits by at most 1.
3227 return Tmp - 1;
3228 }
3229 }
3230 }
3231 }
3232 }
3233
3234 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3235 // use this information.
3236
3237 // If we can examine all elements of a vector constant successfully, we're
3238 // done (we can't do any better than that). If not, keep trying.
3239 if (unsigned VecSignBits =
3240 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3241 return VecSignBits;
3242
3243 KnownBits Known(TyBits);
3244 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3245
3246 // If we know that the sign bit is either zero or one, determine the number of
3247 // identical bits in the top of the input value.
3248 return std::max(FirstAnswer, Known.countMinSignBits());
3249}
3250
3251Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3252 const TargetLibraryInfo *TLI) {
3253 const Function *F = CB.getCalledFunction();
3254 if (!F)
3255 return Intrinsic::not_intrinsic;
3256
3257 if (F->isIntrinsic())
3258 return F->getIntrinsicID();
3259
3260 // We are going to infer semantics of a library function based on mapping it
3261 // to an LLVM intrinsic. Check that the library function is available from
3262 // this callbase and in this environment.
3263 LibFunc Func;
3264 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3265 !CB.onlyReadsMemory())
3266 return Intrinsic::not_intrinsic;
3267
3268 switch (Func) {
3269 default:
3270 break;
3271 case LibFunc_sin:
3272 case LibFunc_sinf:
3273 case LibFunc_sinl:
3274 return Intrinsic::sin;
3275 case LibFunc_cos:
3276 case LibFunc_cosf:
3277 case LibFunc_cosl:
3278 return Intrinsic::cos;
3279 case LibFunc_exp:
3280 case LibFunc_expf:
3281 case LibFunc_expl:
3282 return Intrinsic::exp;
3283 case LibFunc_exp2:
3284 case LibFunc_exp2f:
3285 case LibFunc_exp2l:
3286 return Intrinsic::exp2;
3287 case LibFunc_log:
3288 case LibFunc_logf:
3289 case LibFunc_logl:
3290 return Intrinsic::log;
3291 case LibFunc_log10:
3292 case LibFunc_log10f:
3293 case LibFunc_log10l:
3294 return Intrinsic::log10;
3295 case LibFunc_log2:
3296 case LibFunc_log2f:
3297 case LibFunc_log2l:
3298 return Intrinsic::log2;
3299 case LibFunc_fabs:
3300 case LibFunc_fabsf:
3301 case LibFunc_fabsl:
3302 return Intrinsic::fabs;
3303 case LibFunc_fmin:
3304 case LibFunc_fminf:
3305 case LibFunc_fminl:
3306 return Intrinsic::minnum;
3307 case LibFunc_fmax:
3308 case LibFunc_fmaxf:
3309 case LibFunc_fmaxl:
3310 return Intrinsic::maxnum;
3311 case LibFunc_copysign:
3312 case LibFunc_copysignf:
3313 case LibFunc_copysignl:
3314 return Intrinsic::copysign;
3315 case LibFunc_floor:
3316 case LibFunc_floorf:
3317 case LibFunc_floorl:
3318 return Intrinsic::floor;
3319 case LibFunc_ceil:
3320 case LibFunc_ceilf:
3321 case LibFunc_ceill:
3322 return Intrinsic::ceil;
3323 case LibFunc_trunc:
3324 case LibFunc_truncf:
3325 case LibFunc_truncl:
3326 return Intrinsic::trunc;
3327 case LibFunc_rint:
3328 case LibFunc_rintf:
3329 case LibFunc_rintl:
3330 return Intrinsic::rint;
3331 case LibFunc_nearbyint:
3332 case LibFunc_nearbyintf:
3333 case LibFunc_nearbyintl:
3334 return Intrinsic::nearbyint;
3335 case LibFunc_round:
3336 case LibFunc_roundf:
3337 case LibFunc_roundl:
3338 return Intrinsic::round;
3339 case LibFunc_roundeven:
3340 case LibFunc_roundevenf:
3341 case LibFunc_roundevenl:
3342 return Intrinsic::roundeven;
3343 case LibFunc_pow:
3344 case LibFunc_powf:
3345 case LibFunc_powl:
3346 return Intrinsic::pow;
3347 case LibFunc_sqrt:
3348 case LibFunc_sqrtf:
3349 case LibFunc_sqrtl:
3350 return Intrinsic::sqrt;
3351 }
3352
3353 return Intrinsic::not_intrinsic;
3354}
3355
3356/// Return true if we can prove that the specified FP value is never equal to
3357/// -0.0.
3358/// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3359/// that a value is not -0.0. It only guarantees that -0.0 may be treated
3360/// the same as +0.0 in floating-point ops.
3361///
3362/// NOTE: this function will need to be revisited when we support non-default
3363/// rounding modes!
3364bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3365 unsigned Depth) {
3366 if (auto *CFP = dyn_cast<ConstantFP>(V))
3367 return !CFP->getValueAPF().isNegZero();
3368
3369 if (Depth == MaxAnalysisRecursionDepth)
3370 return false;
3371
3372 auto *Op = dyn_cast<Operator>(V);
3373 if (!Op)
3374 return false;
3375
3376 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3377 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3378 return true;
3379
3380 // sitofp and uitofp turn into +0.0 for zero.
3381 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3382 return true;
3383
3384 if (auto *Call = dyn_cast<CallInst>(Op)) {
3385 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3386 switch (IID) {
3387 default:
3388 break;
3389 // sqrt(-0.0) = -0.0, no other negative results are possible.
3390 case Intrinsic::sqrt:
3391 case Intrinsic::canonicalize:
3392 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3393 // fabs(x) != -0.0
3394 case Intrinsic::fabs:
3395 return true;
3396 }
3397 }
3398
3399 return false;
3400}
3401
3402/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3403 /// standard ordered compare. E.g. treat -0.0 as olt 0.0 because of its sign
3404 /// bit, despite the two comparing equal.
3405static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3406 const TargetLibraryInfo *TLI,
3407 bool SignBitOnly,
3408 unsigned Depth) {
3409 // TODO: This function does not do the right thing when SignBitOnly is true
3410 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3411 // which flips the sign bits of NaNs. See
3412 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3413
3414 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3415 return !CFP->getValueAPF().isNegative() ||
3416 (!SignBitOnly && CFP->getValueAPF().isZero());
3417 }
3418
3419 // Handle vector of constants.
3420 if (auto *CV = dyn_cast<Constant>(V)) {
3421 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3422 unsigned NumElts = CVFVTy->getNumElements();
3423 for (unsigned i = 0; i != NumElts; ++i) {
3424 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3425 if (!CFP)
3426 return false;
3427 if (CFP->getValueAPF().isNegative() &&
3428 (SignBitOnly || !CFP->getValueAPF().isZero()))
3429 return false;
3430 }
3431
3432 // All non-negative ConstantFPs.
3433 return true;
3434 }
3435 }
3436
3437 if (Depth == MaxAnalysisRecursionDepth)
3438 return false;
3439
3440 const Operator *I = dyn_cast<Operator>(V);
3441 if (!I)
3442 return false;
3443
3444 switch (I->getOpcode()) {
3445 default:
3446 break;
3447 // Unsigned integers are always nonnegative.
3448 case Instruction::UIToFP:
3449 return true;
3450 case Instruction::FMul:
3451 case Instruction::FDiv:
3452 // X * X is always non-negative or a NaN.
3453 // X / X is always exactly 1.0 or a NaN.
3454 if (I->getOperand(0) == I->getOperand(1) &&
3455 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3456 return true;
3457
3458     LLVM_FALLTHROUGH;
3459 case Instruction::FAdd:
3460 case Instruction::FRem:
3461 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3462 Depth + 1) &&
3463 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3464 Depth + 1);
3465 case Instruction::Select:
3466 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3467 Depth + 1) &&
3468 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3469 Depth + 1);
3470 case Instruction::FPExt:
3471 case Instruction::FPTrunc:
3472 // Widening/narrowing never change sign.
3473 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3474 Depth + 1);
3475 case Instruction::ExtractElement:
3476 // Look through extract element. At the moment we keep this simple and skip
3477 // tracking the specific element. But at least we might find information
3478 // valid for all elements of the vector.
3479 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3480 Depth + 1);
3481 case Instruction::Call:
3482 const auto *CI = cast<CallInst>(I);
3483 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3484 switch (IID) {
3485 default:
3486 break;
3487 case Intrinsic::maxnum: {
3488 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3489 auto isPositiveNum = [&](Value *V) {
3490 if (SignBitOnly) {
3491 // With SignBitOnly, this is tricky because the result of
3492 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3493 // a constant strictly greater than 0.0.
3494 const APFloat *C;
3495 return match(V, m_APFloat(C)) &&
3496 *C > APFloat::getZero(C->getSemantics());
3497 }
3498
3499 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3500 // maxnum can't be ordered-less-than-zero.
3501 return isKnownNeverNaN(V, TLI) &&
3502 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3503 };
3504
3505 // TODO: This could be improved. We could also check that neither operand
3506 // has its sign bit set (and at least 1 is not-NAN?).
3507 return isPositiveNum(V0) || isPositiveNum(V1);
3508 }
3509
3510 case Intrinsic::maximum:
3511 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3512 Depth + 1) ||
3513 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3514 Depth + 1);
3515 case Intrinsic::minnum:
3516 case Intrinsic::minimum:
3517 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3518 Depth + 1) &&
3519 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3520 Depth + 1);
3521 case Intrinsic::exp:
3522 case Intrinsic::exp2:
3523 case Intrinsic::fabs:
3524 return true;
3525
3526 case Intrinsic::sqrt:
3527 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3528 if (!SignBitOnly)
3529 return true;
3530 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3531 CannotBeNegativeZero(CI->getOperand(0), TLI));
3532
3533 case Intrinsic::powi:
3534 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3535 // powi(x,n) is non-negative if n is even.
3536 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3537 return true;
3538 }
3539 // TODO: This is not correct. Given that exp is an integer, here are the
3540 // ways that pow can return a negative value:
3541 //
3542 // pow(x, exp) --> negative if exp is odd and x is negative.
3543 // pow(-0, exp) --> -inf if exp is negative odd.
3544 // pow(-0, exp) --> -0 if exp is positive odd.
3545 // pow(-inf, exp) --> -0 if exp is negative odd.
3546 // pow(-inf, exp) --> -inf if exp is positive odd.
3547 //
3548 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3549 // but we must return false if x == -0. Unfortunately we do not currently
3550 // have a way of expressing this constraint. See details in
3551 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3552 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3553 Depth + 1);
3554
3555 case Intrinsic::fma:
3556 case Intrinsic::fmuladd:
3557 // x*x+y is non-negative if y is non-negative.
3558 return I->getOperand(0) == I->getOperand(1) &&
3559 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3560 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3561 Depth + 1);
3562 }
3563 break;
3564 }
3565 return false;
3566}
3567
3568bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3569 const TargetLibraryInfo *TLI) {
3570 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3571}
3572
3573bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3574 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3575}
3576
3577bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3578 unsigned Depth) {
3579  assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3580
3581 // If we're told that infinities won't happen, assume they won't.
3582 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3583 if (FPMathOp->hasNoInfs())
3584 return true;
3585
3586 // Handle scalar constants.
3587 if (auto *CFP = dyn_cast<ConstantFP>(V))
3588 return !CFP->isInfinity();
3589
3590 if (Depth == MaxAnalysisRecursionDepth)
3591 return false;
3592
3593 if (auto *Inst = dyn_cast<Instruction>(V)) {
3594 switch (Inst->getOpcode()) {
3595 case Instruction::Select: {
3596 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3597 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3598 }
3599 case Instruction::SIToFP:
3600 case Instruction::UIToFP: {
3601 // Get width of largest magnitude integer (remove a bit if signed).
3602 // This still works for a signed minimum value because the largest FP
3603 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3604 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3605 if (Inst->getOpcode() == Instruction::SIToFP)
3606 --IntSize;
3607
3608 // If the exponent of the largest finite FP value can hold the largest
3609 // integer, the result of the cast must be finite.
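      // For example, sitofp i32 -> float: IntSize == 31 and
      // ilogb(FLT_MAX) == 127 >= 31, so the result is always finite.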
3610 Type *FPTy = Inst->getType()->getScalarType();
3611 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3612 }
3613 default:
3614 break;
3615 }
3616 }
3617
3618   // Try to handle fixed width vector constants.
3619 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3620 if (VFVTy && isa<Constant>(V)) {
3621 // For vectors, verify that each element is not infinity.
3622 unsigned NumElts = VFVTy->getNumElements();
3623 for (unsigned i = 0; i != NumElts; ++i) {
3624 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3625 if (!Elt)
3626 return false;
3627 if (isa<UndefValue>(Elt))
3628 continue;
3629 auto *CElt = dyn_cast<ConstantFP>(Elt);
3630 if (!CElt || CElt->isInfinity())
3631 return false;
3632 }
3633 // All elements were confirmed non-infinity or undefined.
3634 return true;
3635 }
3636
3637   // Was not able to prove that V never contains infinity.
3638 return false;
3639}
3640
3641bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3642 unsigned Depth) {
3643  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3644
3645 // If we're told that NaNs won't happen, assume they won't.
3646 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3647 if (FPMathOp->hasNoNaNs())
3648 return true;
3649
3650 // Handle scalar constants.
3651 if (auto *CFP = dyn_cast<ConstantFP>(V))
3652 return !CFP->isNaN();
3653
3654 if (Depth == MaxAnalysisRecursionDepth)
3655 return false;
3656
3657 if (auto *Inst = dyn_cast<Instruction>(V)) {
3658 switch (Inst->getOpcode()) {
3659 case Instruction::FAdd:
3660 case Instruction::FSub:
3661 // Adding positive and negative infinity produces NaN.
3662 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3663 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3664 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3665 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3666
3667 case Instruction::FMul:
3668 // Zero multiplied with infinity produces NaN.
3669 // FIXME: If neither side can be zero fmul never produces NaN.
3670 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3671 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3672 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3673 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3674
3675 case Instruction::FDiv:
3676 case Instruction::FRem:
3677 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3678 return false;
3679
3680 case Instruction::Select: {
3681 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3682 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3683 }
3684 case Instruction::SIToFP:
3685 case Instruction::UIToFP:
3686 return true;
3687 case Instruction::FPTrunc:
3688 case Instruction::FPExt:
3689 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3690 default:
3691 break;
3692 }
3693 }
3694
3695 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3696 switch (II->getIntrinsicID()) {
3697 case Intrinsic::canonicalize:
3698 case Intrinsic::fabs:
3699 case Intrinsic::copysign:
3700 case Intrinsic::exp:
3701 case Intrinsic::exp2:
3702 case Intrinsic::floor:
3703 case Intrinsic::ceil:
3704 case Intrinsic::trunc:
3705 case Intrinsic::rint:
3706 case Intrinsic::nearbyint:
3707 case Intrinsic::round:
3708 case Intrinsic::roundeven:
3709 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3710 case Intrinsic::sqrt:
3711 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3712 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3713 case Intrinsic::minnum:
3714 case Intrinsic::maxnum:
3715 // If either operand is not NaN, the result is not NaN.
3716 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3717 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3718 default:
3719 return false;
3720 }
3721 }
3722
3723 // Try to handle fixed width vector constants
3724 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3725 if (VFVTy && isa<Constant>(V)) {
3726 // For vectors, verify that each element is not NaN.
3727 unsigned NumElts = VFVTy->getNumElements();
3728 for (unsigned i = 0; i != NumElts; ++i) {
3729 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3730 if (!Elt)
3731 return false;
3732 if (isa<UndefValue>(Elt))
3733 continue;
3734 auto *CElt = dyn_cast<ConstantFP>(Elt);
3735 if (!CElt || CElt->isNaN())
3736 return false;
3737 }
3738 // All elements were confirmed not-NaN or undefined.
3739 return true;
3740 }
3741
3742 // Was not able to prove that V never contains NaN
3743 return false;
3744}
3745
3746Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3747
3748 // All byte-wide stores are splatable, even of arbitrary variables.
3749 if (V->getType()->isIntegerTy(8))
3750 return V;
3751
3752 LLVMContext &Ctx = V->getContext();
3753
3754   // Undef: don't care; any byte value works.
3755 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3756 if (isa<UndefValue>(V))
3757 return UndefInt8;
3758
3759 // Return Undef for zero-sized type.
3760 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3761 return UndefInt8;
3762
3763 Constant *C = dyn_cast<Constant>(V);
3764 if (!C) {
3765 // Conceptually, we could handle things like:
3766 // %a = zext i8 %X to i16
3767 // %b = shl i16 %a, 8
3768 // %c = or i16 %a, %b
3769 // but until there is an example that actually needs this, it doesn't seem
3770 // worth worrying about.
3771 return nullptr;
3772 }
3773
3774 // Handle 'null' ConstantArrayZero etc.
3775 if (C->isNullValue())
3776 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3777
3778 // Constant floating-point values can be handled as integer values if the
3779 // corresponding integer value is "byteable". An important case is 0.0.
3780 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3781 Type *Ty = nullptr;
3782 if (CFP->getType()->isHalfTy())
3783 Ty = Type::getInt16Ty(Ctx);
3784 else if (CFP->getType()->isFloatTy())
3785 Ty = Type::getInt32Ty(Ctx);
3786 else if (CFP->getType()->isDoubleTy())
3787 Ty = Type::getInt64Ty(Ctx);
3788 // Don't handle long double formats, which have strange constraints.
3789 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3790 : nullptr;
3791 }
3792
3793   // We can handle constant integers that are a multiple of 8 bits.
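  // For example, i32 0xAAAAAAAA splats to the byte 0xAA, while i32 0xAABBAABB
  // does not splat and returns nullptr.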
3794 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3795 if (CI->getBitWidth() % 8 == 0) {
3796      assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3797 if (!CI->getValue().isSplat(8))
3798 return nullptr;
3799 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3800 }
3801 }
3802
3803 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3804 if (CE->getOpcode() == Instruction::IntToPtr) {
3805 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3806 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3807 return isBytewiseValue(
3808 ConstantExpr::getIntegerCast(CE->getOperand(0),
3809 Type::getIntNTy(Ctx, BitWidth), false),
3810 DL);
3811 }
3812 }
3813 }
3814
3815 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3816 if (LHS == RHS)
3817 return LHS;
3818 if (!LHS || !RHS)
3819 return nullptr;
3820 if (LHS == UndefInt8)
3821 return RHS;
3822 if (RHS == UndefInt8)
3823 return LHS;
3824 return nullptr;
3825 };
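  // Merge folds per-element byte values: equal values or undef combine,
  // anything else conflicts. E.g. Merge(0xAA, UndefInt8) == 0xAA, while
  // Merge(0xAA, 0xBB) == nullptr.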
3826
3827 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3828 Value *Val = UndefInt8;
3829 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3830 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3831 return nullptr;
3832 return Val;
3833 }
3834
3835 if (isa<ConstantAggregate>(C)) {
3836 Value *Val = UndefInt8;
3837 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3838 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3839 return nullptr;
3840 return Val;
3841 }
3842
3843 // Don't try to handle the handful of other constants.
3844 return nullptr;
3845}
3846
3847// This is the recursive version of BuildSubAggregate. It takes a few different
3848// arguments. Idxs is the index within the nested struct From that we are
3849// looking at now (which is of type IndexedType). IdxSkip is the number of
3850// indices from Idxs that should be left out when inserting into the resulting
3851// struct. To is the result struct built so far, new insertvalue instructions
3852// build on that.
3853static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3854 SmallVectorImpl<unsigned> &Idxs,
3855 unsigned IdxSkip,
3856 Instruction *InsertBefore) {
3857 StructType *STy = dyn_cast<StructType>(IndexedType);
3858 if (STy) {
3859 // Save the original To argument so we can modify it
3860 Value *OrigTo = To;
3861 // General case, the type indexed by Idxs is a struct
3862 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3863 // Process each struct element recursively
3864 Idxs.push_back(i);
3865 Value *PrevTo = To;
3866 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3867 InsertBefore);
3868 Idxs.pop_back();
3869 if (!To) {
3870 // Couldn't find any inserted value for this index? Cleanup
3871 while (PrevTo != OrigTo) {
3872 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3873 PrevTo = Del->getAggregateOperand();
3874 Del->eraseFromParent();
3875 }
3876 // Stop processing elements
3877 break;
3878 }
3879 }
3880 // If we successfully found a value for each of our subaggregates
3881 if (To)
3882 return To;
3883 }
3884 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3885 // the struct's elements had a value that was inserted directly. In the latter
3886 // case, perhaps we can't determine each of the subelements individually, but
3887 // we might be able to find the complete struct somewhere.
3888
3889 // Find the value that is at that particular spot
3890 Value *V = FindInsertedValue(From, Idxs);
3891
3892 if (!V)
3893 return nullptr;
3894
3895 // Insert the value in the new (sub) aggregate
3896 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3897 "tmp", InsertBefore);
3898}
3899
3900// This helper takes a nested struct and extracts a part of it (which is again a
3901// struct) into a new value. For example, given the struct:
3902// { a, { b, { c, d }, e } }
3903// and the indices "1, 1" this returns
3904// { c, d }.
3905//
3906// It does this by inserting an insertvalue for each element in the resulting
3907// struct, as opposed to just inserting a single struct. This will only work if
3908 // each of the elements of the substruct is known (i.e., inserted into From by an
3909// insertvalue instruction somewhere).
3910//
3911// All inserted insertvalue instructions are inserted before InsertBefore
3912static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3913 Instruction *InsertBefore) {
3914  assert(InsertBefore && "Must have someplace to insert!");
3915 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3916 idx_range);
3917 Value *To = UndefValue::get(IndexedType);
3918 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3919 unsigned IdxSkip = Idxs.size();
3920
3921 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3922}
3923
3924/// Given an aggregate and a sequence of indices, see if the scalar value
3925/// indexed is already around as a register, for example if it was inserted
3926/// directly into the aggregate.
3927///
3928/// If InsertBefore is not null, this function will duplicate (modified)
3929/// insertvalues when a part of a nested struct is extracted.
3930Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3931 Instruction *InsertBefore) {
3932 // Nothing to index? Just return V then (this is useful at the end of our
3933 // recursion).
3934 if (idx_range.empty())
3935 return V;
3936 // We have indices, so V should have an indexable type.
3937  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3938         "Not looking at a struct or array?");
3939  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3940         "Invalid indices for type?");
3941
3942 if (Constant *C = dyn_cast<Constant>(V)) {
3943 C = C->getAggregateElement(idx_range[0]);
3944 if (!C) return nullptr;
3945 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3946 }
3947
3948 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3949 // Loop over the indices of the insertvalue instruction in parallel with
3950 // the requested indices.
3951 const unsigned *req_idx = idx_range.begin();
3952 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3953 i != e; ++i, ++req_idx) {
3954 if (req_idx == idx_range.end()) {
3955 // We can't handle this without inserting insertvalues
3956 if (!InsertBefore)
3957 return nullptr;
3958
3959 // The requested index identifies a part of a nested aggregate. Handle
3960 // this specially. For example,
3961 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3962 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3963 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3964 // This can be changed into
3965 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3966 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3967 // which allows the unused 0,0 element from the nested struct to be
3968 // removed.
3969 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3970 InsertBefore);
3971 }
3972
3973 // This insertvalue inserts something other than what we are looking for.
3974 // See if the (aggregate) value it inserts into has the value we are
3975 // looking for instead.
3976 if (*req_idx != *i)
3977 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3978 InsertBefore);
3979 }
3980 // If we end up here, the indices of the insertvalue match with those
3981 // requested (though possibly only partially). Now we recursively look at
3982 // the inserted value, passing any remaining indices.
3983 return FindInsertedValue(I->getInsertedValueOperand(),
3984 makeArrayRef(req_idx, idx_range.end()),
3985 InsertBefore);
3986 }
3987
3988 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3989 // If we're extracting a value from an aggregate that was extracted from
3990 // something else, we can extract from that something else directly instead.
3991 // However, we will need to chain I's indices with the requested indices.
3992
3993 // Calculate the number of indices required
3994 unsigned size = I->getNumIndices() + idx_range.size();
3995 // Allocate some space to put the new indices in
3996 SmallVector<unsigned, 5> Idxs;
3997 Idxs.reserve(size);
3998 // Add indices from the extract value instruction
3999 Idxs.append(I->idx_begin(), I->idx_end());
4000
4001 // Add requested indices
4002 Idxs.append(idx_range.begin(), idx_range.end());
4003
4004 assert(Idxs.size() == size &&
4005 "Number of indices added not correct?");
4006
4007 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4008 }
4009 // Otherwise, we don't know (such as when extracting from a function return
4010 // value or a load instruction).
4011 return nullptr;
4012}
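// A minimal illustration of the fast path (names hypothetical): for
//   %agg = insertvalue { i32, i32 } undef, i32 42, 0
// FindInsertedValue(%agg, {0}) returns the i32 42 directly, with no new
// instructions, because the scalar is already available as a register.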
4013
4014bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4015 unsigned CharSize) {
4016 // Make sure the GEP has exactly three arguments.
4017 if (GEP->getNumOperands() != 3)
4018 return false;
4019
4020 // Make sure the index-ee is a pointer to an array of \p CharSize
4021 // integers.
4022 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4023 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4024 return false;
4025
4026 // Check to make sure that the first operand of the GEP is an integer and
4027 // has value 0 so that we are sure we're indexing into the initializer.
4028 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4029 if (!FirstIdx || !FirstIdx->isZero())
4030 return false;
4031
4032 return true;
4033}
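// The accepted shape is the classic string-constant access; for CharSize == 8
// (illustrative IR):
//   getelementptr inbounds [13 x i8], [13 x i8]* @str, i64 0, i64 %idx
// i.e. exactly three operands, with a zero first index into an [N x i8] array.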
4034
4035bool llvm::getConstantDataArrayInfo(const Value *V,
4036 ConstantDataArraySlice &Slice,
4037 unsigned ElementSize, uint64_t Offset) {
4038 assert(V);
4039
4040 // Look through bitcast instructions and geps.
4041 V = V->stripPointerCasts();
4042
4043 // If the value is a GEP instruction or constant expression, treat it as an
4044 // offset.
4045 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4046 // The GEP operator should be based on a pointer to a string constant,
4047 // and should be indexing into that string constant.
4048 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
4049 return false;
4050
4051 // If the second index isn't a ConstantInt, then this is a variable index
4052 // into the array. If this occurs, we can't say anything meaningful about
4053 // the string.
4054 uint64_t StartIdx = 0;
4055 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
4056 StartIdx = CI->getZExtValue();
4057 else
4058 return false;
4059 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
4060 StartIdx + Offset);
4061 }
4062
4063 // The GEP, whether a constant expression or an instruction, must reference
4064 // a global variable that is a constant and is initialized. The referenced
4065 // constant initializer is the array that we'll use for optimization.
4066 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
4067 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4068 return false;
4069
4070 const ConstantDataArray *Array;
4071 ArrayType *ArrayTy;
4072 if (GV->getInitializer()->isNullValue()) {
4073 Type *GVTy = GV->getValueType();
4074 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
4075 // A zeroinitializer for the array; there is no ConstantDataArray.
4076 Array = nullptr;
4077 } else {
4078 const DataLayout &DL = GV->getParent()->getDataLayout();
4079 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4080 uint64_t Length = SizeInBytes / (ElementSize / 8);
4081 if (Length <= Offset)
4082 return false;
4083
4084 Slice.Array = nullptr;
4085 Slice.Offset = 0;
4086 Slice.Length = Length - Offset;
4087 return true;
4088 }
4089 } else {
4090 // This must be a ConstantDataArray.
4091 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
4092 if (!Array)
4093 return false;
4094 ArrayTy = Array->getType();
4095 }
4096 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4097 return false;
4098
4099 uint64_t NumElts = ArrayTy->getArrayNumElements();
4100 if (Offset > NumElts)
4101 return false;
4102
4103 Slice.Array = Array;
4104 Slice.Offset = Offset;
4105 Slice.Length = NumElts - Offset;
4106 return true;
4107}
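// An illustrative call (hypothetical global): for
//   @hello = constant [6 x i8] c"hello\00"
// getConstantDataArrayInfo(@hello, Slice, 8, /*Offset=*/1) succeeds with
// Slice.Offset == 1 and Slice.Length == 5 ("ello" plus the nul).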
4108
4109/// This function computes the length of a null-terminated C string pointed to
4110/// by V. If successful, it returns true and returns the string in Str.
4111/// If unsuccessful, it returns false.
4112bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4113 uint64_t Offset, bool TrimAtNul) {
4114 ConstantDataArraySlice Slice;
4115 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4116 return false;
4117
4118 if (Slice.Array == nullptr) {
4119 if (TrimAtNul) {
4120 Str = StringRef();
4121 return true;
4122 }
4123 if (Slice.Length == 1) {
4124 Str = StringRef("", 1);
4125 return true;
4126 }
4127 // We cannot instantiate a StringRef as we do not have an appropriate string
4128 // of 0s at hand.
4129 return false;
4130 }
4131
4132 // Start out with the entire array in the StringRef.
4133 Str = Slice.Array->getAsString();
4134 // Skip over 'offset' bytes.
4135 Str = Str.substr(Slice.Offset);
4136
4137 if (TrimAtNul) {
4138 // Trim off the \0 and anything after it. If the array is not nul
4139 // terminated, we just return the rest of the string. The client may know
4140 // some other way that the string is length-bound.
4141 Str = Str.substr(0, Str.find('\0'));
4142 }
4143 return true;
4144}
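// An illustrative call (hypothetical global): with
//   @hello = constant [6 x i8] c"hello\00"
// getConstantStringInfo(@hello, Str) yields Str == "hello"; TrimAtNul strips
// the terminator and anything after it.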
4145
4146// These next two are very similar to the above, but also look through PHI
4147// nodes.
4148// TODO: See if we can integrate these two together.
4149
4150/// If we can compute the length of the string pointed to by
4151/// the specified pointer, return 'len+1'. If we can't, return 0.
4152static uint64_t GetStringLengthH(const Value *V,
4153 SmallPtrSetImpl<const PHINode*> &PHIs,
4154 unsigned CharSize) {
4155 // Look through noop bitcast instructions.
4156 V = V->stripPointerCasts();
4157
4158 // If this is a PHI node, there are two cases: either we have already seen it
4159 // or we haven't.
4160 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4161 if (!PHIs.insert(PN).second)
4162 return ~0ULL; // already in the set.
4163
4164 // If it was new, see if all the input strings are the same length.
4165 uint64_t LenSoFar = ~0ULL;
4166 for (Value *IncValue : PN->incoming_values()) {
4167 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4168 if (Len == 0) return 0; // Unknown length -> unknown.
4169
4170 if (Len == ~0ULL) continue;
4171
4172 if (Len != LenSoFar && LenSoFar != ~0ULL)
4173 return 0; // Disagree -> unknown.
4174 LenSoFar = Len;
4175 }
4176
4177 // Success, all agree.
4178 return LenSoFar;
4179 }
4180
4181 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4182 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4183 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4184 if (Len1 == 0) return 0;
4185 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4186 if (Len2 == 0) return 0;
4187 if (Len1 == ~0ULL) return Len2;
4188 if (Len2 == ~0ULL) return Len1;
4189 if (Len1 != Len2) return 0;
4190 return Len1;
4191 }
4192
4193 // Otherwise, see if we can read the string.
4194 ConstantDataArraySlice Slice;
4195 if (!getConstantDataArrayInfo(V, Slice, CharSize))
4196 return 0;
4197
4198 if (Slice.Array == nullptr)
4199 return 1;
4200
4201 // Search for nul characters
4202 unsigned NullIndex = 0;
4203 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4204 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4205 break;
4206 }
4207
4208 return NullIndex + 1;
4209}
4210
4211/// If we can compute the length of the string pointed to by
4212/// the specified pointer, return 'len+1'. If we can't, return 0.
4213uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4214 if (!V->getType()->isPointerTy())
4215 return 0;
4216
4217 SmallPtrSet<const PHINode*, 32> PHIs;
4218 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4219 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
4220 // return 1, the length (len+1) of an empty string.
4221 return Len == ~0ULL ? 1 : Len;
4222}
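// Illustrative results: for @hello = constant [6 x i8] c"hello\00",
// GetStringLength(@hello) returns 6, i.e. strlen("hello") + 1; a pointer the
// analysis cannot see through returns 0.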
4223
4224const Value *
4225llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4226 bool MustPreserveNullness) {
4227 assert(Call &&
4228 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4229 if (const Value *RV = Call->getReturnedArgOperand())
4230 return RV;
4231 // This can be used only as an aliasing property.
4232 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4233 Call, MustPreserveNullness))
4234 return Call->getArgOperand(0);
4235 return nullptr;
4236}
4237
4238bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4239 const CallBase *Call, bool MustPreserveNullness) {
4240 switch (Call->getIntrinsicID()) {
4241 case Intrinsic::launder_invariant_group:
4242 case Intrinsic::strip_invariant_group:
4243 case Intrinsic::aarch64_irg:
4244 case Intrinsic::aarch64_tagp:
4245 return true;
4246 case Intrinsic::ptrmask:
4247 return !MustPreserveNullness;
4248 default:
4249 return false;
4250 }
4251}
4252
4253/// \p PN defines a loop-variant pointer to an object. Check if the
4254/// previous iteration of the loop was referring to the same object as \p PN.
4255static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4256 const LoopInfo *LI) {
4257 // Find the loop-defined value.
4258 Loop *L = LI->getLoopFor(PN->getParent());
4259 if (PN->getNumIncomingValues() != 2)
4260 return true;
4261
4262 // Find the value from previous iteration.
4263 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4264 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4265 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4266 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4267 return true;
4268
4269 // If a new pointer is loaded in the loop, the pointer references a different
4270 // object in every iteration. E.g.:
4271 // for (i)
4272 // int *p = a[i];
4273 // ...
4274 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4275 if (!L->isLoopInvariant(Load->getPointerOperand()))
4276 return false;
4277 return true;
4278}
4279
4280const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4281 if (!V->getType()->isPointerTy())
4282 return V;
4283 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4284 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4285 V = GEP->getPointerOperand();
4286 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4287 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4288 V = cast<Operator>(V)->getOperand(0);
4289 if (!V->getType()->isPointerTy())
4290 return V;
4291 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4292 if (GA->isInterposable())
4293 return V;
4294 V = GA->getAliasee();
4295 } else {
4296 if (auto *PHI = dyn_cast<PHINode>(V)) {
4297 // Look through single-arg phi nodes created by LCSSA.
4298 if (PHI->getNumIncomingValues() == 1) {
4299 V = PHI->getIncomingValue(0);
4300 continue;
4301 }
4302 } else if (auto *Call = dyn_cast<CallBase>(V)) {
4303 // CaptureTracking knows about special capturing properties of some
4304 // intrinsics, like launder.invariant.group, that can't be expressed with
4305 // attributes but have properties like returning an aliasing pointer.
4306 // Because some analyses may assume that a nocapture pointer is not
4307 // returned from a special intrinsic (since the function would have to be
4308 // marked with the returned attribute), it is crucial to use this
4309 // function, which is kept in sync with CaptureTracking. Not using it may
4310 // cause miscompilations where two aliasing pointers are assumed to be
4311 // noalias.
4312 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4313 V = RP;
4314 continue;
4315 }
4316 }
4317
4318 return V;
4319 }
4320 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4321 }
4322 return V;
4323}
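// An illustrative walk, assuming %a is an alloca (names hypothetical):
//   %p = getelementptr i8, i8* %a, i64 4
//   %q = bitcast i8* %p to i32*
// getUnderlyingObject(%q) strips the bitcast, then the GEP, and returns %a.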
4324
4325void llvm::getUnderlyingObjects(const Value *V,
4326 SmallVectorImpl<const Value *> &Objects,
4327 LoopInfo *LI, unsigned MaxLookup) {
4328 SmallPtrSet<const Value *, 4> Visited;
4329 SmallVector<const Value *, 4> Worklist;
4330 Worklist.push_back(V);
4331 do {
4332 const Value *P = Worklist.pop_back_val();
4333 P = getUnderlyingObject(P, MaxLookup);
4334
4335 if (!Visited.insert(P).second)
4336 continue;
4337
4338 if (auto *SI = dyn_cast<SelectInst>(P)) {
4339 Worklist.push_back(SI->getTrueValue());
4340 Worklist.push_back(SI->getFalseValue());
4341 continue;
4342 }
4343
4344 if (auto *PN = dyn_cast<PHINode>(P)) {
4345 // If this PHI changes the underlying object in every iteration of the
4346 // loop, don't look through it. Consider:
4347 // int **A;
4348 // for (i) {
4349 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
4350 // Curr = A[i];
4351 // *Prev, *Curr;
4352 //
4353 // Prev is tracking Curr one iteration behind so they refer to different
4354 // underlying objects.
4355 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4356 isSameUnderlyingObjectInLoop(PN, LI))
4357 append_range(Worklist, PN->incoming_values());
4358 continue;
4359 }
4360
4361 Objects.push_back(P);
4362 } while (!Worklist.empty());
4363}
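// Illustrative fan-out (names hypothetical): for
//   %p = select i1 %c, i8* %a, i8* %b
// both select arms are pushed onto the worklist, so
// getUnderlyingObjects(%p, Objects) fills Objects with { %a, %b }.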
4364
4365/// This is the function that does the work of looking through basic
4366/// ptrtoint+arithmetic+inttoptr sequences.
4367static const Value *getUnderlyingObjectFromInt(const Value *V) {
4368 do {
4369 if (const Operator *U = dyn_cast<Operator>(V)) {
4370 // If we find a ptrtoint, we can transfer control back to the
4371 // regular getUnderlyingObjectFromInt.
4372 if (U->getOpcode() == Instruction::PtrToInt)
4373 return U->getOperand(0);
4374 // If we find an add of a constant, a multiplied value, or a phi, it's
4375 // likely that the other operand will lead us to the base
4376 // object. We don't have to worry about the case where the
4377 // object address is somehow being computed by the multiply,
4378 // because our callers only care when the result is an
4379 // identifiable object.
4380 if (U->getOpcode() != Instruction::Add ||
4381 (!isa<ConstantInt>(U->getOperand(1)) &&
4382 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4383 !isa<PHINode>(U->getOperand(1))))
4384 return V;
4385 V = U->getOperand(0);
4386 } else {
4387 return V;
4388 }
4389 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4390 } while (true);
4391}
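// An illustrative sequence this walks through (names hypothetical):
//   %i = ptrtoint i8* %base to i64
//   %j = add i64 %i, 16
// Starting from %j, the add of a constant is skipped and the ptrtoint
// operand %base is returned.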
4392
4393/// This is a wrapper around getUnderlyingObjects and adds support for basic
4394/// ptrtoint+arithmetic+inttoptr sequences.
4395 /// It returns false if an unidentified object is found by getUnderlyingObjects.
4396bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4397 SmallVectorImpl<Value *> &Objects) {
4398 SmallPtrSet<const Value *, 16> Visited;
4399 SmallVector<const Value *, 4> Working(1, V);
4400 do {
4401 V = Working.pop_back_val();
4402
4403 SmallVector<const Value *, 4> Objs;
4404 getUnderlyingObjects(V, Objs);
4405
4406 for (const Value *V : Objs) {
4407 if (!Visited.insert(V).second)
4408 continue;
4409 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4410 const Value *O =
4411 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4412 if (O->getType()->isPointerTy()) {
4413 Working.push_back(O);
4414 continue;
4415 }
4416 }
4417 // If getUnderlyingObjects fails to find an identifiable object,
4418 // getUnderlyingObjectsForCodeGen also fails for safety.
4419 if (!isIdentifiedObject(V)) {
4420 Objects.clear();
4421 return false;
4422 }
4423 Objects.push_back(const_cast<Value *>(V));
4424 }
4425 } while (!Working.empty());
4426 return true;
4427}
4428
4429AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4430 AllocaInst *Result = nullptr;
4431 SmallPtrSet<Value *, 4> Visited;
4432 SmallVector<Value *, 4> Worklist;
4433
4434 auto AddWork = [&](Value *V) {
4435 if (Visited.insert(V).second)
4436 Worklist.push_back(V);
4437 };
4438
4439 AddWork(V);
4440 do {
4441 V = Worklist.pop_back_val();
4442 assert(Visited.count(V));
4443
4444 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4445 if (Result && Result != AI)
4446 return nullptr;
4447 Result = AI;
4448 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4449 AddWork(CI->getOperand(0));
4450 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4451 for (Value *IncValue : PN->incoming_values())
4452 AddWork(IncValue);
4453 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4454 AddWork(SI->getTrueValue());
4455 AddWork(SI->getFalseValue());
4456 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4457 if (OffsetZero && !GEP->hasAllZeroIndices())
4458 return nullptr;
4459 AddWork(GEP->getPointerOperand());
4460 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
4461 Value *Returned = CB->getReturnedArgOperand();
4462 if (Returned)
4463 AddWork(Returned);
4464 else
4465 return nullptr;
4466 } else {
4467 return nullptr;
4468 }
4469 } while (!Worklist.empty());
4470
4471 return Result;
4472}
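// An illustrative query (names hypothetical): for
//   %buf = alloca [16 x i8]
//   %p   = getelementptr [16 x i8], [16 x i8]* %buf, i64 0, i64 0
// findAllocaForValue(%p, /*OffsetZero=*/true) returns %buf, since the GEP
// has all-zero indices.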
4473
4474static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4475 const Value *V, bool AllowLifetime, bool AllowDroppable) {
4476 for (const User *U : V->users()) {
4477 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4478 if (!II)
4479 return false;
4480
4481 if (AllowLifetime && II->isLifetimeStartOrEnd())
4482 continue;
4483
4484 if (AllowDroppable && II->isDroppable())
4485 continue;
4486
4487 return false;
4488 }
4489 return true;
4490}
4491
4492bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4493 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4494 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4495}
4496bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4497 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4498 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4499}
4500
4501bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4502 if (!LI.isUnordered())
4503 return true;
4504 const Function &F = *LI.getFunction();
4505 // Speculative load may create a race that did not exist in the source.
4506 return F.hasFnAttribute(Attribute::SanitizeThread) ||
4507 // Speculative load may load data from dirty regions.
4508 F.hasFnAttribute(Attribute::SanitizeAddress) ||
4509 F.hasFnAttribute(Attribute::SanitizeHWAddress);
4510}
4511
4512
4513bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4514 const Instruction *CtxI,
4515 const DominatorTree *DT,
4516 const TargetLibraryInfo *TLI) {
4517 const Operator *Inst = dyn_cast<Operator>(V);
4518 if (!Inst)
4519 return false;
4520
4521 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4522 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4523 if (C->canTrap())
4524 return false;
4525
4526 switch (Inst->getOpcode()) {
4527 default:
4528 return true;
4529 case Instruction::UDiv:
4530 case Instruction::URem: {
4531 // x / y is undefined if y == 0.
4532 const APInt *V;
4533 if (match(Inst->getOperand(1), m_APInt(V)))
4534 return *V != 0;
4535 return false;
4536 }
4537 case Instruction::SDiv:
4538 case Instruction::SRem: {
4539 // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
4540 const APInt *Numerator, *Denominator;
4541 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4542 return false;
4543 // We cannot hoist this division if the denominator is 0.
4544 if (*Denominator == 0)
4545 return false;
4546 // It's safe to hoist if the denominator is neither 0 nor -1.
4547 if (!Denominator->isAllOnes())
4548 return true;
4549 // At this point we know that the denominator is -1. It is safe to hoist as
4550 // long as we know that the numerator is not INT_MIN.
4551 if (match(Inst->getOperand(0), m_APInt(Numerator)))
4552 return !Numerator->isMinSignedValue();
4553 // The numerator *might* be MinSignedValue.
4554 return false;
4555 }
4556 case Instruction::Load: {
4557 const LoadInst *LI = cast<LoadInst>(Inst);
4558 if (mustSuppressSpeculation(*LI))
4559 return false;
4560 const DataLayout &DL = LI->getModule()->getDataLayout();
4561 return isDereferenceableAndAlignedPointer(
4562 LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlign()), DL,
4563 CtxI, DT, TLI);
4564 }
4565 case Instruction::Call: {
4566 auto *CI = cast<const CallInst>(Inst);
4567 const Function *Callee = CI->getCalledFunction();
4568
4569 // The called function could have undefined behavior or side-effects, even
4570 // if marked readnone nounwind.
4571 return Callee && Callee->isSpeculatable();
4572 }
4573 case Instruction::VAArg:
4574 case Instruction::Alloca:
4575 case Instruction::Invoke:
4576 case Instruction::CallBr:
4577 case Instruction::PHI:
4578 case Instruction::Store:
4579 case Instruction::Ret:
4580 case Instruction::Br:
4581 case Instruction::IndirectBr:
4582 case Instruction::Switch:
4583 case Instruction::Unreachable:
4584 case Instruction::Fence:
4585 case Instruction::AtomicRMW:
4586 case Instruction::AtomicCmpXchg:
4587 case Instruction::LandingPad:
4588 case Instruction::Resume:
4589 case Instruction::CatchSwitch:
4590 case Instruction::CatchPad:
4591 case Instruction::CatchRet:
4592 case Instruction::CleanupPad:
4593 case Instruction::CleanupRet:
4594 return false; // Misc instructions which have effects
4595 }
4596}
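// Illustrative results: 'udiv i32 %x, 7' is safe to speculate (the divisor
// is a known non-zero constant), while 'udiv i32 %x, %y' is not, since %y
// might be zero.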
4597
4598bool llvm::mayBeMemoryDependent(const Instruction &I) {
4599 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4600}
4601
4602/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4603static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4604 switch (OR) {
4605 case ConstantRange::OverflowResult::MayOverflow:
4606 return OverflowResult::MayOverflow;
4607 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4608 return OverflowResult::AlwaysOverflowsLow;
4609 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4610 return OverflowResult::AlwaysOverflowsHigh;
4611 case ConstantRange::OverflowResult::NeverOverflows:
4612 return OverflowResult::NeverOverflows;
4613 }
4614 llvm_unreachable("Unknown OverflowResult");
4615}
4616
4617/// Combine constant ranges from computeConstantRange() and computeKnownBits().
4618static ConstantRange computeConstantRangeIncludingKnownBits(
4619 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4620 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4621 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4622 KnownBits Known = computeKnownBits(
4623 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4624 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4625 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4626 ConstantRange::PreferredRangeType RangeType =
4627 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4628 return CR1.intersectWith(CR2, RangeType);
4629}
4630
4631OverflowResult llvm::computeOverflowForUnsignedMul(
4632 const Value *LHS, const Value *RHS, const DataLayout &DL,
4633 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4634 bool UseInstrInfo) {
4635 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4636 nullptr, UseInstrInfo);
4637 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4638 nullptr, UseInstrInfo);
4639 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4640 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4641 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4642}
4643
4644OverflowResult
4645llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4646 const DataLayout &DL, AssumptionCache *AC,
4647 const Instruction *CxtI,
4648 const DominatorTree *DT, bool UseInstrInfo) {
4649 // Multiplying n * m significant bits yields a result of n + m significant
4650 // bits. If the total number of significant bits does not exceed the
4651 // result bit width (minus 1), there is no overflow.
4652 // This means if we have enough leading sign bits in the operands
4653 // we can guarantee that the result does not overflow.
4654 // Ref: "Hacker's Delight" by Henry Warren
4655 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4656
4657 // Note that underestimating the number of sign bits gives a more
4658 // conservative answer.
4659 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4660 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4661
4662 // First handle the easy case: if we have enough sign bits there's
4663 // definitely no overflow.
4664 if (SignBits > BitWidth + 1)
4665 return OverflowResult::NeverOverflows;
4666
4667 // There are two ambiguous cases where there can be no overflow:
4668 // SignBits == BitWidth + 1 and
4669 // SignBits == BitWidth
4670 // The second case is difficult to check, therefore we only handle the
4671 // first case.
4672 if (SignBits == BitWidth + 1) {
4673 // It overflows only when both arguments are negative and the true
4674 // product is exactly the minimum negative number.
4675 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4676 // For simplicity we just check if at least one side is not negative.
4677 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4678 nullptr, UseInstrInfo);
4679 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4680 nullptr, UseInstrInfo);
4681 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4682 return OverflowResult::NeverOverflows;
4683 }
4684 return OverflowResult::MayOverflow;
4685}
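// An illustrative application of the sign-bit rule: multiplying two i16
// values that each fit in a signed i8 gives >= 9 sign bits per operand,
// and 9 + 9 = 18 > 16 + 1, so the multiply can never overflow.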
4686
4687OverflowResult llvm::computeOverflowForUnsignedAdd(
4688 const Value *LHS, const Value *RHS, const DataLayout &DL,
4689 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4690 bool UseInstrInfo) {
4691 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4692 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4693 nullptr, UseInstrInfo);
4694 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4695 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4696 nullptr, UseInstrInfo);
4697 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4698}
4699
4700static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4701 const Value *RHS,
4702 const AddOperator *Add,
4703 const DataLayout &DL,
4704 AssumptionCache *AC,
4705 const Instruction *CxtI,
4706 const DominatorTree *DT) {
4707 if (Add && Add->hasNoSignedWrap()) {
4708 return OverflowResult::NeverOverflows;
4709 }
4710
4711 // If LHS and RHS each have at least two sign bits, the addition will look
4712 // like
4713 //
4714 // XX..... +
4715 // YY.....
4716 //
4717 // If the carry into the most significant position is 0, X and Y can't both
4718 // be 1 and therefore the carry out of the addition is also 0.
4719 //
4720 // If the carry into the most significant position is 1, X and Y can't both
4721 // be 0 and therefore the carry out of the addition is also 1.
4722 //
4723 // Since the carry into the most significant position is always equal to
4724 // the carry out of the addition, there is no signed overflow.
4725 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4726 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4727 return OverflowResult::NeverOverflows;
4728
4729 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4730 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4731 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4732 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4733 OverflowResult OR =
4734 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4735 if (OR != OverflowResult::MayOverflow)
4736 return OR;
4737
4738 // The remaining code needs Add to be available. Return early if it is not.
4739 if (!Add)
4740 return OverflowResult::MayOverflow;
4741
4742 // If the sign of Add is the same as at least one of the operands, this add
4743 // CANNOT overflow. If this can be determined from the known bits of the
4744 // operands the above signedAddMayOverflow() check will have already done so.
4745 // The only other way to improve on the known bits is from an assumption, so
4746 // call computeKnownBitsFromAssume() directly.
4747 bool LHSOrRHSKnownNonNegative =
4748 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4749 bool LHSOrRHSKnownNegative =
4750 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4751 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4752 KnownBits AddKnown(LHSRange.getBitWidth());
4753 computeKnownBitsFromAssume(
4754 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4755 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4756 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4757 return OverflowResult::NeverOverflows;
4758 }
4759
4760 return OverflowResult::MayOverflow;
4761}
4762
4763OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4764 const Value *RHS,
4765 const DataLayout &DL,
4766 AssumptionCache *AC,
4767 const Instruction *CxtI,
4768 const DominatorTree *DT) {
4769 // Checking for conditions implied by dominating conditions may be expensive.
4770 // Limit it to usub_with_overflow calls for now.
4771 if (match(CxtI,
4772 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4773 if (auto C =
4774 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4775 if (*C)
4776 return OverflowResult::NeverOverflows;
4777 return OverflowResult::AlwaysOverflowsLow;
4778 }
4779 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4780 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4781 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4782 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4783 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4784}
4785
4786OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4787 const Value *RHS,
4788 const DataLayout &DL,
4789 AssumptionCache *AC,
4790 const Instruction *CxtI,
4791 const DominatorTree *DT) {
4792 // If LHS and RHS each have at least two sign bits, the subtraction
4793 // cannot overflow.
4794 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4795 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4796 return OverflowResult::NeverOverflows;
4797
4798 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4799 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4800 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4801 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4802 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4803}
4804
4805bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4806 const DominatorTree &DT) {
4807 SmallVector<const BranchInst *, 2> GuardingBranches;
4808 SmallVector<const ExtractValueInst *, 2> Results;
4809
4810 for (const User *U : WO->users()) {
4811 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4812 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4813
4814 if (EVI->getIndices()[0] == 0)
4815 Results.push_back(EVI);
4816 else {
4817 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4818
4819 for (const auto *U : EVI->users())
4820 if (const auto *B = dyn_cast<BranchInst>(U)) {
4821 assert(B->isConditional() && "How else is it using an i1?");
4822 GuardingBranches.push_back(B);
4823 }
4824 }
4825 } else {
4826 // We are using the aggregate directly in a way we don't want to analyze
4827 // here (storing it to a global, say).
4828 return false;
4829 }
4830 }
4831
4832 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4833 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4834 if (!NoWrapEdge.isSingleEdge())
4835 return false;
4836
4837 // Check if all users of the add are provably no-wrap.
4838 for (const auto *Result : Results) {
4839 // If the extractvalue itself is not executed on overflow, then we don't
4840 // need to check each use separately, since domination is transitive.
4841 if (DT.dominates(NoWrapEdge, Result->getParent()))
4842 continue;
4843
4844 for (auto &RU : Result->uses())
4845 if (!DT.dominates(NoWrapEdge, RU))
4846 return false;
4847 }
4848
4849 return true;
4850 };
4851
4852 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4853}
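// The guarded pattern this recognizes (illustrative IR, names hypothetical):
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum = extractvalue { i32, i1 } %res, 0
//   %ovf = extractvalue { i32, i1 } %res, 1
//   br i1 %ovf, label %trap, label %cont
// If every use of %sum is dominated by the no-overflow edge into %cont,
// the addition is known not to wrap.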
4854
4855static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
4856 bool ConsiderFlags) {
4857
4858 if (ConsiderFlags && Op->hasPoisonGeneratingFlags())
4859 return true;
4860
4861 unsigned Opcode = Op->getOpcode();
4862
4863 // Check whether opcode is a poison/undef-generating operation
4864 switch (Opcode) {
4865 case Instruction::Shl:
4866 case Instruction::AShr:
4867 case Instruction::LShr: {
4868 // Shifts return poison if the shift amount is at least the bitwidth.
4869 if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4870 SmallVector<Constant *, 4> ShiftAmounts;
4871 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4872 unsigned NumElts = FVTy->getNumElements();
4873 for (unsigned i = 0; i < NumElts; ++i)
4874 ShiftAmounts.push_back(C->getAggregateElement(i));
4875 } else if (isa<ScalableVectorType>(C->getType()))
4876 return true; // Can't tell, just return true to be safe
4877 else
4878 ShiftAmounts.push_back(C);
4879
4880 bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4881 auto *CI = dyn_cast_or_null<ConstantInt>(C);
4882 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4883 });
4884 return !Safe;
4885 }
4886 return true;
4887 }
4888 case Instruction::FPToSI:
4889 case Instruction::FPToUI:
4890 // fptosi/ui yields poison if the resulting value does not fit in the
4891 // destination type.
4892 return true;
4893 case Instruction::Call:
4894 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
4895 switch (II->getIntrinsicID()) {
4896 // TODO: Add more intrinsics.
4897 case Intrinsic::ctpop:
4898 case Intrinsic::sadd_with_overflow:
4899 case Intrinsic::ssub_with_overflow:
4900 case Intrinsic::smul_with_overflow:
4901 case Intrinsic::uadd_with_overflow:
4902 case Intrinsic::usub_with_overflow:
4903 case Intrinsic::umul_with_overflow:
4904 return false;
4905 }
4906 }
4907 LLVM_FALLTHROUGH;
4908 case Instruction::CallBr:
4909 case Instruction::Invoke: {
4910 const auto *CB = cast<CallBase>(Op);
4911 return !CB->hasRetAttr(Attribute::NoUndef);
4912 }
4913 case Instruction::InsertElement:
4914 case Instruction::ExtractElement: {
4915 // If the index exceeds the length of the vector, the result is poison.
4916 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4917 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4918 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4919 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
4920 return true;
4921 return false;
4922 }
4923 case Instruction::ShuffleVector: {
4924 // shufflevector may return undef.
4925 if (PoisonOnly)
4926 return false;
4927 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4928 ? cast<ConstantExpr>(Op)->getShuffleMask()
4929 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4930 return is_contained(Mask, UndefMaskElem);
4931 }
4932 case Instruction::FNeg:
4933 case Instruction::PHI:
4934 case Instruction::Select:
4935 case Instruction::URem:
4936 case Instruction::SRem:
4937 case Instruction::ExtractValue:
4938 case Instruction::InsertValue:
4939 case Instruction::Freeze:
4940 case Instruction::ICmp:
4941 case Instruction::FCmp:
4942 return false;
4943 case Instruction::GetElementPtr:
4944 // inbounds is handled above
4945 // TODO: what about inrange on constexpr?
4946 return false;
4947 default: {
4948 const auto *CE = dyn_cast<ConstantExpr>(Op);
4949 if (isa<CastInst>(Op) || (CE && CE->isCast()))
4950 return false;
4951 else if (Instruction::isBinaryOp(Opcode))
4952 return false;
4953 // Be conservative and return true.
4954 return true;
4955 }
4956 }
4957}
4958
4959bool llvm::canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlags) {
4960 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false, ConsiderFlags);
4961}
4962
4963bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlags) {
4964 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true, ConsiderFlags);
4965}
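// Illustrative results: 'shl i32 %x, 33' can create poison (the shift
// amount is not below the bit width), while a plain 'add i32 %x, %y'
// without nsw/nuw flags cannot.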
4966
4967static bool directlyImpliesPoison(const Value *ValAssumedPoison,
4968 const Value *V, unsigned Depth) {
4969 if (ValAssumedPoison == V)
4970 return true;
4971
4972 const unsigned MaxDepth = 2;
4973 if (Depth >= MaxDepth)
4974 return false;
4975
4976 if (const auto *I = dyn_cast<Instruction>(V)) {
4977 if (propagatesPoison(cast<Operator>(I)))
4978 return any_of(I->operands(), [=](const Value *Op) {
4979 return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
4980 });
4981
4982 // 'select ValAssumedPoison, _, _' is poison.
4983 if (const auto *SI = dyn_cast<SelectInst>(I))
4984 return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
4985 Depth + 1);
4986 // V = extractvalue V0, idx
4987 // V2 = extractvalue V0, idx2
4988 // V0's elements are either all poison or all non-poison (e.g., add_with_overflow).
4989 const WithOverflowInst *II;
4990 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
4991 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
4992 llvm::is_contained(II->args(), ValAssumedPoison)))
4993 return true;
4994 }
4995 return false;
4996}
4997
4998static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
4999 unsigned Depth) {
5000 if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
5001 return true;
5002
5003 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
5004 return true;
5005
5006 const unsigned MaxDepth = 2;
5007 if (Depth >= MaxDepth)
5008 return false;
5009
5010 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
5011 if (I && !canCreatePoison(cast<Operator>(I))) {
5012 return all_of(I->operands(), [=](const Value *Op) {
5013 return impliesPoison(Op, V, Depth + 1);
5014 });
5015 }
5016 return false;
5017}
5018
5019bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
5020 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
5021}
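// An illustrative implication (names hypothetical): for
//   %y = add i32 %x, 1
// if %x is poison then %y is poison as well, since add propagates poison,
// so impliesPoison(%x, %y) holds.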
5022
5023static bool programUndefinedIfUndefOrPoison(const Value *V,
5024 bool PoisonOnly);
5025
5026static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
5027 AssumptionCache *AC,
5028 const Instruction *CtxI,
5029 const DominatorTree *DT,
5030 unsigned Depth, bool PoisonOnly) {
5031 if (Depth >= MaxAnalysisRecursionDepth)
5032 return false;
5033
5034 if (isa<MetadataAsValue>(V))
5035 return false;
5036
5037 if (const auto *A = dyn_cast<Argument>(V)) {
5038 if (A->hasAttribute(Attribute::NoUndef))
5039 return true;
5040 }
5041
5042 if (auto *C = dyn_cast<Constant>(V)) {
5043 if (isa<UndefValue>(C))
5044 return PoisonOnly && !isa<PoisonValue>(C);
5045
5046 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
5047 isa<ConstantPointerNull>(C) || isa<Function>(C))
5048 return true;
5049
5050 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
5051 return (PoisonOnly ? !C->containsPoisonElement()
5052 : !C->containsUndefOrPoisonElement()) &&
5053 !C->containsConstantExpression();
5054 }
5055
5056 // Strip cast operations from a pointer value.
5057 // Note that stripPointerCastsSameRepresentation can strip off getelementptr
5058 // inbounds with zero offset. To guarantee that the result isn't poison, the
5059 // stripped pointer is checked: it has to point into an allocated object or
5060 // be null, to ensure that `inbounds` getelementptrs with a zero offset
5061 // could not produce poison.
5062 // It can also strip off addrspacecasts that do not change the bit
5063 // representation; we consider such an addrspacecast equivalent to a no-op.
5064 auto *StrippedV = V->stripPointerCastsSameRepresentation();
5065 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
5066 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
5067 return true;
5068
5069 auto OpCheck = [&](const Value *V) {
5070 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
5071 PoisonOnly);
5072 };
5073
5074 if (auto *Opr = dyn_cast<Operator>(V)) {
5075 // If the value is a freeze instruction, then it can never
5076 // be undef or poison.
5077 if (isa<FreezeInst>(V))
5078 return true;
5079
5080 if (const auto *CB = dyn_cast<CallBase>(V)) {
5081 if (CB->hasRetAttr(Attribute::NoUndef))
5082 return true;
5083 }
5084
5085 if (const auto *PN = dyn_cast<PHINode>(V)) {
5086 unsigned Num = PN->getNumIncomingValues();
5087 bool IsWellDefined = true;
5088 for (unsigned i = 0; i < Num; ++i) {
5089 auto *TI = PN->getIncomingBlock(i)->getTerminator();
5090 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
5091 DT, Depth + 1, PoisonOnly)) {
5092 IsWellDefined = false;
5093 break;
5094 }
5095 }
5096 if (IsWellDefined)
5097 return true;
5098 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5099 return true;
5100 }
5101
5102 if (auto *I = dyn_cast<LoadInst>(V))
5103 if (I->getMetadata(LLVMContext::MD_noundef))
5104 return true;
5105
5106 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5107 return true;
5108
5109 // CtxI may be null or a cloned instruction.
5110 if (!CtxI || !CtxI->getParent() || !DT)
5111 return false;
5112
5113 auto *DNode = DT->getNode(CtxI->getParent());
5114 if (!DNode)
5115 // Unreachable block
5116 return false;
5117
5118 // If V is used as a branch condition before reaching CtxI, V cannot be
5119 // undef or poison.
5120 // br V, BB1, BB2
5121 // BB1:
5122 // CtxI ; V cannot be undef or poison here
5123 auto *Dominator = DNode->getIDom();
5124 while (Dominator) {
5125 auto *TI = Dominator->getBlock()->getTerminator();
5126
5127 Value *Cond = nullptr;
5128 if (auto BI = dyn_cast<BranchInst>(TI)) {
5129 if (BI->isConditional())
5130 Cond = BI->getCondition();
5131 } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
5132 Cond = SI->getCondition();
5133 }
5134
5135 if (Cond) {
5136 if (Cond == V)
5137 return true;
5138 else if (PoisonOnly && isa<Operator>(Cond)) {
5139 // For poison, we can analyze further
5140 auto *Opr = cast<Operator>(Cond);
5141 if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
5142 return true;
5143 }
5144 }
5145
5146 Dominator = Dominator->getIDom();
5147 }
5148
5149 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
5150 return true;
5151
5152 return false;
5153}
5154
5155bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5156 const Instruction *CtxI,
5157 const DominatorTree *DT,
5158 unsigned Depth) {
5159 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5160}
5161
5162bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5163 const Instruction *CtxI,
5164 const DominatorTree *DT, unsigned Depth) {
5165 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5166}
5167
5168OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5169 const DataLayout &DL,
5170 AssumptionCache *AC,
5171 const Instruction *CxtI,
5172 const DominatorTree *DT) {
5173 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5174 Add, DL, AC, CxtI, DT);
5175}
5176
5177OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5178 const Value *RHS,
5179 const DataLayout &DL,
5180 AssumptionCache *AC,
5181 const Instruction *CxtI,
5182 const DominatorTree *DT) {
5183 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5184}
5185
5186bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5187 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5188 // of time because it's possible for another thread to interfere with it for an
5189 // arbitrary length of time, but programs aren't allowed to rely on that.
5190
5191 // If there is no successor, then execution can't transfer to it.
5192 if (isa<ReturnInst>(I))
5193 return false;
5194 if (isa<UnreachableInst>(I))
5195 return false;
5196
5197 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
5198 // Instruction::willReturn.
5199 //
5200 // FIXME: Move this check into Instruction::willReturn.
5201 if (isa<CatchPadInst>(I)) {
5202 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
5203 default:
5204 // A catchpad may invoke exception object constructors and such, which
5205 // in some languages can be arbitrary code, so be conservative by default.
5206 return false;
5207 case EHPersonality::CoreCLR:
5208 // For CoreCLR, it just involves a type test.
5209 return true;
5210 }
5211 }
5212
5213 // An instruction that returns without throwing must transfer control flow
5214 // to a successor.
5215 return !I->mayThrow() && I->willReturn();
5216}
5217
5218bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5219 // TODO: This is slightly conservative for invoke instructions, since
5220 // exiting via an exception *is* normal control flow for them.
5221 for (const Instruction &I : *BB)
5222 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5223 return false;
5224 return true;
5225}
5226
5227bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5228 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
5229 unsigned ScanLimit) {
5230 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
5231 ScanLimit);
5232}
5233
5234bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5235 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
5236 assert(ScanLimit && "scan limit must be non-zero");
5237 for (const Instruction &I : Range) {
5238 if (isa<DbgInfoIntrinsic>(I))
5239 continue;
5240 if (--ScanLimit == 0)
5241 return false;
5242 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5243 return false;
5244 }
5245 return true;
5246}
5247
5248bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5249 const Loop *L) {
5250 // The loop header is guaranteed to be executed for every iteration.
5251 //
5252 // FIXME: Relax this constraint to cover all basic blocks that are
5253 // guaranteed to be executed at every iteration.
5254 if (I->getParent() != L->getHeader()) return false;
5255
5256 for (const Instruction &LI : *L->getHeader()) {
5257 if (&LI == I) return true;
5258 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5259 }
5260 llvm_unreachable("Instruction not contained in its own parent basic block.");
5261}
5262
5263bool llvm::propagatesPoison(const Operator *I) {
5264 switch (I->getOpcode()) {
5265 case Instruction::Freeze:
5266 case Instruction::Select:
5267 case Instruction::PHI:
5268 case Instruction::Invoke:
5269 return false;
5270 case Instruction::Call:
5271 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5272 switch (II->getIntrinsicID()) {
5273 // TODO: Add more intrinsics.
5274 case Intrinsic::sadd_with_overflow:
5275 case Intrinsic::ssub_with_overflow:
5276 case Intrinsic::smul_with_overflow:
5277 case Intrinsic::uadd_with_overflow:
5278 case Intrinsic::usub_with_overflow:
5279 case Intrinsic::umul_with_overflow:
5280 // If an input is a vector containing a poison element, the
5281 // corresponding lanes of the two output vectors (calculated
5282 // results, overflow bits) are poison.
5283 return true;
5284 case Intrinsic::ctpop:
5285 return true;
5286 }
5287 }
5288 return false;
5289 case Instruction::ICmp:
5290 case Instruction::FCmp:
5291 case Instruction::GetElementPtr:
5292 return true;
5293 default:
5294 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5295 return true;
5296
5297 // Be conservative and return false.
5298 return false;
5299 }
5300}
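// An illustrative contrast: 'add i32 %x, 1' propagates poison from %x, but
// 'select i1 %c, i32 %x, i32 0' does not, because the poison in %x is not
// observed when %c is false.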
5301
5302void llvm::getGuaranteedWellDefinedOps(
5303 const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5304 switch (I->getOpcode()) {
5305 case Instruction::Store:
5306 Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5307 break;
5308
5309 case Instruction::Load:
5310 Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5311 break;
5312
5313 // Since the dereferenceable attribute implies noundef, atomic operations
5314 // also implicitly have noundef pointer operands.
5315 case Instruction::AtomicCmpXchg:
5316 Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5317 break;
5318
5319 case Instruction::AtomicRMW:
5320 Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5321 break;
5322
5323 case Instruction::Call:
5324 case Instruction::Invoke: {
5325 const CallBase *CB = cast<CallBase>(I);
5326 if (CB->isIndirectCall())
5327 Operands.insert(CB->getCalledOperand());
5328 for (unsigned i = 0; i < CB->arg_size(); ++i) {
5329 if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5330 CB->paramHasAttr(i, Attribute::Dereferenceable))
5331 Operands.insert(CB->getArgOperand(i));
5332 }
5333 break;
5334 }
5335 case Instruction::Ret:
5336 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
5337 Operands.insert(I->getOperand(0));
5338 break;
5339 default:
5340 break;
5341 }
5342}
5343
5344void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5345 SmallPtrSetImpl<const Value *> &Operands) {
5346 getGuaranteedWellDefinedOps(I, Operands);
5347 switch (I->getOpcode()) {
5348 // Divisors of these operations are allowed to be partially undef.
5349 case Instruction::UDiv:
5350 case Instruction::SDiv:
5351 case Instruction::URem:
5352 case Instruction::SRem:
5353 Operands.insert(I->getOperand(1));
5354 break;
5355 case Instruction::Switch:
5356 if (BranchOnPoisonAsUB)
5357 Operands.insert(cast<SwitchInst>(I)->getCondition());
5358 break;
5359 case Instruction::Br: {
5360 auto *BR = cast<BranchInst>(I);
5361 if (BranchOnPoisonAsUB && BR->isConditional())
5362 Operands.insert(BR->getCondition());
5363 break;
5364 }
5365 default:
5366 break;
5367 }
5368}
5369
5370bool llvm::mustTriggerUB(const Instruction *I,
5371 const SmallSet<const Value *, 16>& KnownPoison) {
5372 SmallPtrSet<const Value *, 4> NonPoisonOps;
5373 getGuaranteedNonPoisonOps(I, NonPoisonOps);
5374
5375 for (const auto *V : NonPoisonOps)
5376 if (KnownPoison.count(V))
5377 return true;
5378
5379 return false;
5380}
5381
5382static bool programUndefinedIfUndefOrPoison(const Value *V,
5383 bool PoisonOnly) {
5384 // We currently only look for uses of values within the same basic
5385 // block, as that makes it easier to guarantee that the uses will be
5386 // executed given that Inst is executed.
5387 //
5388 // FIXME: Expand this to consider uses beyond the same basic block. To do
5389 // this, look out for the distinction between post-dominance and strong
5390 // post-dominance.
5391 const BasicBlock *BB = nullptr;
5392 BasicBlock::const_iterator Begin;
5393 if (const auto *Inst = dyn_cast<Instruction>(V)) {
5394 BB = Inst->getParent();
5395 Begin = Inst->getIterator();
5396 Begin++;
5397 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5398 BB = &Arg->getParent()->getEntryBlock();
5399 Begin = BB->begin();
5400 } else {
5401 return false;
5402 }
5403
5404 // Limit number of instructions we look at, to avoid scanning through large
5405 // blocks. The current limit is chosen arbitrarily.
5406 unsigned ScanLimit = 32;
5407 BasicBlock::const_iterator End = BB->end();
5408
5409 if (!PoisonOnly) {
5410 // Since undef does not propagate eagerly, be conservative & just check
5411 // whether a value is directly passed to an instruction that must take
5412 // well-defined operands.
5413
5414 for (auto &I : make_range(Begin, End)) {
5415 if (isa<DbgInfoIntrinsic>(I))
5416 continue;
5417 if (--ScanLimit == 0)
5418 break;
5419
5420 SmallPtrSet<const Value *, 4> WellDefinedOps;
5421 getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5422 if (WellDefinedOps.contains(V))
5423 return true;
5424
5425 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5426 break;
5427 }
5428 return false;
5429 }
5430
5431 // Set of values that we have proved will yield poison if V
5432 // does.
5433 SmallSet<const Value *, 16> YieldsPoison;
5434 SmallSet<const BasicBlock *, 4> Visited;
5435
5436 YieldsPoison.insert(V);
5437 auto Propagate = [&](const User *User) {
5438 if (propagatesPoison(cast<Operator>(User)))
5439 YieldsPoison.insert(User);
5440 };
5441 for_each(V->users(), Propagate);
5442 Visited.insert(BB);
5443
5444 while (true) {
5445 for (auto &I : make_range(Begin, End)) {
5446 if (isa<DbgInfoIntrinsic>(I))
5447 continue;
5448 if (--ScanLimit == 0)
5449 return false;
5450 if (mustTriggerUB(&I, YieldsPoison))
5451 return true;
5452 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5453 return false;
5454
5455 // Mark poison that propagates from I through uses of I.
5456 if (YieldsPoison.count(&I))
5457 for_each(I.users(), Propagate);
5458 }
5459
5460 BB = BB->getSingleSuccessor();
5461 if (!BB || !Visited.insert(BB).second)
5462 break;
5463
5464 Begin = BB->getFirstNonPHI()->getIterator();
5465 End = BB->end();
5466 }
5467 return false;
5468}
5469
5470bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5471 return ::programUndefinedIfUndefOrPoison(Inst, false);
5472}
5473
5474bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5475 return ::programUndefinedIfUndefOrPoison(Inst, true);
5476}
5477
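// Return true if V is known not to be NaN: either the fast-math flags
// guarantee no NaNs, or V is an FP constant (scalar, constant-data vector,
// or zero aggregate) with no NaN elements.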
5478static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5479 if (FMF.noNaNs())
5480 return true;
5481
5482 if (auto *C = dyn_cast<ConstantFP>(V))
5483 return !C->isNaN();
5484
5485 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5486 if (!C->getElementType()->isFloatingPointTy())
5487 return false;
5488 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5489 if (C->getElementAsAPFloat(I).isNaN())
5490 return false;
5491 }
5492 return true;
5493 }
5494
5495 if (isa<ConstantAggregateZero>(V))
5496 return true;
5497
5498 return false;
5499}
5500
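// Return true if V is an FP constant (scalar or constant-data vector) with
// no zero elements; conservatively false otherwise. This is a local helper,
// distinct from the integer isKnownNonZero defined earlier in this file.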
5501static bool isKnownNonZero(const Value *V) {
5502 if (auto *C = dyn_cast<ConstantFP>(V))
5503 return !C->isZero();
5504
5505 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5506 if (!C->getElementType()->isFloatingPointTy())
5507 return false;
5508 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5509 if (C->getElementAsAPFloat(I).isZero())
5510 return false;
5511 }
5512 return true;
5513 }
5514
5515 return false;
5516}
5517
5518/// Match the clamp pattern for float types, ignoring NaNs and signed zeros.
5519/// Given a non-min/max outer cmp/select from the clamp pattern, this
5520/// function recognizes whether it can be substituted by a "canonical"
5521/// min/max pattern.
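/// Hypothetical example: with Pred = FCMP_OLT, the select
///   (%x < 1.0) ? 1.0 : fmin(%x, 2.0)
/// is recognized as SPF_FMAXNUM, i.e. fmax(1.0, fmin(%x, 2.0)).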
5522static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5523 Value *CmpLHS, Value *CmpRHS,
5524 Value *TrueVal, Value *FalseVal,
5525 Value *&LHS, Value *&RHS) {
5526 // Try to match
5527 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5528 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5529 // and return description of the outer Max/Min.
5530
5531 // First, check whether the select operands are in inverted order:
5532 if (CmpRHS == FalseVal) {
5533 std::swap(TrueVal, FalseVal);
5534 Pred = CmpInst::getInversePredicate(Pred);
5535 }
5536
5537 // Assume success now. If there's no match, callers should not use these anyway.
5538 LHS = TrueVal;
5539 RHS = FalseVal;
5540
5541 const APFloat *FC1;
5542 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5543 return {SPF_UNKNOWN, SPNB_NA, false};
5544
5545 const APFloat *FC2;
5546 switch (Pred) {
5547 case CmpInst::FCMP_OLT:
5548 case CmpInst::FCMP_OLE:
5549 case CmpInst::FCMP_ULT:
5550 case CmpInst::FCMP_ULE:
5551 if (match(FalseVal,
5552 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5553 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5554 *FC1 < *FC2)
5555 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5556 break;
5557 case CmpInst::FCMP_OGT:
5558 case CmpInst::FCMP_OGE:
5559 case CmpInst::FCMP_UGT:
5560 case CmpInst::FCMP_UGE:
5561 if (match(FalseVal,
5562 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5563 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5564 *FC1 > *FC2)
5565 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5566 break;
5567 default:
5568 break;
5569 }
5570
5571 return {SPF_UNKNOWN, SPNB_NA, false};
5572}
5573
5574/// Recognize variations of:
5575/// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
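/// Hypothetical example: (%x <s 100) ? 100 : smin(%x, 255) is recognized as
/// SPF_SMAX, i.e. smax(smin(%x, 255), 100), since 100 <s 255.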
5576static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5577 Value *CmpLHS, Value *CmpRHS,
5578 Value *TrueVal, Value *FalseVal) {
5579 // Swap the select operands and predicate to match the patterns below.
5580 if (CmpRHS != TrueVal) {
5581 Pred = ICmpInst::getSwappedPredicate(Pred);
5582 std::swap(TrueVal, FalseVal);
5583 }
5584 const APInt *C1;
5585 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5586 const APInt *C2;
5587 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5588 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5589 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5590 return {SPF_SMAX, SPNB_NA, false};
5591
5592 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5593 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5594 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5595 return {SPF_SMIN, SPNB_NA, false};
5596
5597 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5598 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5599 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5600 return {SPF_UMAX, SPNB_NA, false};
5601
5602 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5603 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5604 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5605 return {SPF_UMIN, SPNB_NA, false};
5606 }
5607 return {SPF_UNKNOWN, SPNB_NA, false};
5608}
5609
5610/// Recognize variations of:
5611/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
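/// The match below requires the two inner min/max ops to share an operand
/// and to pair the remaining operands with the compare (directly or via
/// 'not'), e.g. a < c ? smin(a,b) : smin(c,b) ==> smin(smin(a,b), smin(c,b)).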
5612static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5613 Value *CmpLHS, Value *CmpRHS,
5614 Value *TVal, Value *FVal,
5615 unsigned Depth) {
5616 // TODO: Allow FP min/max with nnan/nsz.
5617 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5618
5619 Value *A = nullptr, *B = nullptr;
5620 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5621 if (!SelectPatternResult::isMinOrMax(L.Flavor))
5622 return {SPF_UNKNOWN, SPNB_NA, false};
5623
5624 Value *C = nullptr, *D = nullptr;
5625 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5626 if (L.Flavor != R.Flavor)
5627 return {SPF_UNKNOWN, SPNB_NA, false};
5628
5629 // We have something like: x Pred y ? min(a, b) : min(c, d).
5630 // Try to match the compare to the min/max operations of the select operands.
5631 // First, make sure we have the right compare predicate.
5632 switch (L.Flavor) {
5633 case SPF_SMIN:
5634 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5635 Pred = ICmpInst::getSwappedPredicate(Pred);
5636 std::swap(CmpLHS, CmpRHS);
5637 }
5638 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5639 break;
5640 return {SPF_UNKNOWN, SPNB_NA, false};
5641 case SPF_SMAX:
5642 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5643 Pred = ICmpInst::getSwappedPredicate(Pred);
5644 std::swap(CmpLHS, CmpRHS);
5645 }
5646 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5647 break;
5648 return {SPF_UNKNOWN, SPNB_NA, false};
5649 case SPF_UMIN:
5650 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5651 Pred = ICmpInst::getSwappedPredicate(Pred);
5652 std::swap(CmpLHS, CmpRHS);
5653 }
5654 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5655 break;
5656 return {SPF_UNKNOWN, SPNB_NA, false};
5657 case SPF_UMAX:
5658 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5659 Pred = ICmpInst::getSwappedPredicate(Pred);
5660 std::swap(CmpLHS, CmpRHS);
5661 }
5662 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5663 break;
5664 return {SPF_UNKNOWN, SPNB_NA, false};
5665 default:
5666 return {SPF_UNKNOWN, SPNB_NA, false};
5667 }
5668
5669 // If there is a common operand in the already matched min/max and the other
5670 // min/max operands match the compare operands (either directly or inverted),
5671 // then this is min/max of the same flavor.
5672
5673 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5674 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5675 if (D == B) {
5676 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5677 match(A, m_Not(m_Specific(CmpRHS)))))
5678 return {L.Flavor, SPNB_NA, false};
5679 }
5680 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5681 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5682 if (C == B) {
5683 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5684 match(A, m_Not(m_Specific(CmpRHS)))))
5685 return {L.Flavor, SPNB_NA, false};
5686 }
5687 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5688 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5689 if (D == A) {
5690 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5691 match(B, m_Not(m_Specific(CmpRHS)))))
5692 return {L.Flavor, SPNB_NA, false};
5693 }
5694 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5695 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5696 if (C == A) {
5697 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5698 match(B, m_Not(m_Specific(CmpRHS)))))
5699 return {L.Flavor, SPNB_NA, false};
5700 }
5701
5702 return {SPF_UNKNOWN, SPNB_NA, false};
5703}
5704
5705/// If the input value is the result of a 'not' op, constant integer, or vector
5706/// splat of a constant integer, return the bitwise-not source value.
5707/// TODO: This could be extended to handle non-splat vector integer constants.
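/// E.g. for %y = xor i32 %x, -1, getNotValue(%y) returns %x; for the
/// constant i32 5 it returns i32 -6 (~5).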
5708static Value *getNotValue(Value *V) {
5709 Value *NotV;
5710 if (match(V, m_Not(m_Value(NotV))))
5711 return NotV;
5712
5713 const APInt *C;
5714 if (match(V, m_APInt(C)))
5715 return ConstantInt::get(V->getType(), ~(*C));
5716
5717 return nullptr;
5718}
5719
5720/// Match non-obvious integer minimum and maximum sequences.
5721static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5722 Value *CmpLHS, Value *CmpRHS,
5723 Value *TrueVal, Value *FalseVal,
5724 Value *&LHS, Value *&RHS,
5725 unsigned Depth) {
5726 // Assume success. If there's no match, callers should not use these anyway.
5727 LHS = TrueVal;
5728 RHS = FalseVal;
5729
5730 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5731 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5732 return SPR;
5733
5734 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5735 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5736 return SPR;
5737
5738 // Look through 'not' ops to find disguised min/max.
5739 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5740 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5741 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5742 switch (Pred) {
5743 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5744 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5745 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5746 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5747 default: break;
5748 }
5749 }
5750
5751 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5752 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5753 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5754 switch (Pred) {
5755 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5756 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5757 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5758 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5759 default: break;
5760 }
5761 }