File: llvm/lib/Analysis/ValueTracking.cpp
Warning: line 1399, column 13: Value stored to 'L' is never read
1 | //===- ValueTracking.cpp - Walk computations to compute properties --------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains routines that help analyze properties that chains of |
10 | // computations have. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "llvm/Analysis/ValueTracking.h" |
15 | #include "llvm/ADT/APFloat.h" |
16 | #include "llvm/ADT/APInt.h" |
17 | #include "llvm/ADT/ArrayRef.h" |
18 | #include "llvm/ADT/None.h" |
19 | #include "llvm/ADT/Optional.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/SmallPtrSet.h" |
22 | #include "llvm/ADT/SmallSet.h" |
23 | #include "llvm/ADT/SmallVector.h" |
24 | #include "llvm/ADT/StringRef.h" |
25 | #include "llvm/ADT/iterator_range.h" |
26 | #include "llvm/Analysis/AliasAnalysis.h" |
27 | #include "llvm/Analysis/AssumeBundleQueries.h" |
28 | #include "llvm/Analysis/AssumptionCache.h" |
29 | #include "llvm/Analysis/GuardUtils.h" |
30 | #include "llvm/Analysis/InstructionSimplify.h" |
31 | #include "llvm/Analysis/Loads.h" |
32 | #include "llvm/Analysis/LoopInfo.h" |
33 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" |
34 | #include "llvm/Analysis/TargetLibraryInfo.h" |
35 | #include "llvm/IR/Argument.h" |
36 | #include "llvm/IR/Attributes.h" |
37 | #include "llvm/IR/BasicBlock.h" |
38 | #include "llvm/IR/Constant.h" |
39 | #include "llvm/IR/ConstantRange.h" |
40 | #include "llvm/IR/Constants.h" |
41 | #include "llvm/IR/DerivedTypes.h" |
42 | #include "llvm/IR/DiagnosticInfo.h" |
43 | #include "llvm/IR/Dominators.h" |
44 | #include "llvm/IR/Function.h" |
45 | #include "llvm/IR/GetElementPtrTypeIterator.h" |
46 | #include "llvm/IR/GlobalAlias.h" |
47 | #include "llvm/IR/GlobalValue.h" |
48 | #include "llvm/IR/GlobalVariable.h" |
49 | #include "llvm/IR/InstrTypes.h" |
50 | #include "llvm/IR/Instruction.h" |
51 | #include "llvm/IR/Instructions.h" |
52 | #include "llvm/IR/IntrinsicInst.h" |
53 | #include "llvm/IR/Intrinsics.h" |
54 | #include "llvm/IR/IntrinsicsAArch64.h" |
55 | #include "llvm/IR/IntrinsicsX86.h" |
56 | #include "llvm/IR/LLVMContext.h" |
57 | #include "llvm/IR/Metadata.h" |
58 | #include "llvm/IR/Module.h" |
59 | #include "llvm/IR/Operator.h" |
60 | #include "llvm/IR/PatternMatch.h" |
61 | #include "llvm/IR/Type.h" |
62 | #include "llvm/IR/User.h" |
63 | #include "llvm/IR/Value.h" |
64 | #include "llvm/Support/Casting.h" |
65 | #include "llvm/Support/CommandLine.h" |
66 | #include "llvm/Support/Compiler.h" |
67 | #include "llvm/Support/ErrorHandling.h" |
68 | #include "llvm/Support/KnownBits.h" |
69 | #include "llvm/Support/MathExtras.h" |
70 | #include <algorithm> |
71 | #include <array> |
72 | #include <cassert> |
73 | #include <cstdint> |
74 | #include <iterator> |
75 | #include <utility> |
76 | |
77 | using namespace llvm; |
78 | using namespace llvm::PatternMatch; |
79 | |
80 | // Controls the number of uses of the value searched for possible |
81 | // dominating comparisons. |
82 | static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses", |
83 | cl::Hidden, cl::init(20)); |
84 | |
85 | /// Returns the bitwidth of the given scalar or pointer type. For vector types, |
86 | /// returns the element type's bitwidth. |
87 | static unsigned getBitWidth(Type *Ty, const DataLayout &DL) { |
88 | if (unsigned BitWidth = Ty->getScalarSizeInBits()) |
89 | return BitWidth; |
90 | |
91 | return DL.getPointerTypeSizeInBits(Ty); |
92 | } |
93 | |
94 | namespace { |
95 | |
96 | // Simplifying using an assume can only be done in a particular control-flow |
97 | // context (the context instruction provides that context). If an assume and |
98 | // the context instruction are not in the same block then the DT helps in |
99 | // figuring out if we can use it. |
100 | struct Query { |
101 | const DataLayout &DL; |
102 | AssumptionCache *AC; |
103 | const Instruction *CxtI; |
104 | const DominatorTree *DT; |
105 | |
106 | // Unlike the other analyses, this may be a nullptr because not all clients |
107 | // provide it currently. |
108 | OptimizationRemarkEmitter *ORE; |
109 | |
110 | /// Set of assumptions that should be excluded from further queries. |
111 | /// This is because of the potential for mutual recursion to cause |
112 | /// computeKnownBits to repeatedly visit the same assume intrinsic. The |
113 | /// classic case of this is assume(x = y), which will attempt to determine |
114 | /// bits in x from bits in y, which will attempt to determine bits in y from |
115 | /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call |
116 | /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo |
117 | /// (all of which can call computeKnownBits), and so on. |
118 | std::array<const Value *, MaxAnalysisRecursionDepth> Excluded; |
119 | |
120 | /// If true, it is safe to use metadata during simplification. |
121 | InstrInfoQuery IIQ; |
122 | |
123 | unsigned NumExcluded = 0; |
124 | |
125 | Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, |
126 | const DominatorTree *DT, bool UseInstrInfo, |
127 | OptimizationRemarkEmitter *ORE = nullptr) |
128 | : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {} |
129 | |
130 | Query(const Query &Q, const Value *NewExcl) |
131 | : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ), |
132 | NumExcluded(Q.NumExcluded) { |
133 | Excluded = Q.Excluded; |
134 | Excluded[NumExcluded++] = NewExcl; |
135 | assert(NumExcluded <= Excluded.size()); |
136 | } |
137 | |
138 | bool isExcluded(const Value *Value) const { |
139 | if (NumExcluded == 0) |
140 | return false; |
141 | auto End = Excluded.begin() + NumExcluded; |
142 | return std::find(Excluded.begin(), End, Value) != End; |
143 | } |
144 | }; |
145 | |
146 | } // end anonymous namespace |
147 | |
148 | // Given the provided Value and, potentially, a context instruction, return |
149 | // the preferred context instruction (if any). |
150 | static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) { |
151 | // If we've been provided with a context instruction, then use that (provided |
152 | // it has been inserted). |
153 | if (CxtI && CxtI->getParent()) |
154 | return CxtI; |
155 | |
156 | // If the value is really an already-inserted instruction, then use that. |
157 | CxtI = dyn_cast<Instruction>(V); |
158 | if (CxtI && CxtI->getParent()) |
159 | return CxtI; |
160 | |
161 | return nullptr; |
162 | } |
163 | |
164 | static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) { |
165 | // If we've been provided with a context instruction, then use that (provided |
166 | // it has been inserted). |
167 | if (CxtI && CxtI->getParent()) |
168 | return CxtI; |
169 | |
170 | // If the value is really an already-inserted instruction, then use that. |
171 | CxtI = dyn_cast<Instruction>(V1); |
172 | if (CxtI && CxtI->getParent()) |
173 | return CxtI; |
174 | |
175 | CxtI = dyn_cast<Instruction>(V2); |
176 | if (CxtI && CxtI->getParent()) |
177 | return CxtI; |
178 | |
179 | return nullptr; |
180 | } |
181 | |
182 | static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf, |
183 | const APInt &DemandedElts, |
184 | APInt &DemandedLHS, APInt &DemandedRHS) { |
185 | // The length of scalable vectors is unknown at compile time, thus we |
186 | // cannot check their values |
187 | if (isa<ScalableVectorType>(Shuf->getType())) |
188 | return false; |
189 | |
190 | int NumElts = |
191 | cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements(); |
192 | int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements(); |
193 | DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts); |
194 | if (DemandedElts.isNullValue()) |
195 | return true; |
196 | // Simple case of a shuffle with zeroinitializer. |
197 | if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) { |
198 | DemandedLHS.setBit(0); |
199 | return true; |
200 | } |
201 | for (int i = 0; i != NumMaskElts; ++i) { |
202 | if (!DemandedElts[i]) |
203 | continue; |
204 | int M = Shuf->getMaskValue(i); |
205 | assert(M < (NumElts * 2) && "Invalid shuffle mask constant"); |
206 | |
207 | // For undef elements, we don't know anything about the common state of |
208 | // the shuffle result. |
209 | if (M == -1) |
210 | return false; |
211 | if (M < NumElts) |
212 | DemandedLHS.setBit(M % NumElts); |
213 | else |
214 | DemandedRHS.setBit(M % NumElts); |
215 | } |
216 | |
217 | return true; |
218 | } |
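
The index arithmetic above is easy to get wrong, so here is a minimal standalone sketch of the same mapping (plain C++ with 32-bit masks standing in for APInt; the names are hypothetical, not LLVM API):

#include <cassert>
#include <cstdint>
#include <vector>

// Toy model of getShuffleDemandedElts for sources of up to 32 elements:
// DemandedElts selects result elements, Mask[i] picks element Mask[i] from
// the concatenated sources, and -1 models an undef element (which makes us
// give up, as in the code above).
bool shuffleDemandedElts(const std::vector<int> &Mask, unsigned NumSrcElts,
                         uint32_t DemandedElts, uint32_t &DemandedLHS,
                         uint32_t &DemandedRHS) {
  DemandedLHS = DemandedRHS = 0;
  for (unsigned I = 0; I != Mask.size(); ++I) {
    if (!(DemandedElts & (1u << I)))
      continue; // result element I is not demanded
    int M = Mask[I];
    if (M < 0)
      return false; // undef mask element
    assert((unsigned)M < 2 * NumSrcElts && "invalid shuffle mask constant");
    if ((unsigned)M < NumSrcElts)
      DemandedLHS |= 1u << M;
    else
      DemandedRHS |= 1u << (M - NumSrcElts);
  }
  return true;
}

For example, with Mask = {0, 5, 2, 7} over 4-element sources and DemandedElts = 0b1010, only RHS elements 1 and 3 end up demanded.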
219 | |
220 | static void computeKnownBits(const Value *V, const APInt &DemandedElts, |
221 | KnownBits &Known, unsigned Depth, const Query &Q); |
222 | |
223 | static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, |
224 | const Query &Q) { |
225 | // FIXME: We currently have no way to represent the DemandedElts of a scalable |
226 | // vector |
227 | if (isa<ScalableVectorType>(V->getType())) { |
228 | Known.resetAll(); |
229 | return; |
230 | } |
231 | |
232 | auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); |
233 | APInt DemandedElts = |
234 | FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1); |
235 | computeKnownBits(V, DemandedElts, Known, Depth, Q); |
236 | } |
237 | |
238 | void llvm::computeKnownBits(const Value *V, KnownBits &Known, |
239 | const DataLayout &DL, unsigned Depth, |
240 | AssumptionCache *AC, const Instruction *CxtI, |
241 | const DominatorTree *DT, |
242 | OptimizationRemarkEmitter *ORE, bool UseInstrInfo) { |
243 | ::computeKnownBits(V, Known, Depth, |
244 | Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE)); |
245 | } |
246 | |
247 | void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts, |
248 | KnownBits &Known, const DataLayout &DL, |
249 | unsigned Depth, AssumptionCache *AC, |
250 | const Instruction *CxtI, const DominatorTree *DT, |
251 | OptimizationRemarkEmitter *ORE, bool UseInstrInfo) { |
252 | ::computeKnownBits(V, DemandedElts, Known, Depth, |
253 | Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE)); |
254 | } |
255 | |
256 | static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, |
257 | unsigned Depth, const Query &Q); |
258 | |
259 | static KnownBits computeKnownBits(const Value *V, unsigned Depth, |
260 | const Query &Q); |
261 | |
262 | KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL, |
263 | unsigned Depth, AssumptionCache *AC, |
264 | const Instruction *CxtI, |
265 | const DominatorTree *DT, |
266 | OptimizationRemarkEmitter *ORE, |
267 | bool UseInstrInfo) { |
268 | return ::computeKnownBits( |
269 | V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE)); |
270 | } |
271 | |
272 | KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts, |
273 | const DataLayout &DL, unsigned Depth, |
274 | AssumptionCache *AC, const Instruction *CxtI, |
275 | const DominatorTree *DT, |
276 | OptimizationRemarkEmitter *ORE, |
277 | bool UseInstrInfo) { |
278 | return ::computeKnownBits( |
279 | V, DemandedElts, Depth, |
280 | Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE)); |
281 | } |
282 | |
283 | bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS, |
284 | const DataLayout &DL, AssumptionCache *AC, |
285 | const Instruction *CxtI, const DominatorTree *DT, |
286 | bool UseInstrInfo) { |
287 | assert(LHS->getType() == RHS->getType() && |
288 | "LHS and RHS should have the same type"); |
289 | assert(LHS->getType()->isIntOrIntVectorTy() && |
290 | "LHS and RHS should be integers"); |
291 | // Look for an inverted mask: (X & ~M) op (Y & M). |
292 | Value *M; |
293 | if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) && |
294 | match(RHS, m_c_And(m_Specific(M), m_Value()))) |
295 | return true; |
296 | if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) && |
297 | match(LHS, m_c_And(m_Specific(M), m_Value()))) |
298 | return true; |
299 | IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType()); |
300 | KnownBits LHSKnown(IT->getBitWidth()); |
301 | KnownBits RHSKnown(IT->getBitWidth()); |
302 | computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo); |
303 | computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo); |
304 | return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue(); |
305 | } |
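
The early-exit pattern match above rests on a purely bitwise fact: (X & ~M) and (Y & M) select disjoint bit positions. A throwaway exhaustive check over 8-bit values (illustrative only, not part of LLVM) confirms it:

#include <cstdio>

// Exhaustive 8-bit check of the inverted-mask idiom matched above: no pair
// of values of the form (X & ~M) and (Y & M) can ever share a set bit.
int main() {
  for (unsigned M = 0; M < 256; ++M)
    for (unsigned X = 0; X < 256; ++X)
      for (unsigned Y = 0; Y < 256; ++Y)
        if ((X & ~M & 0xFFu) & (Y & M)) {
          std::printf("counterexample found\n");
          return 1;
        }
  std::printf("ok: (X & ~M) and (Y & M) never share a bit\n");
  return 0;
}

This is why the function can answer true for that pattern without computing known bits at all.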
306 | |
307 | bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) { |
308 | for (const User *U : CxtI->users()) { |
309 | if (const ICmpInst *IC = dyn_cast<ICmpInst>(U)) |
310 | if (IC->isEquality()) |
311 | if (Constant *C = dyn_cast<Constant>(IC->getOperand(1))) |
312 | if (C->isNullValue()) |
313 | continue; |
314 | return false; |
315 | } |
316 | return true; |
317 | } |
318 | |
319 | static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, |
320 | const Query &Q); |
321 | |
322 | bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, |
323 | bool OrZero, unsigned Depth, |
324 | AssumptionCache *AC, const Instruction *CxtI, |
325 | const DominatorTree *DT, bool UseInstrInfo) { |
326 | return ::isKnownToBeAPowerOfTwo( |
327 | V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo)); |
328 | } |
329 | |
330 | static bool isKnownNonZero(const Value *V, const APInt &DemandedElts, |
331 | unsigned Depth, const Query &Q); |
332 | |
333 | static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q); |
334 | |
335 | bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth, |
336 | AssumptionCache *AC, const Instruction *CxtI, |
337 | const DominatorTree *DT, bool UseInstrInfo) { |
338 | return ::isKnownNonZero(V, Depth, |
339 | Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo)); |
340 | } |
341 | |
342 | bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL, |
343 | unsigned Depth, AssumptionCache *AC, |
344 | const Instruction *CxtI, const DominatorTree *DT, |
345 | bool UseInstrInfo) { |
346 | KnownBits Known = |
347 | computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo); |
348 | return Known.isNonNegative(); |
349 | } |
350 | |
351 | bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth, |
352 | AssumptionCache *AC, const Instruction *CxtI, |
353 | const DominatorTree *DT, bool UseInstrInfo) { |
354 | if (auto *CI = dyn_cast<ConstantInt>(V)) |
355 | return CI->getValue().isStrictlyPositive(); |
356 | |
357 | // TODO: We're doing two recursive queries here. We should factor this such |
358 | // that only a single query is needed. |
359 | return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) && |
360 | isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo); |
361 | } |
362 | |
363 | bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth, |
364 | AssumptionCache *AC, const Instruction *CxtI, |
365 | const DominatorTree *DT, bool UseInstrInfo) { |
366 | KnownBits Known = |
367 | computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo); |
368 | return Known.isNegative(); |
369 | } |
370 | |
371 | static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth, |
372 | const Query &Q); |
373 | |
374 | bool llvm::isKnownNonEqual(const Value *V1, const Value *V2, |
375 | const DataLayout &DL, AssumptionCache *AC, |
376 | const Instruction *CxtI, const DominatorTree *DT, |
377 | bool UseInstrInfo) { |
378 | return ::isKnownNonEqual(V1, V2, 0, |
379 | Query(DL, AC, safeCxtI(V2, V1, CxtI), DT, |
380 | UseInstrInfo, /*ORE=*/nullptr)); |
381 | } |
382 | |
383 | static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, |
384 | const Query &Q); |
385 | |
386 | bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask, |
387 | const DataLayout &DL, unsigned Depth, |
388 | AssumptionCache *AC, const Instruction *CxtI, |
389 | const DominatorTree *DT, bool UseInstrInfo) { |
390 | return ::MaskedValueIsZero( |
391 | V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo)); |
392 | } |
393 | |
394 | static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, |
395 | unsigned Depth, const Query &Q); |
396 | |
397 | static unsigned ComputeNumSignBits(const Value *V, unsigned Depth, |
398 | const Query &Q) { |
399 | // FIXME: We currently have no way to represent the DemandedElts of a scalable |
400 | // vector |
401 | if (isa<ScalableVectorType>(V->getType())) |
402 | return 1; |
403 | |
404 | auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); |
405 | APInt DemandedElts = |
406 | FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1); |
407 | return ComputeNumSignBits(V, DemandedElts, Depth, Q); |
408 | } |
409 | |
410 | unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL, |
411 | unsigned Depth, AssumptionCache *AC, |
412 | const Instruction *CxtI, |
413 | const DominatorTree *DT, bool UseInstrInfo) { |
414 | return ::ComputeNumSignBits( |
415 | V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo)); |
416 | } |
417 | |
418 | static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, |
419 | bool NSW, const APInt &DemandedElts, |
420 | KnownBits &KnownOut, KnownBits &Known2, |
421 | unsigned Depth, const Query &Q) { |
422 | computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q); |
423 | |
424 | // If one operand is unknown and we have no nowrap information, |
425 | // the result will be unknown independently of the second operand. |
426 | if (KnownOut.isUnknown() && !NSW) |
427 | return; |
428 | |
429 | computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q); |
430 | KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut); |
431 | } |
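
As a hedged reference for what "computeForAddSub" must conservatively return, here is a hypothetical brute-force semantics for an 8-bit add (LLVM's real KnownBits::computeForAddSub uses a closed-form carry analysis instead of enumeration):

#include <cstdint>
#include <cstdio>

struct Known8 {
  uint8_t Zero, One; // a set bit means that position is known 0 / known 1
};

// Enumerate every operand pair consistent with the known bits and intersect
// the bits of all possible sums.
Known8 addKnown(Known8 L, Known8 R) {
  uint8_t AllZero = 0xFF, AllOne = 0xFF;
  for (unsigned A = 0; A < 256; ++A) {
    if ((A & L.Zero) || (~A & L.One))
      continue; // A contradicts L's known bits
    for (unsigned B = 0; B < 256; ++B) {
      if ((B & R.Zero) || (~B & R.One))
        continue;
      uint8_t S = (uint8_t)(A + B);
      AllZero &= (uint8_t)~S; // survives only if 0 in every possible sum
      AllOne &= S;            // survives only if 1 in every possible sum
    }
  }
  return {AllZero, AllOne};
}

int main() {
  Known8 L = {0x0F, 0x00};           // a multiple of 16
  Known8 R = {(uint8_t)~0x03, 0x03}; // exactly the constant 3
  Known8 S = addKnown(L, R);
  std::printf("Zero=0x%02x One=0x%02x\n", (unsigned)S.Zero,
              (unsigned)S.One); // 0x0c and 0x03: low bits of the sum are 0b0011
}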
432 | |
433 | static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, |
434 | const APInt &DemandedElts, KnownBits &Known, |
435 | KnownBits &Known2, unsigned Depth, |
436 | const Query &Q) { |
437 | computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q); |
438 | computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q); |
439 | |
440 | bool isKnownNegative = false; |
441 | bool isKnownNonNegative = false; |
442 | // If the multiplication is known not to overflow, compute the sign bit. |
443 | if (NSW) { |
444 | if (Op0 == Op1) { |
445 | // The product of a number with itself is non-negative. |
446 | isKnownNonNegative = true; |
447 | } else { |
448 | bool isKnownNonNegativeOp1 = Known.isNonNegative(); |
449 | bool isKnownNonNegativeOp0 = Known2.isNonNegative(); |
450 | bool isKnownNegativeOp1 = Known.isNegative(); |
451 | bool isKnownNegativeOp0 = Known2.isNegative(); |
452 | // The product of two numbers with the same sign is non-negative. |
453 | isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) || |
454 | (isKnownNonNegativeOp1 && isKnownNonNegativeOp0); |
455 | // The product of a negative number and a non-negative number is either |
456 | // negative or zero. |
457 | if (!isKnownNonNegative) |
458 | isKnownNegative = |
459 | (isKnownNegativeOp1 && isKnownNonNegativeOp0 && |
460 | Known2.isNonZero()) || |
461 | (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero()); |
462 | } |
463 | } |
464 | |
465 | Known = KnownBits::computeForMul(Known, Known2); |
466 | |
467 | // Only make use of no-wrap flags if we failed to compute the sign bit |
468 | // directly. This matters if the multiplication always overflows, in |
469 | // which case we prefer to follow the result of the direct computation, |
470 | // though as the program is invoking undefined behaviour we can choose |
471 | // whatever we like here. |
472 | if (isKnownNonNegative && !Known.isNegative()) |
473 | Known.makeNonNegative(); |
474 | else if (isKnownNegative && !Known.isNonNegative()) |
475 | Known.makeNegative(); |
476 | } |
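
A small standalone example (not LLVM code) of why the "product of a number with itself is non-negative" rule above is guarded by NSW: with wrapping arithmetic a square can come out negative, whereas NSW makes such signed overflow poison, so under NSW the sign-bit conclusion is sound.

#include <cstdint>
#include <cstdio>

int main() {
  int8_t X = 13;
  int8_t Sq = (int8_t)(X * X); // 169 wraps to -87 in 8 bits
  std::printf("13 * 13 as int8_t = %d\n", Sq);
  return 0;
}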
477 | |
478 | void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges, |
479 | KnownBits &Known) { |
480 | unsigned BitWidth = Known.getBitWidth(); |
481 | unsigned NumRanges = Ranges.getNumOperands() / 2; |
482 | assert(NumRanges >= 1); |
483 | |
484 | Known.Zero.setAllBits(); |
485 | Known.One.setAllBits(); |
486 | |
487 | for (unsigned i = 0; i < NumRanges; ++i) { |
488 | ConstantInt *Lower = |
489 | mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0)); |
490 | ConstantInt *Upper = |
491 | mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1)); |
492 | ConstantRange Range(Lower->getValue(), Upper->getValue()); |
493 | |
494 | // The first CommonPrefixBits of all values in Range are equal. |
495 | unsigned CommonPrefixBits = |
496 | (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros(); |
497 | APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits); |
498 | APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth); |
499 | Known.One &= UnsignedMax & Mask; |
500 | Known.Zero &= ~UnsignedMax & Mask; |
501 | } |
502 | } |
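
One concrete 8-bit instance of the common-prefix observation above (illustrative only; the real metadata uses half-open ranges and APInt): every value in a range agrees with both bounds on the leading bits where the unsigned min and max agree.

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t Min = 0x68, Max = 0x6F; // all of 0b0110_1000 .. 0b0110_1111
  uint8_t Diff = Min ^ Max;
  unsigned CommonPrefixBits = 0;
  for (int B = 7; B >= 0 && !((Diff >> B) & 1); --B)
    ++CommonPrefixBits; // leading zeros of Diff within 8 bits
  uint8_t Mask = (uint8_t)(0xFF << (8 - CommonPrefixBits));
  std::printf("known one: 0x%02x, known zero: 0x%02x\n",
              (unsigned)(Max & Mask),
              (unsigned)((uint8_t)~Max & Mask)); // 0x68 and 0x90
}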
503 | |
504 | static bool isEphemeralValueOf(const Instruction *I, const Value *E) { |
505 | SmallVector<const Value *, 16> WorkSet(1, I); |
506 | SmallPtrSet<const Value *, 32> Visited; |
507 | SmallPtrSet<const Value *, 16> EphValues; |
508 | |
509 | // The instruction defining an assumption's condition itself is always |
510 | // considered ephemeral to that assumption (even if it has other |
511 | // non-ephemeral users). See r246696's test case for an example. |
512 | if (is_contained(I->operands(), E)) |
513 | return true; |
514 | |
515 | while (!WorkSet.empty()) { |
516 | const Value *V = WorkSet.pop_back_val(); |
517 | if (!Visited.insert(V).second) |
518 | continue; |
519 | |
520 | // If all uses of this value are ephemeral, then so is this value. |
521 | if (llvm::all_of(V->users(), [&](const User *U) { |
522 | return EphValues.count(U); |
523 | })) { |
524 | if (V == E) |
525 | return true; |
526 | |
527 | if (V == I || isSafeToSpeculativelyExecute(V)) { |
528 | EphValues.insert(V); |
529 | if (const User *U = dyn_cast<User>(V)) |
530 | append_range(WorkSet, U->operands()); |
531 | } |
532 | } |
533 | } |
534 | |
535 | return false; |
536 | } |
537 | |
538 | // Is this an intrinsic that cannot be speculated but also cannot trap? |
539 | bool llvm::isAssumeLikeIntrinsic(const Instruction *I) { |
540 | if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I)) |
541 | return CI->isAssumeLikeIntrinsic(); |
542 | |
543 | return false; |
544 | } |
545 | |
546 | bool llvm::isValidAssumeForContext(const Instruction *Inv, |
547 | const Instruction *CxtI, |
548 | const DominatorTree *DT) { |
549 | // There are two restrictions on the use of an assume: |
550 | // 1. The assume must dominate the context (or the control flow must |
551 | // reach the assume whenever it reaches the context). |
552 | // 2. The context must not be in the assume's set of ephemeral values |
553 | // (otherwise we will use the assume to prove that the condition |
554 | // feeding the assume is trivially true, thus causing the removal of |
555 | // the assume). |
556 | |
557 | if (Inv->getParent() == CxtI->getParent()) { |
558 | // If Inv and CxtI are in the same block, check if the assume (Inv) is first |
559 | // in the BB. |
560 | if (Inv->comesBefore(CxtI)) |
561 | return true; |
562 | |
563 | // Don't let an assume affect itself - this would cause the problems |
564 | // `isEphemeralValueOf` is trying to prevent, and it would also make |
565 | // the loop below go out of bounds. |
566 | if (Inv == CxtI) |
567 | return false; |
568 | |
569 | // The context comes first, but they're both in the same block. |
570 | // Make sure there is nothing in between that might interrupt |
571 | // the control flow, not even CxtI itself. |
572 | // We limit the scan distance between the assume and its context instruction |
573 | // to avoid a compile-time explosion. This limit is chosen arbitrarily, so |
574 | // it can be adjusted if needed (could be turned into a cl::opt). |
575 | unsigned ScanLimit = 15; |
576 | for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I) |
577 | if (!isGuaranteedToTransferExecutionToSuccessor(&*I) || --ScanLimit == 0) |
578 | return false; |
579 | |
580 | return !isEphemeralValueOf(Inv, CxtI); |
581 | } |
582 | |
583 | // Inv and CxtI are in different blocks. |
584 | if (DT) { |
585 | if (DT->dominates(Inv, CxtI)) |
586 | return true; |
587 | } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) { |
588 | // We don't have a DT, but this trivially dominates. |
589 | return true; |
590 | } |
591 | |
592 | return false; |
593 | } |
594 | |
595 | static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) { |
596 | // v u> y implies v != 0. |
597 | if (Pred == ICmpInst::ICMP_UGT) |
598 | return true; |
599 | |
600 | // Special-case v != 0 to also handle v != null. |
601 | if (Pred == ICmpInst::ICMP_NE) |
602 | return match(RHS, m_Zero()); |
603 | |
604 | // All other predicates - rely on generic ConstantRange handling. |
605 | const APInt *C; |
606 | if (!match(RHS, m_APInt(C))) |
607 | return false; |
608 | |
609 | ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C); |
610 | return !TrueValues.contains(APInt::getNullValue(C->getBitWidth())); |
611 | } |
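
For an exact predicate region, "does the true range exclude zero?" reduces to evaluating the predicate at v == 0. A minimal sketch of that reduction on 8-bit values (the function-pointer shape and names are illustrative, not LLVM's ConstantRange API):

#include <cstdint>
#include <cstdio>

bool excludesZero(bool (*Pred)(uint8_t, uint8_t), uint8_t C) {
  return !Pred(0, C); // zero is excluded iff the predicate is false at v == 0
}

int main() {
  auto UGT = [](uint8_t V, uint8_t C) { return V > C; };
  auto SGT = [](uint8_t V, uint8_t C) { return (int8_t)V > (int8_t)C; };
  std::printf("v u> 0 excludes zero: %d\n", excludesZero(UGT, 0));     // 1
  std::printf("v s> -1 excludes zero: %d\n", excludesZero(SGT, 0xFF)); // 0
}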
612 | |
613 | static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) { |
614 | // Use of assumptions is context-sensitive. If we don't have a context, we |
615 | // cannot use them! |
616 | if (!Q.AC || !Q.CxtI) |
617 | return false; |
618 | |
619 | if (Q.CxtI && V->getType()->isPointerTy()) { |
620 | SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull}; |
621 | if (!NullPointerIsDefined(Q.CxtI->getFunction(), |
622 | V->getType()->getPointerAddressSpace())) |
623 | AttrKinds.push_back(Attribute::Dereferenceable); |
624 | |
625 | if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC)) |
626 | return true; |
627 | } |
628 | |
629 | for (auto &AssumeVH : Q.AC->assumptionsFor(V)) { |
630 | if (!AssumeVH) |
631 | continue; |
632 | CallInst *I = cast<CallInst>(AssumeVH); |
633 | assert(I->getFunction() == Q.CxtI->getFunction() && |
634 | "Got assumption for the wrong function!"); |
635 | if (Q.isExcluded(I)) |
636 | continue; |
637 | |
638 | // Warning: This loop can end up being somewhat performance sensitive. |
639 | // We're running this loop once for each value queried, resulting in a |
640 | // runtime of ~O(#assumes * #values). |
641 | |
642 | assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && |
643 | "must be an assume intrinsic"); |
644 | |
645 | Value *RHS; |
646 | CmpInst::Predicate Pred; |
647 | auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V))); |
648 | if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS)))) |
649 | return false; |
650 | |
651 | if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT)) |
652 | return true; |
653 | } |
654 | |
655 | return false; |
656 | } |
657 | |
658 | static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known, |
659 | unsigned Depth, const Query &Q) { |
660 | // Use of assumptions is context-sensitive. If we don't have a context, we |
661 | // cannot use them! |
662 | if (!Q.AC || !Q.CxtI) |
663 | return; |
664 | |
665 | unsigned BitWidth = Known.getBitWidth(); |
666 | |
667 | // Refine Known set if the pointer alignment is set by assume bundles. |
668 | if (V->getType()->isPointerTy()) { |
669 | if (RetainedKnowledge RK = getKnowledgeValidInContext( |
670 | V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) { |
671 | Known.Zero.setLowBits(Log2_32(RK.ArgValue)); |
672 | } |
673 | } |
674 | |
675 | // Note that the patterns below need to be kept in sync with the code |
676 | // in AssumptionCache::updateAffectedValues. |
677 | |
678 | for (auto &AssumeVH : Q.AC->assumptionsFor(V)) { |
679 | if (!AssumeVH) |
680 | continue; |
681 | CallInst *I = cast<CallInst>(AssumeVH); |
682 | assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() && |
683 | "Got assumption for the wrong function!"); |
684 | if (Q.isExcluded(I)) |
685 | continue; |
686 | |
687 | // Warning: This loop can end up being somewhat performance sensitive. |
688 | // We're running this loop once for each value queried, resulting in a |
689 | // runtime of ~O(#assumes * #values). |
690 | |
691 | assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && |
692 | "must be an assume intrinsic"); |
693 | |
694 | Value *Arg = I->getArgOperand(0); |
695 | |
696 | if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
697 | assert(BitWidth == 1 && "assume operand is not i1?"); |
698 | Known.setAllOnes(); |
699 | return; |
700 | } |
701 | if (match(Arg, m_Not(m_Specific(V))) && |
702 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
703 | assert(BitWidth == 1 && "assume operand is not i1?"); |
704 | Known.setAllZero(); |
705 | return; |
706 | } |
707 | |
708 | // The remaining tests are all recursive, so bail out if we hit the limit. |
709 | if (Depth == MaxAnalysisRecursionDepth) |
710 | continue; |
711 | |
712 | ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg); |
713 | if (!Cmp) |
714 | continue; |
715 | |
716 | // Note that ptrtoint may change the bitwidth. |
717 | Value *A, *B; |
718 | auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V))); |
719 | |
720 | CmpInst::Predicate Pred; |
721 | uint64_t C; |
722 | switch (Cmp->getPredicate()) { |
723 | default: |
724 | break; |
725 | case ICmpInst::ICMP_EQ: |
726 | // assume(v = a) |
727 | if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) && |
728 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
729 | KnownBits RHSKnown = |
730 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
731 | Known.Zero |= RHSKnown.Zero; |
732 | Known.One |= RHSKnown.One; |
733 | // assume(v & b = a) |
734 | } else if (match(Cmp, |
735 | m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) && |
736 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
737 | KnownBits RHSKnown = |
738 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
739 | KnownBits MaskKnown = |
740 | computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
741 | |
742 | // For those bits in the mask that are known to be one, we can propagate |
743 | // known bits from the RHS to V. |
744 | Known.Zero |= RHSKnown.Zero & MaskKnown.One; |
745 | Known.One |= RHSKnown.One & MaskKnown.One; |
746 | // assume(~(v & b) = a) |
747 | } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))), |
748 | m_Value(A))) && |
749 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
750 | KnownBits RHSKnown = |
751 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
752 | KnownBits MaskKnown = |
753 | computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
754 | |
755 | // For those bits in the mask that are known to be one, we can propagate |
756 | // inverted known bits from the RHS to V. |
757 | Known.Zero |= RHSKnown.One & MaskKnown.One; |
758 | Known.One |= RHSKnown.Zero & MaskKnown.One; |
759 | // assume(v | b = a) |
760 | } else if (match(Cmp, |
761 | m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) && |
762 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
763 | KnownBits RHSKnown = |
764 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
765 | KnownBits BKnown = |
766 | computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
767 | |
768 | // For those bits in B that are known to be zero, we can propagate known |
769 | // bits from the RHS to V. |
770 | Known.Zero |= RHSKnown.Zero & BKnown.Zero; |
771 | Known.One |= RHSKnown.One & BKnown.Zero; |
772 | // assume(~(v | b) = a) |
773 | } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))), |
774 | m_Value(A))) && |
775 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
776 | KnownBits RHSKnown = |
777 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
778 | KnownBits BKnown = |
779 | computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
780 | |
781 | // For those bits in B that are known to be zero, we can propagate |
782 | // inverted known bits from the RHS to V. |
783 | Known.Zero |= RHSKnown.One & BKnown.Zero; |
784 | Known.One |= RHSKnown.Zero & BKnown.Zero; |
785 | // assume(v ^ b = a) |
786 | } else if (match(Cmp, |
787 | m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) && |
788 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
789 | KnownBits RHSKnown = |
790 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
791 | KnownBits BKnown = |
792 | computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
793 | |
794 | // For those bits in B that are known to be zero, we can propagate known |
795 | // bits from the RHS to V. For those bits in B that are known to be one, |
796 | // we can propagate inverted known bits from the RHS to V. |
797 | Known.Zero |= RHSKnown.Zero & BKnown.Zero; |
798 | Known.One |= RHSKnown.One & BKnown.Zero; |
799 | Known.Zero |= RHSKnown.One & BKnown.One; |
800 | Known.One |= RHSKnown.Zero & BKnown.One; |
801 | // assume(~(v ^ b) = a) |
802 | } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))), |
803 | m_Value(A))) && |
804 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
805 | KnownBits RHSKnown = |
806 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
807 | KnownBits BKnown = |
808 | computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
809 | |
810 | // For those bits in B that are known to be zero, we can propagate |
811 | // inverted known bits from the RHS to V. For those bits in B that are |
812 | // known to be one, we can propagate known bits from the RHS to V. |
813 | Known.Zero |= RHSKnown.One & BKnown.Zero; |
814 | Known.One |= RHSKnown.Zero & BKnown.Zero; |
815 | Known.Zero |= RHSKnown.Zero & BKnown.One; |
816 | Known.One |= RHSKnown.One & BKnown.One; |
817 | // assume(v << c = a) |
818 | } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)), |
819 | m_Value(A))) && |
820 | isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) { |
821 | KnownBits RHSKnown = |
822 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
823 | |
824 | // For those bits in RHS that are known, we can propagate them to known |
825 | // bits in V shifted to the right by C. |
826 | RHSKnown.Zero.lshrInPlace(C); |
827 | Known.Zero |= RHSKnown.Zero; |
828 | RHSKnown.One.lshrInPlace(C); |
829 | Known.One |= RHSKnown.One; |
830 | // assume(~(v << c) = a) |
831 | } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))), |
832 | m_Value(A))) && |
833 | isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) { |
834 | KnownBits RHSKnown = |
835 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
836 | // For those bits in RHS that are known, we can propagate them inverted |
837 | // to known bits in V shifted to the right by C. |
838 | RHSKnown.One.lshrInPlace(C); |
839 | Known.Zero |= RHSKnown.One; |
840 | RHSKnown.Zero.lshrInPlace(C); |
841 | Known.One |= RHSKnown.Zero; |
842 | // assume(v >> c = a) |
843 | } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)), |
844 | m_Value(A))) && |
845 | isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) { |
846 | KnownBits RHSKnown = |
847 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
848 | // For those bits in RHS that are known, we can propagate them to known |
849 | // bits in V shifted to the right by C. |
850 | Known.Zero |= RHSKnown.Zero << C; |
851 | Known.One |= RHSKnown.One << C; |
852 | // assume(~(v >> c) = a) |
853 | } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))), |
854 | m_Value(A))) && |
855 | isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) { |
856 | KnownBits RHSKnown = |
857 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
858 | // For those bits in RHS that are known, we can propagate them inverted |
859 | // to known bits in V shifted to the right by C. |
860 | Known.Zero |= RHSKnown.One << C; |
861 | Known.One |= RHSKnown.Zero << C; |
862 | } |
863 | break; |
864 | case ICmpInst::ICMP_SGE: |
865 | // assume(v >=_s c) where c is non-negative |
866 | if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) && |
867 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
868 | KnownBits RHSKnown = |
869 | computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth); |
870 | |
871 | if (RHSKnown.isNonNegative()) { |
872 | // We know that the sign bit is zero. |
873 | Known.makeNonNegative(); |
874 | } |
875 | } |
876 | break; |
877 | case ICmpInst::ICMP_SGT: |
878 | // assume(v >_s c) where c is at least -1. |
879 | if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) && |
880 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
881 | KnownBits RHSKnown = |
882 | computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth); |
883 | |
884 | if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) { |
885 | // We know that the sign bit is zero. |
886 | Known.makeNonNegative(); |
887 | } |
888 | } |
889 | break; |
890 | case ICmpInst::ICMP_SLE: |
891 | // assume(v <=_s c) where c is negative |
892 | if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) && |
893 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
894 | KnownBits RHSKnown = |
895 | computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth); |
896 | |
897 | if (RHSKnown.isNegative()) { |
898 | // We know that the sign bit is one. |
899 | Known.makeNegative(); |
900 | } |
901 | } |
902 | break; |
903 | case ICmpInst::ICMP_SLT: |
904 | // assume(v <_s c) where c is non-positive |
905 | if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) && |
906 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
907 | KnownBits RHSKnown = |
908 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
909 | |
910 | if (RHSKnown.isZero() || RHSKnown.isNegative()) { |
911 | // We know that the sign bit is one. |
912 | Known.makeNegative(); |
913 | } |
914 | } |
915 | break; |
916 | case ICmpInst::ICMP_ULE: |
917 | // assume(v <=_u c) |
918 | if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) && |
919 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
920 | KnownBits RHSKnown = |
921 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
922 | |
923 | // Whatever high bits in c are zero are known to be zero. |
924 | Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros()); |
925 | } |
926 | break; |
927 | case ICmpInst::ICMP_ULT: |
928 | // assume(v <_u c) |
929 | if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) && |
930 | isValidAssumeForContext(I, Q.CxtI, Q.DT)) { |
931 | KnownBits RHSKnown = |
932 | computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth); |
933 | |
934 | // If the RHS is known zero, then this assumption must be wrong (nothing |
935 | // is unsigned less than zero). Signal a conflict and get out of here. |
936 | if (RHSKnown.isZero()) { |
937 | Known.Zero.setAllBits(); |
938 | Known.One.setAllBits(); |
939 | break; |
940 | } |
941 | |
942 | // Whatever high bits in c are zero are known to be zero (if c is a power |
943 | // of 2, then one more). |
944 | if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I))) |
945 | Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1); |
946 | else |
947 | Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros()); |
948 | } |
949 | break; |
950 | } |
951 | } |
952 | |
953 | // If assumptions conflict with each other or previous known bits, then we |
954 | // have a logical fallacy. It's possible that the assumption is not reachable, |
955 | // so this isn't a real bug. On the other hand, the program may have undefined |
956 | // behavior, or we might have a bug in the compiler. We can't assert/crash, so |
957 | // clear out the known bits, try to warn the user, and hope for the best. |
958 | if (Known.Zero.intersects(Known.One)) { |
959 | Known.resetAll(); |
960 | |
961 | if (Q.ORE) |
962 | Q.ORE->emit([&]() { |
963 | auto *CxtI = const_cast<Instruction *>(Q.CxtI); |
964 | return OptimizationRemarkAnalysis("value-tracking", "BadAssumption", |
965 | CxtI) |
966 | << "Detected conflicting code assumptions. Program may " |
967 | "have undefined behavior, or compiler may have " |
968 | "internal error."; |
969 | }); |
970 | } |
971 | } |
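
A worked 8-bit instance of the ICMP_ULT rule above, as a standalone sketch (not LLVM code): assume(v u< 16) means the largest satisfying v is 15, so the top four bits of v are known zero -- the leading zeros of 16, plus one extra because 16 is a power of two.

#include <cstdint>
#include <cstdio>

int main() {
  unsigned C = 16, MaxV = 0;
  for (unsigned V = 0; V < C; ++V)
    MaxV = V; // largest value the assumption still permits
  unsigned KnownZeroHighBits = 0;
  for (int B = 7; B >= 0 && !((MaxV >> B) & 1); --B)
    ++KnownZeroHighBits;
  std::printf("v u< %u => %u high bits known zero\n", C,
              KnownZeroHighBits); // 4
}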
972 | |
973 | /// Compute known bits from a shift operator, including those with a |
974 | /// non-constant shift amount. Known is the output of this function. Known2 is a |
975 | /// pre-allocated temporary with the same bit width as Known and on return |
976 | /// contains the known bits of the shift value source. KF is an |
977 | /// operator-specific function that, given the known bits and a shift amount, |
978 | /// computes the implied known bits of the shift operator's result for that |
979 | /// shift amount. The results from calling KF are conservatively combined |
980 | /// for all permitted shift amounts. |
981 | static void computeKnownBitsFromShiftOperator( |
982 | const Operator *I, const APInt &DemandedElts, KnownBits &Known, |
983 | KnownBits &Known2, unsigned Depth, const Query &Q, |
984 | function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) { |
985 | unsigned BitWidth = Known.getBitWidth(); |
986 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); |
987 | computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); |
988 | |
989 | // Note: We cannot use Known.Zero.getLimitedValue() here, because if |
990 | // BitWidth > 64 and any upper bits are known, we'll end up returning the |
991 | // limit value (which implies all bits are known). |
992 | uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue(); |
993 | uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue(); |
994 | bool ShiftAmtIsConstant = Known.isConstant(); |
995 | bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth); |
996 | |
997 | if (ShiftAmtIsConstant) { |
998 | Known = KF(Known2, Known); |
999 | |
1000 | // If the known bits conflict, this must be an overflowing left shift, so |
1001 | // the shift result is poison. We can return anything we want. Choose 0 for |
1002 | // the best folding opportunity. |
1003 | if (Known.hasConflict()) |
1004 | Known.setAllZero(); |
1005 | |
1006 | return; |
1007 | } |
1008 | |
1009 | // If the shift amount could be greater than or equal to the bit-width of the |
1010 | // LHS, the value could be poison, but bail out because the check below is |
1011 | // expensive. |
1012 | // TODO: Should we just carry on? |
1013 | if (MaxShiftAmtIsOutOfRange) { |
1014 | Known.resetAll(); |
1015 | return; |
1016 | } |
1017 | |
1018 | // It would be more clearly correct to use the two temporaries for this |
1019 | // calculation. We reuse the APInts here to prevent unnecessary allocations. |
1020 | Known.resetAll(); |
1021 | |
1022 | // If we know the shifter operand is nonzero, we can sometimes infer more |
1023 | // known bits. However this is expensive to compute, so be lazy about it and |
1024 | // only compute it when absolutely necessary. |
1025 | Optional<bool> ShifterOperandIsNonZero; |
1026 | |
1027 | // Early exit if we can't constrain any well-defined shift amount. |
1028 | if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) && |
1029 | !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) { |
1030 | ShifterOperandIsNonZero = |
1031 | isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q); |
1032 | if (!*ShifterOperandIsNonZero) |
1033 | return; |
1034 | } |
1035 | |
1036 | Known.Zero.setAllBits(); |
1037 | Known.One.setAllBits(); |
1038 | for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) { |
1039 | // Combine the shifted known input bits only for those shift amounts |
1040 | // compatible with its known constraints. |
1041 | if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt) |
1042 | continue; |
1043 | if ((ShiftAmt | ShiftAmtKO) != ShiftAmt) |
1044 | continue; |
1045 | // If we know the shifter is nonzero, we may be able to infer more known |
1046 | // bits. This check is sunk down as far as possible to avoid the expensive |
1047 | // call to isKnownNonZero if the cheaper checks above fail. |
1048 | if (ShiftAmt == 0) { |
1049 | if (!ShifterOperandIsNonZero.hasValue()) |
1050 | ShifterOperandIsNonZero = |
1051 | isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q); |
1052 | if (*ShifterOperandIsNonZero) |
1053 | continue; |
1054 | } |
1055 | |
1056 | Known = KnownBits::commonBits( |
1057 | Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt)))); |
1058 | } |
1059 | |
1060 | // If the known bits conflict, the result is poison. Return a 0 and hope the |
1061 | // caller can further optimize that. |
1062 | if (Known.hasConflict()) |
1063 | Known.setAllZero(); |
1064 | } |
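
A toy 8-bit model of the fallback loop above for lshr with a partially known shift amount (illustrative sketch, all values chosen for the example): try every amount consistent with the amount's known bits and keep only the result bits common to all candidates.

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t Val = 0xF0;     // value operand, fully known here
  uint8_t AmtZero = 0xFA; // amount bits 7..3 and 1 known zero
  uint8_t AmtOne = 0x04;  // amount bit 2 known one => Amt is 4 or 5
  uint8_t CommonZero = 0xFF, CommonOne = 0xFF;
  for (unsigned Amt = 0; Amt < 8; ++Amt) {
    if ((Amt & AmtZero) || (~Amt & AmtOne))
      continue; // amount contradicts its known bits
    uint8_t R = (uint8_t)(Val >> Amt);
    CommonZero &= (uint8_t)~R;
    CommonOne &= R;
  }
  std::printf("Zero=0x%02x One=0x%02x\n", (unsigned)CommonZero,
              (unsigned)CommonOne); // 0xf0 and 0x07
}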
1065 | |
1066 | static void computeKnownBitsFromOperator(const Operator *I, |
1067 | const APInt &DemandedElts, |
1068 | KnownBits &Known, unsigned Depth, |
1069 | const Query &Q) { |
1070 | unsigned BitWidth = Known.getBitWidth(); |
1071 | |
1072 | KnownBits Known2(BitWidth); |
1073 | switch (I->getOpcode()) { |
1074 | default: break; |
1075 | case Instruction::Load: |
1076 | if (MDNode *MD = |
1077 | Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range)) |
1078 | computeKnownBitsFromRangeMetadata(*MD, Known); |
1079 | break; |
1080 | case Instruction::And: { |
1081 | // If either the LHS or the RHS are Zero, the result is zero. |
1082 | computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); |
1083 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); |
1084 | |
1085 | Known &= Known2; |
1086 | |
1087 | // and(x, add (x, -1)) is a common idiom that always clears the low bit; |
1088 | // here we handle the more general case of adding any odd number by |
1089 | // matching the form add(x, add(x, y)) where y is odd. |
1090 | // TODO: This could be generalized to clearing any bit set in y where the |
1091 | // following bit is known to be unset in y. |
1092 | Value *X = nullptr, *Y = nullptr; |
1093 | if (!Known.Zero[0] && !Known.One[0] && |
1094 | match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) { |
1095 | Known2.resetAll(); |
1096 | computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q); |
1097 | if (Known2.countMinTrailingOnes() > 0) |
1098 | Known.Zero.setBit(0); |
1099 | } |
1100 | break; |
1101 | } |
1102 | case Instruction::Or: |
1103 | computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); |
1104 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); |
1105 | |
1106 | Known |= Known2; |
1107 | break; |
1108 | case Instruction::Xor: |
1109 | computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); |
1110 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); |
1111 | |
1112 | Known ^= Known2; |
1113 | break; |
1114 | case Instruction::Mul: { |
1115 | bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); |
1116 | computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts, |
1117 | Known, Known2, Depth, Q); |
1118 | break; |
1119 | } |
1120 | case Instruction::UDiv: { |
1121 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1122 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); |
1123 | Known = KnownBits::udiv(Known, Known2); |
1124 | break; |
1125 | } |
1126 | case Instruction::Select: { |
1127 | const Value *LHS = nullptr, *RHS = nullptr; |
1128 | SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor; |
1129 | if (SelectPatternResult::isMinOrMax(SPF)) { |
1130 | computeKnownBits(RHS, Known, Depth + 1, Q); |
1131 | computeKnownBits(LHS, Known2, Depth + 1, Q); |
1132 | switch (SPF) { |
1133 | default: |
1134 | llvm_unreachable("Unhandled select pattern flavor!"); |
1135 | case SPF_SMAX: |
1136 | Known = KnownBits::smax(Known, Known2); |
1137 | break; |
1138 | case SPF_SMIN: |
1139 | Known = KnownBits::smin(Known, Known2); |
1140 | break; |
1141 | case SPF_UMAX: |
1142 | Known = KnownBits::umax(Known, Known2); |
1143 | break; |
1144 | case SPF_UMIN: |
1145 | Known = KnownBits::umin(Known, Known2); |
1146 | break; |
1147 | } |
1148 | break; |
1149 | } |
1150 | |
1151 | computeKnownBits(I->getOperand(2), Known, Depth + 1, Q); |
1152 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); |
1153 | |
1154 | // Only known if known in both the LHS and RHS. |
1155 | Known = KnownBits::commonBits(Known, Known2); |
1156 | |
1157 | if (SPF == SPF_ABS) { |
1158 | // RHS from matchSelectPattern returns the negation part of abs pattern. |
1159 | // If the negate has an NSW flag we can assume the sign bit of the result |
1160 | // will be 0 because that makes abs(INT_MIN) undefined. |
1161 | if (match(RHS, m_Neg(m_Specific(LHS))) && |
1162 | Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS))) |
1163 | Known.Zero.setSignBit(); |
1164 | } |
1165 | |
1166 | break; |
1167 | } |
1168 | case Instruction::FPTrunc: |
1169 | case Instruction::FPExt: |
1170 | case Instruction::FPToUI: |
1171 | case Instruction::FPToSI: |
1172 | case Instruction::SIToFP: |
1173 | case Instruction::UIToFP: |
1174 | break; // Can't work with floating point. |
1175 | case Instruction::PtrToInt: |
1176 | case Instruction::IntToPtr: |
1177 | // Fall through and handle them the same as zext/trunc. |
1178 | LLVM_FALLTHROUGH; |
1179 | case Instruction::ZExt: |
1180 | case Instruction::Trunc: { |
1181 | Type *SrcTy = I->getOperand(0)->getType(); |
1182 | |
1183 | unsigned SrcBitWidth; |
1184 | // Note that we handle pointer operands here because of inttoptr/ptrtoint |
1185 | // which fall through here. |
1186 | Type *ScalarTy = SrcTy->getScalarType(); |
1187 | SrcBitWidth = ScalarTy->isPointerTy() ? |
1188 | Q.DL.getPointerTypeSizeInBits(ScalarTy) : |
1189 | Q.DL.getTypeSizeInBits(ScalarTy); |
1190 | |
1191 | assert(SrcBitWidth && "SrcBitWidth can't be zero"); |
1192 | Known = Known.anyextOrTrunc(SrcBitWidth); |
1193 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1194 | Known = Known.zextOrTrunc(BitWidth); |
1195 | break; |
1196 | } |
1197 | case Instruction::BitCast: { |
1198 | Type *SrcTy = I->getOperand(0)->getType(); |
1199 | if (SrcTy->isIntOrPtrTy() && |
1200 | // TODO: For now, not handling conversions like: |
1201 | // (bitcast i64 %x to <2 x i32>) |
1202 | !I->getType()->isVectorTy()) { |
1203 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1204 | break; |
1205 | } |
1206 | break; |
1207 | } |
1208 | case Instruction::SExt: { |
1209 | // Compute the bits in the result that are not present in the input. |
1210 | unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits(); |
1211 | |
1212 | Known = Known.trunc(SrcBitWidth); |
1213 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1214 | // If the sign bit of the input is known set or clear, then we know the |
1215 | // top bits of the result. |
1216 | Known = Known.sext(BitWidth); |
1217 | break; |
1218 | } |
1219 | case Instruction::Shl: { |
1220 | bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); |
1221 | auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) { |
1222 | KnownBits Result = KnownBits::shl(KnownVal, KnownAmt); |
1223 | // If this shift has the "nsw" flag, then the result is either poison
1224 | // or has the same sign bit as the first operand.
1225 | if (NSW) { |
1226 | if (KnownVal.Zero.isSignBitSet()) |
1227 | Result.Zero.setSignBit(); |
1228 | if (KnownVal.One.isSignBitSet()) |
1229 | Result.One.setSignBit(); |
1230 | } |
1231 | return Result; |
1232 | }; |
1233 | computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q, |
1234 | KF); |
1235 | // Trailing zeros of a left-shifted constant never decrease.
1236 | const APInt *C; |
1237 | if (match(I->getOperand(0), m_APInt(C))) |
1238 | Known.Zero.setLowBits(C->countTrailingZeros()); |
1239 | break; |
1240 | } |
1241 | case Instruction::LShr: { |
1242 | auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) { |
1243 | return KnownBits::lshr(KnownVal, KnownAmt); |
1244 | }; |
1245 | computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q, |
1246 | KF); |
1247 | // Leading zeros of a right-shifted constant never decrease.
1248 | const APInt *C; |
1249 | if (match(I->getOperand(0), m_APInt(C))) |
1250 | Known.Zero.setHighBits(C->countLeadingZeros()); |
1251 | break; |
1252 | } |
1253 | case Instruction::AShr: { |
1254 | auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) { |
1255 | return KnownBits::ashr(KnownVal, KnownAmt); |
1256 | }; |
1257 | computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q, |
1258 | KF); |
1259 | break; |
1260 | } |
1261 | case Instruction::Sub: { |
1262 | bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); |
1263 | computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, |
1264 | DemandedElts, Known, Known2, Depth, Q); |
1265 | break; |
1266 | } |
1267 | case Instruction::Add: { |
1268 | bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); |
1269 | computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, |
1270 | DemandedElts, Known, Known2, Depth, Q); |
1271 | break; |
1272 | } |
1273 | case Instruction::SRem: |
1274 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1275 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); |
1276 | Known = KnownBits::srem(Known, Known2); |
1277 | break; |
1278 | |
1279 | case Instruction::URem: |
1280 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1281 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); |
1282 | Known = KnownBits::urem(Known, Known2); |
1283 | break; |
1284 | case Instruction::Alloca: |
1285 | Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign())); |
1286 | break; |
1287 | case Instruction::GetElementPtr: { |
1288 | // Analyze all of the subscripts of this getelementptr instruction |
1289 | // to determine if we can prove known low zero bits. |
1290 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1291 | // Accumulate the constant indices in a separate variable |
1292 | // to minimize the number of calls to computeForAddSub. |
1293 | APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true); |
1294 | |
1295 | gep_type_iterator GTI = gep_type_begin(I); |
1296 | for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) { |
1297 | // Known bits can only get weaker as indices are processed; short-circuit once nothing is known.
1298 | if (Known.isUnknown()) |
1299 | break; |
1300 | |
1301 | Value *Index = I->getOperand(i); |
1302 | |
1303 | // Handle case when index is zero. |
1304 | Constant *CIndex = dyn_cast<Constant>(Index); |
1305 | if (CIndex && CIndex->isZeroValue()) |
1306 | continue; |
1307 | |
1308 | if (StructType *STy = GTI.getStructTypeOrNull()) { |
1309 | // Handle struct member offset arithmetic. |
1310 | |
1311 | assert(CIndex &&
1312 |        "Access to structure field must be known at compile time");
1313 | |
1314 | if (CIndex->getType()->isVectorTy()) |
1315 | Index = CIndex->getSplatValue(); |
1316 | |
1317 | unsigned Idx = cast<ConstantInt>(Index)->getZExtValue(); |
1318 | const StructLayout *SL = Q.DL.getStructLayout(STy); |
1319 | uint64_t Offset = SL->getElementOffset(Idx); |
1320 | AccConstIndices += Offset; |
1321 | continue; |
1322 | } |
1323 | |
1324 | // Handle array index arithmetic. |
1325 | Type *IndexedTy = GTI.getIndexedType(); |
1326 | if (!IndexedTy->isSized()) { |
1327 | Known.resetAll(); |
1328 | break; |
1329 | } |
1330 | |
1331 | unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits(); |
1332 | KnownBits IndexBits(IndexBitWidth); |
1333 | computeKnownBits(Index, IndexBits, Depth + 1, Q); |
1334 | TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy); |
1335 | uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize(); |
1336 | KnownBits ScalingFactor(IndexBitWidth); |
1337 | // Multiply by current sizeof type. |
1338 | // &A[i] == A + i * sizeof(*A[i]). |
1339 | if (IndexTypeSize.isScalable()) { |
1340 | // For scalable types the only thing we know about sizeof is |
1341 | // that this is a multiple of the minimum size. |
1342 | ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes)); |
1343 | } else if (IndexBits.isConstant()) { |
1344 | APInt IndexConst = IndexBits.getConstant(); |
1345 | APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes); |
1346 | IndexConst *= ScalingFactor; |
1347 | AccConstIndices += IndexConst.sextOrTrunc(BitWidth); |
1348 | continue; |
1349 | } else { |
1350 | ScalingFactor = |
1351 | KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes)); |
1352 | } |
1353 | IndexBits = KnownBits::computeForMul(IndexBits, ScalingFactor); |
1354 | |
1355 | // If the offsets have a different width from the pointer, according |
1356 | // to the language reference we need to sign-extend or truncate them |
1357 | // to the width of the pointer. |
1358 | IndexBits = IndexBits.sextOrTrunc(BitWidth); |
1359 | |
1360 | // Note that inbounds does *not* guarantee nsw for the addition, as only |
1361 | // the offset is signed, while the base address is unsigned. |
1362 | Known = KnownBits::computeForAddSub( |
1363 | /*Add=*/true, /*NSW=*/false, Known, IndexBits); |
1364 | } |
1365 | if (!Known.isUnknown() && !AccConstIndices.isNullValue()) { |
1366 | KnownBits Index = KnownBits::makeConstant(AccConstIndices); |
1367 | Known = KnownBits::computeForAddSub( |
1368 | /*Add=*/true, /*NSW=*/false, Known, Index); |
1369 | } |
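| // Worked example (informal): for getelementptr inbounds i32, i32* %p,
| // i64 %i, the index contributes %i * 4, so the result keeps at least
| // min(trailing zeros of %p, trailing zeros of %i plus 2) low zero bits.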
1370 | break; |
1371 | } |
1372 | case Instruction::PHI: { |
1373 | const PHINode *P = cast<PHINode>(I); |
1374 | // Handle the case of a simple two-predecessor recurrence PHI. |
1375 | // There's a lot more that could theoretically be done here, but |
1376 | // this is sufficient to catch some interesting cases. |
1377 | if (P->getNumIncomingValues() == 2) { |
1378 | for (unsigned i = 0; i != 2; ++i) { |
1379 | Value *L = P->getIncomingValue(i); |
1380 | Value *R = P->getIncomingValue(!i); |
1381 | Instruction *RInst = P->getIncomingBlock(!i)->getTerminator(); |
1382 | Instruction *LInst = P->getIncomingBlock(i)->getTerminator(); |
1383 | Operator *LU = dyn_cast<Operator>(L); |
1384 | if (!LU) |
1385 | continue; |
1386 | unsigned Opcode = LU->getOpcode(); |
1387 | |
1389 | // If this is a shift recurrence, we know the bits being shifted in. |
1390 | // We can combine that with information about the start value of the |
1391 | // recurrence to conclude facts about the result. |
1392 | if (Opcode == Instruction::LShr || |
1393 | Opcode == Instruction::AShr || |
1394 | Opcode == Instruction::Shl) { |
1395 | Value *LL = LU->getOperand(0);
1396 | // Find a recurrence; the bits shifted in depend only on the start
1397 | // value R, so the shift amount operand is not needed here.
1398 | if (LL != I)
1399 | continue; // Check for recurrence with L and R flipped.
1402 | |
1403 | // We have matched a recurrence of the form: |
1404 | // %iv = [R, %entry], [%iv.next, %backedge] |
1405 | // %iv.next = shift_op %iv, <shift amount>
1406 | |
1407 | // Recurse with the phi context to avoid concern about whether facts
1408 | // inferred hold at the original context instruction. TODO: It may be
1409 | // correct to use the original context. If warranted, explore and
1410 | // add sufficient tests to cover.
1411 | Query RecQ = Q; |
1412 | RecQ.CxtI = P; |
1413 | computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ); |
1414 | switch (Opcode) { |
1415 | case Instruction::Shl: |
1416 | // A shl recurrence will only increase the trailing zeros.
1417 | Known.Zero.setLowBits(Known2.countMinTrailingZeros()); |
1418 | break; |
1419 | case Instruction::LShr: |
1420 | // A lshr recurrence will preserve the leading zeros of the |
1421 | // start value |
1422 | Known.Zero.setHighBits(Known2.countMinLeadingZeros()); |
1423 | break; |
1424 | case Instruction::AShr: |
1425 | // An ashr recurrence will extend the initial sign bit |
1426 | Known.Zero.setHighBits(Known2.countMinLeadingZeros()); |
1427 | Known.One.setHighBits(Known2.countMinLeadingOnes()); |
1428 | break; |
1429 | }
1430 | } |
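| // E.g. (informal): for %iv = phi i32 [ 16, %entry ], [ %iv.next, %loop ]
| // with %iv.next = lshr i32 %iv, 1, the start value 16 has 27 leading
| // zeros, and an lshr recurrence only adds leading zeros, so the phi
| // keeps all 27.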
1431 | |
1432 | // Check for operations that have the property that if |
1433 | // both their operands have low zero bits, the result |
1434 | // will have low zero bits. |
1435 | if (Opcode == Instruction::Add || |
1436 | Opcode == Instruction::Sub || |
1437 | Opcode == Instruction::And || |
1438 | Opcode == Instruction::Or || |
1439 | Opcode == Instruction::Mul) { |
1440 | Value *LL = LU->getOperand(0); |
1441 | Value *LR = LU->getOperand(1); |
1442 | // Find a recurrence. |
1443 | if (LL == I) |
1444 | L = LR; |
1445 | else if (LR == I) |
1446 | L = LL; |
1447 | else |
1448 | continue; // Check for recurrence with L and R flipped. |
1449 | |
1450 | // Change the context instruction to the "edge" that flows into the |
1451 | // phi. This is important because that is where the value is actually |
1452 | // "evaluated" even though it is used later somewhere else. (see also |
1453 | // D69571). |
1454 | Query RecQ = Q; |
1455 | |
1456 | // Ok, we have a PHI of the form L op= R. Check for low |
1457 | // zero bits. |
1458 | RecQ.CxtI = RInst; |
1459 | computeKnownBits(R, Known2, Depth + 1, RecQ); |
1460 | |
1461 | // We need to take the minimum number of known bits |
1462 | KnownBits Known3(BitWidth); |
1463 | RecQ.CxtI = LInst; |
1464 | computeKnownBits(L, Known3, Depth + 1, RecQ); |
1465 | |
1466 | Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(), |
1467 | Known3.countMinTrailingZeros())); |
1468 | |
1469 | auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU); |
1470 | if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) { |
1471 | // If initial value of recurrence is nonnegative, and we are adding |
1472 | // a nonnegative number with nsw, the result can only be nonnegative |
1473 | // or poison value regardless of the number of times we execute the |
1474 | // add in phi recurrence. If initial value is negative and we are |
1475 | // adding a negative number with nsw, the result can only be |
1476 | // negative or poison value. Similar arguments apply to sub and mul. |
1477 | // |
1478 | // (add non-negative, non-negative) --> non-negative |
1479 | // (add negative, negative) --> negative |
1480 | if (Opcode == Instruction::Add) { |
1481 | if (Known2.isNonNegative() && Known3.isNonNegative()) |
1482 | Known.makeNonNegative(); |
1483 | else if (Known2.isNegative() && Known3.isNegative()) |
1484 | Known.makeNegative(); |
1485 | } |
1486 | |
1487 | // (sub nsw non-negative, negative) --> non-negative |
1488 | // (sub nsw negative, non-negative) --> negative |
1489 | else if (Opcode == Instruction::Sub && LL == I) { |
1490 | if (Known2.isNonNegative() && Known3.isNegative()) |
1491 | Known.makeNonNegative(); |
1492 | else if (Known2.isNegative() && Known3.isNonNegative()) |
1493 | Known.makeNegative(); |
1494 | } |
1495 | |
1496 | // (mul nsw non-negative, non-negative) --> non-negative |
1497 | else if (Opcode == Instruction::Mul && Known2.isNonNegative() && |
1498 | Known3.isNonNegative()) |
1499 | Known.makeNonNegative(); |
1500 | } |
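| // E.g. (informal): for %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
| // with %iv.next = add nsw i32 %iv, 8, the start and step are both
| // non-negative and both have at least three trailing zeros, so the phi
| // is a non-negative multiple of 8 (or poison).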
1501 | |
1502 | break; |
1503 | } |
1504 | } |
1505 | } |
1506 | |
1507 | // Unreachable blocks may have zero-operand PHI nodes. |
1508 | if (P->getNumIncomingValues() == 0) |
1509 | break; |
1510 | |
1511 | // Otherwise take the intersection of the known bit sets of the
1512 | // operands, taking conservative care to avoid excessive recursion.
1513 | if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) { |
1514 | // Skip if every incoming value refers back to the PHI itself.
1515 | if (dyn_cast_or_null<UndefValue>(P->hasConstantValue())) |
1516 | break; |
1517 | |
1518 | Known.Zero.setAllBits(); |
1519 | Known.One.setAllBits(); |
1520 | for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) { |
1521 | Value *IncValue = P->getIncomingValue(u); |
1522 | // Skip direct self references. |
1523 | if (IncValue == P) continue; |
1524 | |
1525 | // Change the context instruction to the "edge" that flows into the |
1526 | // phi. This is important because that is where the value is actually |
1527 | // "evaluated" even though it is used later somewhere else. (see also |
1528 | // D69571). |
1529 | Query RecQ = Q; |
1530 | RecQ.CxtI = P->getIncomingBlock(u)->getTerminator(); |
1531 | |
1532 | Known2 = KnownBits(BitWidth); |
1533 | // Recurse, but cap the recursion to one level, because we don't |
1534 | // want to waste time spinning around in loops. |
1535 | computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ); |
1536 | Known = KnownBits::commonBits(Known, Known2); |
1537 | // If every bit has become unknown, there's no need to check
1538 | // more operands.
1539 | if (Known.isUnknown()) |
1540 | break; |
1541 | } |
1542 | } |
1543 | break; |
1544 | } |
1545 | case Instruction::Call: |
1546 | case Instruction::Invoke: |
1547 | // If range metadata is attached to this call, set known bits from that, |
1548 | // and then intersect with known bits based on other properties of the |
1549 | // function. |
1550 | if (MDNode *MD = |
1551 | Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range)) |
1552 | computeKnownBitsFromRangeMetadata(*MD, Known); |
1553 | if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) { |
1554 | computeKnownBits(RV, Known2, Depth + 1, Q); |
1555 | Known.Zero |= Known2.Zero; |
1556 | Known.One |= Known2.One; |
1557 | } |
1558 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { |
1559 | switch (II->getIntrinsicID()) { |
1560 | default: break; |
1561 | case Intrinsic::abs: { |
1562 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); |
1563 | bool IntMinIsPoison = match(II->getArgOperand(1), m_One()); |
1564 | Known = Known2.abs(IntMinIsPoison); |
1565 | break; |
1566 | } |
1567 | case Intrinsic::bitreverse: |
1568 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); |
1569 | Known.Zero |= Known2.Zero.reverseBits(); |
1570 | Known.One |= Known2.One.reverseBits(); |
1571 | break; |
1572 | case Intrinsic::bswap: |
1573 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); |
1574 | Known.Zero |= Known2.Zero.byteSwap(); |
1575 | Known.One |= Known2.One.byteSwap(); |
1576 | break; |
1577 | case Intrinsic::ctlz: { |
1578 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); |
1579 | // If we have a known 1, its position is our upper bound. |
1580 | unsigned PossibleLZ = Known2.countMaxLeadingZeros(); |
1581 | // If this call is undefined for 0, the result will be less than 2^n. |
1582 | if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) |
1583 | PossibleLZ = std::min(PossibleLZ, BitWidth - 1); |
1584 | unsigned LowBits = Log2_32(PossibleLZ)+1; |
1585 | Known.Zero.setBitsFrom(LowBits); |
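| // Worked example (informal): if bit 28 of an i32 operand is known one,
| // countMaxLeadingZeros() is 3, so the result is at most 3 and fits in
| // Log2_32(3) + 1 == 2 bits; bits 2..31 become known zero.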
1586 | break; |
1587 | } |
1588 | case Intrinsic::cttz: { |
1589 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); |
1590 | // If we have a known 1, its position is our upper bound. |
1591 | unsigned PossibleTZ = Known2.countMaxTrailingZeros(); |
1592 | // If this call is undefined for 0, the result will be less than 2^n. |
1593 | if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) |
1594 | PossibleTZ = std::min(PossibleTZ, BitWidth - 1); |
1595 | unsigned LowBits = Log2_32(PossibleTZ)+1; |
1596 | Known.Zero.setBitsFrom(LowBits); |
1597 | break; |
1598 | } |
1599 | case Intrinsic::ctpop: { |
1600 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); |
1601 | // We can bound the space the count needs. Also, bits known to be zero |
1602 | // can't contribute to the population. |
1603 | unsigned BitsPossiblySet = Known2.countMaxPopulation(); |
1604 | unsigned LowBits = Log2_32(BitsPossiblySet)+1; |
1605 | Known.Zero.setBitsFrom(LowBits); |
1606 | // TODO: we could bound Known.One using the lower bound on the number
1607 | // of bits which might be set, as provided by Known2.countMinPopulation().
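| // E.g. (informal): if at most 5 bits of an i32 operand can be set, the
| // population count is at most 5, which fits in Log2_32(5) + 1 == 3 low
| // bits, so bits 3..31 of the result are known zero.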
1608 | break; |
1609 | } |
1610 | case Intrinsic::fshr: |
1611 | case Intrinsic::fshl: { |
1612 | const APInt *SA; |
1613 | if (!match(I->getOperand(2), m_APInt(SA))) |
1614 | break; |
1615 | |
1616 | // Normalize to funnel shift left. |
1617 | uint64_t ShiftAmt = SA->urem(BitWidth); |
1618 | if (II->getIntrinsicID() == Intrinsic::fshr) |
1619 | ShiftAmt = BitWidth - ShiftAmt; |
1620 | |
1621 | KnownBits Known3(BitWidth); |
1622 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); |
1623 | computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q); |
1624 | |
1625 | Known.Zero = |
1626 | Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt); |
1627 | Known.One = |
1628 | Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt); |
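| // E.g. (informal): fshl(%a, %b, 8) on i32 is (%a << 8) | (%b >> 24), so
| // %a supplies bits 8..31 and %b supplies bits 0..7; fshr with amount 8
| // was normalized above to fshl with amount 24.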
1629 | break; |
1630 | } |
1631 | case Intrinsic::uadd_sat: |
1632 | case Intrinsic::usub_sat: { |
1633 | bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat; |
1634 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1635 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); |
1636 | |
1637 | // Add: Leading ones of either operand are preserved. |
1638 | // Sub: Leading zeros of LHS and leading ones of RHS are preserved |
1639 | // as leading zeros in the result. |
1640 | unsigned LeadingKnown; |
1641 | if (IsAdd) |
1642 | LeadingKnown = std::max(Known.countMinLeadingOnes(), |
1643 | Known2.countMinLeadingOnes()); |
1644 | else |
1645 | LeadingKnown = std::max(Known.countMinLeadingZeros(), |
1646 | Known2.countMinLeadingOnes()); |
1647 | |
1648 | Known = KnownBits::computeForAddSub( |
1649 | IsAdd, /* NSW */ false, Known, Known2); |
1650 | |
1651 | // We select between the operation result and all-ones/zero |
1652 | // respectively, so we can preserve known ones/zeros. |
1653 | if (IsAdd) { |
1654 | Known.One.setHighBits(LeadingKnown); |
1655 | Known.Zero.clearAllBits(); |
1656 | } else { |
1657 | Known.Zero.setHighBits(LeadingKnown); |
1658 | Known.One.clearAllBits(); |
1659 | } |
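| // Worked example (informal): for uadd.sat on i8 with one operand known
| // to be >= 0b11000000, the result is >= that operand whether or not it
| // saturates at 0xFF, so its top two one bits are preserved.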
1660 | break; |
1661 | } |
1662 | case Intrinsic::umin: |
1663 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1664 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); |
1665 | Known = KnownBits::umin(Known, Known2); |
1666 | break; |
1667 | case Intrinsic::umax: |
1668 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1669 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); |
1670 | Known = KnownBits::umax(Known, Known2); |
1671 | break; |
1672 | case Intrinsic::smin: |
1673 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1674 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); |
1675 | Known = KnownBits::smin(Known, Known2); |
1676 | break; |
1677 | case Intrinsic::smax: |
1678 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1679 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); |
1680 | Known = KnownBits::smax(Known, Known2); |
1681 | break; |
1682 | case Intrinsic::x86_sse42_crc32_64_64: |
1683 | Known.Zero.setBitsFrom(32); |
1684 | break; |
1685 | } |
1686 | } |
1687 | break; |
1688 | case Instruction::ShuffleVector: { |
1689 | auto *Shuf = dyn_cast<ShuffleVectorInst>(I); |
1690 | // FIXME: Do we need to handle ConstantExpr involving shufflevectors? |
1691 | if (!Shuf) { |
1692 | Known.resetAll(); |
1693 | return; |
1694 | } |
1695 | // For undef elements, we don't know anything about the common state of |
1696 | // the shuffle result. |
1697 | APInt DemandedLHS, DemandedRHS; |
1698 | if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) { |
1699 | Known.resetAll(); |
1700 | return; |
1701 | } |
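| // Sketch (informal): shufflevector <2 x i8> %a, <2 x i8> %b,
| // <2 x i32> <i32 0, i32 3> demands lane 0 of %a and lane 1 of %b; the
| // code below keeps only bits known in every demanded source lane.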
1702 | Known.One.setAllBits(); |
1703 | Known.Zero.setAllBits(); |
1704 | if (!!DemandedLHS) { |
1705 | const Value *LHS = Shuf->getOperand(0); |
1706 | computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q); |
1707 | // If we don't know any bits, early out. |
1708 | if (Known.isUnknown()) |
1709 | break; |
1710 | } |
1711 | if (!!DemandedRHS) { |
1712 | const Value *RHS = Shuf->getOperand(1); |
1713 | computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q); |
1714 | Known = KnownBits::commonBits(Known, Known2); |
1715 | } |
1716 | break; |
1717 | } |
1718 | case Instruction::InsertElement: { |
1719 | const Value *Vec = I->getOperand(0); |
1720 | const Value *Elt = I->getOperand(1); |
1721 | auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2)); |
1722 | // Early out if the index is non-constant or out-of-range. |
1723 | unsigned NumElts = DemandedElts.getBitWidth(); |
1724 | if (!CIdx || CIdx->getValue().uge(NumElts)) { |
1725 | Known.resetAll(); |
1726 | return; |
1727 | } |
1728 | Known.One.setAllBits(); |
1729 | Known.Zero.setAllBits(); |
1730 | unsigned EltIdx = CIdx->getZExtValue(); |
1731 | // Do we demand the inserted element? |
1732 | if (DemandedElts[EltIdx]) { |
1733 | computeKnownBits(Elt, Known, Depth + 1, Q); |
1734 | // If we don't know any bits, early out. |
1735 | if (Known.isUnknown()) |
1736 | break; |
1737 | } |
1738 | // We don't need the base vector element that has been inserted. |
1739 | APInt DemandedVecElts = DemandedElts; |
1740 | DemandedVecElts.clearBit(EltIdx); |
1741 | if (!!DemandedVecElts) { |
1742 | computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q); |
1743 | Known = KnownBits::commonBits(Known, Known2); |
1744 | } |
1745 | break; |
1746 | } |
1747 | case Instruction::ExtractElement: { |
1748 | // Look through extract element. If the index is non-constant or
1749 | // out-of-range, demand all elements; otherwise just the extracted element.
1750 | const Value *Vec = I->getOperand(0); |
1751 | const Value *Idx = I->getOperand(1); |
1752 | auto *CIdx = dyn_cast<ConstantInt>(Idx); |
1753 | if (isa<ScalableVectorType>(Vec->getType())) { |
1754 | // FIXME: there's probably *something* we can do with scalable vectors |
1755 | Known.resetAll(); |
1756 | break; |
1757 | } |
1758 | unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); |
1759 | APInt DemandedVecElts = APInt::getAllOnesValue(NumElts); |
1760 | if (CIdx && CIdx->getValue().ult(NumElts)) |
1761 | DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); |
1762 | computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q); |
1763 | break; |
1764 | } |
1765 | case Instruction::ExtractValue: |
1766 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) { |
1767 | const ExtractValueInst *EVI = cast<ExtractValueInst>(I); |
1768 | if (EVI->getNumIndices() != 1) break; |
1769 | if (EVI->getIndices()[0] == 0) { |
1770 | switch (II->getIntrinsicID()) { |
1771 | default: break; |
1772 | case Intrinsic::uadd_with_overflow: |
1773 | case Intrinsic::sadd_with_overflow: |
1774 | computeKnownBitsAddSub(true, II->getArgOperand(0), |
1775 | II->getArgOperand(1), false, DemandedElts, |
1776 | Known, Known2, Depth, Q); |
1777 | break; |
1778 | case Intrinsic::usub_with_overflow: |
1779 | case Intrinsic::ssub_with_overflow: |
1780 | computeKnownBitsAddSub(false, II->getArgOperand(0), |
1781 | II->getArgOperand(1), false, DemandedElts, |
1782 | Known, Known2, Depth, Q); |
1783 | break; |
1784 | case Intrinsic::umul_with_overflow: |
1785 | case Intrinsic::smul_with_overflow: |
1786 | computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false, |
1787 | DemandedElts, Known, Known2, Depth, Q); |
1788 | break; |
1789 | } |
1790 | } |
1791 | } |
1792 | break; |
1793 | case Instruction::Freeze: |
1794 | if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, |
1795 | Depth + 1)) |
1796 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); |
1797 | break; |
1798 | } |
1799 | } |
1800 | |
1801 | /// Determine which bits of V are known to be either zero or one and return |
1802 | /// them. |
1803 | KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, |
1804 | unsigned Depth, const Query &Q) { |
1805 | KnownBits Known(getBitWidth(V->getType(), Q.DL)); |
1806 | computeKnownBits(V, DemandedElts, Known, Depth, Q); |
1807 | return Known; |
1808 | } |
1809 | |
1810 | /// Determine which bits of V are known to be either zero or one and return |
1811 | /// them. |
1812 | KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) { |
1813 | KnownBits Known(getBitWidth(V->getType(), Q.DL)); |
1814 | computeKnownBits(V, Known, Depth, Q); |
1815 | return Known; |
1816 | } |
1817 | |
1818 | /// Determine which bits of V are known to be either zero or one and return |
1819 | /// them in the Known bit set. |
1820 | /// |
1821 | /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that |
1822 | /// we cannot optimize based on the assumption that it is zero without changing |
1823 | /// it to be an explicit zero. If we don't change it to zero, other code could
1824 | /// be optimized based on the contradictory assumption that it is non-zero.
1825 | /// Because instcombine aggressively folds operations with undef args anyway, |
1826 | /// this won't lose us code quality. |
1827 | /// |
1828 | /// This function is defined on values with integer type, values with pointer |
1829 | /// type, and vectors of integers. In the case |
1830 | /// where V is a vector, the known zero and known one values are the
1831 | /// same width as the vector element, and a bit is set only if it is true
1832 | /// for all of the demanded elements in the vector specified by DemandedElts. |
1833 | void computeKnownBits(const Value *V, const APInt &DemandedElts, |
1834 | KnownBits &Known, unsigned Depth, const Query &Q) { |
1835 | if (!DemandedElts || isa<ScalableVectorType>(V->getType())) { |
1836 | // No demanded elts or V is a scalable vector, better to assume we don't |
1837 | // know anything. |
1838 | Known.resetAll(); |
1839 | return; |
1840 | } |
1841 | |
1842 | assert(V && "No Value?");
1843 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1844 | |
1845 | #ifndef NDEBUG |
1846 | Type *Ty = V->getType(); |
1847 | unsigned BitWidth = Known.getBitWidth(); |
1848 | |
1849 | assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1850 |        "Not integer or pointer type!");
1851 | |
1852 | if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { |
1853 | assert(
1854 |     FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1855 |     "DemandedElt width should equal the fixed vector number of elements");
1856 | } else { |
1857 | assert(DemandedElts == APInt(1, 1) &&
1858 |        "DemandedElt width should be 1 for scalars");
1859 | } |
1860 | |
1861 | Type *ScalarTy = Ty->getScalarType(); |
1862 | if (ScalarTy->isPointerTy()) { |
1863 | assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1864 |        "V and Known should have same BitWidth");
1865 | } else { |
1866 | assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1867 |        "V and Known should have same BitWidth");
1868 | } |
1869 | #endif |
1870 | |
1871 | const APInt *C; |
1872 | if (match(V, m_APInt(C))) { |
1873 | // We know all of the bits for a scalar constant or a splat vector constant! |
1874 | Known = KnownBits::makeConstant(*C); |
1875 | return; |
1876 | } |
1877 | // Null and aggregate-zero are all-zeros. |
1878 | if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) { |
1879 | Known.setAllZero(); |
1880 | return; |
1881 | } |
1882 | // Handle a constant vector by taking the intersection of the known bits of |
1883 | // each element. |
1884 | if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) { |
1885 | // We know that CDV must be a vector of integers. Take the intersection of |
1886 | // each element. |
1887 | Known.Zero.setAllBits(); Known.One.setAllBits(); |
1888 | for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) { |
1889 | if (!DemandedElts[i]) |
1890 | continue; |
1891 | APInt Elt = CDV->getElementAsAPInt(i); |
1892 | Known.Zero &= ~Elt; |
1893 | Known.One &= Elt; |
1894 | } |
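| // E.g. (informal): <4 x i8> <i8 5, i8 7, i8 13, i8 15> with every lane
| // demanded intersects to Known.One = 0b00000101 and
| // Known.Zero = 0b11110000.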
1895 | return; |
1896 | } |
1897 | |
1898 | if (const auto *CV = dyn_cast<ConstantVector>(V)) { |
1899 | // We know that CV must be a vector of integers. Take the intersection of |
1900 | // each element. |
1901 | Known.Zero.setAllBits(); Known.One.setAllBits(); |
1902 | for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { |
1903 | if (!DemandedElts[i]) |
1904 | continue; |
1905 | Constant *Element = CV->getAggregateElement(i); |
1906 | auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); |
1907 | if (!ElementCI) { |
1908 | Known.resetAll(); |
1909 | return; |
1910 | } |
1911 | const APInt &Elt = ElementCI->getValue(); |
1912 | Known.Zero &= ~Elt; |
1913 | Known.One &= Elt; |
1914 | } |
1915 | return; |
1916 | } |
1917 | |
1918 | // Start out not knowing anything. |
1919 | Known.resetAll(); |
1920 | |
1921 | // We can't infer anything about undefs.
1922 | if (isa<UndefValue>(V)) |
1923 | return; |
1924 | |
1925 | // There's no point in looking through other users of ConstantData for |
1926 | // assumptions. Confirm that we've handled them all. |
1927 | assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1928 | |
1929 | // All recursive calls that increase depth must come after this. |
1930 | if (Depth == MaxAnalysisRecursionDepth) |
1931 | return; |
1932 | |
1933 | // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has |
1934 | // the bits of its aliasee. |
1935 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { |
1936 | if (!GA->isInterposable()) |
1937 | computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q); |
1938 | return; |
1939 | } |
1940 | |
1941 | if (const Operator *I = dyn_cast<Operator>(V)) |
1942 | computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q); |
1943 | |
1944 | // Aligned pointers have trailing zeros - refine Known.Zero set |
1945 | if (isa<PointerType>(V->getType())) { |
1946 | Align Alignment = V->getPointerAlignment(Q.DL); |
1947 | Known.Zero.setLowBits(Log2(Alignment)); |
1948 | } |
1949 | |
1950 | // computeKnownBitsFromAssume strictly refines Known. |
1951 | // Therefore, we run them after computeKnownBitsFromOperator. |
1952 | |
1953 | // Check whether a nearby assume intrinsic can determine some known bits. |
1954 | computeKnownBitsFromAssume(V, Known, Depth, Q); |
1955 | |
1956 | assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1957 | } |
1958 | |
1959 | /// Return true if the given value is known to have exactly one |
1960 | /// bit set when defined. For vectors return true if every element is known to |
1961 | /// be a power of two when defined. Supports values with integer or pointer |
1962 | /// types and vectors of integers. |
1963 | bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, |
1964 | const Query &Q) { |
1965 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1966 | |
1967 | // Attempt to match against constants. |
1968 | if (OrZero && match(V, m_Power2OrZero())) |
1969 | return true; |
1970 | if (match(V, m_Power2())) |
1971 | return true; |
1972 | |
1973 | // 1 << X is clearly a power of two if the one is not shifted off the end. If |
1974 | // it is shifted off the end then the result is undefined. |
1975 | if (match(V, m_Shl(m_One(), m_Value()))) |
1976 | return true; |
1977 | |
1978 | // (signmask) >>l X is clearly a power of two if the one is not shifted off |
1979 | // the bottom. If it is shifted off the bottom then the result is undefined. |
1980 | if (match(V, m_LShr(m_SignMask(), m_Value()))) |
1981 | return true; |
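| // E.g. (informal): lshr i8 -128, %x computes 0b10000000 >> %x, which
| // stays a single set bit as long as it is not shifted past bit 0.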
1982 | |
1983 | // The remaining tests are all recursive, so bail out if we hit the limit. |
1984 | if (Depth++ == MaxAnalysisRecursionDepth) |
1985 | return false; |
1986 | |
1987 | Value *X = nullptr, *Y = nullptr; |
1988 | // A shift left or a logical shift right of a power of two is a power of two |
1989 | // or zero. |
1990 | if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || |
1991 | match(V, m_LShr(m_Value(X), m_Value())))) |
1992 | return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); |
1993 | |
1994 | if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) |
1995 | return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); |
1996 | |
1997 | if (const SelectInst *SI = dyn_cast<SelectInst>(V)) |
1998 | return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && |
1999 | isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); |
2000 | |
2001 | if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { |
2002 | // A power of two and'd with anything is a power of two or zero. |
2003 | if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || |
2004 | isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) |
2005 | return true; |
2006 | // X & (-X) is always a power of two or zero. |
2007 | if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) |
2008 | return true; |
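| // E.g. (informal): for X = 12 (0b1100), -X ends in ...0100, so X & -X
| // == 4 isolates the lowest set bit: always a power of two, or zero when
| // X == 0.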
2009 | return false; |
2010 | } |
2011 | |
2012 | // Adding a power-of-two or zero to the same power-of-two or zero yields |
2013 | // either the original power-of-two, a larger power-of-two or zero. |
2014 | if (match(V, m_Add(m_Value(X), m_Value(Y)))) { |
2015 | const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); |
2016 | if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) || |
2017 | Q.IIQ.hasNoSignedWrap(VOBO)) { |
2018 | if (match(X, m_And(m_Specific(Y), m_Value())) || |
2019 | match(X, m_And(m_Value(), m_Specific(Y)))) |
2020 | if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) |
2021 | return true; |
2022 | if (match(Y, m_And(m_Specific(X), m_Value())) || |
2023 | match(Y, m_And(m_Value(), m_Specific(X)))) |
2024 | if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) |
2025 | return true; |
2026 | |
2027 | unsigned BitWidth = V->getType()->getScalarSizeInBits(); |
2028 | KnownBits LHSBits(BitWidth); |
2029 | computeKnownBits(X, LHSBits, Depth, Q); |
2030 | |
2031 | KnownBits RHSBits(BitWidth); |
2032 | computeKnownBits(Y, RHSBits, Depth, Q); |
2033 | // If i8 V is a power of two or zero: |
2034 | // ZeroBits: 1 1 1 0 1 1 1 1 |
2035 | // ~ZeroBits: 0 0 0 1 0 0 0 0 |
2036 | if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) |
2037 | // If OrZero isn't set, we cannot give back a zero result. |
2038 | // Make sure either the LHS or RHS has a bit set. |
2039 | if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) |
2040 | return true; |
2041 | } |
2042 | } |
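| // Sketch of the argument above (informal): if the bits not known zero
| // in both X and Y form a single bit 2^k, then X and Y each evaluate to
| // 0 or 2^k, so X + Y is 0, 2^k, or 2^(k+1). When OrZero is not set, a
| // known one bit in either operand rules out the zero case.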
2043 | |
2044 | // An exact divide or right shift can only shift off zero bits, so the result |
2045 | // is a power of two only if the first operand is a power of two and not |
2046 | // copying a sign bit (sdiv int_min, 2). |
2047 | if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || |
2048 | match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { |
2049 | return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, |
2050 | Depth, Q); |
2051 | } |
2052 | |
2053 | return false; |
2054 | } |
2055 | |
2056 | /// Test whether a GEP's result is known to be non-null. |
2057 | /// |
2058 | /// Uses properties inherent in a GEP to try to determine whether it is known |
2059 | /// to be non-null. |
2060 | /// |
2061 | /// Currently this routine does not support vector GEPs. |
2062 | static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, |
2063 | const Query &Q) { |
2064 | const Function *F = nullptr; |
2065 | if (const Instruction *I = dyn_cast<Instruction>(GEP)) |
2066 | F = I->getFunction(); |
2067 | |
2068 | if (!GEP->isInBounds() || |
2069 | NullPointerIsDefined(F, GEP->getPointerAddressSpace())) |
2070 | return false; |
2071 | |
2072 | // FIXME: Support vector-GEPs. |
2073 | assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2074 | |
2075 | // If the base pointer is non-null, we cannot walk to a null address with an |
2076 | // inbounds GEP in address space zero. |
2077 | if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) |
2078 | return true; |
2079 | |
2080 | // Walk the GEP operands and see if any operand introduces a non-zero offset. |
2081 | // If so, then the GEP cannot produce a null pointer, as doing so would |
2082 | // inherently violate the inbounds contract within address space zero. |
2083 | for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); |
2084 | GTI != GTE; ++GTI) { |
2085 | // Struct types are easy -- they must always be indexed by a constant. |
2086 | if (StructType *STy = GTI.getStructTypeOrNull()) { |
2087 | ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); |
2088 | unsigned ElementIdx = OpC->getZExtValue(); |
2089 | const StructLayout *SL = Q.DL.getStructLayout(STy); |
2090 | uint64_t ElementOffset = SL->getElementOffset(ElementIdx); |
2091 | if (ElementOffset > 0) |
2092 | return true; |
2093 | continue; |
2094 | } |
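| // E.g. (informal): getelementptr inbounds {i32, i32}, {i32, i32}* %p,
| // i64 0, i32 1 adds a constant 4-byte field offset, which an inbounds
| // GEP in address space 0 cannot use to land back on null.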
2095 | |
2096 | // If we have a zero-sized type, the index doesn't matter. Keep looping. |
2097 | if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0) |
2098 | continue; |
2099 | |
2100 | // Fast path the constant operand case both for efficiency and so we don't |
2101 | // increment Depth when just zipping down an all-constant GEP. |
2102 | if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { |
2103 | if (!OpC->isZero()) |
2104 | return true; |
2105 | continue; |
2106 | } |
2107 | |
2108 | // We post-increment Depth here because while isKnownNonZero increments it |
2109 | // as well, when we pop back up that increment won't persist. We don't want |
2110 | // to recurse 10k times just because we have 10k GEP operands. We don't |
2111 | // bail completely out because we want to handle constant GEPs regardless |
2112 | // of depth. |
2113 | if (Depth++ >= MaxAnalysisRecursionDepth) |
2114 | continue; |
2115 | |
2116 | if (isKnownNonZero(GTI.getOperand(), Depth, Q)) |
2117 | return true; |
2118 | } |
2119 | |
2120 | return false; |
2121 | } |
2122 | |
2123 | static bool isKnownNonNullFromDominatingCondition(const Value *V, |
2124 | const Instruction *CtxI, |
2125 | const DominatorTree *DT) { |
2126 | if (isa<Constant>(V)) |
2127 | return false; |
2128 | |
2129 | if (!CtxI || !DT) |
2130 | return false; |
2131 | |
2132 | unsigned NumUsesExplored = 0; |
2133 | for (auto *U : V->users()) { |
2134 | // Avoid massive lists |
2135 | if (NumUsesExplored >= DomConditionsMaxUses) |
2136 | break; |
2137 | NumUsesExplored++; |
2138 | |
2139 | // If the value is used as an argument to a call or invoke, then argument |
2140 | // attributes may provide an answer about null-ness. |
2141 | if (const auto *CB = dyn_cast<CallBase>(U)) |
2142 | if (auto *CalledFunc = CB->getCalledFunction()) |
2143 | for (const Argument &Arg : CalledFunc->args()) |
2144 | if (CB->getArgOperand(Arg.getArgNo()) == V && |
2145 | Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) && |
2146 | DT->dominates(CB, CtxI)) |
2147 | return true; |
2148 | |
2150 | // If the value is used as the pointer operand of a load or store, it must be non-null.
2150 | if (V == getLoadStorePointerOperand(U)) { |
2151 | const Instruction *I = cast<Instruction>(U); |
2152 | if (!NullPointerIsDefined(I->getFunction(), |
2153 | V->getType()->getPointerAddressSpace()) && |
2154 | DT->dominates(I, CtxI)) |
2155 | return true; |
2156 | } |
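| // E.g. (informal): a dominating "store i32 0, i32* %p" lets %p be
| // treated as non-null at CtxI whenever null is not a defined address in
| // %p's address space.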
2157 | |
2158 | // Consider only compare instructions uniquely controlling a branch |
2159 | Value *RHS; |
2160 | CmpInst::Predicate Pred; |
2161 | if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS)))) |
2162 | continue; |
2163 | |
2164 | bool NonNullIfTrue; |
2165 | if (cmpExcludesZero(Pred, RHS)) |
2166 | NonNullIfTrue = true; |
2167 | else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS)) |
2168 | NonNullIfTrue = false; |
2169 | else |
2170 | continue; |
2171 | |
2172 | SmallVector<const User *, 4> WorkList; |
2173 | SmallPtrSet<const User *, 4> Visited; |
2174 | for (auto *CmpU : U->users()) { |
2175 | assert(WorkList.empty() && "Should be!");
2176 | if (Visited.insert(CmpU).second) |
2177 | WorkList.push_back(CmpU); |
2178 | |
2179 | while (!WorkList.empty()) { |
2180 | auto *Curr = WorkList.pop_back_val(); |
2181 | |
2182 | // If a user is an AND, add all its users to the work list. We only
2183 | // propagate the "pred != null" condition through ANDs because it is
2184 | // only correct to assume that all conditions of an AND hold in the true branch.
2185 | // TODO: Support similar logic for OR and the EQ predicate?
2186 | if (NonNullIfTrue) |
2187 | if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) { |
2188 | for (auto *CurrU : Curr->users()) |
2189 | if (Visited.insert(CurrU).second) |
2190 | WorkList.push_back(CurrU); |
2191 | continue; |
2192 | } |
2193 | |
2194 | if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) { |
2195 | assert(BI->isConditional() && "uses a comparison!");
2196 | |
2197 | BasicBlock *NonNullSuccessor = |
2198 | BI->getSuccessor(NonNullIfTrue ? 0 : 1); |
2199 | BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); |
2200 | if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) |
2201 | return true; |
2202 | } else if (NonNullIfTrue && isGuard(Curr) && |
2203 | DT->dominates(cast<Instruction>(Curr), CtxI)) { |
2204 | return true; |
2205 | } |
2206 | } |
2207 | } |
2208 | } |
2209 | |
2210 | return false; |
2211 | } |
2212 | |
2213 | /// Does the 'Range' metadata (which must be a valid MD_range operand list) |
2214 | /// ensure that the value it's attached to can never be equal to Value?
2216 | static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) { |
2217 | const unsigned NumRanges = Ranges->getNumOperands() / 2; |
2218 | assert(NumRanges >= 1);
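| // E.g. (informal): !range metadata encoding the ranges [1, 10) and
| // [20, 30) excludes 0, so this returns true for Value == 0, while a
| // query for Value == 5 finds it contained and returns false.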
2219 | for (unsigned i = 0; i < NumRanges; ++i) { |
2220 | ConstantInt *Lower = |
2221 | mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0)); |
2222 | ConstantInt *Upper = |
2223 | mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1)); |
2224 | ConstantRange Range(Lower->getValue(), Upper->getValue()); |
2225 | if (Range.contains(Value)) |
2226 | return false; |
2227 | } |
2228 | return true; |
2229 | } |
2230 | |
2231 | /// Return true if the given value is known to be non-zero when defined. For |
2232 | /// vectors, return true if every demanded element is known to be non-zero when |
2233 | /// defined. For pointers, if the context instruction and dominator tree are |
2234 | /// specified, perform context-sensitive analysis and return true if the |
2235 | /// pointer couldn't possibly be null at the specified instruction. |
2236 | /// Supports values with integer or pointer type and vectors of integers. |
2237 | bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth, |
2238 | const Query &Q) { |
2239 | // FIXME: We currently have no way to represent the DemandedElts of a scalable |
2240 | // vector |
2241 | if (isa<ScalableVectorType>(V->getType())) |
2242 | return false; |
2243 | |
2244 | if (auto *C = dyn_cast<Constant>(V)) { |
2245 | if (C->isNullValue()) |
2246 | return false; |
2247 | if (isa<ConstantInt>(C)) |
2248 | // Must be non-zero due to null test above. |
2249 | return true; |
2250 | |
2251 | if (auto *CE = dyn_cast<ConstantExpr>(C)) { |
2252 | // See the comment for IntToPtr/PtrToInt instructions below. |
2253 | if (CE->getOpcode() == Instruction::IntToPtr || |
2254 | CE->getOpcode() == Instruction::PtrToInt) |
2255 | if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) |
2256 | .getFixedSize() <= |
2257 | Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize()) |
2258 | return isKnownNonZero(CE->getOperand(0), Depth, Q); |
2259 | } |
2260 | |
2261 | // For constant vectors, check that all elements are undefined or known |
2262 | // non-zero to determine that the whole vector is known non-zero. |
2263 | if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) { |
2264 | for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { |
2265 | if (!DemandedElts[i]) |
2266 | continue; |
2267 | Constant *Elt = C->getAggregateElement(i); |
2268 | if (!Elt || Elt->isNullValue()) |
2269 | return false; |
2270 | if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt)) |
2271 | return false; |
2272 | } |
2273 | return true; |
2274 | } |
2275 | |
2276 | // A global variable in address space 0 is non-null unless it is extern weak
2277 | // or an absolute symbol reference. Other address spaces may have null as a
2278 | // valid address for a global, so we can't assume anything. |
2279 | if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { |
2280 | if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && |
2281 | GV->getType()->getAddressSpace() == 0) |
2282 | return true; |
2283 | } else |
2284 | return false; |
2285 | } |
2286 | |
2287 | if (auto *I = dyn_cast<Instruction>(V)) { |
2288 | if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) { |
2289 | // If the possible ranges don't contain zero, then the value is |
2290 | // definitely non-zero. |
2291 | if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { |
2292 | const APInt ZeroValue(Ty->getBitWidth(), 0); |
2293 | if (rangeMetadataExcludesValue(Ranges, ZeroValue)) |
2294 | return true; |
2295 | } |
2296 | } |
2297 | } |
2298 | |
2299 | if (isKnownNonZeroFromAssume(V, Q)) |
2300 | return true; |
2301 | |
2302 | // Some of the tests below are recursive, so bail out if we hit the limit. |
2303 | if (Depth++ >= MaxAnalysisRecursionDepth) |
2304 | return false; |
2305 | |
2306 | // Check for pointer simplifications. |
2307 | |
2308 | if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) { |
2309 | // Alloca never returns null, malloc might. |
2310 | if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0) |
2311 | return true; |
2312 | |
2313 | // A byval or inalloca argument may not be null in a non-default address
2314 | // space. A nonnull argument is assumed never 0.
2315 | if (const Argument *A = dyn_cast<Argument>(V)) { |
2316 | if (((A->hasPassPointeeByValueCopyAttr() && |
2317 | !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) || |
2318 | A->hasNonNullAttr())) |
2319 | return true; |
2320 | } |
2321 | |
2322 | // A Load tagged with nonnull metadata is never null. |
2323 | if (const LoadInst *LI = dyn_cast<LoadInst>(V)) |
2324 | if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull)) |
2325 | return true; |
2326 | |
2327 | if (const auto *Call = dyn_cast<CallBase>(V)) { |
2328 | if (Call->isReturnNonNull()) |
2329 | return true; |
2330 | if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true)) |
2331 | return isKnownNonZero(RP, Depth, Q); |
2332 | } |
2333 | } |
2334 | |
2335 | if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) |
2336 | return true; |
2337 | |
2338 | // Check for recursive pointer simplifications. |
2339 | if (V->getType()->isPointerTy()) { |
2340 | // Look through bitcast operations, GEPs, and int2ptr instructions as they |
2341 | // do not alter the value, or at least not the nullness property of the |
2342 | // value, e.g., int2ptr is allowed to zero/sign extend the value. |
2343 | // |
2344 | // Note that we have to take special care to avoid looking through |
2345 | // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well |
2346 | // as casts that can alter the value, e.g., AddrSpaceCasts. |
2347 | if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) |
2348 | return isGEPKnownNonNull(GEP, Depth, Q); |
2349 | |
2350 | if (auto *BCO = dyn_cast<BitCastOperator>(V)) |
2351 | return isKnownNonZero(BCO->getOperand(0), Depth, Q); |
2352 | |
2353 | if (auto *I2P = dyn_cast<IntToPtrInst>(V)) |
2354 | if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <= |
2355 | Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize()) |
2356 | return isKnownNonZero(I2P->getOperand(0), Depth, Q); |
2357 | } |
2358 | |
2359 | // Similar to int2ptr above, we can look through ptr2int here if the cast |
2360 | // is a no-op or an extend and not a truncate. |
2361 | if (auto *P2I = dyn_cast<PtrToIntInst>(V)) |
2362 | if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <= |
2363 | Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize()) |
2364 | return isKnownNonZero(P2I->getOperand(0), Depth, Q); |
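| // E.g. (informal): %i = ptrtoint i8* %p to i64 is non-zero whenever %p
| // is, since a same-width or widening cast cannot drop set bits.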
2365 | |
2366 | unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); |
2367 | |
2368 | // X | Y != 0 if X != 0 or Y != 0. |
2369 | Value *X = nullptr, *Y = nullptr; |
2370 | if (match(V, m_Or(m_Value(X), m_Value(Y)))) |
2371 | return isKnownNonZero(X, DemandedElts, Depth, Q) || |
2372 | isKnownNonZero(Y, DemandedElts, Depth, Q); |
2373 | |
2374 | // ext X != 0 if X != 0. |
2375 | if (isa<SExtInst>(V) || isa<ZExtInst>(V)) |
2376 | return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); |
2377 | |
2378 | // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined |
2379 | // if the lowest bit is shifted off the end. |
2380 | if (match(V, m_Shl(m_Value(X), m_Value(Y)))) { |
2381 | // shl nuw can't remove any non-zero bits. |
2382 | const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); |
2383 | if (Q.IIQ.hasNoUnsignedWrap(BO)) |
2384 | return isKnownNonZero(X, Depth, Q); |
2385 | |
2386 | KnownBits Known(BitWidth); |
2387 | computeKnownBits(X, DemandedElts, Known, Depth, Q); |
2388 | if (Known.One[0]) |
2389 | return true; |
2390 | } |
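     | // NOTE (illustrative annotation, not in the upstream source): a worked
     | // case for the odd-X reasoning above:
     | //   %v = shl i8 %x, %y
     | // If bit 0 of %x is known one, %x is odd; any in-range shift keeps a set
     | // bit in the result, e.g. shl i8 3, 2 == 12 != 0, and an out-of-range
     | // shift amount makes the shift value undefined anyway.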
2391 | // shr X, Y != 0 if X is negative. Note that the value of the shift is not |
2392 | // defined if the sign bit is shifted off the end. |
2393 | else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { |
2394 | // shr exact can only shift out zero bits. |
2395 | const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); |
2396 | if (BO->isExact()) |
2397 | return isKnownNonZero(X, Depth, Q); |
2398 | |
2399 | KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q); |
2400 | if (Known.isNegative()) |
2401 | return true; |
2402 | |
2403 | // If the shifter operand is a constant, and all of the bits shifted |
2404 | // out are known to be zero, and X is known non-zero then at least one |
2405 | // non-zero bit must remain. |
2406 | if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { |
2407 | auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); |
2408 | // Is there a known one in the portion not shifted out? |
2409 | if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal) |
2410 | return true; |
2411 | // Are all the bits to be shifted out known zero? |
2412 | if (Known.countMinTrailingZeros() >= ShiftVal) |
2413 | return isKnownNonZero(X, DemandedElts, Depth, Q); |
2414 | } |
2415 | } |
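     | // NOTE (illustrative annotation, not in the upstream source): for
     | //   %v = lshr i8 %x, 2
     | // with %x known non-zero and its low two bits known zero, no set bit is
     | // shifted out, so a set bit survives: lshr i8 12, 2 == 3 != 0.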
2416 | // div exact can only produce a zero if the dividend is zero. |
2417 | else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { |
2418 | return isKnownNonZero(X, DemandedElts, Depth, Q); |
2419 | } |
2420 | // X + Y. |
2421 | else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { |
2422 | KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); |
2423 | KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); |
2424 | |
2425 | // If X and Y are both non-negative (as signed values) then their sum is not |
2426 | // zero unless both X and Y are zero. |
2427 | if (XKnown.isNonNegative() && YKnown.isNonNegative()) |
2428 | if (isKnownNonZero(X, DemandedElts, Depth, Q) || |
2429 | isKnownNonZero(Y, DemandedElts, Depth, Q)) |
2430 | return true; |
2431 | |
2432 | // If X and Y are both negative (as signed values) then their sum is not |
2433 | // zero unless both X and Y equal INT_MIN. |
2434 | if (XKnown.isNegative() && YKnown.isNegative()) { |
2435 | APInt Mask = APInt::getSignedMaxValue(BitWidth); |
2436 | // The sign bit of X is set. If some other bit is set then X is not equal |
2437 | // to INT_MIN. |
2438 | if (XKnown.One.intersects(Mask)) |
2439 | return true; |
2440 | // The sign bit of Y is set. If some other bit is set then Y is not equal |
2441 | // to INT_MIN. |
2442 | if (YKnown.One.intersects(Mask)) |
2443 | return true; |
2444 | } |
2445 | |
2446 | // The sum of a non-negative number and a power of two is not zero. |
2447 | if (XKnown.isNonNegative() && |
2448 | isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) |
2449 | return true; |
2450 | if (YKnown.isNonNegative() && |
2451 | isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) |
2452 | return true; |
2453 | } |
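     | // NOTE (illustrative annotation, not in the upstream source): with i8
     | // operands, 0x40 + 0x01 (both non-negative, one non-zero) cannot reach 0
     | // because two non-negative i8 values sum to at most 0xFE without
     | // wrapping; 0x81 + 0x80 (both negative, 0x81 != INT_MIN) cannot reach 0
     | // either, since only INT_MIN + INT_MIN wraps around to exactly 0.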
2454 | // X * Y. |
2455 | else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { |
2456 | const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); |
2457 | // If X and Y are non-zero then so is X * Y as long as the multiplication |
2458 | // does not overflow. |
2459 | if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) && |
2460 | isKnownNonZero(X, DemandedElts, Depth, Q) && |
2461 | isKnownNonZero(Y, DemandedElts, Depth, Q)) |
2462 | return true; |
2463 | } |
2464 | // (C ? X : Y) != 0 if X != 0 and Y != 0. |
2465 | else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { |
2466 | if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) && |
2467 | isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q)) |
2468 | return true; |
2469 | } |
2470 | // PHI |
2471 | else if (const PHINode *PN = dyn_cast<PHINode>(V)) { |
2472 | // Try to detect a recurrence that monotonically increases from a
2473 | // starting value, as these are common as induction variables. |
2474 | if (PN->getNumIncomingValues() == 2) { |
2475 | Value *Start = PN->getIncomingValue(0); |
2476 | Value *Induction = PN->getIncomingValue(1); |
2477 | if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start)) |
2478 | std::swap(Start, Induction); |
2479 | if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) { |
2480 | if (!C->isZero() && !C->isNegative()) { |
2481 | ConstantInt *X; |
2482 | if (Q.IIQ.UseInstrInfo && |
2483 | (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) || |
2484 | match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) && |
2485 | !X->isNegative()) |
2486 | return true; |
2487 | } |
2488 | } |
2489 | } |
2490 | // Check if all incoming values are non-zero using recursion. |
2491 | Query RecQ = Q; |
2492 | unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); |
2493 | return llvm::all_of(PN->operands(), [&](const Use &U) { |
2494 | if (U.get() == PN) |
2495 | return true; |
2496 | RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); |
2497 | return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ); |
2498 | }); |
2499 | } |
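     | // NOTE (illustrative annotation, not in the upstream source): the
     | // recurrence check above matches a canonical induction variable such as
     | //   %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
     | //   %iv.next = add nuw i32 %iv, 1
     | // which starts at a non-zero, non-negative constant and steps by a
     | // non-negative nuw/nsw amount, so it can never wrap back to zero.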
2500 | // ExtractElement |
2501 | else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) { |
2502 | const Value *Vec = EEI->getVectorOperand(); |
2503 | const Value *Idx = EEI->getIndexOperand(); |
2504 | auto *CIdx = dyn_cast<ConstantInt>(Idx); |
2505 | if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { |
2506 | unsigned NumElts = VecTy->getNumElements(); |
2507 | APInt DemandedVecElts = APInt::getAllOnesValue(NumElts); |
2508 | if (CIdx && CIdx->getValue().ult(NumElts)) |
2509 | DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); |
2510 | return isKnownNonZero(Vec, DemandedVecElts, Depth, Q); |
2511 | } |
2512 | } |
2513 | // Freeze |
2514 | else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) { |
2515 | auto *Op = FI->getOperand(0); |
2516 | if (isKnownNonZero(Op, Depth, Q) && |
2517 | isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth)) |
2518 | return true; |
2519 | } |
2520 | |
2521 | KnownBits Known(BitWidth); |
2522 | computeKnownBits(V, DemandedElts, Known, Depth, Q); |
2523 | return Known.One != 0; |
2524 | } |
2525 | |
2526 | bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) { |
2527 | // FIXME: We currently have no way to represent the DemandedElts of a scalable |
2528 | // vector |
2529 | if (isa<ScalableVectorType>(V->getType())) |
2530 | return false; |
2531 | |
2532 | auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); |
2533 | APInt DemandedElts = |
2534 | FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1); |
2535 | return isKnownNonZero(V, DemandedElts, Depth, Q); |
2536 | } |
2537 | |
2538 | /// Return true if V1 == V2 + X, where X is known non-zero.
2539 | static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth, |
2540 | const Query &Q) { |
2541 | const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); |
2542 | if (!BO || BO->getOpcode() != Instruction::Add) |
2543 | return false; |
2544 | Value *Op = nullptr; |
2545 | if (V2 == BO->getOperand(0)) |
2546 | Op = BO->getOperand(1); |
2547 | else if (V2 == BO->getOperand(1)) |
2548 | Op = BO->getOperand(0); |
2549 | else |
2550 | return false; |
2551 | return isKnownNonZero(Op, Depth + 1, Q); |
2552 | } |
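     | // NOTE (illustrative annotation, not in the upstream source): given
     | //   %v1 = add i32 %v2, %x
     | // with %x known non-zero, this returns true, which lets the caller in
     | // isKnownNonEqual conclude %v1 != %v2.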
2553 | |
2554 | |
2555 | /// Return true if it is known that V1 != V2. |
2556 | static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth, |
2557 | const Query &Q) { |
2558 | if (V1 == V2) |
2559 | return false; |
2560 | if (V1->getType() != V2->getType()) |
2561 | // We can't look through casts yet. |
2562 | return false; |
2563 | |
2564 | if (Depth >= MaxAnalysisRecursionDepth) |
2565 | return false; |
2566 | |
2567 | // See if we can recurse through (exactly one of) our operands. This |
2568 | // requires our operation be 1-to-1 and map every input value to exactly |
2569 | // one output value. Such an operation is invertible. |
2570 | auto *O1 = dyn_cast<Operator>(V1); |
2571 | auto *O2 = dyn_cast<Operator>(V2); |
2572 | if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) { |
2573 | switch (O1->getOpcode()) { |
2574 | default: break; |
2575 | case Instruction::Add: |
2576 | case Instruction::Sub: |
2577 | // Assume operand order has been canonicalized |
2578 | if (O1->getOperand(0) == O2->getOperand(0)) |
2579 | return isKnownNonEqual(O1->getOperand(1), O2->getOperand(1), |
2580 | Depth + 1, Q); |
2581 | if (O1->getOperand(1) == O2->getOperand(1)) |
2582 | return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0), |
2583 | Depth + 1, Q); |
2584 | break; |
2585 | case Instruction::Mul: { |
2586 | // Mul is invertible if A * B == (A * B) mod 2^N, where A and B are
2587 | // integers and N is the bitwidth. The nsw case is non-obvious, but proven
2588 | // by alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2589 | auto *OBO1 = cast<OverflowingBinaryOperator>(O1); |
2590 | auto *OBO2 = cast<OverflowingBinaryOperator>(O2); |
2591 | if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && |
2592 | (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) |
2593 | break; |
2594 | |
2595 | // Assume operand order has been canonicalized |
2596 | if (O1->getOperand(1) == O2->getOperand(1) && |
2597 | isa<ConstantInt>(O1->getOperand(1)) && |
2598 | !cast<ConstantInt>(O1->getOperand(1))->isZero()) |
2599 | return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0), |
2600 | Depth + 1, Q); |
2601 | break; |
2602 | } |
2603 | case Instruction::SExt: |
2604 | case Instruction::ZExt: |
2605 | if (O1->getOperand(0)->getType() == O2->getOperand(0)->getType()) |
2606 | return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0), |
2607 | Depth + 1, Q); |
2608 | break; |
2609 | }
2610 | } |
2611 | |
2612 | if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q)) |
2613 | return true; |
2614 | |
2615 | if (V1->getType()->isIntOrIntVectorTy()) { |
2616 | // Are any known bits in V1 contradictory to known bits in V2? If V1 |
2617 | // has a known zero where V2 has a known one, they must not be equal. |
2618 | KnownBits Known1 = computeKnownBits(V1, Depth, Q); |
2619 | KnownBits Known2 = computeKnownBits(V2, Depth, Q); |
2620 | |
2621 | if (Known1.Zero.intersects(Known2.One) || |
2622 | Known2.Zero.intersects(Known1.One)) |
2623 | return true; |
2624 | } |
2625 | return false; |
2626 | } |
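     | // NOTE (illustrative annotation, not in the upstream source): for the
     | // known-bits check above, V1 = and i32 %a, -9 has bit 3 known zero while
     | // V2 = or i32 %b, 8 has bit 3 known one, so the two values differ in
     | // bit 3 and cannot be equal.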
2627 | |
2628 | /// Return true if 'V & Mask' is known to be zero. We use this predicate to |
2629 | /// simplify operations downstream. Mask is known to be zero for bits that V |
2630 | /// cannot have. |
2631 | /// |
2632 | /// This function is defined on values with integer type, values with pointer |
2633 | /// type, and vectors of integers. In the case |
2634 | /// where V is a vector, the mask, known zero, and known one values are the |
2635 | /// same width as the vector element, and the bit is set only if it is true |
2636 | /// for all of the elements in the vector. |
2637 | bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, |
2638 | const Query &Q) { |
2639 | KnownBits Known(Mask.getBitWidth()); |
2640 | computeKnownBits(V, Known, Depth, Q); |
2641 | return Mask.isSubsetOf(Known.Zero); |
2642 | } |
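     | // NOTE (illustrative annotation, not in the upstream source): for
     | // V = and i32 %x, 7 and Mask = 0xFFFFFFF8, the known bits of V have the
     | // top 29 bits zero, Mask.isSubsetOf(Known.Zero) holds, and the function
     | // returns true.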
2643 | |
2644 | // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow). |
2645 | // Returns the input and lower/upper bounds. |
2646 | static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, |
2647 | const APInt *&CLow, const APInt *&CHigh) { |
2648 | assert(isa<Operator>(Select) &&
2649 | cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2650 | "Input should be a Select!");
2651 | |
2652 | const Value *LHS = nullptr, *RHS = nullptr; |
2653 | SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor; |
2654 | if (SPF != SPF_SMAX && SPF != SPF_SMIN) |
2655 | return false; |
2656 | |
2657 | if (!match(RHS, m_APInt(CLow))) |
2658 | return false; |
2659 | |
2660 | const Value *LHS2 = nullptr, *RHS2 = nullptr; |
2661 | SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor; |
2662 | if (getInverseMinMaxFlavor(SPF) != SPF2) |
2663 | return false; |
2664 | |
2665 | if (!match(RHS2, m_APInt(CHigh))) |
2666 | return false; |
2667 | |
2668 | if (SPF == SPF_SMIN) |
2669 | std::swap(CLow, CHigh); |
2670 | |
2671 | In = LHS2; |
2672 | return CLow->sle(*CHigh); |
2673 | } |
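     | // NOTE (illustrative annotation, not in the upstream source): a clamp of
     | // %x to [-16, 15] that this helper matches looks like
     | //   %c1  = icmp slt i8 %x, 15
     | //   %min = select i1 %c1, i8 %x, i8 15        ; smin(%x, 15)
     | //   %c2  = icmp sgt i8 %min, -16
     | //   %r   = select i1 %c2, i8 %min, i8 -16     ; smax(%min, -16)
     | // yielding In = %x, CLow = -16, CHigh = 15.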
2674 | |
2675 | /// For vector constants, loop over the elements and find the constant with the |
2676 | /// minimum number of sign bits. Return 0 if the value is not a vector constant |
2677 | /// or if any element was not analyzed; otherwise, return the count for the |
2678 | /// element with the minimum number of sign bits. |
2679 | static unsigned computeNumSignBitsVectorConstant(const Value *V, |
2680 | const APInt &DemandedElts, |
2681 | unsigned TyBits) { |
2682 | const auto *CV = dyn_cast<Constant>(V); |
2683 | if (!CV || !isa<FixedVectorType>(CV->getType())) |
2684 | return 0; |
2685 | |
2686 | unsigned MinSignBits = TyBits; |
2687 | unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements(); |
2688 | for (unsigned i = 0; i != NumElts; ++i) { |
2689 | if (!DemandedElts[i]) |
2690 | continue; |
2691 | // If we find a non-ConstantInt, bail out. |
2692 | auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); |
2693 | if (!Elt) |
2694 | return 0; |
2695 | |
2696 | MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); |
2697 | } |
2698 | |
2699 | return MinSignBits; |
2700 | } |
2701 | |
2702 | static unsigned ComputeNumSignBitsImpl(const Value *V, |
2703 | const APInt &DemandedElts, |
2704 | unsigned Depth, const Query &Q); |
2705 | |
2706 | static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, |
2707 | unsigned Depth, const Query &Q) { |
2708 | unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q); |
2709 | assert(Result > 0 && "At least one sign bit needs to be present!");
2710 | return Result; |
2711 | } |
2712 | |
2713 | /// Return the number of times the sign bit of the register is replicated into |
2714 | /// the other bits. We know that at least 1 bit is always equal to the sign bit |
2715 | /// (itself), but other cases can give us information. For example, immediately |
2716 | /// after an "ashr X, 2", we know that the top 3 bits are all equal to each |
2717 | /// other, so we return 3. For vectors, return the number of sign bits for the |
2718 | /// vector element with the minimum number of known sign bits of the demanded |
2719 | /// elements in the vector specified by DemandedElts. |
2720 | static unsigned ComputeNumSignBitsImpl(const Value *V, |
2721 | const APInt &DemandedElts, |
2722 | unsigned Depth, const Query &Q) { |
2723 | Type *Ty = V->getType(); |
2724 | |
2725 | // FIXME: We currently have no way to represent the DemandedElts of a scalable |
2726 | // vector |
2727 | if (isa<ScalableVectorType>(Ty)) |
2728 | return 1; |
2729 | |
2730 | #ifndef NDEBUG |
2731 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2732 | |
2733 | if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { |
2734 | assert(
2735 | FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2736 | "DemandedElt width should equal the fixed vector number of elements");
2737 | } else { |
2738 | assert(DemandedElts == APInt(1, 1) &&
2739 | "DemandedElt width should be 1 for scalars");
2740 | } |
2741 | #endif |
2742 | |
2743 | // We return the minimum number of sign bits that are guaranteed to be present |
2744 | // in V, so for undef we have to conservatively return 1. We don't have the |
2745 | // same behavior for poison though -- that's a FIXME today. |
2746 | |
2747 | Type *ScalarTy = Ty->getScalarType(); |
2748 | unsigned TyBits = ScalarTy->isPointerTy() ? |
2749 | Q.DL.getPointerTypeSizeInBits(ScalarTy) : |
2750 | Q.DL.getTypeSizeInBits(ScalarTy); |
2751 | |
2752 | unsigned Tmp, Tmp2; |
2753 | unsigned FirstAnswer = 1; |
2754 | |
2755 | // Note that ConstantInt is handled by the general computeKnownBits case |
2756 | // below. |
2757 | |
2758 | if (Depth == MaxAnalysisRecursionDepth) |
2759 | return 1; |
2760 | |
2761 | if (auto *U = dyn_cast<Operator>(V)) { |
2762 | switch (Operator::getOpcode(V)) { |
2763 | default: break; |
2764 | case Instruction::SExt: |
2765 | Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); |
2766 | return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp; |
2767 | |
2768 | case Instruction::SDiv: { |
2769 | const APInt *Denominator; |
2770 | // sdiv X, C -> adds log(C) sign bits. |
2771 | if (match(U->getOperand(1), m_APInt(Denominator))) { |
2772 | |
2773 | // Ignore non-positive denominator. |
2774 | if (!Denominator->isStrictlyPositive()) |
2775 | break; |
2776 | |
2777 | // Calculate the incoming numerator bits. |
2778 | unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
2779 | |
2780 | // Add floor(log(C)) bits to the numerator bits. |
2781 | return std::min(TyBits, NumBits + Denominator->logBase2()); |
2782 | } |
2783 | break; |
2784 | } |
2785 | |
2786 | case Instruction::SRem: { |
2787 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
2788 | |
2789 | const APInt *Denominator; |
2790 | // srem X, C -> we know that the result is within [-C+1,C) when C is a |
2791 | // positive constant. This lets us put a lower bound on the number of sign
2792 | // bits. |
2793 | if (match(U->getOperand(1), m_APInt(Denominator))) { |
2794 | |
2795 | // Ignore non-positive denominator. |
2796 | if (Denominator->isStrictlyPositive()) { |
2797 | // Calculate the leading sign bit constraints by examining the |
2798 | // denominator. Given that the denominator is positive, there are two |
2799 | // cases: |
2800 | // |
2801 | // 1. The numerator is positive. The result range is [0,C) and |
2802 | // [0,C) u< (1 << ceilLogBase2(C)). |
2803 | // |
2804 | // 2. The numerator is negative. Then the result range is (-C,0] and |
2805 | // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). |
2806 | // |
2807 | // Thus a lower bound on the number of sign bits is `TyBits - |
2808 | // ceilLogBase2(C)`. |
2809 | |
2810 | unsigned ResBits = TyBits - Denominator->ceilLogBase2(); |
2811 | Tmp = std::max(Tmp, ResBits); |
2812 | } |
2813 | } |
2814 | return Tmp; |
2815 | } |
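     | // NOTE (illustrative annotation, not in the upstream source): for
     | // srem i8 %x, 10 the result lies in (-10, 10); ceilLogBase2(10) == 4, so
     | // at most the low 4 bits vary and the top 8 - 4 == 4 bits all equal the
     | // sign bit, giving Tmp >= 4.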
2816 | |
2817 | case Instruction::AShr: { |
2818 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
2819 | // ashr X, C -> adds C sign bits. Vectors too. |
2820 | const APInt *ShAmt; |
2821 | if (match(U->getOperand(1), m_APInt(ShAmt))) { |
2822 | if (ShAmt->uge(TyBits)) |
2823 | break; // Bad shift. |
2824 | unsigned ShAmtLimited = ShAmt->getZExtValue(); |
2825 | Tmp += ShAmtLimited; |
2826 | if (Tmp > TyBits) Tmp = TyBits; |
2827 | } |
2828 | return Tmp; |
2829 | } |
2830 | case Instruction::Shl: { |
2831 | const APInt *ShAmt; |
2832 | if (match(U->getOperand(1), m_APInt(ShAmt))) { |
2833 | // shl destroys sign bits. |
2834 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
2835 | if (ShAmt->uge(TyBits) || // Bad shift. |
2836 | ShAmt->uge(Tmp)) break; // Shifted all sign bits out. |
2837 | Tmp2 = ShAmt->getZExtValue(); |
2838 | return Tmp - Tmp2; |
2839 | } |
2840 | break; |
2841 | } |
2842 | case Instruction::And: |
2843 | case Instruction::Or: |
2844 | case Instruction::Xor: // NOT is handled here. |
2845 | // Logical binary ops preserve the number of sign bits at the worst. |
2846 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
2847 | if (Tmp != 1) { |
2848 | Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); |
2849 | FirstAnswer = std::min(Tmp, Tmp2); |
2850 | // We computed what we know about the sign bits as our first |
2851 | // answer. Now proceed to the generic code that uses |
2852 | // computeKnownBits, and pick whichever answer is better. |
2853 | } |
2854 | break; |
2855 | |
2856 | case Instruction::Select: { |
2857 | // If we have a clamp pattern, we know that the number of sign bits will |
2858 | // be the minimum of the clamp min/max range. |
2859 | const Value *X; |
2860 | const APInt *CLow, *CHigh; |
2861 | if (isSignedMinMaxClamp(U, X, CLow, CHigh)) |
2862 | return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); |
2863 | |
2864 | Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); |
2865 | if (Tmp == 1) break; |
2866 | Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); |
2867 | return std::min(Tmp, Tmp2); |
2868 | } |
2869 | |
2870 | case Instruction::Add: |
2871 | // Add can have at most one carry bit. Thus we know that the output |
2872 | // is, at worst, one more bit than the inputs. |
2873 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
2874 | if (Tmp == 1) break; |
2875 | |
2876 | // Special case decrementing a value (ADD X, -1): |
2877 | if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) |
2878 | if (CRHS->isAllOnesValue()) { |
2879 | KnownBits Known(TyBits); |
2880 | computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); |
2881 | |
2882 | // If the input is known to be 0 or 1, the output is 0/-1, which is |
2883 | // all sign bits set. |
2884 | if ((Known.Zero | 1).isAllOnesValue()) |
2885 | return TyBits; |
2886 | |
2887 | // If we are subtracting one from a positive number, there is no carry |
2888 | // out of the result. |
2889 | if (Known.isNonNegative()) |
2890 | return Tmp; |
2891 | } |
2892 | |
2893 | Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); |
2894 | if (Tmp2 == 1) break; |
2895 | return std::min(Tmp, Tmp2) - 1; |
2896 | |
2897 | case Instruction::Sub: |
2898 | Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); |
2899 | if (Tmp2 == 1) break; |
2900 | |
2901 | // Handle NEG. |
2902 | if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) |
2903 | if (CLHS->isNullValue()) { |
2904 | KnownBits Known(TyBits); |
2905 | computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); |
2906 | // If the input is known to be 0 or 1, the output is 0/-1, which is |
2907 | // all sign bits set. |
2908 | if ((Known.Zero | 1).isAllOnesValue()) |
2909 | return TyBits; |
2910 | |
2911 | // If the input is known to be positive (the sign bit is known clear), |
2912 | // the output of the NEG has the same number of sign bits as the |
2913 | // input. |
2914 | if (Known.isNonNegative()) |
2915 | return Tmp2; |
2916 | |
2917 | // Otherwise, we treat this like a SUB. |
2918 | } |
2919 | |
2920 | // Sub can have at most one carry bit. Thus we know that the output |
2921 | // is, at worst, one more bit than the inputs. |
2922 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
2923 | if (Tmp == 1) break; |
2924 | return std::min(Tmp, Tmp2) - 1; |
2925 | |
2926 | case Instruction::Mul: { |
2927 | // The output of the Mul can be at most twice the valid bits in the |
2928 | // inputs. |
2929 | unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
2930 | if (SignBitsOp0 == 1) break; |
2931 | unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); |
2932 | if (SignBitsOp1 == 1) break; |
2933 | unsigned OutValidBits = |
2934 | (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); |
2935 | return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; |
2936 | } |
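     | // NOTE (illustrative annotation, not in the upstream source): for i16
     | // operands with 9 and 12 known sign bits, the inputs carry
     | // (16 - 9 + 1) + (16 - 12 + 1) == 13 valid bits, so the product needs at
     | // most 13 bits and 16 - 13 + 1 == 4 sign bits are guaranteed.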
2937 | |
2938 | case Instruction::PHI: { |
2939 | const PHINode *PN = cast<PHINode>(U); |
2940 | unsigned NumIncomingValues = PN->getNumIncomingValues(); |
2941 | // Don't analyze large in-degree PHIs. |
2942 | if (NumIncomingValues > 4) break; |
2943 | // Unreachable blocks may have zero-operand PHI nodes. |
2944 | if (NumIncomingValues == 0) break; |
2945 | |
2946 | // Take the minimum of all incoming values. This can't infinitely loop |
2947 | // because of our depth threshold. |
2948 | Query RecQ = Q; |
2949 | Tmp = TyBits; |
2950 | for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) { |
2951 | if (Tmp == 1) return Tmp; |
2952 | RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator(); |
2953 | Tmp = std::min( |
2954 | Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ)); |
2955 | } |
2956 | return Tmp; |
2957 | } |
2958 | |
2959 | case Instruction::Trunc: |
2960 | // FIXME: it's tricky to do anything useful for this, but it is an |
2961 | // important case for targets like X86. |
2962 | break; |
2963 | |
2964 | case Instruction::ExtractElement: |
2965 | // Look through extract element. At the moment we keep this simple and |
2966 | // skip tracking the specific element. But at least we might find |
2967 | // information valid for all elements of the vector (for example if vector |
2968 | // is sign extended, shifted, etc). |
2969 | return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
2970 | |
2971 | case Instruction::ShuffleVector: { |
2972 | // Collect the minimum number of sign bits that are shared by every vector |
2973 | // element referenced by the shuffle. |
2974 | auto *Shuf = dyn_cast<ShuffleVectorInst>(U); |
2975 | if (!Shuf) { |
2976 | // FIXME: Add support for shufflevector constant expressions. |
2977 | return 1; |
2978 | } |
2979 | APInt DemandedLHS, DemandedRHS; |
2980 | // For undef elements, we don't know anything about the common state of |
2981 | // the shuffle result. |
2982 | if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) |
2983 | return 1; |
2984 | Tmp = std::numeric_limits<unsigned>::max(); |
2985 | if (!!DemandedLHS) { |
2986 | const Value *LHS = Shuf->getOperand(0); |
2987 | Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q); |
2988 | } |
2989 | // If we don't know anything, early out and try computeKnownBits |
2990 | // fall-back. |
2991 | if (Tmp == 1) |
2992 | break; |
2993 | if (!!DemandedRHS) { |
2994 | const Value *RHS = Shuf->getOperand(1); |
2995 | Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q); |
2996 | Tmp = std::min(Tmp, Tmp2); |
2997 | } |
2998 | // If we don't know anything, early out and try computeKnownBits |
2999 | // fall-back. |
3000 | if (Tmp == 1) |
3001 | break; |
3002 | assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3003 | return Tmp; |
3004 | } |
3005 | case Instruction::Call: { |
3006 | if (const auto *II = dyn_cast<IntrinsicInst>(U)) { |
3007 | switch (II->getIntrinsicID()) { |
3008 | default: break; |
3009 | case Intrinsic::abs: |
3010 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); |
3011 | if (Tmp == 1) break; |
3012 | |
3013 | // Absolute value reduces number of sign bits by at most 1. |
3014 | return Tmp - 1; |
3015 | } |
3016 | } |
3017 | } |
3018 | } |
3019 | } |
3020 | |
3021 | // Finally, if we can prove that the top bits of the result are 0's or 1's, |
3022 | // use this information. |
3023 | |
3024 | // If we can examine all elements of a vector constant successfully, we're |
3025 | // done (we can't do any better than that). If not, keep trying. |
3026 | if (unsigned VecSignBits = |
3027 | computeNumSignBitsVectorConstant(V, DemandedElts, TyBits)) |
3028 | return VecSignBits; |
3029 | |
3030 | KnownBits Known(TyBits); |
3031 | computeKnownBits(V, DemandedElts, Known, Depth, Q); |
3032 | |
3033 | // If we know that the sign bit is either zero or one, determine the number of |
3034 | // identical bits in the top of the input value. |
3035 | return std::max(FirstAnswer, Known.countMinSignBits()); |
3036 | } |
3037 | |
3038 | /// This function computes the integer multiple of Base that equals V. If
3039 | /// successful, it returns true and stores the multiple in Multiple; if
3040 | /// unsuccessful, it returns false. It looks through SExt instructions only
3041 | /// if LookThroughSExt is true.
3042 | bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, |
3043 | bool LookThroughSExt, unsigned Depth) { |
3044 | assert(V && "No Value?");
3045 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3046 | assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
3047 | |
3048 | Type *T = V->getType(); |
3049 | |
3050 | ConstantInt *CI = dyn_cast<ConstantInt>(V); |
3051 | |
3052 | if (Base == 0) |
3053 | return false; |
3054 | |
3055 | if (Base == 1) { |
3056 | Multiple = V; |
3057 | return true; |
3058 | } |
3059 | |
3060 | ConstantExpr *CO = dyn_cast<ConstantExpr>(V); |
3061 | Constant *BaseVal = ConstantInt::get(T, Base); |
3062 | if (CO && CO == BaseVal) { |
3063 | // Multiple is 1. |
3064 | Multiple = ConstantInt::get(T, 1); |
3065 | return true; |
3066 | } |
3067 | |
3068 | if (CI && CI->getZExtValue() % Base == 0) { |
3069 | Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); |
3070 | return true; |
3071 | } |
3072 | |
3073 | if (Depth == MaxAnalysisRecursionDepth) return false; |
3074 | |
3075 | Operator *I = dyn_cast<Operator>(V); |
3076 | if (!I) return false; |
3077 | |
3078 | switch (I->getOpcode()) { |
3079 | default: break; |
3080 | case Instruction::SExt: |
3081 | if (!LookThroughSExt) return false; |
3082 | // otherwise fall through to ZExt |
3083 | LLVM_FALLTHROUGH;
3084 | case Instruction::ZExt: |
3085 | return ComputeMultiple(I->getOperand(0), Base, Multiple, |
3086 | LookThroughSExt, Depth+1); |
3087 | case Instruction::Shl: |
3088 | case Instruction::Mul: { |
3089 | Value *Op0 = I->getOperand(0); |
3090 | Value *Op1 = I->getOperand(1); |
3091 | |
3092 | if (I->getOpcode() == Instruction::Shl) { |
3093 | ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); |
3094 | if (!Op1CI) return false; |
3095 | // Turn Op0 << Op1 into Op0 * 2^Op1 |
3096 | APInt Op1Int = Op1CI->getValue(); |
3097 | uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); |
3098 | APInt API(Op1Int.getBitWidth(), 0); |
3099 | API.setBit(BitToSet); |
3100 | Op1 = ConstantInt::get(V->getContext(), API); |
3101 | } |
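     | // NOTE (illustrative annotation, not in the upstream source): e.g.
     | // shl i32 %a, 3 is treated here as mul i32 %a, 8, so the multiply
     | // handling below covers both opcodes.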
3102 | |
3103 | Value *Mul0 = nullptr; |
3104 | if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { |
3105 | if (Constant *Op1C = dyn_cast<Constant>(Op1)) |
3106 | if (Constant *MulC = dyn_cast<Constant>(Mul0)) { |
3107 | if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() < |
3108 | MulC->getType()->getPrimitiveSizeInBits().getFixedSize()) |
3109 | Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); |
3110 | if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() > |
3111 | MulC->getType()->getPrimitiveSizeInBits().getFixedSize()) |
3112 | MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); |
3113 | |
3114 | // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) |
3115 | Multiple = ConstantExpr::getMul(MulC, Op1C); |
3116 | return true; |
3117 | } |
3118 | |
3119 | if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) |
3120 | if (Mul0CI->getValue() == 1) { |
3121 | // V == Base * Op1, so return Op1 |
3122 | Multiple = Op1; |
3123 | return true; |
3124 | } |
3125 | } |
3126 | |
3127 | Value *Mul1 = nullptr; |
3128 | if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { |
3129 | if (Constant *Op0C = dyn_cast<Constant>(Op0)) |
3130 | if (Constant *MulC = dyn_cast<Constant>(Mul1)) { |
3131 | if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() < |
3132 | MulC->getType()->getPrimitiveSizeInBits().getFixedSize()) |
3133 | Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); |
3134 | if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() > |
3135 | MulC->getType()->getPrimitiveSizeInBits().getFixedSize()) |
3136 | MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); |
3137 | |
3138 | // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) |
3139 | Multiple = ConstantExpr::getMul(MulC, Op0C); |
3140 | return true; |
3141 | } |
3142 | |
3143 | if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) |
3144 | if (Mul1CI->getValue() == 1) { |
3145 | // V == Base * Op0, so return Op0 |
3146 | Multiple = Op0; |
3147 | return true; |
3148 | } |
3149 | } |
3150 | } |
3151 | } |
3152 | |
3153 | // We could not determine if V is a multiple of Base. |
3154 | return false; |
3155 | } |
3156 | |
3157 | Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB, |
3158 | const TargetLibraryInfo *TLI) { |
3159 | const Function *F = CB.getCalledFunction(); |
3160 | if (!F) |
3161 | return Intrinsic::not_intrinsic; |
3162 | |
3163 | if (F->isIntrinsic()) |
3164 | return F->getIntrinsicID(); |
3165 | |
3166 | // We are going to infer semantics of a library function based on mapping it |
3167 | // to an LLVM intrinsic. Check that the library function is available from |
3168 | // this callbase and in this environment. |
3169 | LibFunc Func; |
3170 | if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) || |
3171 | !CB.onlyReadsMemory()) |
3172 | return Intrinsic::not_intrinsic; |
3173 | |
3174 | switch (Func) { |
3175 | default: |
3176 | break; |
3177 | case LibFunc_sin: |
3178 | case LibFunc_sinf: |
3179 | case LibFunc_sinl: |
3180 | return Intrinsic::sin; |
3181 | case LibFunc_cos: |
3182 | case LibFunc_cosf: |
3183 | case LibFunc_cosl: |
3184 | return Intrinsic::cos; |
3185 | case LibFunc_exp: |
3186 | case LibFunc_expf: |
3187 | case LibFunc_expl: |
3188 | return Intrinsic::exp; |
3189 | case LibFunc_exp2: |
3190 | case LibFunc_exp2f: |
3191 | case LibFunc_exp2l: |
3192 | return Intrinsic::exp2; |
3193 | case LibFunc_log: |
3194 | case LibFunc_logf: |
3195 | case LibFunc_logl: |
3196 | return Intrinsic::log; |
3197 | case LibFunc_log10: |
3198 | case LibFunc_log10f: |
3199 | case LibFunc_log10l: |
3200 | return Intrinsic::log10; |
3201 | case LibFunc_log2: |
3202 | case LibFunc_log2f: |
3203 | case LibFunc_log2l: |
3204 | return Intrinsic::log2; |
3205 | case LibFunc_fabs: |
3206 | case LibFunc_fabsf: |
3207 | case LibFunc_fabsl: |
3208 | return Intrinsic::fabs; |
3209 | case LibFunc_fmin: |
3210 | case LibFunc_fminf: |
3211 | case LibFunc_fminl: |
3212 | return Intrinsic::minnum; |
3213 | case LibFunc_fmax: |
3214 | case LibFunc_fmaxf: |
3215 | case LibFunc_fmaxl: |
3216 | return Intrinsic::maxnum; |
3217 | case LibFunc_copysign: |
3218 | case LibFunc_copysignf: |
3219 | case LibFunc_copysignl: |
3220 | return Intrinsic::copysign; |
3221 | case LibFunc_floor: |
3222 | case LibFunc_floorf: |
3223 | case LibFunc_floorl: |
3224 | return Intrinsic::floor; |
3225 | case LibFunc_ceil: |
3226 | case LibFunc_ceilf: |
3227 | case LibFunc_ceill: |
3228 | return Intrinsic::ceil; |
3229 | case LibFunc_trunc: |
3230 | case LibFunc_truncf: |
3231 | case LibFunc_truncl: |
3232 | return Intrinsic::trunc; |
3233 | case LibFunc_rint: |
3234 | case LibFunc_rintf: |
3235 | case LibFunc_rintl: |
3236 | return Intrinsic::rint; |
3237 | case LibFunc_nearbyint: |
3238 | case LibFunc_nearbyintf: |
3239 | case LibFunc_nearbyintl: |
3240 | return Intrinsic::nearbyint; |
3241 | case LibFunc_round: |
3242 | case LibFunc_roundf: |
3243 | case LibFunc_roundl: |
3244 | return Intrinsic::round; |
3245 | case LibFunc_roundeven: |
3246 | case LibFunc_roundevenf: |
3247 | case LibFunc_roundevenl: |
3248 | return Intrinsic::roundeven; |
3249 | case LibFunc_pow: |
3250 | case LibFunc_powf: |
3251 | case LibFunc_powl: |
3252 | return Intrinsic::pow; |
3253 | case LibFunc_sqrt: |
3254 | case LibFunc_sqrtf: |
3255 | case LibFunc_sqrtl: |
3256 | return Intrinsic::sqrt; |
3257 | } |
3258 | |
3259 | return Intrinsic::not_intrinsic; |
3260 | } |
3261 | |
3262 | /// Return true if we can prove that the specified FP value is never equal to |
3263 | /// -0.0. |
3264 | /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee |
3265 | /// that a value is not -0.0. It only guarantees that -0.0 may be treated |
3266 | /// the same as +0.0 in floating-point ops. |
3267 | /// |
3268 | /// NOTE: this function will need to be revisited when we support non-default |
3269 | /// rounding modes! |
3270 | bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, |
3271 | unsigned Depth) { |
3272 | if (auto *CFP = dyn_cast<ConstantFP>(V)) |
3273 | return !CFP->getValueAPF().isNegZero(); |
3274 | |
3275 | if (Depth == MaxAnalysisRecursionDepth) |
3276 | return false; |
3277 | |
3278 | auto *Op = dyn_cast<Operator>(V); |
3279 | if (!Op) |
3280 | return false; |
3281 | |
3282 | // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. |
3283 | if (match(Op, m_FAdd(m_Value(), m_PosZeroFP()))) |
3284 | return true; |
3285 | |
3286 | // sitofp and uitofp turn into +0.0 for zero. |
3287 | if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) |
3288 | return true; |
3289 | |
3290 | if (auto *Call = dyn_cast<CallInst>(Op)) { |
3291 | Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI); |
3292 | switch (IID) { |
3293 | default: |
3294 | break; |
3295 | // sqrt(-0.0) = -0.0, no other negative results are possible. |
3296 | case Intrinsic::sqrt: |
3297 | case Intrinsic::canonicalize: |
3298 | return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); |
3299 | // fabs(x) != -0.0 |
3300 | case Intrinsic::fabs: |
3301 | return true; |
3302 | } |
3303 | } |
3304 | |
3305 | return false; |
3306 | } |
3307 | |
3308 | /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3309 | /// standard ordered compare, e.g. treat -0.0 as olt 0.0 because of its sign
3310 | /// bit even though the two values compare equal.
3311 | static bool cannotBeOrderedLessThanZeroImpl(const Value *V, |
3312 | const TargetLibraryInfo *TLI, |
3313 | bool SignBitOnly, |
3314 | unsigned Depth) { |
3315 | // TODO: This function does not do the right thing when SignBitOnly is true |
3316 | // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform |
3317 | // which flips the sign bits of NaNs. See |
3318 | // https://llvm.org/bugs/show_bug.cgi?id=31702. |
3319 | |
3320 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { |
3321 | return !CFP->getValueAPF().isNegative() || |
3322 | (!SignBitOnly && CFP->getValueAPF().isZero()); |
3323 | } |
3324 | |
3325 | // Handle vector of constants. |
3326 | if (auto *CV = dyn_cast<Constant>(V)) { |
3327 | if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) { |
3328 | unsigned NumElts = CVFVTy->getNumElements(); |
3329 | for (unsigned i = 0; i != NumElts; ++i) { |
3330 | auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i)); |
3331 | if (!CFP) |
3332 | return false; |
3333 | if (CFP->getValueAPF().isNegative() && |
3334 | (SignBitOnly || !CFP->getValueAPF().isZero())) |
3335 | return false; |
3336 | } |
3337 | |
3338 | // All non-negative ConstantFPs. |
3339 | return true; |
3340 | } |
3341 | } |
3342 | |
3343 | if (Depth == MaxAnalysisRecursionDepth) |
3344 | return false; |
3345 | |
3346 | const Operator *I = dyn_cast<Operator>(V); |
3347 | if (!I) |
3348 | return false; |
3349 | |
3350 | switch (I->getOpcode()) { |
3351 | default: |
3352 | break; |
3353 | // Unsigned integers are always nonnegative. |
3354 | case Instruction::UIToFP: |
3355 | return true; |
3356 | case Instruction::FMul: |
3357 | case Instruction::FDiv: |
3358 | // X * X is always non-negative or a NaN. |
3359 | // X / X is always exactly 1.0 or a NaN. |
3360 | if (I->getOperand(0) == I->getOperand(1) && |
3361 | (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) |
3362 | return true; |
3363 | |
3364 | LLVM_FALLTHROUGH;
3365 | case Instruction::FAdd: |
3366 | case Instruction::FRem: |
3367 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, |
3368 | Depth + 1) && |
3369 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, |
3370 | Depth + 1); |
3371 | case Instruction::Select: |
3372 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, |
3373 | Depth + 1) && |
3374 | cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, |
3375 | Depth + 1); |
3376 | case Instruction::FPExt: |
3377 | case Instruction::FPTrunc: |
3378 | // Widening/narrowing never change sign. |
3379 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, |
3380 | Depth + 1); |
3381 | case Instruction::ExtractElement: |
3382 | // Look through extract element. At the moment we keep this simple and skip |
3383 | // tracking the specific element. But at least we might find information |
3384 | // valid for all elements of the vector. |
3385 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, |
3386 | Depth + 1); |
3387 | case Instruction::Call: |
3388 | const auto *CI = cast<CallInst>(I); |
3389 | Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI); |
3390 | switch (IID) { |
3391 | default: |
3392 | break; |
3393 | case Intrinsic::maxnum: { |
3394 | Value *V0 = I->getOperand(0), *V1 = I->getOperand(1); |
3395 | auto isPositiveNum = [&](Value *V) { |
3396 | if (SignBitOnly) { |
3397 | // With SignBitOnly, this is tricky because the result of |
3398 | // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is |
3399 | // a constant strictly greater than 0.0. |
3400 | const APFloat *C; |
3401 | return match(V, m_APFloat(C)) && |
3402 | *C > APFloat::getZero(C->getSemantics()); |
3403 | } |
3404 | |
3405 | // -0.0 compares equal to 0.0, so if this operand is at least -0.0, |
3406 | // maxnum can't be ordered-less-than-zero. |
3407 | return isKnownNeverNaN(V, TLI) && |
3408 | cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1); |
3409 | }; |
3410 | |
3411 | // TODO: This could be improved. We could also check that neither operand |
3412 | // has its sign bit set (and at least 1 is not-NAN?). |
3413 | return isPositiveNum(V0) || isPositiveNum(V1); |
3414 | } |
3415 | |
3416 | case Intrinsic::maximum: |
3417 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, |
3418 | Depth + 1) || |
3419 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, |
3420 | Depth + 1); |
3421 | case Intrinsic::minnum: |
3422 | case Intrinsic::minimum: |
3423 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, |
3424 | Depth + 1) && |
3425 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, |
3426 | Depth + 1); |
3427 | case Intrinsic::exp: |
3428 | case Intrinsic::exp2: |
3429 | case Intrinsic::fabs: |
3430 | return true; |
3431 | |
3432 | case Intrinsic::sqrt: |
3433 | // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. |
3434 | if (!SignBitOnly) |
3435 | return true; |
3436 | return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || |
3437 | CannotBeNegativeZero(CI->getOperand(0), TLI)); |
3438 | |
3439 | case Intrinsic::powi: |
3440 | if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { |
3441 | // powi(x,n) is non-negative if n is even. |
3442 | if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) |
3443 | return true; |
3444 | } |
3445 | // TODO: This is not correct. Given that exp is an integer, here are the |
3446 | // ways that pow can return a negative value: |
3447 | // |
3448 | // pow(x, exp) --> negative if exp is odd and x is negative. |
3449 | // pow(-0, exp) --> -inf if exp is negative odd. |
3450 | // pow(-0, exp) --> -0 if exp is positive odd. |
3451 | // pow(-inf, exp) --> -0 if exp is negative odd. |
3452 | // pow(-inf, exp) --> -inf if exp is positive odd. |
3453 | // |
3454 | // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, |
3455 | // but we must return false if x == -0. Unfortunately we do not currently |
3456 | // have a way of expressing this constraint. See details in |
3457 | // https://llvm.org/bugs/show_bug.cgi?id=31702. |
3458 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, |
3459 | Depth + 1); |
3460 | |
3461 | case Intrinsic::fma: |
3462 | case Intrinsic::fmuladd: |
3463 | // x*x+y is non-negative if y is non-negative. |
3464 | return I->getOperand(0) == I->getOperand(1) && |
3465 | (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && |
3466 | cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, |
3467 | Depth + 1); |
3468 | } |
3469 | break; |
3470 | } |
3471 | return false; |
3472 | } |
3473 | |
3474 | bool llvm::CannotBeOrderedLessThanZero(const Value *V, |
3475 | const TargetLibraryInfo *TLI) { |
3476 | return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); |
3477 | } |
3478 | |
3479 | bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { |
3480 | return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); |
3481 | } |
3482 | |
3483 | bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI, |
3484 | unsigned Depth) { |
3485 | assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3486 | |
3487 | // If we're told that infinities won't happen, assume they won't. |
3488 | if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) |
3489 | if (FPMathOp->hasNoInfs()) |
3490 | return true; |
3491 | |
3492 | // Handle scalar constants. |
3493 | if (auto *CFP = dyn_cast<ConstantFP>(V)) |
3494 | return !CFP->isInfinity(); |
3495 | |
3496 | if (Depth == MaxAnalysisRecursionDepth) |
3497 | return false; |
3498 | |
3499 | if (auto *Inst = dyn_cast<Instruction>(V)) { |
3500 | switch (Inst->getOpcode()) { |
3501 | case Instruction::Select: { |
3502 | return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) && |
3503 | isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1); |
3504 | } |
3505 | case Instruction::SIToFP: |
3506 | case Instruction::UIToFP: { |
3507 | // Get width of largest magnitude integer (remove a bit if signed). |
3508 | // This still works for a signed minimum value because the largest FP |
3509 | // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx). |
3510 | int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits(); |
3511 | if (Inst->getOpcode() == Instruction::SIToFP) |
3512 | --IntSize; |
3513 | |
3514 | // If the exponent of the largest finite FP value can hold the largest |
3515 | // integer, the result of the cast must be finite. |
3516 | Type *FPTy = Inst->getType()->getScalarType(); |
3517 | return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize; |
3518 | } |
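     | // NOTE (illustrative annotation, not in the upstream source):
     | // uitofp i32 -> float is always finite because ilogb of the largest
     | // float is 127 >= 32, while uitofp i32 -> half can overflow to +inf
     | // because ilogb of the largest half (65504.0) is only 15 < 32.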
3519 | default: |
3520 | break; |
3521 | } |
3522 | } |
3523 | |
3524 | // Try to handle fixed width vector constants
3525 | auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); |
3526 | if (VFVTy && isa<Constant>(V)) { |
3527 | // For vectors, verify that each element is not infinity. |
3528 | unsigned NumElts = VFVTy->getNumElements(); |
3529 | for (unsigned i = 0; i != NumElts; ++i) { |
3530 | Constant *Elt = cast<Constant>(V)->getAggregateElement(i); |
3531 | if (!Elt) |
3532 | return false; |
3533 | if (isa<UndefValue>(Elt)) |
3534 | continue; |
3535 | auto *CElt = dyn_cast<ConstantFP>(Elt); |
3536 | if (!CElt || CElt->isInfinity()) |
3537 | return false; |
3538 | } |
3539 | // All elements were confirmed non-infinity or undefined. |
3540 | return true; |
3541 | } |
3542 | |
3543 | // Was not able to prove that V never contains infinity
3544 | return false; |
3545 | } |
3546 | |
3547 | bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, |
3548 | unsigned Depth) { |
3549 | assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3550 | |
3551 | // If we're told that NaNs won't happen, assume they won't. |
3552 | if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) |
3553 | if (FPMathOp->hasNoNaNs()) |
3554 | return true; |
3555 | |
3556 | // Handle scalar constants. |
3557 | if (auto *CFP = dyn_cast<ConstantFP>(V)) |
3558 | return !CFP->isNaN(); |
3559 | |
3560 | if (Depth == MaxAnalysisRecursionDepth) |
3561 | return false; |
3562 | |
3563 | if (auto *Inst = dyn_cast<Instruction>(V)) { |
3564 | switch (Inst->getOpcode()) { |
3565 | case Instruction::FAdd: |
3566 | case Instruction::FSub: |
3567 | // Adding positive and negative infinity produces NaN. |
3568 | return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && |
3569 | isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && |
3570 | (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) || |
3571 | isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1)); |
3572 | |
3573 | case Instruction::FMul: |
3574 | // Zero multiplied with infinity produces NaN. |
3575 | // FIXME: If neither side can be zero fmul never produces NaN. |
3576 | return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && |
3577 | isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) && |
3578 | isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && |
3579 | isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1); |
3580 | |
3581 | case Instruction::FDiv: |
3582 | case Instruction::FRem: |
3583 | // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN. |
3584 | return false; |
3585 | |
3586 | case Instruction::Select: { |
3587 | return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && |
3588 | isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1); |
3589 | } |
3590 | case Instruction::SIToFP: |
3591 | case Instruction::UIToFP: |
3592 | return true; |
3593 | case Instruction::FPTrunc: |
3594 | case Instruction::FPExt: |
3595 | return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1); |
3596 | default: |
3597 | break; |
3598 | } |
3599 | } |
3600 | |
3601 | if (const auto *II = dyn_cast<IntrinsicInst>(V)) { |
3602 | switch (II->getIntrinsicID()) { |
3603 | case Intrinsic::canonicalize: |
3604 | case Intrinsic::fabs: |
3605 | case Intrinsic::copysign: |
3606 | case Intrinsic::exp: |
3607 | case Intrinsic::exp2: |
3608 | case Intrinsic::floor: |
3609 | case Intrinsic::ceil: |
3610 | case Intrinsic::trunc: |
3611 | case Intrinsic::rint: |
3612 | case Intrinsic::nearbyint: |
3613 | case Intrinsic::round: |
3614 | case Intrinsic::roundeven: |
3615 | return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1); |
3616 | case Intrinsic::sqrt: |
3617 | return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) && |
3618 | CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI); |
3619 | case Intrinsic::minnum: |
3620 | case Intrinsic::maxnum: |
3621 | // If either operand is not NaN, the result is not NaN. |
3622 | return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) || |
3623 | isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1); |
3624 | default: |
3625 | return false; |
3626 | } |
3627 | } |
3628 | |
3629 | // Try to handle fixed width vector constants |
3630 | auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); |
3631 | if (VFVTy && isa<Constant>(V)) { |
3632 | // For vectors, verify that each element is not NaN. |
3633 | unsigned NumElts = VFVTy->getNumElements(); |
3634 | for (unsigned i = 0; i != NumElts; ++i) { |
3635 | Constant *Elt = cast<Constant>(V)->getAggregateElement(i); |
3636 | if (!Elt) |
3637 | return false; |
3638 | if (isa<UndefValue>(Elt)) |
3639 | continue; |
3640 | auto *CElt = dyn_cast<ConstantFP>(Elt); |
3641 | if (!CElt || CElt->isNaN()) |
3642 | return false; |
3643 | } |
3644 | // All elements were confirmed not-NaN or undefined. |
3645 | return true; |
3646 | } |
3647 | |
3648 | // Was not able to prove that V never contains NaN |
3649 | return false; |
3650 | } |
3651 | |
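     | // A sketch of the contract (illustrative values, not from the original
     | // header comment): the result is the single i8 that, repeated, reproduces
     | // V's store pattern, or nullptr if no such byte exists. For example:
     | //   isBytewiseValue(i8 %x)          --> %x      (any byte splats itself)
     | //   isBytewiseValue(i32 0xAAAAAAAA) --> i8 0xAA
     | //   isBytewiseValue(i32 0xAABBCCDD) --> nullptr (bytes differ)
     | //   isBytewiseValue(double 0.0)     --> i8 0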
3652 | Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) { |
3653 | |
3654 | // All byte-wide stores are splatable, even of arbitrary variables. |
3655 | if (V->getType()->isIntegerTy(8)) |
3656 | return V; |
3657 | |
3658 | LLVMContext &Ctx = V->getContext(); |
3659 | |
3660 |   // Undef values don't care; any byte value works.
3661 | auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx)); |
3662 | if (isa<UndefValue>(V)) |
3663 | return UndefInt8; |
3664 | |
3665 | // Return Undef for zero-sized type. |
3666 | if (!DL.getTypeStoreSize(V->getType()).isNonZero()) |
3667 | return UndefInt8; |
3668 | |
3669 | Constant *C = dyn_cast<Constant>(V); |
3670 | if (!C) { |
3671 | // Conceptually, we could handle things like: |
3672 | // %a = zext i8 %X to i16 |
3673 | // %b = shl i16 %a, 8 |
3674 | // %c = or i16 %a, %b |
3675 | // but until there is an example that actually needs this, it doesn't seem |
3676 | // worth worrying about. |
3677 | return nullptr; |
3678 | } |
3679 | |
3680 |   // Handle 'null' ConstantAggregateZero etc.
3681 | if (C->isNullValue()) |
3682 | return Constant::getNullValue(Type::getInt8Ty(Ctx)); |
3683 | |
3684 | // Constant floating-point values can be handled as integer values if the |
3685 | // corresponding integer value is "byteable". An important case is 0.0. |
3686 | if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { |
3687 | Type *Ty = nullptr; |
3688 | if (CFP->getType()->isHalfTy()) |
3689 | Ty = Type::getInt16Ty(Ctx); |
3690 | else if (CFP->getType()->isFloatTy()) |
3691 | Ty = Type::getInt32Ty(Ctx); |
3692 | else if (CFP->getType()->isDoubleTy()) |
3693 | Ty = Type::getInt64Ty(Ctx); |
3694 | // Don't handle long double formats, which have strange constraints. |
3695 | return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL) |
3696 | : nullptr; |
3697 | } |
3698 | |
3699 |   // We can handle constant integers whose width is a multiple of 8 bits.
3700 | if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { |
3701 | if (CI->getBitWidth() % 8 == 0) { |
3702 |       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3703 | if (!CI->getValue().isSplat(8)) |
3704 | return nullptr; |
3705 | return ConstantInt::get(Ctx, CI->getValue().trunc(8)); |
3706 | } |
3707 | } |
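     |   // E.g., i32 0x01010101 passes the isSplat(8) check above and truncates to
     |   // i8 1, while i32 0x01020304 is rejected and yields nullptr.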
3708 | |
3709 | if (auto *CE = dyn_cast<ConstantExpr>(C)) { |
3710 | if (CE->getOpcode() == Instruction::IntToPtr) { |
3711 | if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) { |
3712 | unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace()); |
3713 | return isBytewiseValue( |
3714 | ConstantExpr::getIntegerCast(CE->getOperand(0), |
3715 | Type::getIntNTy(Ctx, BitWidth), false), |
3716 | DL); |
3717 | } |
3718 | } |
3719 | } |
3720 | |
3721 | auto Merge = [&](Value *LHS, Value *RHS) -> Value * { |
3722 | if (LHS == RHS) |
3723 | return LHS; |
3724 | if (!LHS || !RHS) |
3725 | return nullptr; |
3726 | if (LHS == UndefInt8) |
3727 | return RHS; |
3728 | if (RHS == UndefInt8) |
3729 | return LHS; |
3730 | return nullptr; |
3731 | }; |
3732 | |
3733 | if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) { |
3734 | Value *Val = UndefInt8; |
3735 | for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I) |
3736 | if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL)))) |
3737 | return nullptr; |
3738 | return Val; |
3739 | } |
3740 | |
3741 | if (isa<ConstantAggregate>(C)) { |
3742 | Value *Val = UndefInt8; |
3743 | for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) |
3744 | if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL)))) |
3745 | return nullptr; |
3746 | return Val; |
3747 | } |
3748 | |
3749 | // Don't try to handle the handful of other constants. |
3750 | return nullptr; |
3751 | } |
3752 | |
3753 | // This is the recursive version of BuildSubAggregate. It takes a few different |
3754 | // arguments. Idxs is the index within the nested struct From that we are |
3755 | // looking at now (which is of type IndexedType). IdxSkip is the number of |
3756 | // indices from Idxs that should be left out when inserting into the resulting |
3757 | // struct. To is the result struct built so far, new insertvalue instructions |
3758 | // build on that. |
3759 | static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, |
3760 | SmallVectorImpl<unsigned> &Idxs, |
3761 | unsigned IdxSkip, |
3762 | Instruction *InsertBefore) { |
3763 | StructType *STy = dyn_cast<StructType>(IndexedType); |
3764 | if (STy) { |
3765 | // Save the original To argument so we can modify it |
3766 | Value *OrigTo = To; |
3767 | // General case, the type indexed by Idxs is a struct |
3768 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
3769 | // Process each struct element recursively |
3770 | Idxs.push_back(i); |
3771 | Value *PrevTo = To; |
3772 | To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, |
3773 | InsertBefore); |
3774 | Idxs.pop_back(); |
3775 | if (!To) { |
3776 | // Couldn't find any inserted value for this index? Cleanup |
3777 | while (PrevTo != OrigTo) { |
3778 | InsertValueInst* Del = cast<InsertValueInst>(PrevTo); |
3779 | PrevTo = Del->getAggregateOperand(); |
3780 | Del->eraseFromParent(); |
3781 | } |
3782 | // Stop processing elements |
3783 | break; |
3784 | } |
3785 | } |
3786 | // If we successfully found a value for each of our subaggregates |
3787 | if (To) |
3788 | return To; |
3789 | } |
3790 |   // Base case, the type indexed by Idxs is not a struct, or not all of
3791 | // the struct's elements had a value that was inserted directly. In the latter |
3792 | // case, perhaps we can't determine each of the subelements individually, but |
3793 | // we might be able to find the complete struct somewhere. |
3794 | |
3795 | // Find the value that is at that particular spot |
3796 | Value *V = FindInsertedValue(From, Idxs); |
3797 | |
3798 | if (!V) |
3799 | return nullptr; |
3800 | |
3801 | // Insert the value in the new (sub) aggregate |
3802 | return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), |
3803 | "tmp", InsertBefore); |
3804 | } |
3805 | |
3806 | // This helper takes a nested struct and extracts a part of it (which is again a |
3807 | // struct) into a new value. For example, given the struct: |
3808 | // { a, { b, { c, d }, e } } |
3809 | // and the indices "1, 1" this returns |
3810 | // { c, d }. |
3811 | // |
3812 | // It does this by inserting an insertvalue for each element in the resulting |
3813 | // struct, as opposed to just inserting a single struct. This will only work if |
3814 | // each element of the substruct is known (i.e., inserted into From by an
3815 | // insertvalue instruction somewhere). |
3816 | // |
3817 | // All inserted insertvalue instructions are inserted before InsertBefore |
3818 | static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, |
3819 | Instruction *InsertBefore) { |
3820 |   assert(InsertBefore && "Must have someplace to insert!");
3821 | Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), |
3822 | idx_range); |
3823 | Value *To = UndefValue::get(IndexedType); |
3824 | SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); |
3825 | unsigned IdxSkip = Idxs.size(); |
3826 | |
3827 | return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); |
3828 | } |
3829 | |
3830 | /// Given an aggregate and a sequence of indices, see if the scalar value |
3831 | /// indexed is already around as a register, for example if it was inserted |
3832 | /// directly into the aggregate. |
3833 | /// |
3834 | /// If InsertBefore is not null, this function will duplicate (modified) |
3835 | /// insertvalues when a part of a nested struct is extracted. |
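     | ///
     | /// For example (an illustrative case):
     | ///   %agg = insertvalue { i32, i32 } undef, i32 42, 1
     | /// FindInsertedValue(%agg, {1}) returns the i32 42 directly, with no new
     | /// instructions created.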
3836 | Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, |
3837 | Instruction *InsertBefore) { |
3838 | // Nothing to index? Just return V then (this is useful at the end of our |
3839 | // recursion). |
3840 | if (idx_range.empty()) |
3841 | return V; |
3842 | // We have indices, so V should have an indexable type. |
3843 |   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3844 |          "Not looking at a struct or array?");
3845 |   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3846 |          "Invalid indices for type?");
3847 | |
3848 | if (Constant *C = dyn_cast<Constant>(V)) { |
3849 | C = C->getAggregateElement(idx_range[0]); |
3850 | if (!C) return nullptr; |
3851 | return FindInsertedValue(C, idx_range.slice(1), InsertBefore); |
3852 | } |
3853 | |
3854 | if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { |
3855 |     // Walk the indices of the insertvalue instruction in parallel with the
3856 | // requested indices |
3857 | const unsigned *req_idx = idx_range.begin(); |
3858 | for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); |
3859 | i != e; ++i, ++req_idx) { |
3860 | if (req_idx == idx_range.end()) { |
3861 | // We can't handle this without inserting insertvalues |
3862 | if (!InsertBefore) |
3863 | return nullptr; |
3864 | |
3865 | // The requested index identifies a part of a nested aggregate. Handle |
3866 | // this specially. For example, |
3867 | // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0 |
3868 | // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1 |
3869 | // %C = extractvalue {i32, { i32, i32 } } %B, 1 |
3870 | // This can be changed into |
3871 | // %A = insertvalue {i32, i32 } undef, i32 10, 0 |
3872 | // %C = insertvalue {i32, i32 } %A, i32 11, 1 |
3873 | // which allows the unused 0,0 element from the nested struct to be |
3874 | // removed. |
3875 | return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx), |
3876 | InsertBefore); |
3877 | } |
3878 | |
3879 |       // This insertvalue inserts something other than what we are looking for.
3880 | // See if the (aggregate) value inserted into has the value we are |
3881 | // looking for, then. |
3882 | if (*req_idx != *i) |
3883 | return FindInsertedValue(I->getAggregateOperand(), idx_range, |
3884 | InsertBefore); |
3885 | } |
3886 | // If we end up here, the indices of the insertvalue match with those |
3887 | // requested (though possibly only partially). Now we recursively look at |
3888 | // the inserted value, passing any remaining indices. |
3889 | return FindInsertedValue(I->getInsertedValueOperand(), |
3890 | makeArrayRef(req_idx, idx_range.end()), |
3891 | InsertBefore); |
3892 | } |
3893 | |
3894 | if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { |
3895 | // If we're extracting a value from an aggregate that was extracted from |
3896 | // something else, we can extract from that something else directly instead. |
3897 | // However, we will need to chain I's indices with the requested indices. |
3898 | |
3899 | // Calculate the number of indices required |
3900 | unsigned size = I->getNumIndices() + idx_range.size(); |
3901 | // Allocate some space to put the new indices in |
3902 | SmallVector<unsigned, 5> Idxs; |
3903 | Idxs.reserve(size); |
3904 | // Add indices from the extract value instruction |
3905 | Idxs.append(I->idx_begin(), I->idx_end()); |
3906 | |
3907 | // Add requested indices |
3908 | Idxs.append(idx_range.begin(), idx_range.end()); |
3909 | |
3910 |   assert(Idxs.size() == size &&
3911 |          "Number of indices added not correct?");
3912 | |
3913 | return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); |
3914 | } |
3915 | // Otherwise, we don't know (such as, extracting from a function return value |
3916 | // or load instruction) |
3917 | return nullptr; |
3918 | } |
3919 | |
3920 | bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP, |
3921 | unsigned CharSize) { |
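     |   // A typical GEP this accepts, assuming CharSize == 8 (illustrative IR):
     |   //   getelementptr [12 x i8], [12 x i8]* @str, i64 0, i64 %idx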
3922 | // Make sure the GEP has exactly three arguments. |
3923 | if (GEP->getNumOperands() != 3) |
3924 | return false; |
3925 | |
3926 |   // Make sure the index-ee is a pointer to an array of exactly
3927 |   // \p CharSize integers.
3928 | ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType()); |
3929 | if (!AT || !AT->getElementType()->isIntegerTy(CharSize)) |
3930 | return false; |
3931 | |
3932 | // Check to make sure that the first operand of the GEP is an integer and |
3933 | // has value 0 so that we are sure we're indexing into the initializer. |
3934 | const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); |
3935 | if (!FirstIdx || !FirstIdx->isZero()) |
3936 | return false; |
3937 | |
3938 | return true; |
3939 | } |
3940 | |
3941 | bool llvm::getConstantDataArrayInfo(const Value *V, |
3942 | ConstantDataArraySlice &Slice, |
3943 | unsigned ElementSize, uint64_t Offset) { |
3944 |   assert(V);
3945 | |
3946 | // Look through bitcast instructions and geps. |
3947 | V = V->stripPointerCasts(); |
3948 | |
3949 | // If the value is a GEP instruction or constant expression, treat it as an |
3950 | // offset. |
3951 | if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { |
3952 | // The GEP operator should be based on a pointer to string constant, and is |
3953 | // indexing into the string constant. |
3954 | if (!isGEPBasedOnPointerToString(GEP, ElementSize)) |
3955 | return false; |
3956 | |
3957 | // If the second index isn't a ConstantInt, then this is a variable index |
3958 | // into the array. If this occurs, we can't say anything meaningful about |
3959 | // the string. |
3960 | uint64_t StartIdx = 0; |
3961 | if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) |
3962 | StartIdx = CI->getZExtValue(); |
3963 | else |
3964 | return false; |
3965 | return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize, |
3966 | StartIdx + Offset); |
3967 | } |
3968 | |
3969 |   // The GEP, whether a constant expression or an instruction, must reference
3970 |   // a global variable that is a constant and is initialized. The referenced
3971 |   // constant initializer is the array that we'll use for optimization.
3972 | const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); |
3973 | if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) |
3974 | return false; |
3975 | |
3976 | const ConstantDataArray *Array; |
3977 | ArrayType *ArrayTy; |
3978 | if (GV->getInitializer()->isNullValue()) { |
3979 | Type *GVTy = GV->getValueType(); |
3980 | if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) { |
3981 | // A zeroinitializer for the array; there is no ConstantDataArray. |
3982 | Array = nullptr; |
3983 | } else { |
3984 | const DataLayout &DL = GV->getParent()->getDataLayout(); |
3985 | uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize(); |
3986 | uint64_t Length = SizeInBytes / (ElementSize / 8); |
3987 | if (Length <= Offset) |
3988 | return false; |
3989 | |
3990 | Slice.Array = nullptr; |
3991 | Slice.Offset = 0; |
3992 | Slice.Length = Length - Offset; |
3993 | return true; |
3994 | } |
3995 | } else { |
3996 | // This must be a ConstantDataArray. |
3997 | Array = dyn_cast<ConstantDataArray>(GV->getInitializer()); |
3998 | if (!Array) |
3999 | return false; |
4000 | ArrayTy = Array->getType(); |
4001 | } |
4002 | if (!ArrayTy->getElementType()->isIntegerTy(ElementSize)) |
4003 | return false; |
4004 | |
4005 | uint64_t NumElts = ArrayTy->getArrayNumElements(); |
4006 | if (Offset > NumElts) |
4007 | return false; |
4008 | |
4009 | Slice.Array = Array; |
4010 | Slice.Offset = Offset; |
4011 | Slice.Length = NumElts - Offset; |
4012 | return true; |
4013 | } |
4014 | |
4015 | /// This function computes the length of a null-terminated C string pointed to |
4016 | /// by V. If successful, it returns true and returns the string in Str. |
4017 | /// If unsuccessful, it returns false. |
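     | ///
     | /// For example (illustrative): given @s = constant [6 x i8] c"hello\00",
     | /// getConstantStringInfo(@s, Str) sets Str to "hello" and returns true.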
4018 | bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, |
4019 | uint64_t Offset, bool TrimAtNul) { |
4020 | ConstantDataArraySlice Slice; |
4021 | if (!getConstantDataArrayInfo(V, Slice, 8, Offset)) |
4022 | return false; |
4023 | |
4024 | if (Slice.Array == nullptr) { |
4025 | if (TrimAtNul) { |
4026 | Str = StringRef(); |
4027 | return true; |
4028 | } |
4029 | if (Slice.Length == 1) { |
4030 | Str = StringRef("", 1); |
4031 | return true; |
4032 | } |
4033 | // We cannot instantiate a StringRef as we do not have an appropriate string |
4034 | // of 0s at hand. |
4035 | return false; |
4036 | } |
4037 | |
4038 | // Start out with the entire array in the StringRef. |
4039 | Str = Slice.Array->getAsString(); |
4040 | // Skip over 'offset' bytes. |
4041 | Str = Str.substr(Slice.Offset); |
4042 | |
4043 | if (TrimAtNul) { |
4044 |     // Trim off the \0 and anything after it. If the array is not nul
4045 |     // terminated, we just return the whole remainder of the string. The
4046 |     // client may know some other way that the string is length-bound.
4047 | Str = Str.substr(0, Str.find('\0')); |
4048 | } |
4049 | return true; |
4050 | } |
4051 | |
4052 | // These next two are very similar to the above, but also look through PHI |
4053 | // nodes. |
4054 | // TODO: See if we can integrate these two together. |
4055 | |
4056 | /// If we can compute the length of the string pointed to by |
4057 | /// the specified pointer, return 'len+1'. If we can't, return 0. |
4058 | static uint64_t GetStringLengthH(const Value *V, |
4059 | SmallPtrSetImpl<const PHINode*> &PHIs, |
4060 | unsigned CharSize) { |
4061 | // Look through noop bitcast instructions. |
4062 | V = V->stripPointerCasts(); |
4063 | |
4064 | // If this is a PHI node, there are two cases: either we have already seen it |
4065 | // or we haven't. |
4066 | if (const PHINode *PN = dyn_cast<PHINode>(V)) { |
4067 | if (!PHIs.insert(PN).second) |
4068 | return ~0ULL; // already in the set. |
4069 | |
4070 | // If it was new, see if all the input strings are the same length. |
4071 | uint64_t LenSoFar = ~0ULL; |
4072 | for (Value *IncValue : PN->incoming_values()) { |
4073 | uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize); |
4074 | if (Len == 0) return 0; // Unknown length -> unknown. |
4075 | |
4076 | if (Len == ~0ULL) continue; |
4077 | |
4078 | if (Len != LenSoFar && LenSoFar != ~0ULL) |
4079 | return 0; // Disagree -> unknown. |
4080 | LenSoFar = Len; |
4081 | } |
4082 | |
4083 | // Success, all agree. |
4084 | return LenSoFar; |
4085 | } |
4086 | |
4087 |   // strlen(select(c,x,y)) -> strlen(x) if it equals strlen(y), else unknown.
4088 | if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { |
4089 | uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize); |
4090 | if (Len1 == 0) return 0; |
4091 | uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize); |
4092 | if (Len2 == 0) return 0; |
4093 | if (Len1 == ~0ULL) return Len2; |
4094 | if (Len2 == ~0ULL) return Len1; |
4095 | if (Len1 != Len2) return 0; |
4096 | return Len1; |
4097 | } |
4098 | |
4099 | // Otherwise, see if we can read the string. |
4100 | ConstantDataArraySlice Slice; |
4101 | if (!getConstantDataArrayInfo(V, Slice, CharSize)) |
4102 | return 0; |
4103 | |
4104 | if (Slice.Array == nullptr) |
4105 | return 1; |
4106 | |
4107 | // Search for nul characters |
4108 | unsigned NullIndex = 0; |
4109 | for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) { |
4110 | if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0) |
4111 | break; |
4112 | } |
4113 | |
4114 | return NullIndex + 1; |
4115 | } |
4116 | |
4117 | /// If we can compute the length of the string pointed to by |
4118 | /// the specified pointer, return 'len+1'. If we can't, return 0. |
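     | ///
     | /// E.g. (illustrative), for a pointer to the constant c"abc\00" this returns
     | /// 4 (three characters plus the terminating nul); for an unknown pointer it
     | /// returns 0.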
4119 | uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) { |
4120 | if (!V->getType()->isPointerTy()) |
4121 | return 0; |
4122 | |
4123 | SmallPtrSet<const PHINode*, 32> PHIs; |
4124 | uint64_t Len = GetStringLengthH(V, PHIs, CharSize); |
4125 | // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return |
4126 | // an empty string as a length. |
4127 | return Len == ~0ULL ? 1 : Len; |
4128 | } |
4129 | |
4130 | const Value * |
4131 | llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call, |
4132 | bool MustPreserveNullness) { |
4133 |   assert(Call &&
4134 |          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4135 | if (const Value *RV = Call->getReturnedArgOperand()) |
4136 | return RV; |
4137 |   // This can be used only as an aliasing property.
4138 | if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( |
4139 | Call, MustPreserveNullness)) |
4140 | return Call->getArgOperand(0); |
4141 | return nullptr; |
4142 | } |
4143 | |
4144 | bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( |
4145 | const CallBase *Call, bool MustPreserveNullness) { |
4146 | switch (Call->getIntrinsicID()) { |
4147 | case Intrinsic::launder_invariant_group: |
4148 | case Intrinsic::strip_invariant_group: |
4149 | case Intrinsic::aarch64_irg: |
4150 | case Intrinsic::aarch64_tagp: |
4151 | return true; |
4152 | case Intrinsic::ptrmask: |
4153 | return !MustPreserveNullness; |
4154 | default: |
4155 | return false; |
4156 | } |
4157 | } |
4158 | |
4159 | /// \p PN defines a loop-variant pointer to an object. Check if the |
4160 | /// previous iteration of the loop was referring to the same object as \p PN. |
4161 | static bool isSameUnderlyingObjectInLoop(const PHINode *PN, |
4162 | const LoopInfo *LI) { |
4163 | // Find the loop-defined value. |
4164 | Loop *L = LI->getLoopFor(PN->getParent()); |
4165 | if (PN->getNumIncomingValues() != 2) |
4166 | return true; |
4167 | |
4168 |   // Find the value from the previous iteration.
4169 | auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); |
4170 | if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) |
4171 | PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); |
4172 | if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) |
4173 | return true; |
4174 | |
4175 | // If a new pointer is loaded in the loop, the pointer references a different |
4176 | // object in every iteration. E.g.: |
4177 | // for (i) |
4178 | // int *p = a[i]; |
4179 | // ... |
4180 | if (auto *Load = dyn_cast<LoadInst>(PrevValue)) |
4181 | if (!L->isLoopInvariant(Load->getPointerOperand())) |
4182 | return false; |
4183 | return true; |
4184 | } |
4185 | |
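     | // A sketch of what getUnderlyingObject strips (illustrative IR):
     | //   %a = alloca [4 x i32]
     | //   %p = getelementptr [4 x i32], [4 x i32]* %a, i64 0, i64 2
     | //   %q = bitcast i32* %p to i8*
     | // getUnderlyingObject(%q) walks back through the bitcast and the GEP to %a.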
4186 | Value *llvm::getUnderlyingObject(Value *V, unsigned MaxLookup) { |
4187 | if (!V->getType()->isPointerTy()) |
4188 | return V; |
4189 | for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { |
4190 | if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { |
4191 | V = GEP->getPointerOperand(); |
4192 | } else if (Operator::getOpcode(V) == Instruction::BitCast || |
4193 | Operator::getOpcode(V) == Instruction::AddrSpaceCast) { |
4194 | V = cast<Operator>(V)->getOperand(0); |
4195 | if (!V->getType()->isPointerTy()) |
4196 | return V; |
4197 | } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { |
4198 | if (GA->isInterposable()) |
4199 | return V; |
4200 | V = GA->getAliasee(); |
4201 | } else { |
4202 | if (auto *PHI = dyn_cast<PHINode>(V)) { |
4203 | // Look through single-arg phi nodes created by LCSSA. |
4204 | if (PHI->getNumIncomingValues() == 1) { |
4205 | V = PHI->getIncomingValue(0); |
4206 | continue; |
4207 | } |
4208 | } else if (auto *Call = dyn_cast<CallBase>(V)) { |
4209 |         // CaptureTracking knows about special capturing properties of some
4210 |         // intrinsics like launder.invariant.group that can't be expressed
4211 |         // with attributes but have properties like returning an aliasing
4212 |         // pointer. Because some analyses may assume that a nocapture pointer
4213 |         // is not returned from some special intrinsic (the function would
4214 |         // have to be marked with the 'returned' attribute), it is crucial to
4215 |         // use this function, which should be kept in sync with
4216 |         // CaptureTracking. Not using it may cause weird miscompilations where
4217 |         // two aliasing pointers are assumed to be noalias.
4218 | if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) { |
4219 | V = RP; |
4220 | continue; |
4221 | } |
4222 | } |
4223 | |
4224 | return V; |
4225 | } |
4226 |     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4227 | } |
4228 | return V; |
4229 | } |
4230 | |
4231 | void llvm::getUnderlyingObjects(const Value *V, |
4232 | SmallVectorImpl<const Value *> &Objects, |
4233 | LoopInfo *LI, unsigned MaxLookup) { |
4234 | SmallPtrSet<const Value *, 4> Visited; |
4235 | SmallVector<const Value *, 4> Worklist; |
4236 | Worklist.push_back(V); |
4237 | do { |
4238 | const Value *P = Worklist.pop_back_val(); |
4239 | P = getUnderlyingObject(P, MaxLookup); |
4240 | |
4241 | if (!Visited.insert(P).second) |
4242 | continue; |
4243 | |
4244 | if (auto *SI = dyn_cast<SelectInst>(P)) { |
4245 | Worklist.push_back(SI->getTrueValue()); |
4246 | Worklist.push_back(SI->getFalseValue()); |
4247 | continue; |
4248 | } |
4249 | |
4250 | if (auto *PN = dyn_cast<PHINode>(P)) { |
4251 | // If this PHI changes the underlying object in every iteration of the |
4252 | // loop, don't look through it. Consider: |
4253 | // int **A; |
4254 | // for (i) { |
4255 | // Prev = Curr; // Prev = PHI (Prev_0, Curr) |
4256 | // Curr = A[i]; |
4257 | // *Prev, *Curr; |
4258 | // |
4259 | // Prev is tracking Curr one iteration behind so they refer to different |
4260 | // underlying objects. |
4261 | if (!LI || !LI->isLoopHeader(PN->getParent()) || |
4262 | isSameUnderlyingObjectInLoop(PN, LI)) |
4263 | append_range(Worklist, PN->incoming_values()); |
4264 | continue; |
4265 | } |
4266 | |
4267 | Objects.push_back(P); |
4268 | } while (!Worklist.empty()); |
4269 | } |
4270 | |
4271 | /// This is the function that does the work of looking through basic |
4272 | /// ptrtoint+arithmetic+inttoptr sequences. |
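     | /// E.g. (illustrative): starting from %i in
     | ///   %pi = ptrtoint i8* %p to i64
     | ///   %i  = add i64 %pi, 16
     | /// this walks through the add back to the ptrtoint and returns %p.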
4273 | static const Value *getUnderlyingObjectFromInt(const Value *V) { |
4274 | do { |
4275 | if (const Operator *U = dyn_cast<Operator>(V)) { |
4276 | // If we find a ptrtoint, we can transfer control back to the |
4277 | // regular getUnderlyingObjectFromInt. |
4278 | if (U->getOpcode() == Instruction::PtrToInt) |
4279 | return U->getOperand(0); |
4280 | // If we find an add of a constant, a multiplied value, or a phi, it's |
4281 | // likely that the other operand will lead us to the base |
4282 | // object. We don't have to worry about the case where the |
4283 | // object address is somehow being computed by the multiply, |
4284 | // because our callers only care when the result is an |
4285 | // identifiable object. |
4286 | if (U->getOpcode() != Instruction::Add || |
4287 | (!isa<ConstantInt>(U->getOperand(1)) && |
4288 | Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && |
4289 | !isa<PHINode>(U->getOperand(1)))) |
4290 | return V; |
4291 | V = U->getOperand(0); |
4292 | } else { |
4293 | return V; |
4294 | } |
4295 |     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4296 | } while (true); |
4297 | } |
4298 | |
4299 | /// This is a wrapper around getUnderlyingObjects and adds support for basic |
4300 | /// ptrtoint+arithmetic+inttoptr sequences. |
4301 | /// It returns false if an unidentified object is found in getUnderlyingObjects.
4302 | bool llvm::getUnderlyingObjectsForCodeGen(const Value *V, |
4303 | SmallVectorImpl<Value *> &Objects) { |
4304 | SmallPtrSet<const Value *, 16> Visited; |
4305 | SmallVector<const Value *, 4> Working(1, V); |
4306 | do { |
4307 | V = Working.pop_back_val(); |
4308 | |
4309 | SmallVector<const Value *, 4> Objs; |
4310 | getUnderlyingObjects(V, Objs); |
4311 | |
4312 | for (const Value *V : Objs) { |
4313 | if (!Visited.insert(V).second) |
4314 | continue; |
4315 | if (Operator::getOpcode(V) == Instruction::IntToPtr) { |
4316 | const Value *O = |
4317 | getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0)); |
4318 | if (O->getType()->isPointerTy()) { |
4319 | Working.push_back(O); |
4320 | continue; |
4321 | } |
4322 | } |
4323 | // If getUnderlyingObjects fails to find an identifiable object, |
4324 | // getUnderlyingObjectsForCodeGen also fails for safety. |
4325 | if (!isIdentifiedObject(V)) { |
4326 | Objects.clear(); |
4327 | return false; |
4328 | } |
4329 | Objects.push_back(const_cast<Value *>(V)); |
4330 | } |
4331 | } while (!Working.empty()); |
4332 | return true; |
4333 | } |
4334 | |
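     | // A sketch of findAllocaForValue's behavior (illustrative IR): for
     | //   %a = alloca i32
     | //   %p = bitcast i32* %a to i8*
     | // findAllocaForValue(%p) returns %a, while a phi or select over two
     | // distinct allocas yields nullptr, since no single alloca is the answer.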
4335 | AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) { |
4336 | AllocaInst *Result = nullptr; |
4337 | SmallPtrSet<Value *, 4> Visited; |
4338 | SmallVector<Value *, 4> Worklist; |
4339 | |
4340 | auto AddWork = [&](Value *V) { |
4341 | if (Visited.insert(V).second) |
4342 | Worklist.push_back(V); |
4343 | }; |
4344 | |
4345 | AddWork(V); |
4346 | do { |
4347 | V = Worklist.pop_back_val(); |
4348 |     assert(Visited.count(V));
4349 | |
4350 | if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { |
4351 | if (Result && Result != AI) |
4352 | return nullptr; |
4353 | Result = AI; |
4354 | } else if (CastInst *CI = dyn_cast<CastInst>(V)) { |
4355 | AddWork(CI->getOperand(0)); |
4356 | } else if (PHINode *PN = dyn_cast<PHINode>(V)) { |
4357 | for (Value *IncValue : PN->incoming_values()) |
4358 | AddWork(IncValue); |
4359 | } else if (auto *SI = dyn_cast<SelectInst>(V)) { |
4360 | AddWork(SI->getTrueValue()); |
4361 | AddWork(SI->getFalseValue()); |
4362 | } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) { |
4363 | if (OffsetZero && !GEP->hasAllZeroIndices()) |
4364 | return nullptr; |
4365 | AddWork(GEP->getPointerOperand()); |
4366 | } else { |
4367 | return nullptr; |
4368 | } |
4369 | } while (!Worklist.empty()); |
4370 | |
4371 | return Result; |
4372 | } |
4373 | |
4374 | static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper( |
4375 | const Value *V, bool AllowLifetime, bool AllowDroppable) { |
4376 | for (const User *U : V->users()) { |
4377 | const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); |
4378 | if (!II) |
4379 | return false; |
4380 | |
4381 | if (AllowLifetime && II->isLifetimeStartOrEnd()) |
4382 | continue; |
4383 | |
4384 | if (AllowDroppable && II->isDroppable()) |
4385 | continue; |
4386 | |
4387 | return false; |
4388 | } |
4389 | return true; |
4390 | } |
4391 | |
4392 | bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { |
4393 | return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( |
4394 | V, /* AllowLifetime */ true, /* AllowDroppable */ false); |
4395 | } |
4396 | bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) { |
4397 | return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( |
4398 | V, /* AllowLifetime */ true, /* AllowDroppable */ true); |
4399 | } |
4400 | |
4401 | bool llvm::mustSuppressSpeculation(const LoadInst &LI) { |
4402 | if (!LI.isUnordered()) |
4403 | return true; |
4404 | const Function &F = *LI.getFunction(); |
4405 | // Speculative load may create a race that did not exist in the source. |
4406 | return F.hasFnAttribute(Attribute::SanitizeThread) || |
4407 | // Speculative load may load data from dirty regions. |
4408 | F.hasFnAttribute(Attribute::SanitizeAddress) || |
4409 | F.hasFnAttribute(Attribute::SanitizeHWAddress); |
4410 | } |
4411 | |
4412 | |
4413 | bool llvm::isSafeToSpeculativelyExecute(const Value *V, |
4414 | const Instruction *CtxI, |
4415 | const DominatorTree *DT) { |
4416 | const Operator *Inst = dyn_cast<Operator>(V); |
4417 | if (!Inst) |
4418 | return false; |
4419 | |
4420 | for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) |
4421 | if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) |
4422 | if (C->canTrap()) |
4423 | return false; |
4424 | |
4425 | switch (Inst->getOpcode()) { |
4426 | default: |
4427 | return true; |
4428 | case Instruction::UDiv: |
4429 | case Instruction::URem: { |
4430 | // x / y is undefined if y == 0. |
4431 | const APInt *V; |
4432 | if (match(Inst->getOperand(1), m_APInt(V))) |
4433 | return *V != 0; |
4434 | return false; |
4435 | } |
4436 | case Instruction::SDiv: |
4437 | case Instruction::SRem: { |
4438 | // x / y is undefined if y == 0 or x == INT_MIN and y == -1 |
4439 | const APInt *Numerator, *Denominator; |
4440 | if (!match(Inst->getOperand(1), m_APInt(Denominator))) |
4441 | return false; |
4442 | // We cannot hoist this division if the denominator is 0. |
4443 | if (*Denominator == 0) |
4444 | return false; |
4445 | // It's safe to hoist if the denominator is not 0 or -1. |
4446 | if (!Denominator->isAllOnesValue()) |
4447 | return true; |
4448 |     // At this point we know that the denominator is -1. It is safe to hoist
4449 |     // as long as we know that the numerator is not INT_MIN.
4450 | if (match(Inst->getOperand(0), m_APInt(Numerator))) |
4451 | return !Numerator->isMinSignedValue(); |
4452 | // The numerator *might* be MinSignedValue. |
4453 | return false; |
4454 | } |
4455 | case Instruction::Load: { |
4456 | const LoadInst *LI = cast<LoadInst>(Inst); |
4457 | if (mustSuppressSpeculation(*LI)) |
4458 | return false; |
4459 | const DataLayout &DL = LI->getModule()->getDataLayout(); |
4460 | return isDereferenceableAndAlignedPointer( |
4461 | LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()), |
4462 | DL, CtxI, DT); |
4463 | } |
4464 | case Instruction::Call: { |
4465 | auto *CI = cast<const CallInst>(Inst); |
4466 | const Function *Callee = CI->getCalledFunction(); |
4467 | |
4468 | // The called function could have undefined behavior or side-effects, even |
4469 | // if marked readnone nounwind. |
4470 | return Callee && Callee->isSpeculatable(); |
4471 | } |
4472 | case Instruction::VAArg: |
4473 | case Instruction::Alloca: |
4474 | case Instruction::Invoke: |
4475 | case Instruction::CallBr: |
4476 | case Instruction::PHI: |
4477 | case Instruction::Store: |
4478 | case Instruction::Ret: |
4479 | case Instruction::Br: |
4480 | case Instruction::IndirectBr: |
4481 | case Instruction::Switch: |
4482 | case Instruction::Unreachable: |
4483 | case Instruction::Fence: |
4484 | case Instruction::AtomicRMW: |
4485 | case Instruction::AtomicCmpXchg: |
4486 | case Instruction::LandingPad: |
4487 | case Instruction::Resume: |
4488 | case Instruction::CatchSwitch: |
4489 | case Instruction::CatchPad: |
4490 | case Instruction::CatchRet: |
4491 | case Instruction::CleanupPad: |
4492 | case Instruction::CleanupRet: |
4493 | return false; // Misc instructions which have effects |
4494 | } |
4495 | } |
4496 | |
4497 | bool llvm::mayBeMemoryDependent(const Instruction &I) { |
4498 | return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); |
4499 | } |
4500 | |
4501 | /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult. |
4502 | static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) { |
4503 | switch (OR) { |
4504 | case ConstantRange::OverflowResult::MayOverflow: |
4505 | return OverflowResult::MayOverflow; |
4506 | case ConstantRange::OverflowResult::AlwaysOverflowsLow: |
4507 | return OverflowResult::AlwaysOverflowsLow; |
4508 | case ConstantRange::OverflowResult::AlwaysOverflowsHigh: |
4509 | return OverflowResult::AlwaysOverflowsHigh; |
4510 | case ConstantRange::OverflowResult::NeverOverflows: |
4511 | return OverflowResult::NeverOverflows; |
4512 | } |
4513 | llvm_unreachable("Unknown OverflowResult")::llvm::llvm_unreachable_internal("Unknown OverflowResult", "/build/llvm-toolchain-snapshot-13~++20210223111116+16ede0956cb1/llvm/lib/Analysis/ValueTracking.cpp" , 4513); |
4514 | } |
4515 | |
4516 | /// Combine constant ranges from computeConstantRange() and computeKnownBits(). |
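     | /// E.g. (illustrative): if computeKnownBits() proves the top 28 bits of an
     | /// i32 are zero (so the value lies in [0, 16)) and computeConstantRange()
     | /// yields [8, 64), the intersection [8, 16) is tighter than either alone.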
4517 | static ConstantRange computeConstantRangeIncludingKnownBits( |
4518 | const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth, |
4519 | AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, |
4520 | OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) { |
4521 | KnownBits Known = computeKnownBits( |
4522 | V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo); |
4523 | ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned); |
4524 | ConstantRange CR2 = computeConstantRange(V, UseInstrInfo); |
4525 | ConstantRange::PreferredRangeType RangeType = |
4526 | ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned; |
4527 | return CR1.intersectWith(CR2, RangeType); |
4528 | } |
4529 | |
4530 | OverflowResult llvm::computeOverflowForUnsignedMul( |
4531 | const Value *LHS, const Value *RHS, const DataLayout &DL, |
4532 | AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, |
4533 | bool UseInstrInfo) { |
4534 | KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, |
4535 | nullptr, UseInstrInfo); |
4536 | KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, |
4537 | nullptr, UseInstrInfo); |
4538 | ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false); |
4539 | ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false); |
4540 | return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange)); |
4541 | } |
4542 | |
4543 | OverflowResult |
4544 | llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS, |
4545 | const DataLayout &DL, AssumptionCache *AC, |
4546 | const Instruction *CxtI, |
4547 | const DominatorTree *DT, bool UseInstrInfo) { |
4548 | // Multiplying n * m significant bits yields a result of n + m significant |
4549 | // bits. If the total number of significant bits does not exceed the |
4550 | // result bit width (minus 1), there is no overflow. |
4551 | // This means if we have enough leading sign bits in the operands |
4552 | // we can guarantee that the result does not overflow. |
4553 | // Ref: "Hacker's Delight" by Henry Warren |
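     |   // Worked example (i16, BitWidth == 16): if LHS has 9 sign bits (at most 8
     |   // significant bits) and RHS has 10 (at most 7), then SignBits == 19 > 17,
     |   // and the product needs at most 8 + 7 == 15 bits, so it cannot overflow.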
4554 | unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); |
4555 | |
4556 | // Note that underestimating the number of sign bits gives a more |
4557 | // conservative answer. |
4558 | unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) + |
4559 | ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT); |
4560 | |
4561 | // First handle the easy case: if we have enough sign bits there's |
4562 | // definitely no overflow. |
4563 | if (SignBits > BitWidth + 1) |
4564 | return OverflowResult::NeverOverflows; |
4565 | |
4566 | // There are two ambiguous cases where there can be no overflow: |
4567 | // SignBits == BitWidth + 1 and |
4568 | // SignBits == BitWidth |
4569 | // The second case is difficult to check, therefore we only handle the |
4570 | // first case. |
4571 | if (SignBits == BitWidth + 1) { |
4572 | // It overflows only when both arguments are negative and the true |
4573 | // product is exactly the minimum negative number. |
4574 | // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 |
4575 | // For simplicity we just check if at least one side is not negative. |
4576 | KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, |
4577 | nullptr, UseInstrInfo); |
4578 | KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, |
4579 | nullptr, UseInstrInfo); |
4580 | if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) |
4581 | return OverflowResult::NeverOverflows; |
4582 | } |
4583 | return OverflowResult::MayOverflow; |
4584 | } |
4585 | |
4586 | OverflowResult llvm::computeOverflowForUnsignedAdd( |
4587 | const Value *LHS, const Value *RHS, const DataLayout &DL, |
4588 | AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, |
4589 | bool UseInstrInfo) { |
4590 | ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( |
4591 | LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, |
4592 | nullptr, UseInstrInfo); |
4593 | ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( |
4594 | RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, |
4595 | nullptr, UseInstrInfo); |
4596 | return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange)); |
4597 | } |
4598 | |
4599 | static OverflowResult computeOverflowForSignedAdd(const Value *LHS, |
4600 | const Value *RHS, |
4601 | const AddOperator *Add, |
4602 | const DataLayout &DL, |
4603 | AssumptionCache *AC, |
4604 | const Instruction *CxtI, |
4605 | const DominatorTree *DT) { |
4606 | if (Add && Add->hasNoSignedWrap()) { |
4607 | return OverflowResult::NeverOverflows; |
4608 | } |
4609 | |
4610 | // If LHS and RHS each have at least two sign bits, the addition will look |
4611 | // like |
4612 | // |
4613 | // XX..... + |
4614 | // YY..... |
4615 | // |
4616 | // If the carry into the most significant position is 0, X and Y can't both |
4617 | // be 1 and therefore the carry out of the addition is also 0. |
4618 | // |
4619 | // If the carry into the most significant position is 1, X and Y can't both |
4620 | // be 0 and therefore the carry out of the addition is also 1. |
4621 | // |
4622 | // Since the carry into the most significant position is always equal to |
4623 | // the carry out of the addition, there is no signed overflow. |
4624 | if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && |
4625 | ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) |
4626 | return OverflowResult::NeverOverflows; |
4627 | |
4628 | ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( |
4629 | LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); |
4630 | ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( |
4631 | RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); |
4632 | OverflowResult OR = |
4633 | mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange)); |
4634 | if (OR != OverflowResult::MayOverflow) |
4635 | return OR; |
4636 | |
4637 |   // The remaining code needs Add to be available. Return early if it is not.
4638 | if (!Add) |
4639 | return OverflowResult::MayOverflow; |
4640 | |
4641 | // If the sign of Add is the same as at least one of the operands, this add |
4642 | // CANNOT overflow. If this can be determined from the known bits of the |
4643 | // operands the above signedAddMayOverflow() check will have already done so. |
4644 | // The only other way to improve on the known bits is from an assumption, so |
4645 | // call computeKnownBitsFromAssume() directly. |
4646 | bool LHSOrRHSKnownNonNegative = |
4647 | (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative()); |
4648 | bool LHSOrRHSKnownNegative = |
4649 | (LHSRange.isAllNegative() || RHSRange.isAllNegative()); |
4650 | if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { |
4651 | KnownBits AddKnown(LHSRange.getBitWidth()); |
4652 | computeKnownBitsFromAssume( |
4653 | Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true)); |
4654 | if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) || |
4655 | (AddKnown.isNegative() && LHSOrRHSKnownNegative)) |
4656 | return OverflowResult::NeverOverflows; |
4657 | } |
4658 | |
4659 | return OverflowResult::MayOverflow; |
4660 | } |
4661 | |
4662 | OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, |
4663 | const Value *RHS, |
4664 | const DataLayout &DL, |
4665 | AssumptionCache *AC, |
4666 | const Instruction *CxtI, |
4667 | const DominatorTree *DT) { |
4668 | // Checking for conditions implied by dominating conditions may be expensive. |
4669 | // Limit it to usub_with_overflow calls for now. |
4670 | if (match(CxtI, |
4671 | m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value()))) |
4672 | if (auto C = |
4673 | isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) { |
4674 | if (*C) |
4675 | return OverflowResult::NeverOverflows; |
4676 | return OverflowResult::AlwaysOverflowsLow; |
4677 | } |
4678 | ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( |
4679 | LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); |
4680 | ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( |
4681 | RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); |
4682 | return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange)); |
4683 | } |
4684 | |
4685 | OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, |
4686 | const Value *RHS, |
4687 | const DataLayout &DL, |
4688 | AssumptionCache *AC, |
4689 | const Instruction *CxtI, |
4690 | const DominatorTree *DT) { |
4691 | // If LHS and RHS each have at least two sign bits, the subtraction |
4692 | // cannot overflow. |
4693 | if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && |
4694 | ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) |
4695 | return OverflowResult::NeverOverflows; |
4696 | |
4697 | ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( |
4698 | LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); |
4699 | ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( |
4700 | RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); |
4701 | return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange)); |
4702 | } |
4703 | |
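     | // A sketch of the guarded pattern this recognizes (illustrative IR):
     | //   %s  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
     | //   %ov = extractvalue { i32, i1 } %s, 1
     | //   br i1 %ov, label %trap, label %ok
     | // Uses of the result (extractvalue %s, 0) reached only via %ok are known
     | // not to have wrapped, because the overflow branch guards them.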
4704 | bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, |
4705 | const DominatorTree &DT) { |
4706 | SmallVector<const BranchInst *, 2> GuardingBranches; |
4707 | SmallVector<const ExtractValueInst *, 2> Results; |
4708 | |
4709 | for (const User *U : WO->users()) { |
4710 | if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) { |
4711 |       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4712 | |
4713 | if (EVI->getIndices()[0] == 0) |
4714 | Results.push_back(EVI); |
4715 | else { |
4716 |         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4717 | |
4718 | for (const auto *U : EVI->users()) |
4719 | if (const auto *B = dyn_cast<BranchInst>(U)) { |
4720 |           assert(B->isConditional() && "How else is it using an i1?");
4721 | GuardingBranches.push_back(B); |
4722 | } |
4723 | } |
4724 | } else { |
4725 | // We are using the aggregate directly in a way we don't want to analyze |
4726 | // here (storing it to a global, say). |
4727 | return false; |
4728 | } |
4729 | } |
4730 | |
4731 | auto AllUsesGuardedByBranch = [&](const BranchInst *BI) { |
4732 | BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1)); |
4733 | if (!NoWrapEdge.isSingleEdge()) |
4734 | return false; |
4735 | |
4736 | // Check if all users of the add are provably no-wrap. |
4737 | for (const auto *Result : Results) { |
4738 |       // If the extractvalue itself is not executed on overflow, then we don't
4739 |       // need to check each use separately, since domination is transitive.
4740 | if (DT.dominates(NoWrapEdge, Result->getParent())) |
4741 | continue; |
4742 | |
4743 | for (auto &RU : Result->uses()) |
4744 | if (!DT.dominates(NoWrapEdge, RU)) |
4745 | return false; |
4746 | } |
4747 | |
4748 | return true; |
4749 | }; |
4750 | |
4751 | return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch); |
4752 | } |
4753 | |
4754 | static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) { |
4755 |   // See whether Op has flags that may create poison.
4756 | if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) { |
4757 | if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap()) |
4758 | return true; |
4759 | } |
4760 | if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op)) |
4761 | if (ExactOp->isExact()) |
4762 | return true; |
4763 | if (const auto *FP = dyn_cast<FPMathOperator>(Op)) { |
4764 | auto FMF = FP->getFastMathFlags(); |
4765 | if (FMF.noNaNs() || FMF.noInfs()) |
4766 | return true; |
4767 | } |
4768 | |
4769 | unsigned Opcode = Op->getOpcode(); |
4770 | |
4771 | // Check whether opcode is a poison/undef-generating operation |
4772 | switch (Opcode) { |
4773 | case Instruction::Shl: |
4774 | case Instruction::AShr: |
4775 | case Instruction::LShr: { |
4776 |     // Shifts return poison if the shift amount is >= the bit width.
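     |     // E.g., shl i32 %x, 33 is poison, and vectors are checked per lane:
     |     // shl <2 x i32> %v, <i32 1, i32 33> can produce poison in lane 1.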
4777 | if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) { |
4778 | SmallVector<Constant *, 4> ShiftAmounts; |
4779 | if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) { |
4780 | unsigned NumElts = FVTy->getNumElements(); |
4781 | for (unsigned i = 0; i < NumElts; ++i) |
4782 | ShiftAmounts.push_back(C->getAggregateElement(i)); |
4783 | } else if (isa<ScalableVectorType>(C->getType())) |
4784 | return true; // Can't tell, just return true to be safe |
4785 | else |
4786 | ShiftAmounts.push_back(C); |
4787 | |
4788 | bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) { |
4789 | auto *CI = dyn_cast_or_null<ConstantInt>(C); |
4790 | return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth()); |
4791 | }); |
4792 | return !Safe; |
4793 | } |
4794 | return true; |
4795 | } |
4796 | case Instruction::FPToSI: |
4797 | case Instruction::FPToUI: |
4798 | // fptosi/ui yields poison if the resulting value does not fit in the |
4799 | // destination type. |
4800 | return true; |
4801 | case Instruction::Call: |
4802 | case Instruction::CallBr: |
4803 | case Instruction::Invoke: { |
4804 | const auto *CB = cast<CallBase>(Op); |
4805 | return !CB->hasRetAttr(Attribute::NoUndef); |
4806 | } |
4807 | case Instruction::InsertElement: |
4808 | case Instruction::ExtractElement: { |
4809 |     // If the index exceeds the length of the vector, the result is poison.
4810 | auto *VTy = cast<VectorType>(Op->getOperand(0)->getType()); |
4811 | unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1; |
4812 | auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp)); |
4813 | if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue())) |
4814 | return true; |
4815 | return false; |
4816 | } |
4817 | case Instruction::ShuffleVector: { |
4818 | // shufflevector may return undef. |
4819 | if (PoisonOnly) |
4820 | return false; |
4821 | ArrayRef<int> Mask = isa<ConstantExpr>(Op) |
4822 | ? cast<ConstantExpr>(Op)->getShuffleMask() |
4823 | : cast<ShuffleVectorInst>(Op)->getShuffleMask(); |
4824 | return is_contained(Mask, UndefMaskElem); |
4825 | } |
4826 | case Instruction::FNeg: |
4827 | case Instruction::PHI: |
4828 | case Instruction::Select: |
4829 | case Instruction::URem: |
4830 | case Instruction::SRem: |
4831 | case Instruction::ExtractValue: |
4832 | case Instruction::InsertValue: |
4833 | case Instruction::Freeze: |
4834 | case Instruction::ICmp: |
4835 | case Instruction::FCmp: |
4836 | return false; |
4837 | case Instruction::GetElementPtr: { |
4838 | const auto *GEP = cast<GEPOperator>(Op); |
4839 | return GEP->isInBounds(); |
4840 | } |
4841 | default: { |
4842 | const auto *CE = dyn_cast<ConstantExpr>(Op); |
4843 | if (isa<CastInst>(Op) || (CE && CE->isCast())) |
4844 | return false; |
4845 | else if (Instruction::isBinaryOp(Opcode)) |
4846 | return false; |
4847 | // Be conservative and return true. |
4848 | return true; |
4849 | } |
4850 | } |
4851 | } |
4852 | |
4853 | bool llvm::canCreateUndefOrPoison(const Operator *Op) { |
4854 | return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false); |
4855 | } |
4856 | |
4857 | bool llvm::canCreatePoison(const Operator *Op) { |
4858 | return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true); |
4859 | } |
4860 | |
4861 | static bool directlyImpliesPoison(const Value *ValAssumedPoison, |
4862 | const Value *V, unsigned Depth) { |
4863 | if (ValAssumedPoison == V) |
4864 | return true; |
4865 | |
4866 | const unsigned MaxDepth = 2; |
4867 | if (Depth >= MaxDepth) |
4868 | return false; |
4869 | |
4870 | if (const auto *I = dyn_cast<Instruction>(V)) { |
4871 | if (propagatesPoison(cast<Operator>(I))) |
4872 | return any_of(I->operands(), [=](const Value *Op) { |
4873 | return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1); |
4874 | }); |
4875 | |
4876 | // V = extractvalue V0, idx |
4877 | // V2 = extractvalue V0, idx2 |
4878 | // V0's elements are all poison or none are (e.g., add_with_overflow).
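     | // Illustrative IR:
     | //   %agg = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
     | //   %v   = extractvalue { i32, i1 } %agg, 0
     | //   %v2  = extractvalue { i32, i1 } %agg, 1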
4879 | const WithOverflowInst *II; |
4880 | if (match(I, m_ExtractValue(m_WithOverflowInst(II))) && |
4881 | match(ValAssumedPoison, m_ExtractValue(m_Specific(II)))) |
4882 | return true; |
4883 | } |
4884 | return false; |
4885 | } |
4886 | |
4887 | static bool impliesPoison(const Value *ValAssumedPoison, const Value *V, |
4888 | unsigned Depth) { |
4889 | if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison)) |
4890 | return true; |
4891 | |
4892 | if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0)) |
4893 | return true; |
4894 | |
4895 | const unsigned MaxDepth = 2; |
4896 | if (Depth >= MaxDepth) |
4897 | return false; |
4898 | |
4899 | const auto *I = dyn_cast<Instruction>(ValAssumedPoison); |
4900 | if (I && !canCreatePoison(cast<Operator>(I))) { |
4901 | return all_of(I->operands(), [=](const Value *Op) { |
4902 | return impliesPoison(Op, V, Depth + 1); |
4903 | }); |
4904 | } |
4905 | return false; |
4906 | } |
4907 | |
4908 | bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) { |
4909 | return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0); |
4910 | } |
4911 | |
4912 | static bool programUndefinedIfUndefOrPoison(const Value *V, |
4913 | bool PoisonOnly); |
4914 | |
4915 | static bool isGuaranteedNotToBeUndefOrPoison(const Value *V, |
4916 | AssumptionCache *AC, |
4917 | const Instruction *CtxI, |
4918 | const DominatorTree *DT, |
4919 | unsigned Depth, bool PoisonOnly) { |
4920 | if (Depth >= MaxAnalysisRecursionDepth) |
4921 | return false; |
4922 | |
4923 | if (isa<MetadataAsValue>(V)) |
4924 | return false; |
4925 | |
4926 | if (const auto *A = dyn_cast<Argument>(V)) { |
4927 | if (A->hasAttribute(Attribute::NoUndef)) |
4928 | return true; |
4929 | } |
4930 | |
4931 | if (auto *C = dyn_cast<Constant>(V)) { |
4932 | if (isa<UndefValue>(C)) |
4933 | return PoisonOnly && !isa<PoisonValue>(C); |
4934 | |
4935 | if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) || |
4936 | isa<ConstantPointerNull>(C) || isa<Function>(C)) |
4937 | return true; |
4938 | |
4939 | if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C)) |
4940 | return (PoisonOnly ? !C->containsPoisonElement() |
4941 | : !C->containsUndefOrPoisonElement()) && |
4942 | !C->containsConstantExpression(); |
4943 | } |
4944 | |
4945 | // Strip cast operations from a pointer value.
4946 | // Note that stripPointerCastsSameRepresentation can strip off getelementptr
4947 | // inbounds with zero offset. To guarantee that the result isn't poison, the
4948 | // stripped pointer is checked: it has to point into an allocated object or
4949 | // be null, which ensures that `inbounds` getelementptrs with a zero offset
4950 | // cannot produce poison.
4951 | // It can also strip off addrspacecasts that do not change the bit
4952 | // representation; we consider such an addrspacecast equivalent to a no-op.
4953 | auto *StrippedV = V->stripPointerCastsSameRepresentation(); |
4954 | if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) || |
4955 | isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV)) |
4956 | return true; |
4957 | |
4958 | auto OpCheck = [&](const Value *V) { |
4959 | return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1, |
4960 | PoisonOnly); |
4961 | }; |
4962 | |
4963 | if (auto *Opr = dyn_cast<Operator>(V)) { |
4964 | // If the value is a freeze instruction, then it can never |
4965 | // be undef or poison. |
4966 | if (isa<FreezeInst>(V)) |
4967 | return true; |
4968 | |
4969 | if (const auto *CB = dyn_cast<CallBase>(V)) { |
4970 | if (CB->hasRetAttr(Attribute::NoUndef)) |
4971 | return true; |
4972 | } |
4973 | |
4974 | if (const auto *PN = dyn_cast<PHINode>(V)) { |
4975 | unsigned Num = PN->getNumIncomingValues(); |
4976 | bool IsWellDefined = true; |
4977 | for (unsigned i = 0; i < Num; ++i) { |
4978 | auto *TI = PN->getIncomingBlock(i)->getTerminator(); |
4979 | if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI, |
4980 | DT, Depth + 1, PoisonOnly)) { |
4981 | IsWellDefined = false; |
4982 | break; |
4983 | } |
4984 | } |
4985 | if (IsWellDefined) |
4986 | return true; |
4987 | } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck)) |
4988 | return true; |
4989 | } |
4990 | |
4991 | if (auto *I = dyn_cast<LoadInst>(V)) |
4992 | if (I->getMetadata(LLVMContext::MD_noundef)) |
4993 | return true; |
4994 | |
4995 | if (programUndefinedIfUndefOrPoison(V, PoisonOnly)) |
4996 | return true; |
4997 | |
4998 | // CtxI may be null or a cloned instruction.
4999 | if (!CtxI || !CtxI->getParent() || !DT) |
5000 | return false; |
5001 | |
5002 | auto *DNode = DT->getNode(CtxI->getParent()); |
5003 | if (!DNode) |
5004 | // Unreachable block |
5005 | return false; |
5006 | |
5007 | // If V is used as a branch condition before reaching CtxI, V cannot be |
5008 | // undef or poison. |
5009 | // br V, BB1, BB2 |
5010 | // BB1: |
5011 | // CtxI ; V cannot be undef or poison here |
5012 | auto *Dominator = DNode->getIDom(); |
5013 | while (Dominator) { |
5014 | auto *TI = Dominator->getBlock()->getTerminator(); |
5015 | |
5016 | Value *Cond = nullptr; |
5017 | if (auto BI = dyn_cast<BranchInst>(TI)) { |
5018 | if (BI->isConditional()) |
5019 | Cond = BI->getCondition(); |
5020 | } else if (auto SI = dyn_cast<SwitchInst>(TI)) { |
5021 | Cond = SI->getCondition(); |
5022 | } |
5023 | |
5024 | if (Cond) { |
5025 | if (Cond == V) |
5026 | return true; |
5027 | else if (PoisonOnly && isa<Operator>(Cond)) { |
5028 | // For poison, we can analyze further |
5029 | auto *Opr = cast<Operator>(Cond); |
5030 | if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V)) |
5031 | return true; |
5032 | } |
5033 | } |
5034 | |
5035 | Dominator = Dominator->getIDom(); |
5036 | } |
5037 | |
5038 | SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef}; |
5039 | if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC)) |
5040 | return true; |
5041 | |
5042 | return false; |
5043 | } |
5044 | |
5045 | bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC, |
5046 | const Instruction *CtxI, |
5047 | const DominatorTree *DT, |
5048 | unsigned Depth) { |
5049 | return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false); |
5050 | } |
5051 | |
5052 | bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC, |
5053 | const Instruction *CtxI, |
5054 | const DominatorTree *DT, unsigned Depth) { |
5055 | return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true); |
5056 | } |
5057 | |
5058 | OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add, |
5059 | const DataLayout &DL, |
5060 | AssumptionCache *AC, |
5061 | const Instruction *CxtI, |
5062 | const DominatorTree *DT) { |
5063 | return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1), |
5064 | Add, DL, AC, CxtI, DT); |
5065 | } |
5066 | |
5067 | OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS, |
5068 | const Value *RHS, |
5069 | const DataLayout &DL, |
5070 | AssumptionCache *AC, |
5071 | const Instruction *CxtI, |
5072 | const DominatorTree *DT) { |
5073 | return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT); |
5074 | } |
5075 | |
5076 | bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) { |
5077 | // Note: An atomic operation isn't guaranteed to return in a reasonable amount |
5078 | // of time because it's possible for another thread to interfere with it for an |
5079 | // arbitrary length of time, but programs aren't allowed to rely on that. |
5080 | |
5081 | // If there is no successor, then execution can't transfer to it. |
5082 | if (isa<ReturnInst>(I)) |
5083 | return false; |
5084 | if (isa<UnreachableInst>(I)) |
5085 | return false; |
5086 | |
5087 | // An instruction that returns without throwing must transfer control flow |
5088 | // to a successor. |
5089 | return !I->mayThrow() && I->willReturn(); |
5090 | } |
5091 | |
5092 | bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) { |
5093 | // TODO: This is slightly conservative for invoke instructions since exiting
5094 | // via an exception *is* normal control flow for them.
5095 | for (const Instruction &I : *BB) |
5096 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) |
5097 | return false; |
5098 | return true; |
5099 | } |
5100 | |
5101 | bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I, |
5102 | const Loop *L) { |
5103 | // The loop header is guaranteed to be executed for every iteration. |
5104 | // |
5105 | // FIXME: Relax this constraint to cover all basic blocks that are |
5106 | // guaranteed to be executed at every iteration. |
5107 | if (I->getParent() != L->getHeader()) return false; |
5108 | |
5109 | for (const Instruction &LI : *L->getHeader()) { |
5110 | if (&LI == I) return true; |
5111 | if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false; |
5112 | } |
5113 | llvm_unreachable("Instruction not contained in its own parent basic block.");
5114 | } |
5115 | |
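     | /// A poison operand makes the results of these opcodes poison as well;
     | /// e.g. (illustrative) if %x is poison, `%y = add i32 %x, 1` and
     | /// `%c = icmp eq i32 %x, 0` are poison, while freeze, select, and phi
     | /// can stop the propagation.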
5116 | bool llvm::propagatesPoison(const Operator *I) { |
5117 | switch (I->getOpcode()) { |
5118 | case Instruction::Freeze: |
5119 | case Instruction::Select: |
5120 | case Instruction::PHI: |
5121 | case Instruction::Call: |
5122 | case Instruction::Invoke: |
5123 | return false; |
5124 | case Instruction::ICmp: |
5125 | case Instruction::FCmp: |
5126 | case Instruction::GetElementPtr: |
5127 | return true; |
5128 | default: |
5129 | if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I)) |
5130 | return true; |
5131 | |
5132 | // Be conservative and return false. |
5133 | return false; |
5134 | } |
5135 | } |
5136 | |
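     | /// Collects operands that must be well-defined (neither undef nor poison)
     | /// for I to have defined behavior; e.g. (illustrative) for
     | /// `store i32 %v, i32* %p` the pointer operand %p must be well-defined.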
5137 | void llvm::getGuaranteedWellDefinedOps( |
5138 | const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) { |
5139 | switch (I->getOpcode()) { |
5140 | case Instruction::Store: |
5141 | Operands.insert(cast<StoreInst>(I)->getPointerOperand()); |
5142 | break; |
5143 | |
5144 | case Instruction::Load: |
5145 | Operands.insert(cast<LoadInst>(I)->getPointerOperand()); |
5146 | break; |
5147 | |
5148 | // Since the dereferenceable attribute implies noundef, atomic operations
5149 | // also implicitly have noundef pointer operands.
5150 | case Instruction::AtomicCmpXchg: |
5151 | Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand()); |
5152 | break; |
5153 | |
5154 | case Instruction::AtomicRMW: |
5155 | Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand()); |
5156 | break; |
5157 | |
5158 | case Instruction::Call: |
5159 | case Instruction::Invoke: { |
5160 | const CallBase *CB = cast<CallBase>(I); |
5161 | if (CB->isIndirectCall()) |
5162 | Operands.insert(CB->getCalledOperand()); |
5163 | for (unsigned i = 0; i < CB->arg_size(); ++i) { |
5164 | if (CB->paramHasAttr(i, Attribute::NoUndef) || |
5165 | CB->paramHasAttr(i, Attribute::Dereferenceable)) |
5166 | Operands.insert(CB->getArgOperand(i)); |
5167 | } |
5168 | break; |
5169 | } |
5170 | |
5171 | default: |
5172 | break; |
5173 | } |
5174 | } |
5175 | |
5176 | void llvm::getGuaranteedNonPoisonOps(const Instruction *I, |
5177 | SmallPtrSetImpl<const Value *> &Operands) { |
5178 | getGuaranteedWellDefinedOps(I, Operands); |
5179 | switch (I->getOpcode()) { |
5180 | // Divisors of these operations are allowed to be partially undef. |
5181 | case Instruction::UDiv: |
5182 | case Instruction::SDiv: |
5183 | case Instruction::URem: |
5184 | case Instruction::SRem: |
5185 | Operands.insert(I->getOperand(1)); |
5186 | break; |
5187 | |
5188 | default: |
5189 | break; |
5190 | } |
5191 | } |
5192 | |
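     | /// Returns true if executing I is undefined behavior when any value in
     | /// KnownPoison is poison; e.g. (illustrative) `udiv i32 %a, %p` must
     | /// trigger UB if %p is known poison, since a poison divisor is immediate UB.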
5193 | bool llvm::mustTriggerUB(const Instruction *I, |
5194 | const SmallSet<const Value *, 16>& KnownPoison) { |
5195 | SmallPtrSet<const Value *, 4> NonPoisonOps; |
5196 | getGuaranteedNonPoisonOps(I, NonPoisonOps); |
5197 | |
5198 | for (const auto *V : NonPoisonOps) |
5199 | if (KnownPoison.count(V)) |
5200 | return true; |
5201 | |
5202 | return false; |
5203 | } |
5204 | |
5205 | static bool programUndefinedIfUndefOrPoison(const Value *V, |
5206 | bool PoisonOnly) { |
5207 | // We currently only look for uses of values within the same basic |
5208 | // block, as that makes it easier to guarantee that the uses will be |
5209 | // executed given that Inst is executed. |
5210 | // |
5211 | // FIXME: Expand this to consider uses beyond the same basic block. To do |
5212 | // this, look out for the distinction between post-dominance and strong |
5213 | // post-dominance. |
5214 | const BasicBlock *BB = nullptr; |
5215 | BasicBlock::const_iterator Begin; |
5216 | if (const auto *Inst = dyn_cast<Instruction>(V)) { |
5217 | BB = Inst->getParent(); |
5218 | Begin = Inst->getIterator(); |
5219 | Begin++; |
5220 | } else if (const auto *Arg = dyn_cast<Argument>(V)) { |
5221 | BB = &Arg->getParent()->getEntryBlock(); |
5222 | Begin = BB->begin(); |
5223 | } else { |
5224 | return false; |
5225 | } |
5226 | |
5227 | BasicBlock::const_iterator End = BB->end(); |
5228 | |
5229 | if (!PoisonOnly) { |
5230 | // Since undef does not propagate eagerly, be conservative & just check |
5231 | // whether a value is directly passed to an instruction that must take |
5232 | // well-defined operands. |
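     | // e.g. (illustrative) if V is the pointer operand of a load or store, or
     | // is passed to a noundef call argument, the program is undefined whenever
     | // V is undef or poison.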
5233 | |
5234 | for (auto &I : make_range(Begin, End)) { |
5235 | SmallPtrSet<const Value *, 4> WellDefinedOps; |
5236 | getGuaranteedWellDefinedOps(&I, WellDefinedOps); |
5237 | for (auto *Op : WellDefinedOps) { |
5238 | if (Op == V) |
5239 | return true; |
5240 | } |
5241 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) |
5242 | break; |
5243 | } |
5244 | return false; |
5245 | } |
5246 | |
5247 | // Set of instructions that we have proved will yield poison if Inst |
5248 | // does. |
5249 | SmallSet<const Value *, 16> YieldsPoison; |
5250 | SmallSet<const BasicBlock *, 4> Visited; |
5251 | |
5252 | YieldsPoison.insert(V); |
5253 | auto Propagate = [&](const User *User) { |
5254 | if (propagatesPoison(cast<Operator>(User))) |
5255 | YieldsPoison.insert(User); |
5256 | }; |
5257 | for_each(V->users(), Propagate); |
5258 | Visited.insert(BB); |
5259 | |
5260 | unsigned Iter = 0; |
5261 | while (Iter++ < MaxAnalysisRecursionDepth) { |
5262 | for (auto &I : make_range(Begin, End)) { |
5263 | if (mustTriggerUB(&I, YieldsPoison)) |
5264 | return true; |
5265 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) |
5266 | return false; |
5267 | |
5268 | // Mark poison that propagates from I through uses of I. |
5269 | if (YieldsPoison.count(&I)) |
5270 | for_each(I.users(), Propagate); |
5271 | } |
5272 | |
5273 | if (auto *NextBB = BB->getSingleSuccessor()) { |
5274 | if (Visited.insert(NextBB).second) { |
5275 | BB = NextBB; |
5276 | Begin = BB->getFirstNonPHI()->getIterator(); |
5277 | End = BB->end(); |
5278 | continue; |
5279 | } |
5280 | } |
5281 | |
5282 | break; |
5283 | } |
5284 | return false; |
5285 | } |
5286 | |
5287 | bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) { |
5288 | return ::programUndefinedIfUndefOrPoison(Inst, false); |
5289 | } |
5290 | |
5291 | bool llvm::programUndefinedIfPoison(const Instruction *Inst) { |
5292 | return ::programUndefinedIfUndefOrPoison(Inst, true); |
5293 | } |
5294 | |
5295 | static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) { |
5296 | if (FMF.noNaNs()) |
5297 | return true; |
5298 | |
5299 | if (auto *C = dyn_cast<ConstantFP>(V)) |
5300 | return !C->isNaN(); |
5301 | |
5302 | if (auto *C = dyn_cast<ConstantDataVector>(V)) { |
5303 | if (!C->getElementType()->isFloatingPointTy()) |
5304 | return false; |
5305 | for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) { |
5306 | if (C->getElementAsAPFloat(I).isNaN()) |
5307 | return false; |
5308 | } |
5309 | return true; |
5310 | } |
5311 | |
5312 | if (isa<ConstantAggregateZero>(V)) |
5313 | return true; |
5314 | |
5315 | return false; |
5316 | } |
5317 | |
5318 | static bool isKnownNonZero(const Value *V) { |
5319 | if (auto *C = dyn_cast<ConstantFP>(V)) |
5320 | return !C->isZero(); |
5321 | |
5322 | if (auto *C = dyn_cast<ConstantDataVector>(V)) { |
5323 | if (!C->getElementType()->isFloatingPointTy()) |
5324 | return false; |
5325 | for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) { |
5326 | if (C->getElementAsAPFloat(I).isZero()) |
5327 | return false; |
5328 | } |
5329 | return true; |
5330 | } |
5331 | |
5332 | return false; |
5333 | } |
5334 | |
5335 | /// Match clamp pattern for float types without caring about NaNs or signed
5336 | /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
5337 | /// function recognizes if it can be substituted by a "canonical" min/max
5338 | /// pattern.
5339 | static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, |
5340 | Value *CmpLHS, Value *CmpRHS, |
5341 | Value *TrueVal, Value *FalseVal, |
5342 | Value *&LHS, Value *&RHS) { |
5343 | // Try to match |
5344 | // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2)) |
5345 | // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2)) |
5346 | // and return description of the outer Max/Min. |
5347 | |
5348 | // First, check if select has inverse order: |
5349 | if (CmpRHS == FalseVal) { |
5350 | std::swap(TrueVal, FalseVal); |
5351 | Pred = CmpInst::getInversePredicate(Pred); |
5352 | } |
5353 | |
5354 | // Assume success now. If there's no match, callers should not use these anyway. |
5355 | LHS = TrueVal; |
5356 | RHS = FalseVal; |
5357 | |
5358 | const APFloat *FC1; |
5359 | if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite()) |
5360 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5361 | |
5362 | const APFloat *FC2; |
5363 | switch (Pred) { |
5364 | case CmpInst::FCMP_OLT: |
5365 | case CmpInst::FCMP_OLE: |
5366 | case CmpInst::FCMP_ULT: |
5367 | case CmpInst::FCMP_ULE: |
5368 | if (match(FalseVal, |
5369 | m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)), |
5370 | m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) && |
5371 | *FC1 < *FC2) |
5372 | return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false}; |
5373 | break; |
5374 | case CmpInst::FCMP_OGT: |
5375 | case CmpInst::FCMP_OGE: |
5376 | case CmpInst::FCMP_UGT: |
5377 | case CmpInst::FCMP_UGE: |
5378 | if (match(FalseVal, |
5379 | m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)), |
5380 | m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) && |
5381 | *FC1 > *FC2) |
5382 | return {SPF_FMINNUM, SPNB_RETURNS_ANY, false}; |
5383 | break; |
5384 | default: |
5385 | break; |
5386 | } |
5387 | |
5388 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5389 | } |
5390 | |
5391 | /// Recognize variations of: |
5392 | /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v))) |
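     | /// e.g. (illustrative) the clamp of %x to [0, 100]:
     | ///   %c   = icmp slt i8 %x, 0
     | ///   %min = smin(%x, 100)            ; select or intrinsic form
     | ///   %r   = select i1 %c, i8 0, i8 %min
     | /// is recognized as SMAX(SMIN(%x, 100), 0).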
5393 | static SelectPatternResult matchClamp(CmpInst::Predicate Pred, |
5394 | Value *CmpLHS, Value *CmpRHS, |
5395 | Value *TrueVal, Value *FalseVal) { |
5396 | // Swap the select operands and predicate to match the patterns below. |
5397 | if (CmpRHS != TrueVal) { |
5398 | Pred = ICmpInst::getSwappedPredicate(Pred); |
5399 | std::swap(TrueVal, FalseVal); |
5400 | } |
5401 | const APInt *C1; |
5402 | if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { |
5403 | const APInt *C2; |
5404 | // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) |
5405 | if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && |
5406 | C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) |
5407 | return {SPF_SMAX, SPNB_NA, false}; |
5408 | |
5409 | // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) |
5410 | if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && |
5411 | C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) |
5412 | return {SPF_SMIN, SPNB_NA, false}; |
5413 | |
5414 | // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) |
5415 | if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && |
5416 | C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) |
5417 | return {SPF_UMAX, SPNB_NA, false}; |
5418 | |
5419 | // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) |
5420 | if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && |
5421 | C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) |
5422 | return {SPF_UMIN, SPNB_NA, false}; |
5423 | } |
5424 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5425 | } |
5426 | |
5427 | /// Recognize variations of: |
5428 | /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) |
5429 | static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, |
5430 | Value *CmpLHS, Value *CmpRHS, |
5431 | Value *TVal, Value *FVal, |
5432 | unsigned Depth) { |
5433 | // TODO: Allow FP min/max with nnan/nsz. |
5434 | assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5435 | |
5436 | Value *A = nullptr, *B = nullptr; |
5437 | SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); |
5438 | if (!SelectPatternResult::isMinOrMax(L.Flavor)) |
5439 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5440 | |
5441 | Value *C = nullptr, *D = nullptr; |
5442 | SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); |
5443 | if (L.Flavor != R.Flavor) |
5444 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5445 | |
5446 | // We have something like: x Pred y ? min(a, b) : min(c, d). |
5447 | // Try to match the compare to the min/max operations of the select operands. |
5448 | // First, make sure we have the right compare predicate. |
5449 | switch (L.Flavor) { |
5450 | case SPF_SMIN: |
5451 | if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { |
5452 | Pred = ICmpInst::getSwappedPredicate(Pred); |
5453 | std::swap(CmpLHS, CmpRHS); |
5454 | } |
5455 | if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) |
5456 | break; |
5457 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5458 | case SPF_SMAX: |
5459 | if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { |
5460 | Pred = ICmpInst::getSwappedPredicate(Pred); |
5461 | std::swap(CmpLHS, CmpRHS); |
5462 | } |
5463 | if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) |
5464 | break; |
5465 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5466 | case SPF_UMIN: |
5467 | if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { |
5468 | Pred = ICmpInst::getSwappedPredicate(Pred); |
5469 | std::swap(CmpLHS, CmpRHS); |
5470 | } |
5471 | if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) |
5472 | break; |
5473 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5474 | case SPF_UMAX: |
5475 | if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { |
5476 | Pred = ICmpInst::getSwappedPredicate(Pred); |
5477 | std::swap(CmpLHS, CmpRHS); |
5478 | } |
5479 | if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) |
5480 | break; |
5481 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5482 | default: |
5483 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5484 | } |
5485 | |
5486 | // If there is a common operand in the already matched min/max and the other |
5487 | // min/max operands match the compare operands (either directly or inverted), |
5488 | // then this is min/max of the same flavor. |
5489 | |
5490 | // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) |
5491 | // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) |
5492 | if (D == B) { |
5493 | if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && |
5494 | match(A, m_Not(m_Specific(CmpRHS))))) |
5495 | return {L.Flavor, SPNB_NA, false}; |
5496 | } |
5497 | // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) |
5498 | // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) |
5499 | if (C == B) { |
5500 | if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && |
5501 | match(A, m_Not(m_Specific(CmpRHS))))) |
5502 | return {L.Flavor, SPNB_NA, false}; |
5503 | } |
5504 | // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) |
5505 | // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) |
5506 | if (D == A) { |
5507 | if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && |
5508 | match(B, m_Not(m_Specific(CmpRHS))))) |
5509 | return {L.Flavor, SPNB_NA, false}; |
5510 | } |
5511 | // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) |
5512 | // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) |
5513 | if (C == A) { |
5514 | if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && |
5515 | match(B, m_Not(m_Specific(CmpRHS))))) |
5516 | return {L.Flavor, SPNB_NA, false}; |
5517 | } |
5518 | |
5519 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5520 | } |
5521 | |
5522 | /// If the input value is the result of a 'not' op, constant integer, or vector |
5523 | /// splat of a constant integer, return the bitwise-not source value. |
5524 | /// TODO: This could be extended to handle non-splat vector integer constants. |
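     | /// e.g. (illustrative) getNotValue(i8 5) returns i8 -6, since ~5 == -6.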
5525 | static Value *getNotValue(Value *V) { |
5526 | Value *NotV; |
5527 | if (match(V, m_Not(m_Value(NotV)))) |
5528 | return NotV; |
5529 | |
5530 | const APInt *C; |
5531 | if (match(V, m_APInt(C))) |
5532 | return ConstantInt::get(V->getType(), ~(*C)); |
5533 | |
5534 | return nullptr; |
5535 | } |
5536 | |
5537 | /// Match non-obvious integer minimum and maximum sequences. |
5538 | static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, |
5539 | Value *CmpLHS, Value *CmpRHS, |
5540 | Value *TrueVal, Value *FalseVal, |
5541 | Value *&LHS, Value *&RHS, |
5542 | unsigned Depth) { |
5543 | // Assume success. If there's no match, callers should not use these anyway. |
5544 | LHS = TrueVal; |
5545 | RHS = FalseVal; |
5546 | |
5547 | SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal); |
5548 | if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) |
5549 | return SPR; |
5550 | |
5551 | SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth); |
5552 | if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) |
5553 | return SPR; |
5554 | |
5555 | // Look through 'not' ops to find disguised min/max. |
5556 | // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y) |
5557 | // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y) |
5558 | if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) { |
5559 | switch (Pred) { |
5560 | case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false}; |
5561 | case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false}; |
5562 | case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false}; |
5563 | case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false}; |
5564 | default: break; |
5565 | } |
5566 | } |
5567 | |
5568 | // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X) |
5569 | // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X) |
5570 | if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) { |
5571 | switch (Pred) { |
5572 | case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false}; |
5573 | case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false}; |
5574 | case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false}; |
5575 | case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false}; |
5576 | default: break; |
5577 | } |
5578 | } |
5579 | |
5580 | if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) |
5581 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5582 | |
5583 | // Z = X -nsw Y |
5584 | // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0) |
5585 | // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0) |
5586 | if (match(TrueVal, m_Zero()) && |
5587 | match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) |
5588 | return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; |
5589 | |
5590 | // Z = X -nsw Y |
5591 | // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0) |
5592 | // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0) |
5593 | if (match(FalseVal, m_Zero()) && |
5594 | match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) |
5595 | return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; |
5596 | |
5597 | const APInt *C1; |
5598 | if (!match(CmpRHS, m_APInt(C1))) |
5599 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5600 | |
5601 | // An unsigned min/max can be written with a signed compare. |
5602 | const APInt *C2; |
5603 | if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || |
5604 | (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { |
5605 | // Is the sign bit set? |
5606 | // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX |
5607 | // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN |
5608 | if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() && |
5609 | C2->isMaxSignedValue()) |
5610 | return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; |
5611 | |
5612 | // Is the sign bit clear? |
5613 | // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX |
5614 | // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN |
5615 | if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() && |
5616 | C2->isMinSignedValue()) |
5617 | return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; |
5618 | } |
5619 | |
5620 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5621 | } |
5622 | |
5623 | bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) { |
5624 | assert(X && Y && "Invalid operand");
5625 | |
5626 | // X = sub (0, Y) || X = sub nsw (0, Y) |
5627 | if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) || |
5628 | (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y))))) |
5629 | return true; |
5630 | |
5631 | // Y = sub (0, X) || Y = sub nsw (0, X) |
5632 | if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) || |
5633 | (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X))))) |
5634 | return true; |
5635 | |
5636 | // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A) |
5637 | Value *A, *B; |
5638 | return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) && |
5639 | match(Y, m_Sub(m_Specific(B), m_Specific(A))))) || |
5640 | (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) && |
5641 | match(Y, m_NSWSub(m_Specific(B), m_Specific(A))))); |
5642 | } |
5643 | |
5644 | static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, |
5645 | FastMathFlags FMF, |
5646 | Value *CmpLHS, Value *CmpRHS, |
5647 | Value *TrueVal, Value *FalseVal, |
5648 | Value *&LHS, Value *&RHS, |
5649 | unsigned Depth) { |
5650 | if (CmpInst::isFPPredicate(Pred)) { |
5651 | // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one |
5652 | // 0.0 operand, set the compare's 0.0 operands to that same value for the |
5653 | // purpose of identifying min/max. Disregard vector constants with undefined |
5654 | // elements because those cannot be back-propagated for analysis.
5655 | Value *OutputZeroVal = nullptr; |
5656 | if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) && |
5657 | !cast<Constant>(TrueVal)->containsUndefOrPoisonElement()) |
5658 | OutputZeroVal = TrueVal; |
5659 | else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) && |
5660 | !cast<Constant>(FalseVal)->containsUndefOrPoisonElement()) |
5661 | OutputZeroVal = FalseVal; |
5662 | |
5663 | if (OutputZeroVal) { |
5664 | if (match(CmpLHS, m_AnyZeroFP())) |
5665 | CmpLHS = OutputZeroVal; |
5666 | if (match(CmpRHS, m_AnyZeroFP())) |
5667 | CmpRHS = OutputZeroVal; |
5668 | } |
5669 | } |
5670 | |
5671 | LHS = CmpLHS; |
5672 | RHS = CmpRHS; |
5673 | |
5674 | // Signed zero may return inconsistent results between implementations. |
5675 | // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0 |
5676 | // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) |
5677 | // Therefore, we behave conservatively and only proceed if at least one of the |
5678 | // operands is known to not be zero or if we don't care about signed zero. |
5679 | switch (Pred) { |
5680 | default: break; |
5681 | // FIXME: Include OGT/OLT/UGT/ULT. |
5682 | case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE: |
5683 | case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE: |
5684 | if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && |
5685 | !isKnownNonZero(CmpRHS)) |
5686 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5687 | } |
5688 | |
5689 | SelectPatternNaNBehavior NaNBehavior = SPNB_NA; |
5690 | bool Ordered = false; |
5691 | |
5692 | // When given one NaN and one non-NaN input: |
5693 | // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. |
5694 | // - A simple C99 (a < b ? a : b) construction will return 'b' (as the |
5695 | // ordered comparison fails), which could be NaN or non-NaN. |
5696 | // So here we discover exactly what NaN behavior is required/accepted.
5697 | if (CmpInst::isFPPredicate(Pred)) { |
5698 | bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); |
5699 | bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); |
5700 | |
5701 | if (LHSSafe && RHSSafe) { |
5702 | // Both operands are known non-NaN. |
5703 | NaNBehavior = SPNB_RETURNS_ANY; |
5704 | } else if (CmpInst::isOrdered(Pred)) { |
5705 | // An ordered comparison will return false when given a NaN, so it |
5706 | // returns the RHS. |
5707 | Ordered = true; |
5708 | if (LHSSafe) |
5709 | // LHS is non-NaN, so if RHS is NaN then NaN will be returned. |
5710 | NaNBehavior = SPNB_RETURNS_NAN; |
5711 | else if (RHSSafe) |
5712 | NaNBehavior = SPNB_RETURNS_OTHER; |
5713 | else |
5714 | // Completely unsafe. |
5715 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5716 | } else { |
5717 | Ordered = false; |
5718 | // An unordered comparison will return true when given a NaN, so it |
5719 | // returns the LHS. |
5720 | if (LHSSafe) |
5721 | // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. |
5722 | NaNBehavior = SPNB_RETURNS_OTHER; |
5723 | else if (RHSSafe) |
5724 | NaNBehavior = SPNB_RETURNS_NAN; |
5725 | else |
5726 | // Completely unsafe. |
5727 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5728 | } |
5729 | } |
5730 | |
5731 | if (TrueVal == CmpRHS && FalseVal == CmpLHS) { |
5732 | std::swap(CmpLHS, CmpRHS); |
5733 | Pred = CmpInst::getSwappedPredicate(Pred); |
5734 | if (NaNBehavior == SPNB_RETURNS_NAN) |
5735 | NaNBehavior = SPNB_RETURNS_OTHER; |
5736 | else if (NaNBehavior == SPNB_RETURNS_OTHER) |
5737 | NaNBehavior = SPNB_RETURNS_NAN; |
5738 | Ordered = !Ordered; |
5739 | } |
5740 | |
5741 | // ([if]cmp X, Y) ? X : Y |
5742 | if (TrueVal == CmpLHS && FalseVal == CmpRHS) { |
5743 | switch (Pred) { |
5744 | default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. |
5745 | case ICmpInst::ICMP_UGT: |
5746 | case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; |
5747 | case ICmpInst::ICMP_SGT: |
5748 | case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; |
5749 | case ICmpInst::ICMP_ULT: |
5750 | case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; |
5751 | case ICmpInst::ICMP_SLT: |
5752 | case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; |
5753 | case FCmpInst::FCMP_UGT: |
5754 | case FCmpInst::FCMP_UGE: |
5755 | case FCmpInst::FCMP_OGT: |
5756 | case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; |
5757 | case FCmpInst::FCMP_ULT: |
5758 | case FCmpInst::FCMP_ULE: |
5759 | case FCmpInst::FCMP_OLT: |
5760 | case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; |
5761 | } |
5762 | } |
5763 | |
5764 | if (isKnownNegation(TrueVal, FalseVal)) { |
5765 | // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can |
5766 | // match against either LHS or sext(LHS). |
5767 | auto MaybeSExtCmpLHS = |
5768 | m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS))); |
5769 | auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes()); |
5770 | auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One()); |
5771 | if (match(TrueVal, MaybeSExtCmpLHS)) { |
5772 | // Set the return values. If the compare uses the negated value (-X >s 0), |
5773 | // swap the return values because the negated value is always 'RHS'. |
5774 | LHS = TrueVal; |
5775 | RHS = FalseVal; |
5776 | if (match(CmpLHS, m_Neg(m_Specific(FalseVal)))) |
5777 | std::swap(LHS, RHS); |
5778 | |
5779 | // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X) |
5780 | // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X) |
5781 | if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes)) |
5782 | return {SPF_ABS, SPNB_NA, false}; |
5783 | |
5784 | // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X) |
5785 | if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne)) |
5786 | return {SPF_ABS, SPNB_NA, false}; |
5787 | |
5788 | // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X) |
5789 | // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X) |
5790 | if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne)) |
5791 | return {SPF_NABS, SPNB_NA, false}; |
5792 | } |
5793 | else if (match(FalseVal, MaybeSExtCmpLHS)) { |
5794 | // Set the return values. If the compare uses the negated value (-X >s 0), |
5795 | // swap the return values because the negated value is always 'RHS'. |
5796 | LHS = FalseVal; |
5797 | RHS = TrueVal; |
5798 | if (match(CmpLHS, m_Neg(m_Specific(TrueVal)))) |
5799 | std::swap(LHS, RHS); |
5800 | |
5801 | // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X) |
5802 | // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X) |
5803 | if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes)) |
5804 | return {SPF_NABS, SPNB_NA, false}; |
5805 | |
5806 | // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X) |
5807 | // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X) |
5808 | if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne)) |
5809 | return {SPF_ABS, SPNB_NA, false}; |
5810 | } |
5811 | } |
5812 | |
5813 | if (CmpInst::isIntPredicate(Pred)) |
5814 | return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth); |
5815 | |
5816 | // According to IEEE 754-2008 5.3.1, minNum(0.0, -0.0) and similar
5817 | // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
5818 | // semantics than minNum. Be conservative in such cases.
5819 | if (NaNBehavior != SPNB_RETURNS_ANY || |
5820 | (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && |
5821 | !isKnownNonZero(CmpRHS))) |
5822 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5823 | |
5824 | return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS); |
5825 | } |
5826 | |
5827 | /// Helps to match a select pattern in case of a type mismatch. |
5828 | /// |
5829 | /// The function handles the case when the types of the true and false values
5830 | /// of a select instruction differ from the types of the cmp instruction's
5831 | /// operands because of a cast instruction. The function checks if it is legal
5832 | /// to move the cast operation after the "select". If yes, it returns the new
5833 | /// second value of the "select" (with the assumption that the cast is moved):
5834 | /// 1. As the operand of the cast instruction when both values of the "select"
5835 | /// are the same kind of cast instruction.
5836 | /// 2. As a restored constant (by applying the reverse cast operation) when the
5837 | /// first value of the "select" is a cast operation and the second value is
5838 | /// a constant.
5839 | /// NOTE: We return only the new second value because the first value could be |
5840 | /// accessed as operand of cast instruction. |
5841 | static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, |
5842 | Instruction::CastOps *CastOp) { |
5843 | auto *Cast1 = dyn_cast<CastInst>(V1); |
5844 | if (!Cast1) |
5845 | return nullptr; |
5846 | |
5847 | *CastOp = Cast1->getOpcode(); |
5848 | Type *SrcTy = Cast1->getSrcTy(); |
5849 | if (auto *Cast2 = dyn_cast<CastInst>(V2)) { |
5850 | // If V1 and V2 are both the same cast from the same type, look through V1. |
5851 | if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy()) |
5852 | return Cast2->getOperand(0); |
5853 | return nullptr; |
5854 | } |
5855 | |
5856 | auto *C = dyn_cast<Constant>(V2); |
5857 | if (!C) |
5858 | return nullptr; |
5859 | |
5860 | Constant *CastedTo = nullptr; |
5861 | switch (*CastOp) { |
5862 | case Instruction::ZExt: |
5863 | if (CmpI->isUnsigned()) |
5864 | CastedTo = ConstantExpr::getTrunc(C, SrcTy); |
5865 | break; |
5866 | case Instruction::SExt: |
5867 | if (CmpI->isSigned()) |
5868 | CastedTo = ConstantExpr::getTrunc(C, SrcTy, true); |
5869 | break; |
5870 | case Instruction::Trunc: |
5871 | Constant *CmpConst; |
5872 | if (match(CmpI->getOperand(1), m_Constant(CmpConst)) && |
5873 | CmpConst->getType() == SrcTy) { |
5874 | // Here we have the following case: |
5875 | // |
5876 | // %cond = cmp iN %x, CmpConst |
5877 | // %tr = trunc iN %x to iK |
5878 | // %narrowsel = select i1 %cond, iK %t, iK C |
5879 | // |
5880 | // We can always move the trunc after the select operation:
5881 | // |
5882 | // %cond = cmp iN %x, CmpConst |
5883 | // %widesel = select i1 %cond, iN %x, iN CmpConst |
5884 | // %tr = trunc iN %widesel to iK |
5885 | // |
5886 | // Note that C could be extended in any way because we don't care about
5887 | // the upper bits after truncation. It can't be an abs pattern, because
5888 | // that would look like:
5889 | //
5890 | // select i1 %cond, x, -x.
5891 | //
5892 | // So only a min/max pattern can be matched. Such a match requires the
5893 | // widened C to equal CmpConst; that is why we set the widened C to
5894 | // CmpConst, and the condition trunc(CmpConst) == C is checked below.
5895 | CastedTo = CmpConst; |
5896 | } else { |
5897 | CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned()); |
5898 | } |
5899 | break; |
5900 | case Instruction::FPTrunc: |
5901 | CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true); |
5902 | break; |
5903 | case Instruction::FPExt: |
5904 | CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true); |
5905 | break; |
5906 | case Instruction::FPToUI: |
5907 | CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true); |
5908 | break; |
5909 | case Instruction::FPToSI: |
5910 | CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true); |
5911 | break; |
5912 | case Instruction::UIToFP: |
5913 | CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true); |
5914 | break; |
5915 | case Instruction::SIToFP: |
5916 | CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true); |
5917 | break; |
5918 | default: |
5919 | break; |
5920 | } |
5921 | |
5922 | if (!CastedTo) |
5923 | return nullptr; |
5924 | |
5925 | // Make sure the cast doesn't lose any information. |
5926 | Constant *CastedBack = |
5927 | ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true); |
5928 | if (CastedBack != C) |
5929 | return nullptr; |
5930 | |
5931 | return CastedTo; |
5932 | } |
5933 | |
5934 | SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, |
5935 | Instruction::CastOps *CastOp, |
5936 | unsigned Depth) { |
5937 | if (Depth >= MaxAnalysisRecursionDepth) |
5938 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5939 | |
5940 | SelectInst *SI = dyn_cast<SelectInst>(V); |
5941 | if (!SI) return {SPF_UNKNOWN, SPNB_NA, false}; |
5942 | |
5943 | CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); |
5944 | if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false}; |
5945 | |
5946 | Value *TrueVal = SI->getTrueValue(); |
5947 | Value *FalseVal = SI->getFalseValue(); |
5948 | |
5949 | return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS, |
5950 | CastOp, Depth); |
5951 | } |
5952 | |
5953 | SelectPatternResult llvm::matchDecomposedSelectPattern( |
5954 | CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, |
5955 | Instruction::CastOps *CastOp, unsigned Depth) { |
5956 | CmpInst::Predicate Pred = CmpI->getPredicate(); |
5957 | Value *CmpLHS = CmpI->getOperand(0); |
5958 | Value *CmpRHS = CmpI->getOperand(1); |
5959 | FastMathFlags FMF; |
5960 | if (isa<FPMathOperator>(CmpI)) |
5961 | FMF = CmpI->getFastMathFlags(); |
5962 | |
5963 | // Bail out early. |
5964 | if (CmpI->isEquality()) |
5965 | return {SPF_UNKNOWN, SPNB_NA, false}; |
5966 | |
5967 | // Deal with type mismatches. |
5968 | if (CastOp && CmpLHS->getType() != TrueVal->getType()) { |
5969 | if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) { |
5970 | // If this is a potential fmin/fmax with a cast to integer, then ignore |
5971 | // -0.0 because there is no corresponding integer value. |
5972 | if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) |
5973 | FMF.setNoSignedZeros(); |
5974 | return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, |
5975 | cast<CastInst>(TrueVal)->getOperand(0), C, |
5976 | LHS, RHS, Depth); |
5977 | } |
5978 | if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) { |
5979 | // If this is a potential fmin/fmax with a cast to integer, then ignore |
5980 | // -0.0 because there is no corresponding integer value. |
5981 | if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) |
5982 | FMF.setNoSignedZeros(); |
5983 | return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, |
5984 | C, cast<CastInst>(FalseVal)->getOperand(0), |
5985 | LHS, RHS, Depth); |
5986 | } |
5987 | } |
5988 | return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, |
5989 | LHS, RHS, Depth); |
5990 | } |
5991 | |
5992 | CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) { |
5993 | if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT; |
5994 | if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT; |
5995 | if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT; |
5996 | if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT; |
5997 | if (SPF == SPF_FMINNUM) |
5998 | return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; |
5999 | if (SPF == SPF_FMAXNUM) |
6000 | return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; |
6001 | llvm_unreachable("unhandled!");
6002 | } |
6003 | |
6004 | SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) { |
6005 | if (SPF == SPF_SMIN) return SPF_SMAX; |
6006 | if (SPF == SPF_UMIN) return SPF_UMAX; |
6007 | if (SPF == SPF_SMAX) return SPF_SMIN; |
6008 | if (SPF == SPF_UMAX) return SPF_UMIN; |
6009 | llvm_unreachable("unhandled!");
6010 | } |
6011 | |
6012 | CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) { |
6013 | return getMinMaxPred(getInverseMinMaxFlavor(SPF)); |
6014 | } |
6015 | |
6016 | std::pair<Intrinsic::ID, bool> |
6017 | llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) { |
6018 | // Check if VL contains select instructions that can be folded into a min/max |
6019 | // vector intrinsic and return the intrinsic if it is possible. |
6020 | // TODO: Support floating point min/max. |
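     | // e.g. (illustrative) if every value in VL is a select of the form
     | //   %s = select (icmp slt %a, %b), %a, %b
     | // this returns {Intrinsic::smin, true} when each compare has one use.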
6021 | bool AllCmpSingleUse = true; |
6022 | SelectPatternResult SelectPattern; |
6023 | SelectPattern.Flavor = SPF_UNKNOWN; |
6024 | if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) { |
6025 | Value *LHS, *RHS; |
6026 | auto CurrentPattern = matchSelectPattern(I, LHS, RHS); |
6027 | if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) || |
6028 | CurrentPattern.Flavor == SPF_FMINNUM || |
6029 | CurrentPattern.Flavor == SPF_FMAXNUM || |
6030 | !I->getType()->isIntOrIntVectorTy()) |
6031 | return false; |
6032 | if (SelectPattern.Flavor != SPF_UNKNOWN && |
6033 | SelectPattern.Flavor != CurrentPattern.Flavor) |
6034 | return false; |
6035 | SelectPattern = CurrentPattern; |
6036 | AllCmpSingleUse &= |
6037 | match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value())); |
6038 | return true; |
6039 | })) { |
6040 | switch (SelectPattern.Flavor) { |
6041 | case SPF_SMIN: |
6042 | return {Intrinsic::smin, AllCmpSingleUse}; |
6043 | case SPF_UMIN: |
6044 | return {Intrinsic::umin, AllCmpSingleUse}; |
6045 | case SPF_SMAX: |
6046 | return {Intrinsic::smax, AllCmpSingleUse}; |
6047 | case SPF_UMAX: |
6048 | return {Intrinsic::umax, AllCmpSingleUse}; |
6049 | default: |
6050 | llvm_unreachable("unexpected select pattern flavor");
6051 | } |
6052 | } |
6053 | return {Intrinsic::not_intrinsic, false}; |
6054 | } |
6055 | |
6056 | /// Return true if "icmp Pred LHS RHS" is always true. |
6057 | static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, |
6058 | const Value *RHS, const DataLayout &DL, |
6059 | unsigned Depth) { |
6060 | assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6061 | if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS) |
6062 | return true; |
6063 | |
6064 | switch (Pred) { |
6065 | default: |
6066 | return false; |
6067 | |
6068 | case CmpInst::ICMP_SLE: { |
6069 | const APInt *C; |
6070 | |
6071 | // LHS s<= LHS +_{nsw} C if C >= 0 |
6072 | if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C)))) |
6073 | return !C->isNegative(); |
6074 | return false; |
6075 | } |
6076 | |
6077 | case CmpInst::ICMP_ULE: { |
6078 | const APInt *C; |
6079 | |
6080 | // LHS u<= LHS +_{nuw} C for any C |
6081 | if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C)))) |
6082 | return true; |
6083 | |
6084 | // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB) |
6085 | auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B, |
6086 | const Value *&X, |
6087 | const APInt *&CA, const APInt *&CB) { |
6088 | if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) && |
6089 | match(B, m_NUWAdd(m_Specific(X), m_APInt(CB)))) |
6090 | return true; |
6091 | |
6092 | // If X & C == 0 then (X | C) == X +_{nuw} C |
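     | // e.g. (illustrative) if the low bits of X are known zero (say X = Y << 4),
     | // then (X | 3) == X +nuw 3 and (X | 7) == X +nuw 7, so X|3 u<= X|7.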
6093 | if (match(A, m_Or(m_Value(X), m_APInt(CA))) && |
6094 | match(B, m_Or(m_Specific(X), m_APInt(CB)))) { |
6095 | KnownBits Known(CA->getBitWidth()); |
6096 | computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr, |
6097 | /*CxtI*/ nullptr, /*DT*/ nullptr); |
6098 | if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero)) |
6099 | return true; |
6100 | } |
6101 | |
6102 | return false; |
6103 | }; |
6104 | |
6105 | const Value *X; |
6106 | const APInt *CLHS, *CRHS; |
6107 | if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS)) |
6108 | return CLHS->ule(*CRHS); |
6109 | |
6110 | return false; |
6111 | } |
6112 | } |
6113 | } |
6114 | |
6115 | /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred |
6116 | /// ALHS ARHS" is true. Otherwise, return None. |
6117 | static Optional<bool> |
6118 | isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, |
6119 | const Value *ARHS, const Value *BLHS, const Value *BRHS, |
6120 | const DataLayout &DL, unsigned Depth) { |
6121 | switch (Pred) { |
6122 | default: |
6123 | return None; |
6124 | |
6125 | case CmpInst::ICMP_SLT: |
6126 | case CmpInst::ICMP_SLE: |
6127 | if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) && |
6128 | isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth)) |
6129 | return true; |
6130 | return None; |
6131 | |
6132 | case CmpInst::ICMP_ULT: |
6133 | case CmpInst::ICMP_ULE: |
6134 | if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) && |
6135 | isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth)) |
6136 | return true; |
6137 | return None; |
6138 | } |
6139 | } |
6140 | |
6141 | /// Return true if the operands of the two compares match. IsSwappedOps is true |
6142 | /// when the operands match, but are swapped. |
6143 | static bool isMatchingOps(const Value *ALHS, const Value *ARHS, |
6144 | const Value *BLHS, const Value *BRHS, |
6145 | bool &IsSwappedOps) { |
6146 | |
6147 | bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS); |
6148 | IsSwappedOps = (ALHS == BRHS && ARHS == BLHS); |
6149 | return IsMatchingOps || IsSwappedOps; |
6150 | } |
6151 | |
6152 | /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true. |
6153 | /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false. |
6154 | /// Otherwise, return None if we can't infer anything. |
6155 | static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred, |
6156 | CmpInst::Predicate BPred, |
6157 | bool AreSwappedOps) { |
6158 | // Canonicalize the predicate as if the operands were not commuted. |
6159 | if (AreSwappedOps) |
6160 | BPred = ICmpInst::getSwappedPredicate(BPred); |
6161 | |
6162 | if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred)) |
6163 | return true; |
6164 | if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred)) |
6165 | return false; |
6166 | |
6167 | return None; |
6168 | } |
6169 | |
6170 | /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true. |
6171 | /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false. |
6172 | /// Otherwise, return None if we can't infer anything. |
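     | /// e.g. (illustrative) "X u< 5" implies "X u< 10" is true, since the exact
     | /// region [0,5) lies inside the allowed region [0,10); "X u< 5" implies
     | /// "X u> 20" is false, since the two regions do not intersect.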
6173 | static Optional<bool> |
6174 | isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, |
6175 | const ConstantInt *C1, |
6176 | CmpInst::Predicate BPred, |
6177 | const ConstantInt *C2) { |
6178 | ConstantRange DomCR = |
6179 | ConstantRange::makeExactICmpRegion(APred, C1->getValue()); |
6180 | ConstantRange CR = |
6181 | ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue()); |
6182 | ConstantRange Intersection = DomCR.intersectWith(CR); |
6183 | ConstantRange Difference = DomCR.difference(CR); |
6184 | if (Intersection.isEmptySet()) |
6185 | return false; |
6186 | if (Difference.isEmptySet()) |
6187 | return true; |
6188 | return None; |
6189 | } |
6190 | |
6191 | /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is |
6192 | /// false. Otherwise, return None if we can't infer anything. |
6193 | static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS, |
6194 | CmpInst::Predicate BPred, |
6195 | const Value *BLHS, const Value *BRHS, |
6196 | const DataLayout &DL, bool LHSIsTrue, |
6197 | unsigned Depth) { |
6198 | Value *ALHS = LHS->getOperand(0); |
6199 | Value *ARHS = LHS->getOperand(1); |
6200 | |
6201 | // The rest of the logic assumes the LHS condition is true. If that's not the |
6202 | // case, invert the predicate to make it so. |
6203 | CmpInst::Predicate APred = |
6204 | LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate(); |
6205 | |
6206 | // Can we infer anything when the two compares have matching operands? |
6207 | bool AreSwappedOps; |
6208 | if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) { |
6209 | if (Optional<bool> Implication = isImpliedCondMatchingOperands( |
6210 | APred, BPred, AreSwappedOps)) |
6211 | return Implication; |
6212 | // No amount of additional analysis will infer the second condition, so |
6213 | // early exit. |
6214 | return None; |
6215 | } |
6216 | |
6217 | // Can we infer anything when the LHS operands match and the RHS operands are |
6218 | // constants (not necessarily matching)? |
6219 | if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) { |
6220 | if (Optional<bool> Implication = isImpliedCondMatchingImmOperands( |
6221 | APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS))) |
6222 | return Implication; |
6223 | // No amount of additional analysis will infer the second condition, so |
6224 | // early exit. |
6225 | return None; |
6226 | } |
6227 | |
6228 | if (APred == BPred) |
6229 | return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth); |
6230 | return None; |
6231 | } |
6232 | |
6233 | /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is |
6234 | /// false. Otherwise, return None if we can't infer anything. We expect the |
6235 | /// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction. |
6236 | static Optional<bool> |
6237 | isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred, |
6238 | const Value *RHSOp0, const Value *RHSOp1, |
6239 | const DataLayout &DL, bool LHSIsTrue, unsigned Depth) { |
6240 | // The LHS must be an 'or', 'and', or a 'select' instruction. |
6241 | assert((LHS->getOpcode() == Instruction::And ||
6242 | LHS->getOpcode() == Instruction::Or ||
6243 | LHS->getOpcode() == Instruction::Select) &&
6244 | "Expected LHS to be 'and', 'or', or 'select'.");
6245 | |
6246 | assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6247 | |
6248 | // If the result of an 'or' is false, then we know both legs of the 'or' are |
6249 | // false. Similarly, if the result of an 'and' is true, then we know both |
6250 | // legs of the 'and' are true. |
6251 | const Value *ALHS, *ARHS; |
6252 | if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) || |
6253 | (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) { |
6254 | // FIXME: Make this non-recursive.
6255 | if (Optional<bool> Implication = isImpliedCondition( |
6256 | ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1)) |
6257 | return Implication; |
6258 | if (Optional<bool> Implication = isImpliedCondition( |
6259 | ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1)) |
6260 | return Implication; |
6261 | return None; |
6262 | } |
6263 | return None; |
6264 | } |
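// Sketch of the recursion above (hypothetical IR): if "%c = and i1 %p, %q"
// is known true, both legs are true, so each leg is tried on its own; e.g.
// with %p = "icmp ult i32 %x, 8" and the RHS query "icmp ult i32 %x, 16",
// the %p leg alone already proves the implication.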
6265 | |
6266 | Optional<bool> |
6267 | llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred, |
6268 | const Value *RHSOp0, const Value *RHSOp1, |
6269 | const DataLayout &DL, bool LHSIsTrue, unsigned Depth) { |
6270 | // Bail out when we hit the limit. |
6271 | if (Depth == MaxAnalysisRecursionDepth) |
6272 | return None; |
6273 | |
6274 | // A mismatch occurs when we compare a scalar cmp to a vector cmp, for |
6275 | // example. |
6276 | if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy()) |
6277 | return None; |
6278 | |
6279 | Type *OpTy = LHS->getType(); |
6280 | assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6281 | |
6282 | // FIXME: Extend the code below to handle vectors.
6283 | if (OpTy->isVectorTy()) |
6284 | return None; |
6285 | |
6286 | assert(OpTy->isIntegerTy(1) && "implied by above");
6287 | |
6288 | // Both LHS and RHS are icmps. |
6289 | const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS); |
6290 | if (LHSCmp) |
6291 | return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, |
6292 | Depth); |
6293 | |
6294 | // The LHS should be an 'or', 'and', or a 'select' instruction. We expect
6295 | // the RHS to be an icmp.
6296 | // FIXME: Add support for and/or/select on the RHS.
6297 | if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) { |
6298 | if ((LHSI->getOpcode() == Instruction::And || |
6299 | LHSI->getOpcode() == Instruction::Or || |
6300 | LHSI->getOpcode() == Instruction::Select)) |
6301 | return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, |
6302 | Depth); |
6303 | } |
6304 | return None; |
6305 | } |
6306 | |
6307 | Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS, |
6308 | const DataLayout &DL, bool LHSIsTrue, |
6309 | unsigned Depth) { |
6310 | // LHS ==> RHS by definition |
6311 | if (LHS == RHS) |
6312 | return LHSIsTrue; |
6313 | |
6314 | const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS); |
6315 | if (RHSCmp) |
6316 | return isImpliedCondition(LHS, RHSCmp->getPredicate(), |
6317 | RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL, |
6318 | LHSIsTrue, Depth); |
6319 | return None; |
6320 | } |
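// A minimal caller sketch, assuming LHS and RHS are existing i1 values and
// DL is the module's DataLayout (hypothetical usage, not part of this file):
//   if (Optional<bool> Imp = isImpliedCondition(LHS, RHS, DL,
//                                               /*LHSIsTrue=*/true)) {
//     // *Imp == true : RHS must be true whenever LHS is true.
//     // *Imp == false: RHS must be false whenever LHS is true.
//   }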
6321 | |
6322 | // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch |
6323 | // condition dominating ContextI or nullptr, if no condition is found. |
6324 | static std::pair<Value *, bool> |
6325 | getDomPredecessorCondition(const Instruction *ContextI) { |
6326 | if (!ContextI || !ContextI->getParent()) |
6327 | return {nullptr, false}; |
6328 | |
6329 | // TODO: This is a poor/cheap way to determine dominance. Should we use a |
6330 | // dominator tree (e.g., from a SimplifyQuery) instead?
6331 | const BasicBlock *ContextBB = ContextI->getParent(); |
6332 | const BasicBlock *PredBB = ContextBB->getSinglePredecessor(); |
6333 | if (!PredBB) |
6334 | return {nullptr, false}; |
6335 | |
6336 | // We need a conditional branch in the predecessor. |
6337 | Value *PredCond; |
6338 | BasicBlock *TrueBB, *FalseBB; |
6339 | if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB))) |
6340 | return {nullptr, false}; |
6341 | |
6342 | // The branch should get simplified. Don't bother simplifying this condition. |
6343 | if (TrueBB == FalseBB) |
6344 | return {nullptr, false}; |
6345 | |
6346 | assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6347 | "Predecessor block does not point to successor?");
6348 | |
6349 | // Is this condition implied by the predecessor condition? |
6350 | return {PredCond, TrueBB == ContextBB}; |
6351 | } |
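// Example CFG shape this matches (hypothetical IR):
//   pred:
//     %cond = icmp ult i32 %x, 8
//     br i1 %cond, label %ctx, label %other
//   ctx:                              ; single predecessor: %pred
//     ... ContextI ...
// Here the function returns {%cond, /*ConditionIsTrue=*/true}, since control
// only reaches %ctx when %cond evaluated to true.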
6352 | |
6353 | Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond, |
6354 | const Instruction *ContextI, |
6355 | const DataLayout &DL) { |
6356 | assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6357 | auto PredCond = getDomPredecessorCondition(ContextI); |
6358 | if (PredCond.first) |
6359 | return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second); |
6360 | return None; |
6361 | } |
6362 | |
6363 | Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred, |
6364 | const Value *LHS, const Value *RHS, |
6365 | const Instruction *ContextI, |
6366 | const DataLayout &DL) { |
6367 | auto PredCond = getDomPredecessorCondition(ContextI); |
6368 | if (PredCond.first) |
6369 | return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL, |
6370 | PredCond.second); |
6371 | return None; |
6372 | } |
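// Usage sketch for this overload (hypothetical caller): a transform visiting
// instruction I can ask
//   isImpliedByDomCondition(ICmpInst::ICMP_ULT, X, Y, &I, DL)
// to learn whether "X u< Y" is already decided by the branch that guards
// I's block.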
6373 | |
6374 | static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, |
6375 | APInt &Upper, const InstrInfoQuery &IIQ) { |
6376 | unsigned Width = Lower.getBitWidth(); |
6377 | const APInt *C; |
6378 | switch (BO.getOpcode()) { |
6379 | case Instruction::Add: |
6380 | if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) { |
6381 | // FIXME: If we have both nuw and nsw, we should reduce the range further. |
6382 | if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) { |
6383 | // 'add nuw x, C' produces [C, UINT_MAX]. |
6384 | Lower = *C; |
6385 | } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) { |
6386 | if (C->isNegative()) { |
6387 | // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C]. |
6388 | Lower = APInt::getSignedMinValue(Width); |
6389 | Upper = APInt::getSignedMaxValue(Width) + *C + 1; |
6390 | } else { |
6391 | // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX]. |
6392 | Lower = APInt::getSignedMinValue(Width) + *C; |
6393 | Upper = APInt::getSignedMaxValue(Width) + 1; |
6394 | } |
6395 | } |
6396 | } |
6397 | break; |
6398 | |
6399 | case Instruction::And: |
6400 | if (match(BO.getOperand(1), m_APInt(C))) |
6401 | // 'and x, C' produces [0, C]. |
6402 | Upper = *C + 1; |
6403 | break; |
6404 | |
6405 | case Instruction::Or: |
6406 | if (match(BO.getOperand(1), m_APInt(C))) |
6407 | // 'or x, C' produces [C, UINT_MAX]. |
6408 | Lower = *C; |
6409 | break; |
6410 | |
6411 | case Instruction::AShr: |
6412 | if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { |
6413 | // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C]. |
6414 | Lower = APInt::getSignedMinValue(Width).ashr(*C); |
6415 | Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1; |
6416 | } else if (match(BO.getOperand(0), m_APInt(C))) { |
6417 | unsigned ShiftAmount = Width - 1; |
6418 | if (!C->isNullValue() && IIQ.isExact(&BO)) |
6419 | ShiftAmount = C->countTrailingZeros(); |
6420 | if (C->isNegative()) { |
6421 | // 'ashr C, x' produces [C, C >> (Width-1)] |
6422 | Lower = *C; |
6423 | Upper = C->ashr(ShiftAmount) + 1; |
6424 | } else { |
6425 | // 'ashr C, x' produces [C >> (Width-1), C] |
6426 | Lower = C->ashr(ShiftAmount); |
6427 | Upper = *C + 1; |
6428 | } |
6429 | } |
6430 | break; |
6431 | |
6432 | case Instruction::LShr: |
6433 | if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { |
6434 | // 'lshr x, C' produces [0, UINT_MAX >> C]. |
6435 | Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1; |
6436 | } else if (match(BO.getOperand(0), m_APInt(C))) { |
6437 | // 'lshr C, x' produces [C >> (Width-1), C]. |
6438 | unsigned ShiftAmount = Width - 1; |
6439 | if (!C->isNullValue() && IIQ.isExact(&BO)) |
6440 | ShiftAmount = C->countTrailingZeros(); |
6441 | Lower = C->lshr(ShiftAmount); |
6442 | Upper = *C + 1; |
6443 | } |
6444 | break; |
6445 | |
6446 | case Instruction::Shl: |
6447 | if (match(BO.getOperand(0), m_APInt(C))) { |
6448 | if (IIQ.hasNoUnsignedWrap(&BO)) { |
6449 | // 'shl nuw C, x' produces [C, C << CLZ(C)] |
6450 | Lower = *C; |
6451 | Upper = Lower.shl(Lower.countLeadingZeros()) + 1; |
6452 | } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw? |
6453 | if (C->isNegative()) { |
6454 | // 'shl nsw C, x' produces [C << CLO(C)-1, C] |
6455 | unsigned ShiftAmount = C->countLeadingOnes() - 1; |
6456 | Lower = C->shl(ShiftAmount); |
6457 | Upper = *C + 1; |
6458 | } else { |
6459 | // 'shl nsw C, x' produces [C, C << CLZ(C)-1] |
6460 | unsigned ShiftAmount = C->countLeadingZeros() - 1; |
6461 | Lower = *C; |
6462 | Upper = C->shl(ShiftAmount) + 1; |
6463 | } |
6464 | } |
6465 | } |
6466 | break; |
6467 | |
6468 | case Instruction::SDiv: |
6469 | if (match(BO.getOperand(1), m_APInt(C))) { |
6470 | APInt IntMin = APInt::getSignedMinValue(Width); |
6471 | APInt IntMax = APInt::getSignedMaxValue(Width); |
6472 | if (C->isAllOnesValue()) { |
6473 | // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6474 | // (INT_MIN sdiv -1 overflows and is undefined, so INT_MIN is excluded).
6475 | Lower = IntMin + 1; |
6476 | Upper = IntMax + 1; |
6477 | } else if (C->countLeadingZeros() < Width - 1) { |
6478 | // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C] |
6479 | // where C != -1 and C != 0 and C != 1 |
6480 | Lower = IntMin.sdiv(*C); |
6481 | Upper = IntMax.sdiv(*C); |
6482 | if (Lower.sgt(Upper)) |
6483 | std::swap(Lower, Upper); |
6484 | Upper = Upper + 1; |
6485 | assert(Upper != Lower && "Upper part of range has wrapped!");
6486 | } |
6487 | } else if (match(BO.getOperand(0), m_APInt(C))) { |
6488 | if (C->isMinSignedValue()) { |
6489 | // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2]. |
6490 | Lower = *C; |
6491 | Upper = Lower.lshr(1) + 1; |
6492 | } else { |
6493 | // 'sdiv C, x' produces [-|C|, |C|]. |
6494 | Upper = C->abs() + 1; |
6495 | Lower = (-Upper) + 1; |
6496 | } |
6497 | } |
6498 | break; |
6499 | |
6500 | case Instruction::UDiv: |
6501 | if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) { |
6502 | // 'udiv x, C' produces [0, UINT_MAX / C]. |
6503 | Upper = APInt::getMaxValue(Width).udiv(*C) + 1; |
6504 | } else if (match(BO.getOperand(0), m_APInt(C))) { |
6505 | // 'udiv C, x' produces [0, C]. |
6506 | Upper = *C + 1; |
6507 | } |
6508 | break; |
6509 | |
6510 | case Instruction::SRem: |
6511 | if (match(BO.getOperand(1), m_APInt(C))) { |
6512 | // 'srem x, C' produces (-|C|, |C|). |
6513 | Upper = C->abs(); |
6514 | Lower = (-Upper) + 1; |
6515 | } |
6516 | break; |
6517 | |
6518 | case Instruction::URem: |
6519 | if (match(BO.getOperand(1), m_APInt(C))) |
6520 | // 'urem x, C' produces [0, C). |
6521 | Upper = *C; |
6522 | break; |
6523 | |
6524 | default: |
6525 | break; |
6526 | } |
6527 | } |
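// Worked example for the Add case above (hypothetical i8 value): for
// "add nsw i8 %x, 5" (C = +5, Width = 8), the nsw branch sets
// Lower = SINT_MIN + 5 = -123 and Upper = SINT_MAX + 1 (which wraps), and
// the caller's ConstantRange::getNonEmpty(Lower, Upper) then denotes the
// signed interval [-123, 127].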
6528 | |
6529 | static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower, |
6530 | APInt &Upper) { |
6531 | unsigned Width = Lower.getBitWidth(); |
6532 | const APInt *C; |
6533 | switch (II.getIntrinsicID()) { |
6534 | case Intrinsic::ctpop: |
6535 | case Intrinsic::ctlz: |
6536 | case Intrinsic::cttz: |
6537 | // Maximum of set/clear bits is the bit width. |
6538 | assert(Lower == 0 && "Expected lower bound to be zero");
6539 | Upper = Width + 1; |
6540 | break; |
6541 | case Intrinsic::uadd_sat: |
6542 | // uadd.sat(x, C) produces [C, UINT_MAX]. |
6543 | if (match(II.getOperand(0), m_APInt(C)) || |
6544 | match(II.getOperand(1), m_APInt(C))) |
6545 | Lower = *C; |
6546 | break; |
6547 | case Intrinsic::sadd_sat: |
6548 | if (match(II.getOperand(0), m_APInt(C)) || |
6549 | match(II.getOperand(1), m_APInt(C))) { |
6550 | if (C->isNegative()) { |
6551 | // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)]. |
6552 | Lower = APInt::getSignedMinValue(Width); |
6553 | Upper = APInt::getSignedMaxValue(Width) + *C + 1; |
6554 | } else { |
6555 | // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX]. |
6556 | Lower = APInt::getSignedMinValue(Width) + *C; |
6557 | Upper = APInt::getSignedMaxValue(Width) + 1; |
6558 | } |
6559 | } |
6560 | break; |
6561 | case Intrinsic::usub_sat: |
6562 | // usub.sat(C, x) produces [0, C]. |
6563 | if (match(II.getOperand(0), m_APInt(C))) |
6564 | Upper = *C + 1; |
6565 | // usub.sat(x, C) produces [0, UINT_MAX - C]. |
6566 | else if (match(II.getOperand(1), m_APInt(C))) |
6567 | Upper = APInt::getMaxValue(Width) - *C + 1; |
6568 | break; |
6569 | case Intrinsic::ssub_sat: |
6570 | if (match(II.getOperand(0), m_APInt(C))) { |
6571 | if (C->isNegative()) { |
6572 | // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)]. |
6573 | Lower = APInt::getSignedMinValue(Width); |
6574 | Upper = *C - APInt::getSignedMinValue(Width) + 1; |
6575 | } else { |
6576 | // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX]. |
6577 | Lower = *C - APInt::getSignedMaxValue(Width); |
6578 | Upper = APInt::getSignedMaxValue(Width) + 1; |
6579 | } |
6580 | } else if (match(II.getOperand(1), m_APInt(C))) { |
6581 | if (C->isNegative()) { |
6582 | // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]: |
6583 | Lower = APInt::getSignedMinValue(Width) - *C; |
6584 | Upper = APInt::getSignedMaxValue(Width) + 1; |
6585 | } else { |
6586 | // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C]. |
6587 | Lower = APInt::getSignedMinValue(Width); |
6588 | Upper = APInt::getSignedMaxValue(Width) - *C + 1; |
6589 | } |
6590 | } |
6591 | break; |
6592 | case Intrinsic::umin: |
6593 | case Intrinsic::umax: |
6594 | case Intrinsic::smin: |
6595 | case Intrinsic::smax: |
6596 | if (!match(II.getOperand(0), m_APInt(C)) && |
6597 | !match(II.getOperand(1), m_APInt(C))) |
6598 | break; |
6599 | |
6600 | switch (II.getIntrinsicID()) { |
6601 | case Intrinsic::umin: |
6602 | Upper = *C + 1; |
6603 | break; |
6604 | case Intrinsic::umax: |
6605 | Lower = *C; |
6606 | break; |
6607 | case Intrinsic::smin: |
6608 | Lower = APInt::getSignedMinValue(Width); |
6609 | Upper = *C + 1; |
6610 | break; |
6611 | case Intrinsic::smax: |
6612 | Lower = *C; |
6613 | Upper = APInt::getSignedMaxValue(Width) + 1; |
6614 | break; |
6615 | default: |
6616 | llvm_unreachable("Must be min/max intrinsic");
6617 | } |
6618 | break; |
6619 | case Intrinsic::abs: |
6620 | // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX], |
6621 | // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN. |
6622 | if (match(II.getOperand(1), m_One())) |
6623 | Upper = APInt::getSignedMaxValue(Width) + 1; |
6624 | else |
6625 | Upper = APInt::getSignedMinValue(Width) + 1; |
6626 | break; |
6627 | default: |
6628 | break; |
6629 | } |
6630 | } |
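// Worked example for the uadd.sat case above (hypothetical i8 value): for
// "llvm.uadd.sat.i8(%x, 10)" the constant operand sets Lower = 10 while
// Upper stays 0, and getNonEmpty(10, 0) in the caller denotes [10, 255]:
// saturating unsigned addition never produces less than its constant operand.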
6631 | |
6632 | static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower, |
6633 | APInt &Upper, const InstrInfoQuery &IIQ) { |
6634 | const Value *LHS = nullptr, *RHS = nullptr; |
6635 | SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS); |
6636 | if (R.Flavor == SPF_UNKNOWN) |
6637 | return; |
6638 | |
6639 | unsigned BitWidth = SI.getType()->getScalarSizeInBits(); |
6640 | |
6641 | if (R.Flavor == SelectPatternFlavor::SPF_ABS) { |
6642 | // If the negation part of the abs (in RHS) has the NSW flag, |
6643 | // then the result of abs(X) is [0..SIGNED_MAX], |
6644 | // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN. |
6645 | Lower = APInt::getNullValue(BitWidth); |
6646 | if (match(RHS, m_Neg(m_Specific(LHS))) && |
6647 | IIQ.hasNoSignedWrap(cast<Instruction>(RHS))) |
6648 | Upper = APInt::getSignedMaxValue(BitWidth) + 1; |
6649 | else |
6650 | Upper = APInt::getSignedMinValue(BitWidth) + 1; |
6651 | return; |
6652 | } |
6653 | |
6654 | if (R.Flavor == SelectPatternFlavor::SPF_NABS) { |
6655 | // The result of -abs(X) is <= 0. |
6656 | Lower = APInt::getSignedMinValue(BitWidth); |
6657 | Upper = APInt(BitWidth, 1); |
6658 | return; |
6659 | } |
6660 | |
6661 | const APInt *C; |
6662 | if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C))) |
6663 | return; |
6664 | |
6665 | switch (R.Flavor) { |
6666 | case SPF_UMIN: |
6667 | Upper = *C + 1; |
6668 | break; |
6669 | case SPF_UMAX: |
6670 | Lower = *C; |
6671 | break; |
6672 | case SPF_SMIN: |
6673 | Lower = APInt::getSignedMinValue(BitWidth); |
6674 | Upper = *C + 1; |
6675 | break; |
6676 | case SPF_SMAX: |
6677 | Lower = *C; |
6678 | Upper = APInt::getSignedMaxValue(BitWidth) + 1; |
6679 | break; |
6680 | default: |
6681 | break; |
6682 | } |
6683 | } |
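// Worked example for the SPF_SMAX case above (hypothetical i32 value): for
// "select (icmp sgt i32 %x, 3), i32 %x, i32 3", matchSelectPattern reports
// smax with the constant 3, so Lower = 3 and Upper = SINT_MAX + 1, i.e. the
// range [3, INT32_MAX].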
6684 | |
6685 | ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo, |
6686 | AssumptionCache *AC, |
6687 | const Instruction *CtxI, |
6688 | unsigned Depth) { |
6689 | assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6690 | |
6691 | if (Depth == MaxAnalysisRecursionDepth) |
6692 | return ConstantRange::getFull(V->getType()->getScalarSizeInBits()); |
6693 | |
6694 | const APInt *C; |
6695 | if (match(V, m_APInt(C))) |
6696 | return ConstantRange(*C); |
6697 | |
6698 | InstrInfoQuery IIQ(UseInstrInfo); |
6699 | unsigned BitWidth = V->getType()->getScalarSizeInBits(); |
6700 | APInt Lower = APInt(BitWidth, 0); |
6701 | APInt Upper = APInt(BitWidth, 0); |
6702 | if (auto *BO = dyn_cast<BinaryOperator>(V)) |
6703 | setLimitsForBinOp(*BO, Lower, Upper, IIQ); |
6704 | else if (auto *II = dyn_cast<IntrinsicInst>(V)) |
6705 | setLimitsForIntrinsic(*II, Lower, Upper); |
6706 | else if (auto *SI = dyn_cast<SelectInst>(V)) |
6707 | setLimitsForSelectPattern(*SI, Lower, Upper, IIQ); |
6708 | |
6709 | ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper); |
6710 | |
6711 | if (auto *I = dyn_cast<Instruction>(V)) |
6712 | if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range)) |
6713 | CR = CR.intersectWith(getConstantRangeFromMetadata(*Range)); |
6714 | |
6715 | if (CtxI && AC) { |
6716 | // Try to restrict the range based on information from assumptions. |
6717 | for (auto &AssumeVH : AC->assumptionsFor(V)) { |
6718 | if (!AssumeVH) |
6719 | continue; |
6720 | CallInst *I = cast<CallInst>(AssumeVH); |
6721 | assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6722 | "Got assumption for the wrong function!");
6723 | assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6724 | "must be an assume intrinsic");
6725 | |
6726 | if (!isValidAssumeForContext(I, CtxI, nullptr)) |
6727 | continue; |
6728 | Value *Arg = I->getArgOperand(0); |
6729 | ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg); |
6730 | // Currently we just use information from comparisons. |
6731 | if (!Cmp || Cmp->getOperand(0) != V) |
6732 | continue; |
6733 | ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo, |
6734 | AC, I, Depth + 1); |
6735 | CR = CR.intersectWith( |
6736 | ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS)); |
6737 | } |
6738 | } |
6739 | |
6740 | return CR; |
6741 | } |
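// A minimal usage sketch (hypothetical caller, not part of this file):
//   ConstantRange CR = computeConstantRange(V, /*UseInstrInfo=*/true);
//   if (CR.isAllNonNegative())
//     ...; // every possible runtime value of V is >= 0
// Passing an AssumptionCache plus a context instruction additionally folds
// in dominating llvm.assume comparisons, as the loop above shows.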
6742 | |
6743 | static Optional<int64_t> |
6744 | getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) { |
6745 | // Skip over the first indices. |
6746 | gep_type_iterator GTI = gep_type_begin(GEP); |
6747 | for (unsigned i = 1; i != Idx; ++i, ++GTI) |
6748 | /*skip along*/; |
6749 | |
6750 | // Compute the offset implied by the rest of the indices. |
6751 | int64_t Offset = 0; |
6752 | for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) { |
6753 | ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i)); |
6754 | if (!OpC) |
6755 | return None; |
6756 | if (OpC->isZero()) |
6757 | continue; // No offset. |
6758 | |
6759 | // Handle struct indices, which add their field offset to the pointer. |
6760 | if (StructType *STy = GTI.getStructTypeOrNull()) { |
6761 | Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue()); |
6762 | continue; |
6763 | } |
6764 | |
6765 | // Otherwise, we have a sequential type like an array or fixed-length |
6766 | // vector. Multiply the index by the ElementSize. |
6767 | TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType()); |
6768 | if (Size.isScalable()) |
6769 | return None; |
6770 | Offset += Size.getFixedSize() * OpC->getSExtValue(); |
6771 | } |
6772 | |
6773 | return Offset; |
6774 | } |
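// Worked example (hypothetical types): for
//   %p = getelementptr {i32, i64}, {i32, i64}* %base, i64 0, i32 1
// called with Idx = 1, the i64 0 index contributes nothing and the struct
// index 1 contributes its field offset from getStructLayout (8 under the
// usual layout), so the result is 8. A scalable-vector index returns None.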
6775 | |
6776 | Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2, |
6777 | const DataLayout &DL) { |
6778 | Ptr1 = Ptr1->stripPointerCasts(); |
6779 | Ptr2 = Ptr2->stripPointerCasts(); |
6780 | |
6781 | // Handle the trivial case first. |
6782 | if (Ptr1 == Ptr2) { |
6783 | return 0; |
6784 | } |
6785 | |
6786 | const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1); |
6787 | const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2); |
6788 | |
6789 | // If one pointer is a GEP see if the GEP is a constant offset from the base, |
6790 | // as in "P" and "gep P, 1". |
6791 | // Also do this iteratively to handle the following case:
6792 | // Ptr_t1 = GEP Ptr1, c1 |
6793 | // Ptr_t2 = GEP Ptr_t1, c2 |
6794 | // Ptr2 = GEP Ptr_t2, c3 |
6795 | // where we will return c1+c2+c3. |
6796 | // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base |
6797 | // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases |
6798 | // are the same, and return the difference between offsets. |
6799 | auto getOffsetFromBase = [&DL](const GEPOperator *GEP, |
6800 | const Value *Ptr) -> Optional<int64_t> { |
6801 | const GEPOperator *GEP_T = GEP; |
6802 | int64_t OffsetVal = 0; |
6803 | bool HasSameBase = false; |
6804 | while (GEP_T) { |
6805 | auto Offset = getOffsetFromIndex(GEP_T, 1, DL); |
6806 | if (!Offset) |
6807 | return None; |
6808 | OffsetVal += *Offset; |
6809 | auto Op0 = GEP_T->getOperand(0)->stripPointerCasts(); |
6810 | if (Op0 == Ptr) { |
6811 | HasSameBase = true; |
6812 | break; |
6813 | } |
6814 | GEP_T = dyn_cast<GEPOperator>(Op0); |
6815 | } |
6816 | if (!HasSameBase) |
6817 | return None; |
6818 | return OffsetVal; |
6819 | }; |
6820 | |
6821 | if (GEP1) { |
6822 | auto Offset = getOffsetFromBase(GEP1, Ptr2); |
6823 | if (Offset) |
6824 | return -*Offset; |
6825 | } |
6826 | if (GEP2) { |
6827 | auto Offset = getOffsetFromBase(GEP2, Ptr1); |
6828 | if (Offset) |
6829 | return Offset; |
6830 | } |
6831 | |
6832 | // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical |
6833 | // base. After that base, they may have some number of common (and |
6834 | // potentially variable) indices. After that they can have some constant
6835 | // offset, which determines their offset from each other. At this point, we |
6836 | // handle no other case. |
6837 | if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0)) |
6838 | return None; |
6839 | |
6840 | // Skip any common indices and track the GEP types. |
6841 | unsigned Idx = 1; |
6842 | for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx) |
6843 | if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx)) |
6844 | break; |
6845 | |
6846 | auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL); |
6847 | auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL); |
6848 | if (!Offset1 || !Offset2) |
6849 | return None; |
6850 | return *Offset2 - *Offset1; |
6851 | } |
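// Example of the iterative base walk above (hypothetical IR):
//   %t1 = getelementptr i8, i8* %p, i64 4
//   %t2 = getelementptr i8, i8* %t1, i64 6
// isPointerOffset(%p, %t2, DL) follows %t2 -> %t1 -> %p, accumulating
// 6 + 4 = 10 and returning 10; the mirrored query isPointerOffset(%t2, %p,
// DL) returns -10.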