File: build/source/llvm/include/llvm/IR/Instructions.h
Warning: line 2712, column 17: Called C++ object pointer is null
//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
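/// For example: i32 yields 32, <4 x i16> yields 16, and a pointer type (whose
/// scalar size is reported as 0) falls back to the DataLayout pointer size,
/// e.g. 64 on a typical 64-bit target.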
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  if (isa<ScalableVectorType>(Shuf->getType())) {
    assert(DemandedElts == APInt(1,1));
    DemandedLHS = DemandedRHS = DemandedElts;
    return true;
  }

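  // For example, with mask <0,4,1,5> on two 4-element operands, output lanes
  // 0 and 2 read the LHS and lanes 1 and 3 read the RHS; demanding output
  // lanes {0,1} therefore demands element 0 of both the LHS and the RHS.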
  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(),
                                      DemandedElts, DemandedLHS, DemandedRHS);
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes. This means
  // that all lanes in a scalable vector are considered demanded.
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
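  // Any given bit can be set in at most one of the two: bit i of (X & ~M) can
  // be 1 only where M's bit i is 0, and bit i of (Y & M) only where it is 1.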
  {
    Value *M;
    if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(RHS, m_c_And(m_Specific(M), m_Value())))
      return true;
    if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(LHS, m_c_And(m_Specific(M), m_Value())))
      return true;
  }

  // X op (Y & ~X)
  if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
      match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
    return true;

  // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
  // for constant Y.
  Value *Y;
  if (match(RHS,
            m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
      match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
    return true;

  // Peek through extends to find a 'not' of the other side:
  // (ext Y) op ext(~Y)
  // (ext ~Y) op ext(Y)
  if ((match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))) ||
      (match(RHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(LHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))))
    return true;

  // Look for: (A & B) op ~(A | B)
  {
    Value *A, *B;
    if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
        match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
    if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
        match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
  }
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
  return !I->user_empty() && all_of(I->users(), [](const User *U) {
    ICmpInst::Predicate P;
    return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
  });
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
                                         unsigned Depth, AssumptionCache *AC,
                                         const Instruction *CxtI,
                                         const DominatorTree *DT) {
  unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
  return V->getType()->getScalarSizeInBits() - SignBits + 1;
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
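  // For example, if the low two bits of Op0 are known to be 01 and the low two
  // bits of Op1 are known to be 10, the low two bits of the sum are known to
  // be 11: no carry can be generated below bit 2, and the unknown high bits of
  // either operand cannot disturb the known low bits.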
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
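      // For example, (-3) * (-5) = 15 and 2 * 7 = 14 are non-negative, while
      // (-3) * 5 = -15 is negative; the non-negative factor must additionally
      // be known non-zero, since (-3) * 0 = 0 is not negative.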
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  bool SelfMultiply = Op0 == Op1;
  // TODO: SelfMultiply can be poison, but not undef.
  if (SelfMultiply)
    SelfMultiply &=
        isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
  Known = KnownBits::mul(Known, Known2, SelfMultiply);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
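    // For example, the i8 range [8, 10) holds only 0b00001000 and 0b00001001;
    // umax ^ umin = 9 ^ 8 = 0b00000001 has 7 leading zeros, so the top 7 bits
    // are a common prefix and only the lowest bit remains unknown.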
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || (isa<Instruction>(V) &&
                     !cast<Instruction>(V)->mayHaveSideEffects() &&
                     !cast<Instruction>(V)->isTerminator())) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) comes
    // before the context instruction.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
    if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
      return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
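  // For example, for v u>= 3 the region of satisfying values is [3, 0), i.e.
  // [3, UINT_MAX], which excludes zero; for v u< 5 it is [0, 5), which
  // contains zero, so nothing is implied.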
  return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
}

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromCmp(const Value *V, const ICmpInst *Cmp,
                                    KnownBits &Known, unsigned Depth,
                                    const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  // We are attempting to compute known bits for the operands of an assume.
  // Do not try to use other assumptions for those recursive calls because
  // that can lead to mutual recursion and a compile-time explosion.
  // An example of the mutual recursion: computeKnownBits can call
  // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
  // and so on.
  Query QueryNoAC = Q;
  QueryNoAC.AC = nullptr;

  // Note that ptrtoint may change the bitwidth.
  Value *A, *B;
  auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

  CmpInst::Predicate Pred;
  uint64_t C;
  switch (Cmp->getPredicate()) {
  default:
    break;
  case ICmpInst::ICMP_EQ:
    // assume(v = a)
    if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
      // assume(v & b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits MaskKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
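      // For example, assume((v & 0xF0) == 0x30) makes bits 7..4 of v known to
      // be 0011, while the low four bits of v remain unknown.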
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
      // assume(~(v & b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits MaskKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
      // assume(v | b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      // assume(~(v | b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      // assume(v ^ b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
      // assume(~(v ^ b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
      // assume(v << c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One |= RHSKnown.One;
      // assume(~(v << c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
      // assume(v >> c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
      // assume(~(v >> c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    }
    break;
  case ICmpInst::ICMP_SGE:
    // assume(v >=_s c) where c is non-negative
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    }
    break;
  case ICmpInst::ICMP_SGT:
    // assume(v >_s c) where c is at least -1.
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    }
    break;
  case ICmpInst::ICMP_SLE:
    // assume(v <=_s c) where c is negative
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    }
    break;
  case ICmpInst::ICMP_SLT:
    // assume(v <_s c) where c is non-positive
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    }
    break;
  case ICmpInst::ICMP_ULE:
    // assume(v <=_u c)
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
    break;
  case ICmpInst::ICMP_ULT:
    // assume(v <_u c)
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
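      // For example, assume(v u< 16) with a 32-bit v: c = 16 has 27 leading
      // zeros and is a power of 2, so v <= 15 and the high 28 bits of v are
      // known to be zero.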
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
    break;
  case ICmpInst::ICMP_NE: {
    // assume (v & b != 0) where b is a power of 2
    const APInt *BPow2;
    if (match(Cmp, m_ICmp(Pred, m_c_And(m_V, m_Power2(BPow2)), m_Zero()))) {
      Known.One |= BPow2->zextOrTrunc(BitWidth);
    }
  } break;
  }
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
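  // For example, an "align" bundle operand of 8 implies the low Log2_64(8) = 3
  // bits of the pointer are zero.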
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      if (isPowerOf2_64(RK.ArgValue))
        Known.Zero.setLowBits(Log2_64(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
      continue;

    computeKnownBitsFromCmp(V, Cmp, Known, Depth, Q);
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is
/// a pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known bits of the shift source
/// and a shift amount, computes the implied known bits of the shift operator's
/// result for that shift amount. The results from calling KF are
/// conservatively combined for all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
  bool ShiftAmtIsConstant = Known.isConstant();
  bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);

  if (ShiftAmtIsConstant) {
    Known = KF(Known2, Known);

    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (MaxShiftAmtIsOutOfRange) {
    Known.resetAll();
    return;
  }

  // It would be more clearly correct to use two fresh temporaries for this
  // calculation, but we reuse the APInts here to prevent unnecessary
  // allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  std::optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
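    // For example, if bit 1 of the shift amount is known one, bit 0 is
    // unknown, and all higher bits are known zero, only the amounts 2 and 3
    // pass the two filters below, so Known becomes the intersection of
    // KF(Known2, 2) and KF(Known2, 3).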
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero)
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known = KnownBits::commonBits(
        Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static KnownBits getKnownBitsFromAndXorOr(const Operator *I,
                                          const APInt &DemandedElts,
                                          const KnownBits &KnownLHS,
                                          const KnownBits &KnownRHS,
                                          unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownLHS.getBitWidth();
  KnownBits KnownOut(BitWidth);
  bool IsAnd = false;
  bool HasKnownOne = !KnownLHS.One.isZero() || !KnownRHS.One.isZero();
  Value *X = nullptr, *Y = nullptr;

  switch (I->getOpcode()) {
  case Instruction::And:
    KnownOut = KnownLHS & KnownRHS;
    IsAnd = true;
    // and(x, -x) is a common idiom that will clear all but the lowest set
    // bit. If we have a single known bit in x, we can clear all bits
    // above it.
    // TODO: instcombine often reassociates independent `and` which can hide
    // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x).
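    // For example, if bits 2..0 of x are known zero and bit 3 is known one,
    // the lowest set bit of x is bit 3, so and(x, -x) is exactly 0b1000.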
    if (HasKnownOne && match(I, m_c_And(m_Value(X), m_Neg(m_Deferred(X))))) {
      // -(-x) == x so using whichever (LHS/RHS) gets us a better result.
      if (KnownLHS.countMaxTrailingZeros() <= KnownRHS.countMaxTrailingZeros())
        KnownOut = KnownLHS.blsi();
      else
        KnownOut = KnownRHS.blsi();
    }
    break;
  case Instruction::Or:
    KnownOut = KnownLHS | KnownRHS;
    break;
  case Instruction::Xor:
    KnownOut = KnownLHS ^ KnownRHS;
    // xor(x, x-1) is a common idiom that will clear all but the lowest set
    // bit. If we have a single known bit in x, we can clear all bits
    // above it.
    // TODO: xor(x, x-1) is often rewritten as xor(x, x-C) where C != -1, but
    // for the purpose of demanded bits (xor(x, x-C) & Demanded) ==
    // (xor(x, x-1) & Demanded). Extend the xor pattern to use an arbitrary C
    // if xor(x, x-C) is the same as xor(x, x-1).
1106 | if (HasKnownOne && | ||||
1107 | match(I, m_c_Xor(m_Value(X), m_c_Add(m_Deferred(X), m_AllOnes())))) { | ||||
1108 | const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS; | ||||
1109 | KnownOut = XBits.blsmsk(); | ||||
1110 | } | ||||
1111 | break; | ||||
1112 | default: | ||||
1113 | llvm_unreachable("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'")::llvm::llvm_unreachable_internal("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'" , "llvm/lib/Analysis/ValueTracking.cpp", 1113); | ||||
1114 | } | ||||
1115 | |||||
1116 | // and(x, add (x, -1)) is a common idiom that always clears the low bit; | ||||
1117 | // xor/or(x, add (x, -1)) is an idiom that will always set the low bit. | ||||
1118 | // here we handle the more general case of adding any odd number by | ||||
1119 | // matching the form and/xor/or(x, add(x, y)) where y is odd. | ||||
1120 | // TODO: This could be generalized to clearing any bit set in y where the | ||||
1121 | // following bit is known to be unset in y. | ||||
1122 | if (!KnownOut.Zero[0] && !KnownOut.One[0] && | ||||
1123 | (match(I, m_c_BinOp(m_Value(X), m_c_Add(m_Deferred(X), m_Value(Y)))) || | ||||
1124 | match(I, m_c_BinOp(m_Value(X), m_Sub(m_Deferred(X), m_Value(Y)))) || | ||||
1125 | match(I, m_c_BinOp(m_Value(X), m_Sub(m_Value(Y), m_Deferred(X)))))) { | ||||
1126 | KnownBits KnownY(BitWidth); | ||||
1127 | computeKnownBits(Y, DemandedElts, KnownY, Depth + 1, Q); | ||||
1128 | if (KnownY.countMinTrailingOnes() > 0) { | ||||
1129 | if (IsAnd) | ||||
1130 | KnownOut.Zero.setBit(0); | ||||
1131 | else | ||||
1132 | KnownOut.One.setBit(0); | ||||
1133 | } | ||||
1134 | } | ||||
1135 | return KnownOut; | ||||
1136 | } | ||||
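
// Worked example for the and/xor idioms above (illustrative values): for
// x = 0b00101000,
//   -x    = 0b11011000, so and(x, -x)    = 0b00001000 (blsi: only the
//           lowest set bit survives), and
//   x - 1 = 0b00100111, so xor(x, x - 1) = 0b00001111 (blsmsk: a mask up
//           to and including the lowest set bit).
// If the known bits of x pin down its lowest set bit, blsi()/blsmsk() pin
// down every bit of the result.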

// Public so this can be used in `SimplifyDemandedUseBits`.
KnownBits llvm::analyzeKnownBitsFromAndXorOr(
    const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS,
    unsigned Depth, const DataLayout &DL, AssumptionCache *AC,
    const Instruction *CxtI, const DominatorTree *DT,
    OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  auto *FVTy = dyn_cast<FixedVectorType>(I->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);

  return getKnownBitsFromAndXorOr(
      I, DemandedElts, KnownLHS, KnownRHS, Depth,
      Query(DL, AC, safeCxtI(I, CxtI), DT, UseInstrInfo, ORE));
}

ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
  Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
  // Without vscale_range, we only know that vscale is non-zero.
  if (!Attr.isValid())
    return ConstantRange(APInt(BitWidth, 1), APInt::getZero(BitWidth));

  unsigned AttrMin = Attr.getVScaleRangeMin();
  // Minimum is larger than vscale width, result is always poison.
  if ((unsigned)llvm::bit_width(AttrMin) > BitWidth)
    return ConstantRange::getEmpty(BitWidth);

  APInt Min(BitWidth, AttrMin);
  std::optional<unsigned> AttrMax = Attr.getVScaleRangeMax();
  if (!AttrMax || (unsigned)llvm::bit_width(*AttrMax) > BitWidth)
    return ConstantRange(Min, APInt::getZero(BitWidth));

  return ConstantRange(Min, APInt(BitWidth, *AttrMax) + 1);
}
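
// Example (illustrative): for a function carrying `vscale_range(2,16)` and
// BitWidth 64, this returns the half-open range [2, 17), i.e. vscale is
// known to lie between 2 and 16 inclusive; with no attribute it returns
// the wrapped "non-zero" range [1, 0).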

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    [[fallthrough]];
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }

    // Handle cast from vector integer type to scalar or vector integer.
    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
        !I->getType()->isIntOrIntVectorTy() ||
        isa<ScalableVectorType>(I->getType()))
      break;

    // Look through a cast from narrow vector elements to wider type.
    // Examples: v4i32 -> v2i64, v3i8 -> i24
    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    if (BitWidth % SubBitWidth == 0) {
      // Known bits are automatically intersected across demanded elements of a
      // vector. So for example, if a bit is computed as known zero, it must be
      // zero across all demanded elements of the vector.
      //
      // For this bitcast, each demanded element of the output is sub-divided
      // across a set of smaller vector elements in the source vector. To get
      // the known bits for an entire element of the output, compute the known
      // bits for each sub-element sequentially. This is done by shifting the
      // one-set-bit demanded elements parameter across the sub-elements for
      // consecutive calls to computeKnownBits. We are using the demanded
      // elements parameter as a mask operator.
      //
      // The known bits of each sub-element are then inserted into place
      // (dependent on endian) to form the full result of known bits.
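      //
      // E.g. bitcasting <4 x i32> to <2 x i64> with output element 1
      // demanded gives SubScale = 2 and SubDemandedElts = 0b0100; the two
      // computeKnownBits calls below then demand source elements 2 and 3
      // (masks 0b0100 and 0b1000) and insert their known bits into the low
      // and high halves of the 64-bit result (swapped on big-endian).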
      unsigned NumElts = DemandedElts.getBitWidth();
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);
      }

      KnownBits KnownSrc(SubBitWidth);
      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
                         Depth + 1, Q);
        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
      }
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW) {
        if (KnownVal.Zero.isSignBitSet())
          Result.Zero.setSignBit();
        if (KnownVal.One.isSignBitSet())
          Result.One.setSignBit();
      }
      return Result;
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Trailing zeros of a left-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setLowBits(C->countr_zero());
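    // E.g. if operand 0 is the constant 12 (0b1100), then 12 << n always
    // has at least two trailing zero bits, whatever is known about n.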
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::lshr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Leading zeros of a right-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setHighBits(C->countl_zero());
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::ashr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle case when index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
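      // E.g. indexing an i64 array (TypeSizeInBytes == 8) scales the index
      // by 8, so the scaled index contributes at least three known trailing
      // zero bits to the offset.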
      if (IndexTypeSize.isScalable()) {
        // For scalable types the only thing we know about sizeof is
        // that this is a multiple of the minimum size.
        ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
      } else if (IndexBits.isConstant()) {
        APInt IndexConst = IndexBits.getConstant();
        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
        IndexConst *= ScalingFactor;
        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
        continue;
      } else {
        ScalingFactor =
            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
      }
      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);

      // If the offsets have a different width from the pointer, according
      // to the language reference we need to sign-extend or truncate them
      // to the width of the pointer.
      IndexBits = IndexBits.sextOrTrunc(BitWidth);

      // Note that inbounds does *not* guarantee nsw for the addition, as only
      // the offset is signed, while the base address is unsigned.
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, IndexBits);
    }
    if (!Known.isUnknown() && !AccConstIndices.isZero()) {
      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, Index);
    }
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    BinaryOperator *BO = nullptr;
    Value *R = nullptr, *L = nullptr;
    if (matchSimpleRecurrence(P, BO, R, L)) {
      // Handle the case of a simple two-predecessor recurrence PHI.
      // There's a lot more that could theoretically be done here, but
      // this is sufficient to catch some interesting cases.
      unsigned Opcode = BO->getOpcode();

      // If this is a shift recurrence, we know the bits being shifted in.
      // We can combine that with information about the start value of the
      // recurrence to conclude facts about the result.
      if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
           Opcode == Instruction::Shl) &&
          BO->getOperand(0) == I) {

        // We have matched a recurrence of the form:
        // %iv = [R, %entry], [%iv.next, %backedge]
        // %iv.next = shift_op %iv, L

        // Recurse with the phi context to avoid concern about whether facts
        // inferred hold at original context instruction. TODO: It may be
        // correct to use the original context. If warranted, explore and
        // add sufficient tests to cover.
        Query RecQ = Q;
        RecQ.CxtI = P;
        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
        switch (Opcode) {
        case Instruction::Shl:
          // A shl recurrence will only increase the trailing zeros.
          Known.Zero.setLowBits(Known2.countMinTrailingZeros());
          break;
        case Instruction::LShr:
          // A lshr recurrence will preserve the leading zeros of the
          // start value.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          break;
        case Instruction::AShr:
          // An ashr recurrence will extend the initial sign bit.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          Known.One.setHighBits(Known2.countMinLeadingOnes());
          break;
        }
      }
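
      // E.g. given:
      //   %iv = phi i32 [ %r, %entry ], [ %iv.next, %backedge ]
      //   %iv.next = lshr i32 %iv, %l
      // if %r is known to have 16 leading zeros, every value of %iv does
      // too, since lshr can only add leading zeros.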

      // Check for operations that have the property that if
      // both their operands have low zero bits, the result
      // will have low zero bits.
      if (Opcode == Instruction::Add ||
          Opcode == Instruction::Sub ||
          Opcode == Instruction::And ||
          Opcode == Instruction::Or ||
          Opcode == Instruction::Mul) {
        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
        Query RecQ = Q;

        unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
        Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
        Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();

        // Ok, we have a PHI of the form L op= R. Check for low
        // zero bits.
        RecQ.CxtI = RInst;
        computeKnownBits(R, Known2, Depth + 1, RecQ);

        // We need to take the minimum number of known bits
        KnownBits Known3(BitWidth);
        RecQ.CxtI = LInst;
        computeKnownBits(L, Known3, Depth + 1, RecQ);

        Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                       Known3.countMinTrailingZeros()));

        auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
        if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
          // If initial value of recurrence is nonnegative, and we are adding
          // a nonnegative number with nsw, the result can only be nonnegative
          // or poison value regardless of the number of times we execute the
          // add in phi recurrence. If initial value is negative and we are
          // adding a negative number with nsw, the result can only be
          // negative or poison value. Similar arguments apply to sub and mul.
          //
          // (add non-negative, non-negative) --> non-negative
          // (add negative, negative) --> negative
          if (Opcode == Instruction::Add) {
            if (Known2.isNonNegative() && Known3.isNonNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNegative())
              Known.makeNegative();
          }

          // (sub nsw non-negative, negative) --> non-negative
          // (sub nsw negative, non-negative) --> negative
          else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
            if (Known2.isNonNegative() && Known3.isNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNonNegative())
              Known.makeNegative();
          }

          // (mul nsw non-negative, non-negative) --> non-negative
          else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                   Known3.isNonNegative())
            Known.makeNonNegative();
        }

        break;
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value refers back to the PHI itself.
      if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
        Value *IncValue = P->getIncomingValue(u);
        // Skip direct self references.
        if (IncValue == P) continue;

        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
        Query RecQ = Q;
        RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();

        Known2 = KnownBits(BitWidth);

        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);

        // If this failed, see if we can use a conditional branch into the phi
        // to help us determine the range of the value.
        if (Known2.isUnknown()) {
          ICmpInst::Predicate Pred;
          const APInt *RHSC;
          BasicBlock *TrueSucc, *FalseSucc;
          // TODO: Use RHS Value and compute range from its known bits.
          if (match(RecQ.CxtI,
                    m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)),
                         m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
            // Check for cases of duplicate successors.
            if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) {
              // If we're using the false successor, invert the predicate.
              if (FalseSucc == P->getParent())
                Pred = CmpInst::getInversePredicate(Pred);

              switch (Pred) {
              case CmpInst::Predicate::ICMP_EQ:
                Known2 = KnownBits::makeConstant(*RHSC);
                break;
              case CmpInst::Predicate::ICMP_ULE:
                Known2.Zero.setHighBits(RHSC->countl_zero());
                break;
              case CmpInst::Predicate::ICMP_ULT:
                Known2.Zero.setHighBits((*RHSC - 1).countl_zero());
                break;
              default:
                // TODO - add additional integer predicate handling.
                break;
              }
            }
          }
        }

        Known = KnownBits::commonBits(Known, Known2);
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (Known.isUnknown())
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::abs: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
        Known = Known2.abs(IntMinIsPoison);
        break;
      }
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.countMaxLeadingZeros();
        // If this call is poison for 0 input, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = llvm::bit_width(PossibleLZ);
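        // E.g. for i32 with the poison-on-zero flag set, PossibleLZ <= 31,
        // so LowBits == 5 and bits [5, 32) of the result are known zero.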
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.countMaxTrailingZeros();
        // If this call is poison for 0 input, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = llvm::bit_width(PossibleTZ);
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = llvm::bit_width(BitsPossiblySet);
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::fshr:
      case Intrinsic::fshl: {
        const APInt *SA;
        if (!match(I->getOperand(2), m_APInt(SA)))
          break;

        // Normalize to funnel shift left.
        uint64_t ShiftAmt = SA->urem(BitWidth);
        if (II->getIntrinsicID() == Intrinsic::fshr)
          ShiftAmt = BitWidth - ShiftAmt;

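        // E.g. fshr on i8 with a shift amount of 3 becomes fshl by
        // 8 - 3 == 5: the low 5 bits of the result come from the high bits
        // of operand 1, the high 3 bits from the low bits of operand 0.
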
        KnownBits Known3(BitWidth);
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);

        Known.Zero =
            Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
        Known.One =
            Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
        break;
      }
      case Intrinsic::uadd_sat:
      case Intrinsic::usub_sat: {
        bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

        // Add: Leading ones of either operand are preserved.
        // Sub: Leading zeros of LHS and leading ones of RHS are preserved
        // as leading zeros in the result.
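        // E.g. for uadd.sat on i8 with one operand known to match
        // 0b111?????, either the true sum keeps those leading ones or the
        // result saturates to 0xFF; the top three bits are ones either way.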
        unsigned LeadingKnown;
        if (IsAdd)
          LeadingKnown = std::max(Known.countMinLeadingOnes(),
                                  Known2.countMinLeadingOnes());
        else
          LeadingKnown = std::max(Known.countMinLeadingZeros(),
                                  Known2.countMinLeadingOnes());

        Known = KnownBits::computeForAddSub(
            IsAdd, /* NSW */ false, Known, Known2);

        // We select between the operation result and all-ones/zero
        // respectively, so we can preserve known ones/zeros.
        if (IsAdd) {
          Known.One.setHighBits(LeadingKnown);
          Known.Zero.clearAllBits();
        } else {
          Known.Zero.setHighBits(LeadingKnown);
          Known.One.clearAllBits();
        }
        break;
      }
      case Intrinsic::umin:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::umin(Known, Known2);
        break;
      case Intrinsic::umax:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::umax(Known, Known2);
        break;
      case Intrinsic::smin:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::smin(Known, Known2);
        break;
      case Intrinsic::smax:
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
        Known = KnownBits::smax(Known, Known2);
        break;
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      case Intrinsic::riscv_vsetvli:
      case Intrinsic::riscv_vsetvlimax:
        // Assume that the VL output is <= 65536.
        // TODO: Take SEW and LMUL into account.
        if (BitWidth > 17)
          Known.Zero.setBitsFrom(17);
        break;
      case Intrinsic::vscale: {
        if (!II->getParent() || !II->getFunction())
          break;

        Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits();
        break;
      }
      }
    }
    break;
  case Instruction::ShuffleVector: {
    auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
    // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
    if (!Shuf) {
      Known.resetAll();
      return;
    }
    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    APInt DemandedLHS, DemandedRHS;
    if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
      Known.resetAll();
      return;
    }
    Known.One.setAllBits();
    Known.Zero.setAllBits();
    if (!!DemandedLHS) {
      const Value *LHS = Shuf->getOperand(0);
      computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    if (!!DemandedRHS) {
      const Value *RHS = Shuf->getOperand(1);
      computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
      Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case Instruction::InsertElement: {
    if (isa<ScalableVectorType>(I->getType())) {
      Known.resetAll();
      return;
    }
    const Value *Vec = I->getOperand(0);
    const Value *Elt = I->getOperand(1);
    auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
    // Early out if the index is non-constant or out-of-range.
    unsigned NumElts = DemandedElts.getBitWidth();
    if (!CIdx || CIdx->getValue().uge(NumElts)) {
      Known.resetAll();
      return;
    }
    Known.One.setAllBits();
    Known.Zero.setAllBits();
    unsigned EltIdx = CIdx->getZExtValue();
    // Do we demand the inserted element?
    if (DemandedElts[EltIdx]) {
      computeKnownBits(Elt, Known, Depth + 1, Q);
      // If we don't know any bits, early out.
      if (Known.isUnknown())
        break;
    }
    // We don't need the base vector element that has been inserted.
    APInt DemandedVecElts = DemandedElts;
    DemandedVecElts.clearBit(EltIdx);
    if (!!DemandedVecElts) {
      computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
      Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case Instruction::ExtractElement: {
    // Look through extract element. If the index is non-constant or
    // out-of-range demand all elements, otherwise just the extracted element.
    const Value *Vec = I->getOperand(0);
    const Value *Idx = I->getOperand(1);
    auto *CIdx = dyn_cast<ConstantInt>(Idx);
    if (isa<ScalableVectorType>(Vec->getType())) {
      // FIXME: there's probably *something* we can do with scalable vectors
      Known.resetAll();
      break;
    }
    unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
    APInt DemandedVecElts = APInt::getAllOnes(NumElts);
    if (CIdx && CIdx->getValue().ult(NumElts))
      DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
    computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
    break;
  }
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, DemandedElts,
                                 Known, Known2, Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, DemandedElts,
                                 Known, Known2, Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              DemandedElts, Known, Known2, Depth, Q);
          break;
        }
      }
    }
    break;
  case Instruction::Freeze:
    if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
                                  Depth + 1))
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the demanded elements in the vector specified by DemandedElts.
void computeKnownBits(const Value *V, const APInt &DemandedElts,
                      KnownBits &Known, unsigned Depth, const Query &Q) {
  if (!DemandedElts) {
    // No demanded elts, better to assume we don't know anything.
    Known.resetAll();
    return;
  }

  assert(V && "No Value?");
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

#ifndef NDEBUG
  Type *Ty = V->getType();
  unsigned BitWidth = Known.getBitWidth();

  assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");

  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
    assert(
        FVTy->getNumElements() == DemandedElts.getBitWidth() &&
        "DemandedElt width should equal the fixed vector number of elements");
  } else {
    assert(DemandedElts == APInt(1, 1) &&
           "DemandedElt width should be 1 for scalars or scalable vectors");
  }

  Type *ScalarTy = Ty->getScalarType();
  if (ScalarTy->isPointerTy()) {
    assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
           "V and Known should have same BitWidth");
  } else {
    assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
           "V and Known should have same BitWidth");
  }
#endif

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known = KnownBits::makeConstant(*C);
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
    assert(!isa<ScalableVectorType>(V->getType()));
    // We know that CDV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;
      APInt Elt = CDV->getElementAsAPInt(i);
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
    assert(!isa<ScalableVectorType>(V->getType()));
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        Known.resetAll();
        return;
      }
      const APInt &Elt = ElementCI->getValue();
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  // Start out not knowing anything.
  Known.resetAll();

  // We can't imply anything about undefs.
  if (isa<UndefValue>(V))
    return;

  // There's no point in looking through other users of ConstantData for
  // assumptions. Confirm that we've handled them all.
  assert(!isa<ConstantData>(V) && "Unhandled constant data!");

  // All recursive calls that increase depth must come after this.
  if (Depth == MaxAnalysisRecursionDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable())
      computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
    return;
  }

  if (const Operator *I = dyn_cast<Operator>(V))
    computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);

  // Aligned pointers have trailing zeros - refine Known.Zero set
  if (isa<PointerType>(V->getType())) {
    Align Alignment = V->getPointerAlignment(Q.DL);
    Known.Zero.setLowBits(Log2(Alignment));
  }

  // computeKnownBitsFromAssume strictly refines Known.
  // Therefore, we run them after computeKnownBitsFromOperator.

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, Known, Depth, Q);

  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
}
2124 | |||||
2125 | /// Try to detect a recurrence that the value of the induction variable is | ||||
2126 | /// always a power of two (or zero). | ||||
2127 | static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, | ||||
2128 | unsigned Depth, Query &Q) { | ||||
2129 | BinaryOperator *BO = nullptr; | ||||
2130 | Value *Start = nullptr, *Step = nullptr; | ||||
2131 | if (!matchSimpleRecurrence(PN, BO, Start, Step)) | ||||
2132 | return false; | ||||
2133 | |||||
2134 | // Initial value must be a power of two. | ||||
2135 | for (const Use &U : PN->operands()) { | ||||
2136 | if (U.get() == Start) { | ||||
2137 | // Initial value comes from a different BB, need to adjust context | ||||
2138 | // instruction for analysis. | ||||
2139 | Q.CxtI = PN->getIncomingBlock(U)->getTerminator(); | ||||
2140 | if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q)) | ||||
2141 | return false; | ||||
2142 | } | ||||
2143 | } | ||||
2144 | |||||
2145 | // Except for Mul, the induction variable must be on the left side of the | ||||
2146 | // increment expression, otherwise its value can be arbitrary. | ||||
2147 | if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step) | ||||
2148 | return false; | ||||
2149 | |||||
2150 | Q.CxtI = BO->getParent()->getTerminator(); | ||||
2151 | switch (BO->getOpcode()) { | ||||
2152 | case Instruction::Mul: | ||||
2153 | // Power of two is closed under multiplication. | ||||
2154 | return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || | ||||
2155 | Q.IIQ.hasNoSignedWrap(BO)) && | ||||
2156 | isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q); | ||||
2157 | case Instruction::SDiv: | ||||
2158 | // Start value must not be signmask for signed division, so simply being a | ||||
2159 | // power of two is not sufficient, and it has to be a constant. | ||||
2160 | if (!match(Start, m_Power2()) || match(Start, m_SignMask())) | ||||
2161 | return false; | ||||
2162 | [[fallthrough]]; | ||||
2163 | case Instruction::UDiv: | ||||
2164 | // Divisor must be a power of two. | ||||
2165 | // If OrZero is false, we cannot guarantee that the induction variable stays | ||||
2166 | // non-zero after division (likewise for Shr), unless the division is exact. | ||||
2167 | return (OrZero || Q.IIQ.isExact(BO)) && | ||||
2168 | isKnownToBeAPowerOfTwo(Step, false, Depth, Q); | ||||
2169 | case Instruction::Shl: | ||||
2170 | return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO); | ||||
2171 | case Instruction::AShr: | ||||
2172 | if (!match(Start, m_Power2()) || match(Start, m_SignMask())) | ||||
2173 | return false; | ||||
2174 | [[fallthrough]]; | ||||
2175 | case Instruction::LShr: | ||||
2176 | return OrZero || Q.IIQ.isExact(BO); | ||||
2177 | default: | ||||
2178 | return false; | ||||
2179 | } | ||||
2180 | } | ||||
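// A minimal standalone sketch of the closure property matched above: a
// recurrence that starts at a power of two and shifts left without losing
// bits stays a power of two. Hypothetical demo code, not from this file.
#include <cassert>
#include <cstdint>
int main() {
  auto IsPow2 = [](uint64_t V) { return V != 0 && (V & (V - 1)) == 0; };
  uint64_t IV = 8;      // Start: a power of two.
  for (int i = 0; i < 4; ++i) {
    IV <<= 1;           // Step: shl with no bits shifted off (no wrap here).
    assert(IsPow2(IV)); // 16, 32, 64, 128: still powers of two.
  }
}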
2181 | |||||
2182 | /// Return true if the given value is known to have exactly one | ||||
2183 | /// bit set when defined. For vectors return true if every element is known to | ||||
2184 | /// be a power of two when defined. Supports values with integer or pointer | ||||
2185 | /// types and vectors of integers. | ||||
2186 | bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, | ||||
2187 | const Query &Q) { | ||||
2188 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); | ||||
2189 | |||||
2190 | // Attempt to match against constants. | ||||
2191 | if (OrZero && match(V, m_Power2OrZero())) | ||||
2192 | return true; | ||||
2193 | if (match(V, m_Power2())) | ||||
2194 | return true; | ||||
2195 | |||||
2196 | // 1 << X is clearly a power of two if the one is not shifted off the end. If | ||||
2197 | // it is shifted off the end then the result is undefined. | ||||
2198 | if (match(V, m_Shl(m_One(), m_Value()))) | ||||
2199 | return true; | ||||
2200 | |||||
2201 | // (signmask) >>l X is clearly a power of two if the one is not shifted off | ||||
2202 | // the bottom. If it is shifted off the bottom then the result is undefined. | ||||
2203 | if (match(V, m_LShr(m_SignMask(), m_Value()))) | ||||
2204 | return true; | ||||
2205 | |||||
2206 | // The remaining tests are all recursive, so bail out if we hit the limit. | ||||
2207 | if (Depth++ == MaxAnalysisRecursionDepth) | ||||
2208 | return false; | ||||
2209 | |||||
2210 | Value *X = nullptr, *Y = nullptr; | ||||
2211 | // A shift left or a logical shift right of a power of two is a power of two | ||||
2212 | // or zero. | ||||
2213 | if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || | ||||
2214 | match(V, m_LShr(m_Value(X), m_Value())))) | ||||
2215 | return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); | ||||
2216 | |||||
2217 | if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) | ||||
2218 | return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); | ||||
2219 | |||||
2220 | if (const SelectInst *SI = dyn_cast<SelectInst>(V)) | ||||
2221 | return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && | ||||
2222 | isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); | ||||
2223 | |||||
2224 | // Peek through min/max. | ||||
2225 | if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) { | ||||
2226 | return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) && | ||||
2227 | isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q); | ||||
2228 | } | ||||
2229 | |||||
2230 | if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { | ||||
2231 | // A power of two and'd with anything is a power of two or zero. | ||||
2232 | if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || | ||||
2233 | isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) | ||||
2234 | return true; | ||||
2235 | // X & (-X) is always a power of two or zero. | ||||
2236 | if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) | ||||
2237 | return true; | ||||
2238 | return false; | ||||
2239 | } | ||||
2240 | |||||
2241 | // Adding a power-of-two or zero to the same power-of-two or zero yields | ||||
2242 | // either the original power-of-two, a larger power-of-two or zero. | ||||
2243 | if (match(V, m_Add(m_Value(X), m_Value(Y)))) { | ||||
2244 | const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); | ||||
2245 | if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) || | ||||
2246 | Q.IIQ.hasNoSignedWrap(VOBO)) { | ||||
2247 | if (match(X, m_And(m_Specific(Y), m_Value())) || | ||||
2248 | match(X, m_And(m_Value(), m_Specific(Y)))) | ||||
2249 | if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) | ||||
2250 | return true; | ||||
2251 | if (match(Y, m_And(m_Specific(X), m_Value())) || | ||||
2252 | match(Y, m_And(m_Value(), m_Specific(X)))) | ||||
2253 | if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) | ||||
2254 | return true; | ||||
2255 | |||||
2256 | unsigned BitWidth = V->getType()->getScalarSizeInBits(); | ||||
2257 | KnownBits LHSBits(BitWidth); | ||||
2258 | computeKnownBits(X, LHSBits, Depth, Q); | ||||
2259 | |||||
2260 | KnownBits RHSBits(BitWidth); | ||||
2261 | computeKnownBits(Y, RHSBits, Depth, Q); | ||||
2262 | // If i8 V is a power of two or zero: | ||||
2263 | // ZeroBits: 1 1 1 0 1 1 1 1 | ||||
2264 | // ~ZeroBits: 0 0 0 1 0 0 0 0 | ||||
2265 | if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) | ||||
2266 | // If OrZero isn't set, we cannot give back a zero result. | ||||
2267 | // Make sure either the LHS or RHS has a bit set. | ||||
2268 | if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) | ||||
2269 | return true; | ||||
2270 | } | ||||
2271 | } | ||||
2272 | |||||
2273 | // A PHI node is a power of two if all incoming values are powers of two, or | ||||
2274 | // if it is an induction variable whose value is a power of two at each step. | ||||
2275 | if (const PHINode *PN = dyn_cast<PHINode>(V)) { | ||||
2276 | Query RecQ = Q; | ||||
2277 | |||||
2278 | // Check if it is an induction variable and always power of two. | ||||
2279 | if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ)) | ||||
2280 | return true; | ||||
2281 | |||||
2282 | // Recursively check all incoming values. Limit recursion to 2 levels, so | ||||
2283 | // that search complexity is limited to number of operands^2. | ||||
2284 | unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); | ||||
2285 | return llvm::all_of(PN->operands(), [&](const Use &U) { | ||||
2286 | // The value is a power of two if it comes from the PHI node itself by induction. | ||||
2287 | if (U.get() == PN) | ||||
2288 | return true; | ||||
2289 | |||||
2290 | // Change the context instruction to the incoming block where it is | ||||
2291 | // evaluated. | ||||
2292 | RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); | ||||
2293 | return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ); | ||||
2294 | }); | ||||
2295 | } | ||||
2296 | |||||
2297 | // An exact divide or right shift can only shift off zero bits, so the result | ||||
2298 | // is a power of two only if the first operand is a power of two and not | ||||
2299 | // copying a sign bit (sdiv int_min, 2). | ||||
2300 | if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || | ||||
2301 | match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { | ||||
2302 | return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, | ||||
2303 | Depth, Q); | ||||
2304 | } | ||||
2305 | |||||
2306 | return false; | ||||
2307 | } | ||||
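// A standalone sketch of two facts used above (hypothetical demo, not LLVM
// API): X & -X isolates the lowest set bit, so it is a power of two or zero,
// and a power of two AND'ed with anything is a power of two or zero.
#include <cassert>
#include <cstdint>
int main() {
  auto IsPow2OrZero = [](uint8_t V) { return (V & uint8_t(V - 1)) == 0; };
  for (unsigned X = 0; X < 256; ++X) {
    assert(IsPow2OrZero(uint8_t(X & -X)));   // Lowest set bit, or zero.
    assert(IsPow2OrZero(uint8_t(0x10 & X))); // Pow2 & anything: 0x10 or 0.
  }
}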
2308 | |||||
2309 | /// Test whether a GEP's result is known to be non-null. | ||||
2310 | /// | ||||
2311 | /// Uses properties inherent in a GEP to try to determine whether it is known | ||||
2312 | /// to be non-null. | ||||
2313 | /// | ||||
2314 | /// Currently this routine does not support vector GEPs. | ||||
2315 | static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, | ||||
2316 | const Query &Q) { | ||||
2317 | const Function *F = nullptr; | ||||
2318 | if (const Instruction *I = dyn_cast<Instruction>(GEP)) | ||||
2319 | F = I->getFunction(); | ||||
2320 | |||||
2321 | if (!GEP->isInBounds() || | ||||
2322 | NullPointerIsDefined(F, GEP->getPointerAddressSpace())) | ||||
2323 | return false; | ||||
2324 | |||||
2325 | // FIXME: Support vector-GEPs. | ||||
2326 | assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); | ||||
2327 | |||||
2328 | // If the base pointer is non-null, we cannot walk to a null address with an | ||||
2329 | // inbounds GEP in address space zero. | ||||
2330 | if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) | ||||
2331 | return true; | ||||
2332 | |||||
2333 | // Walk the GEP operands and see if any operand introduces a non-zero offset. | ||||
2334 | // If so, then the GEP cannot produce a null pointer, as doing so would | ||||
2335 | // inherently violate the inbounds contract within address space zero. | ||||
2336 | for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); | ||||
2337 | GTI != GTE; ++GTI) { | ||||
2338 | // Struct types are easy -- they must always be indexed by a constant. | ||||
2339 | if (StructType *STy = GTI.getStructTypeOrNull()) { | ||||
2340 | ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); | ||||
2341 | unsigned ElementIdx = OpC->getZExtValue(); | ||||
2342 | const StructLayout *SL = Q.DL.getStructLayout(STy); | ||||
2343 | uint64_t ElementOffset = SL->getElementOffset(ElementIdx); | ||||
2344 | if (ElementOffset > 0) | ||||
2345 | return true; | ||||
2346 | continue; | ||||
2347 | } | ||||
2348 | |||||
2349 | // If we have a zero-sized type, the index doesn't matter. Keep looping. | ||||
2350 | if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).isZero()) | ||||
2351 | continue; | ||||
2352 | |||||
2353 | // Fast path the constant operand case both for efficiency and so we don't | ||||
2354 | // increment Depth when just zipping down an all-constant GEP. | ||||
2355 | if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { | ||||
2356 | if (!OpC->isZero()) | ||||
2357 | return true; | ||||
2358 | continue; | ||||
2359 | } | ||||
2360 | |||||
2361 | // We post-increment Depth here because while isKnownNonZero increments it | ||||
2362 | // as well, when we pop back up that increment won't persist. We don't want | ||||
2363 | // to recurse 10k times just because we have 10k GEP operands. We don't | ||||
2364 | // bail completely out because we want to handle constant GEPs regardless | ||||
2365 | // of depth. | ||||
2366 | if (Depth++ >= MaxAnalysisRecursionDepth) | ||||
2367 | continue; | ||||
2368 | |||||
2369 | if (isKnownNonZero(GTI.getOperand(), Depth, Q)) | ||||
2370 | return true; | ||||
2371 | } | ||||
2372 | |||||
2373 | return false; | ||||
2374 | } | ||||
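// A standalone sketch of the struct-offset reasoning above (hypothetical
// demo, not LLVM API): only the first field sits at offset 0, so indexing a
// later field adds a non-zero offset, which is what lets an inbounds GEP in
// address space zero be proven non-null.
#include <cassert>
#include <cstddef>
struct Demo { int A; double B; char C; };
int main() {
  assert(offsetof(Demo, A) == 0); // Offset 0 proves nothing by itself.
  assert(offsetof(Demo, B) > 0);  // Non-zero element offset.
  assert(offsetof(Demo, C) > 0);
}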
2375 | |||||
2376 | static bool isKnownNonNullFromDominatingCondition(const Value *V, | ||||
2377 | const Instruction *CtxI, | ||||
2378 | const DominatorTree *DT) { | ||||
2379 | assert(!isa<Constant>(V) && "Called for constant?"); | ||||
2380 | |||||
2381 | if (!CtxI || !DT) | ||||
2382 | return false; | ||||
2383 | |||||
2384 | unsigned NumUsesExplored = 0; | ||||
2385 | for (const auto *U : V->users()) { | ||||
2386 | // Avoid massive lists | ||||
2387 | if (NumUsesExplored >= DomConditionsMaxUses) | ||||
2388 | break; | ||||
2389 | NumUsesExplored++; | ||||
2390 | |||||
2391 | // If the value is used as an argument to a call or invoke, then argument | ||||
2392 | // attributes may provide an answer about null-ness. | ||||
2393 | if (const auto *CB = dyn_cast<CallBase>(U)) | ||||
2394 | if (auto *CalledFunc = CB->getCalledFunction()) | ||||
2395 | for (const Argument &Arg : CalledFunc->args()) | ||||
2396 | if (CB->getArgOperand(Arg.getArgNo()) == V && | ||||
2397 | Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) && | ||||
2398 | DT->dominates(CB, CtxI)) | ||||
2399 | return true; | ||||
2400 | |||||
2401 | // If the value is used as a load/store pointer, then it must be non-null. | ||||
2402 | if (V == getLoadStorePointerOperand(U)) { | ||||
2403 | const Instruction *I = cast<Instruction>(U); | ||||
2404 | if (!NullPointerIsDefined(I->getFunction(), | ||||
2405 | V->getType()->getPointerAddressSpace()) && | ||||
2406 | DT->dominates(I, CtxI)) | ||||
2407 | return true; | ||||
2408 | } | ||||
2409 | |||||
2410 | // Consider only compare instructions uniquely controlling a branch | ||||
2411 | Value *RHS; | ||||
2412 | CmpInst::Predicate Pred; | ||||
2413 | if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS)))) | ||||
2414 | continue; | ||||
2415 | |||||
2416 | bool NonNullIfTrue; | ||||
2417 | if (cmpExcludesZero(Pred, RHS)) | ||||
2418 | NonNullIfTrue = true; | ||||
2419 | else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS)) | ||||
2420 | NonNullIfTrue = false; | ||||
2421 | else | ||||
2422 | continue; | ||||
2423 | |||||
2424 | SmallVector<const User *, 4> WorkList; | ||||
2425 | SmallPtrSet<const User *, 4> Visited; | ||||
2426 | for (const auto *CmpU : U->users()) { | ||||
2427 | assert(WorkList.empty() && "Should be!"); | ||||
2428 | if (Visited.insert(CmpU).second) | ||||
2429 | WorkList.push_back(CmpU); | ||||
2430 | |||||
2431 | while (!WorkList.empty()) { | ||||
2432 | auto *Curr = WorkList.pop_back_val(); | ||||
2433 | |||||
2434 | // If a user is an AND, add all its users to the work list. We only | ||||
2435 | // propagate the "pred != null" condition through AND because it is only | ||||
2436 | // correct to assume that all conditions of an AND are met in the true branch. | ||||
2437 | // TODO: Support similar logic of OR and EQ predicate? | ||||
2438 | if (NonNullIfTrue) | ||||
2439 | if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) { | ||||
2440 | for (const auto *CurrU : Curr->users()) | ||||
2441 | if (Visited.insert(CurrU).second) | ||||
2442 | WorkList.push_back(CurrU); | ||||
2443 | continue; | ||||
2444 | } | ||||
2445 | |||||
2446 | if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) { | ||||
2447 | assert(BI->isConditional() && "uses a comparison!"); | ||||
2448 | |||||
2449 | BasicBlock *NonNullSuccessor = | ||||
2450 | BI->getSuccessor(NonNullIfTrue ? 0 : 1); | ||||
2451 | BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); | ||||
2452 | if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) | ||||
2453 | return true; | ||||
2454 | } else if (NonNullIfTrue && isGuard(Curr) && | ||||
2455 | DT->dominates(cast<Instruction>(Curr), CtxI)) { | ||||
2456 | return true; | ||||
2457 | } | ||||
2458 | } | ||||
2459 | } | ||||
2460 | } | ||||
2461 | |||||
2462 | return false; | ||||
2463 | } | ||||
2464 | |||||
2465 | /// Does the 'Range' metadata (which must be a valid MD_range operand list) | ||||
2466 | /// ensure that the value it's attached to is never Value? 'RangeType' is | ||||
2467 | /// the type of the value described by the range. | ||||
2468 | static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) { | ||||
2469 | const unsigned NumRanges = Ranges->getNumOperands() / 2; | ||||
2470 | assert(NumRanges >= 1); | ||||
2471 | for (unsigned i = 0; i < NumRanges; ++i) { | ||||
2472 | ConstantInt *Lower = | ||||
2473 | mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0)); | ||||
2474 | ConstantInt *Upper = | ||||
2475 | mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1)); | ||||
2476 | ConstantRange Range(Lower->getValue(), Upper->getValue()); | ||||
2477 | if (Range.contains(Value)) | ||||
2478 | return false; | ||||
2479 | } | ||||
2480 | return true; | ||||
2481 | } | ||||
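// A standalone sketch of the check above (hypothetical demo, not LLVM API;
// it ignores wrapped ranges): !range operand pairs are half-open [Lo, Hi)
// intervals, and a value is excluded only if no pair contains it.
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>
static bool RangesExclude(const std::vector<std::pair<int64_t, int64_t>> &Rs,
                          int64_t Value) {
  for (const auto &R : Rs)
    if (Value >= R.first && Value < R.second) // Half-open containment test.
      return false;
  return true;
}
int main() {
  assert(RangesExclude({{1, 100}}, 0)); // Like !range !{i64 1, i64 100}.
  assert(!RangesExclude({{0, 10}}, 0)); // A range covering zero fails.
}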
2482 | |||||
2483 | /// Try to detect a recurrence that monotonically increases/decreases from a | ||||
2484 | /// non-zero starting value. These are common as induction variables. | ||||
2485 | static bool isNonZeroRecurrence(const PHINode *PN) { | ||||
2486 | BinaryOperator *BO = nullptr; | ||||
2487 | Value *Start = nullptr, *Step = nullptr; | ||||
2488 | const APInt *StartC, *StepC; | ||||
2489 | if (!matchSimpleRecurrence(PN, BO, Start, Step) || | ||||
2490 | !match(Start, m_APInt(StartC)) || StartC->isZero()) | ||||
2491 | return false; | ||||
2492 | |||||
2493 | switch (BO->getOpcode()) { | ||||
2494 | case Instruction::Add: | ||||
2495 | // Starting from non-zero and stepping away from zero can never wrap back | ||||
2496 | // to zero. | ||||
2497 | return BO->hasNoUnsignedWrap() || | ||||
2498 | (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) && | ||||
2499 | StartC->isNegative() == StepC->isNegative()); | ||||
2500 | case Instruction::Mul: | ||||
2501 | return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) && | ||||
2502 | match(Step, m_APInt(StepC)) && !StepC->isZero(); | ||||
2503 | case Instruction::Shl: | ||||
2504 | return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap(); | ||||
2505 | case Instruction::AShr: | ||||
2506 | case Instruction::LShr: | ||||
2507 | return BO->isExact(); | ||||
2508 | default: | ||||
2509 | return false; | ||||
2510 | } | ||||
2511 | } | ||||
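// A standalone sketch of the add case above (hypothetical demo, not LLVM
// API): a recurrence that starts non-zero and steps in the same signed
// direction, without wrapping, walks away from zero and never reaches it.
#include <cassert>
#include <cstdint>
int main() {
  int64_t IV = -3;   // Start: non-zero and negative.
  for (int i = 0; i < 16; ++i) {
    IV += -5;        // Step has the same sign; no wrap at these magnitudes.
    assert(IV != 0); // Monotonically moves away from zero.
  }
}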
2512 | |||||
2513 | static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth, | ||||
2514 | const Query &Q, unsigned BitWidth, Value *X, Value *Y, | ||||
2515 | bool NSW) { | ||||
2516 | KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); | ||||
2517 | KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); | ||||
2518 | |||||
2519 | // If X and Y are both non-negative (as signed values) then their sum is not | ||||
2520 | // zero unless both X and Y are zero. | ||||
2521 | if (XKnown.isNonNegative() && YKnown.isNonNegative()) | ||||
2522 | if (isKnownNonZero(Y, DemandedElts, Depth, Q) || | ||||
2523 | isKnownNonZero(X, DemandedElts, Depth, Q)) | ||||
2524 | return true; | ||||
2525 | |||||
2526 | // If X and Y are both negative (as signed values) then their sum is not | ||||
2527 | // zero unless both X and Y equal INT_MIN. | ||||
2528 | if (XKnown.isNegative() && YKnown.isNegative()) { | ||||
2529 | APInt Mask = APInt::getSignedMaxValue(BitWidth); | ||||
2530 | // The sign bit of X is set. If some other bit is set then X is not equal | ||||
2531 | // to INT_MIN. | ||||
2532 | if (XKnown.One.intersects(Mask)) | ||||
2533 | return true; | ||||
2534 | // The sign bit of Y is set. If some other bit is set then Y is not equal | ||||
2535 | // to INT_MIN. | ||||
2536 | if (YKnown.One.intersects(Mask)) | ||||
2537 | return true; | ||||
2538 | } | ||||
2539 | |||||
2540 | // The sum of a non-negative number and a power of two is not zero. | ||||
2541 | if (XKnown.isNonNegative() && | ||||
2542 | isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) | ||||
2543 | return true; | ||||
2544 | if (YKnown.isNonNegative() && | ||||
2545 | isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) | ||||
2546 | return true; | ||||
2547 | |||||
2548 | return KnownBits::computeForAddSub(/*Add*/ true, NSW, XKnown, YKnown) | ||||
2549 | .isNonZero(); | ||||
2550 | } | ||||
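// A standalone sketch of the first case above (hypothetical demo, not LLVM
// API): two signed-non-negative i8 values sum to zero modulo 256 only when
// both are zero, so one of them being non-zero makes the sum non-zero.
#include <cassert>
#include <cstdint>
int main() {
  for (int X = 0; X <= 127; ++X)     // All non-negative i8 values.
    for (int Y = 0; Y <= 127; ++Y)
      if (X != 0 || Y != 0)
        assert(uint8_t(X + Y) != 0); // Sum is in [1, 254]: never 0 mod 256.
}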
2551 | |||||
2552 | static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth, | ||||
2553 | const Query &Q, unsigned BitWidth, Value *X, | ||||
2554 | Value *Y) { | ||||
2555 | if (auto *C = dyn_cast<Constant>(X)) | ||||
2556 | if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Depth, Q)) | ||||
2557 | return true; | ||||
2558 | |||||
2559 | KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); | ||||
2560 | if (XKnown.isUnknown()) | ||||
2561 | return false; | ||||
2562 | KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); | ||||
2563 | // If X != Y then X - Y is non zero. | ||||
2564 | std::optional<bool> ne = KnownBits::ne(XKnown, YKnown); | ||||
2565 | // If we are unable to determine whether X != Y, computing the known bits of | ||||
2566 | // the sub expression won't help either, so just return here. | ||||
2567 | return ne && *ne; | ||||
2568 | } | ||||
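// A standalone sketch of the subtraction fact above (hypothetical demo, not
// LLVM API): when known bits alone prove X != Y, for example a known-one bit
// in X where Y has a known zero, X - Y cannot be zero.
#include <cassert>
#include <cstdint>
int main() {
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned Y = 0; Y < 256; ++Y)
      if ((X & 1) == 1 && (Y & 1) == 0) // Bit 0: known one vs. known zero.
        assert(uint8_t(X - Y) != 0);    // X != Y, so the i8 difference != 0.
}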
2569 | |||||
2570 | static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts, | ||||
2571 | unsigned Depth, const Query &Q, | ||||
2572 | const KnownBits &KnownVal) { | ||||
2573 | auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) { | ||||
2574 | switch (I->getOpcode()) { | ||||
2575 | case Instruction::Shl: | ||||
2576 | return Lhs.shl(Rhs); | ||||
2577 | case Instruction::LShr: | ||||
2578 | return Lhs.lshr(Rhs); | ||||
2579 | case Instruction::AShr: | ||||
2580 | return Lhs.ashr(Rhs); | ||||
2581 | default: | ||||
2582 | llvm_unreachable("Unknown Shift Opcode"); | ||||
2583 | } | ||||
2584 | }; | ||||
2585 | |||||
2586 | auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) { | ||||
2587 | switch (I->getOpcode()) { | ||||
2588 | case Instruction::Shl: | ||||
2589 | return Lhs.lshr(Rhs); | ||||
2590 | case Instruction::LShr: | ||||
2591 | case Instruction::AShr: | ||||
2592 | return Lhs.shl(Rhs); | ||||
2593 | default: | ||||
2594 | llvm_unreachable("Unknown Shift Opcode"); | ||||
2595 | } | ||||
2596 | }; | ||||
2597 | |||||
2598 | if (KnownVal.isUnknown()) | ||||
2599 | return false; | ||||
2600 | |||||
2601 | KnownBits KnownCnt = | ||||
2602 | computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2603 | APInt MaxShift = KnownCnt.getMaxValue(); | ||||
2604 | unsigned NumBits = KnownVal.getBitWidth(); | ||||
2605 | if (MaxShift.uge(NumBits)) | ||||
2606 | return false; | ||||
2607 | |||||
2608 | if (!ShiftOp(KnownVal.One, MaxShift).isZero()) | ||||
2609 | return true; | ||||
2610 | |||||
2611 | // If all of the bits shifted out are known to be zero, and Val is known | ||||
2612 | // non-zero then at least one non-zero bit must remain. | ||||
2613 | if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift) | ||||
2614 | .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) && | ||||
2615 | isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q)) | ||||
2616 | return true; | ||||
2617 | |||||
2618 | return false; | ||||
2619 | } | ||||
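// A standalone sketch of the second check above (hypothetical demo, not LLVM
// API): if every bit a shift could discard is known zero and the value is
// non-zero, then some set bit survives the shift.
#include <cassert>
#include <cstdint>
int main() {
  const unsigned MaxShift = 4;
  for (unsigned V = 16; V < 256; V += 16) // Non-zero, low 4 bits known zero.
    for (unsigned S = 0; S <= MaxShift; ++S)
      assert(uint8_t(V >> S) != 0);       // lshr by at most 4 keeps a bit set.
}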
2620 | |||||
2621 | /// Return true if the given value is known to be non-zero when defined. For | ||||
2622 | /// vectors, return true if every demanded element is known to be non-zero when | ||||
2623 | /// defined. For pointers, if the context instruction and dominator tree are | ||||
2624 | /// specified, perform context-sensitive analysis and return true if the | ||||
2625 | /// pointer couldn't possibly be null at the specified instruction. | ||||
2626 | /// Supports values with integer or pointer type and vectors of integers. | ||||
2627 | bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth, | ||||
2628 | const Query &Q) { | ||||
2629 | |||||
2630 | #ifndef NDEBUG | ||||
2631 | Type *Ty = V->getType(); | ||||
2632 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); | ||||
2633 | |||||
2634 | if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { | ||||
2635 | assert( | ||||
2636 | FVTy->getNumElements() == DemandedElts.getBitWidth() && | ||||
2637 | "DemandedElt width should equal the fixed vector number of elements"); | ||||
2638 | } else { | ||||
2639 | assert(DemandedElts == APInt(1, 1) && | ||||
2640 | "DemandedElt width should be 1 for scalars"); | ||||
2641 | } | ||||
2642 | #endif | ||||
2643 | |||||
2644 | if (auto *C = dyn_cast<Constant>(V)) { | ||||
2645 | if (C->isNullValue()) | ||||
2646 | return false; | ||||
2647 | if (isa<ConstantInt>(C)) | ||||
2648 | // Must be non-zero due to null test above. | ||||
2649 | return true; | ||||
2650 | |||||
2651 | // For constant vectors, check that all elements are undefined or known | ||||
2652 | // non-zero to determine that the whole vector is known non-zero. | ||||
2653 | if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) { | ||||
2654 | for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { | ||||
2655 | if (!DemandedElts[i]) | ||||
2656 | continue; | ||||
2657 | Constant *Elt = C->getAggregateElement(i); | ||||
2658 | if (!Elt || Elt->isNullValue()) | ||||
2659 | return false; | ||||
2660 | if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt)) | ||||
2661 | return false; | ||||
2662 | } | ||||
2663 | return true; | ||||
2664 | } | ||||
2665 | |||||
2666 | // A global variable in address space 0 is non-null unless it is extern weak | ||||
2667 | // or an absolute symbol reference. Other address spaces may have null as a | ||||
2668 | // valid address for a global, so we can't assume anything. | ||||
2669 | if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { | ||||
2670 | if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && | ||||
2671 | GV->getType()->getAddressSpace() == 0) | ||||
2672 | return true; | ||||
2673 | } | ||||
2674 | |||||
2675 | // For constant expressions, fall through to the Operator code below. | ||||
2676 | if (!isa<ConstantExpr>(V)) | ||||
2677 | return false; | ||||
2678 | } | ||||
2679 | |||||
2680 | if (auto *I = dyn_cast<Instruction>(V)) { | ||||
2681 | if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) { | ||||
2682 | // If the possible ranges don't contain zero, then the value is | ||||
2683 | // definitely non-zero. | ||||
2684 | if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { | ||||
2685 | const APInt ZeroValue(Ty->getBitWidth(), 0); | ||||
2686 | if (rangeMetadataExcludesValue(Ranges, ZeroValue)) | ||||
2687 | return true; | ||||
2688 | } | ||||
2689 | } | ||||
2690 | } | ||||
2691 | |||||
2692 | if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q)) | ||||
2693 | return true; | ||||
2694 | |||||
2695 | // Some of the tests below are recursive, so bail out if we hit the limit. | ||||
2696 | if (Depth++ >= MaxAnalysisRecursionDepth) | ||||
2697 | return false; | ||||
2698 | |||||
2699 | // Check for pointer simplifications. | ||||
2700 | |||||
2701 | if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) { | ||||
2702 | // Alloca never returns null, malloc might. | ||||
2703 | if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0) | ||||
2704 | return true; | ||||
2705 | |||||
2706 | // A byval or inalloca argument may still be null in an address space where | ||||
2707 | // null is a defined address. A nonnull argument is assumed never 0. | ||||
2708 | if (const Argument *A = dyn_cast<Argument>(V)) { | ||||
2709 | if (((A->hasPassPointeeByValueCopyAttr() && | ||||
2710 | !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) || | ||||
2711 | A->hasNonNullAttr())) | ||||
2712 | return true; | ||||
2713 | } | ||||
2714 | |||||
2715 | // A Load tagged with nonnull metadata is never null. | ||||
2716 | if (const LoadInst *LI = dyn_cast<LoadInst>(V)) | ||||
2717 | if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull)) | ||||
2718 | return true; | ||||
2719 | |||||
2720 | if (const auto *Call = dyn_cast<CallBase>(V)) { | ||||
2721 | if (Call->isReturnNonNull()) | ||||
2722 | return true; | ||||
2723 | if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true)) | ||||
2724 | return isKnownNonZero(RP, Depth, Q); | ||||
2725 | } | ||||
2726 | } | ||||
2727 | |||||
2728 | if (!isa<Constant>(V) && | ||||
2729 | isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) | ||||
2730 | return true; | ||||
2731 | |||||
2732 | const Operator *I = dyn_cast<Operator>(V); | ||||
2733 | if (!I) | ||||
2734 | return false; | ||||
2735 | |||||
2736 | unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); | ||||
2737 | switch (I->getOpcode()) { | ||||
2738 | case Instruction::GetElementPtr: | ||||
2739 | if (I->getType()->isPointerTy()) | ||||
2740 | return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q); | ||||
2741 | break; | ||||
2742 | case Instruction::BitCast: { | ||||
2743 | // We need to be a bit careful here. We can only peek through the bitcast | ||||
2744 | // if the scalar size of elements in the operand is smaller than, and divides, | ||||
2745 | // the scalar size they are casting to. Take three cases: | ||||
2746 | // | ||||
2747 | // 1) Unsafe: | ||||
2748 | // bitcast <2 x i16> %NonZero to <4 x i8> | ||||
2749 | // | ||||
2750 | // %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a | ||||
2751 | // <4 x i8> requires that all 4 i8 elements be non-zero, which isn't | ||||
2752 | // guaranteed (imagine just the sign bit set in the 2 i16 elements). | ||||
2753 | // | ||||
2754 | // 2) Unsafe: | ||||
2755 | // bitcast <4 x i3> %NonZero to <3 x i4> | ||||
2756 | // | ||||
2757 | // Even though the scalar size of the src (`i3`) is smaller than the | ||||
2758 | // scalar size of the dst (`i4`), because `i4` is not a multiple of `i3` | ||||
2759 | // it's possible for the `<3 x i4>` elements to be zero, because some | ||||
2760 | // elements in the destination don't contain any full src | ||||
2761 | // element. | ||||
2762 | // | ||||
2763 | // 3) Safe: | ||||
2764 | // bitcast <4 x i8> %NonZero to <2 x i16> | ||||
2765 | // | ||||
2766 | // This is always safe as non-zero in the 4 i8 elements implies | ||||
2767 | // non-zero in the combination of any two adjacent ones. Since i16 is a | ||||
2768 | // multiple of i8, each i16 is guaranteed to have 2 full i8 elements. | ||||
2769 | // This all implies the 2 i16 elements are non-zero. | ||||
2770 | Type *FromTy = I->getOperand(0)->getType(); | ||||
2771 | if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) && | ||||
2772 | (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0) | ||||
2773 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2774 | } break; | ||||
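// A standalone sketch of safe case 3 above (hypothetical demo, not LLVM
// API): if every narrow element is non-zero, a wide element assembled from
// whole narrow elements is non-zero as well.
#include <cassert>
#include <cstdint>
int main() {
  for (unsigned Lo = 1; Lo < 256; ++Lo)      // Both i8 halves non-zero.
    for (unsigned Hi = 1; Hi < 256; ++Hi)
      assert(uint16_t((Hi << 8) | Lo) != 0); // The combined i16 is non-zero.
}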
2775 | case Instruction::IntToPtr: | ||||
2776 | // Note that we have to take special care to avoid looking through | ||||
2777 | // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well | ||||
2778 | // as casts that can alter the value, e.g., AddrSpaceCasts. | ||||
2779 | if (!isa<ScalableVectorType>(I->getType()) && | ||||
2780 | Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= | ||||
2781 | Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) | ||||
2782 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2783 | break; | ||||
2784 | case Instruction::PtrToInt: | ||||
2785 | // Similar to int2ptr above, we can look through ptr2int here if the cast | ||||
2786 | // is a no-op or an extend and not a truncate. | ||||
2787 | if (!isa<ScalableVectorType>(I->getType()) && | ||||
2788 | Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= | ||||
2789 | Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) | ||||
2790 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2791 | break; | ||||
2792 | case Instruction::Sub: | ||||
2793 | return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), | ||||
2794 | I->getOperand(1)); | ||||
2795 | case Instruction::Or: | ||||
2796 | // X | Y != 0 if X != 0 or Y != 0. | ||||
2797 | return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) || | ||||
2798 | isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2799 | case Instruction::SExt: | ||||
2800 | case Instruction::ZExt: | ||||
2801 | // ext X != 0 if X != 0. | ||||
2802 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2803 | |||||
2804 | case Instruction::Shl: { | ||||
2805 | // shl nsw/nuw can't remove any non-zero bits. | ||||
2806 | const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); | ||||
2807 | if (Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO)) | ||||
2808 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2809 | |||||
2810 | // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined | ||||
2811 | // if the lowest bit is shifted off the end. | ||||
2812 | KnownBits Known(BitWidth); | ||||
2813 | computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q); | ||||
2814 | if (Known.One[0]) | ||||
2815 | return true; | ||||
2816 | |||||
2817 | return isNonZeroShift(I, DemandedElts, Depth, Q, Known); | ||||
2818 | } | ||||
2819 | case Instruction::LShr: | ||||
2820 | case Instruction::AShr: { | ||||
2821 | // shr exact can only shift out zero bits. | ||||
2822 | const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); | ||||
2823 | if (BO->isExact()) | ||||
2824 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2825 | |||||
2826 | // shr X, Y != 0 if X is negative. Note that the value of the shift is not | ||||
2827 | // defined if the sign bit is shifted off the end. | ||||
2828 | KnownBits Known = | ||||
2829 | computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2830 | if (Known.isNegative()) | ||||
2831 | return true; | ||||
2832 | |||||
2833 | return isNonZeroShift(I, DemandedElts, Depth, Q, Known); | ||||
2834 | } | ||||
2835 | case Instruction::UDiv: | ||||
2836 | case Instruction::SDiv: | ||||
2837 | // X / Y | ||||
2838 | // div exact can only produce a zero if the dividend is zero. | ||||
2839 | if (cast<PossiblyExactOperator>(I)->isExact()) | ||||
2840 | return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2841 | if (I->getOpcode() == Instruction::UDiv) { | ||||
2842 | std::optional<bool> XUgeY; | ||||
2843 | KnownBits XKnown = | ||||
2844 | computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2845 | if (!XKnown.isUnknown()) { | ||||
2846 | KnownBits YKnown = | ||||
2847 | computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2848 | // If X u>= Y then the div is non-zero (0/0 is UB). | ||||
2849 | XUgeY = KnownBits::uge(XKnown, YKnown); | ||||
2850 | } | ||||
2851 | // If X is totally unknown or X u< Y, we won't be able to prove non-zero | ||||
2852 | // with computeKnownBits, so just return early. | ||||
2853 | return XUgeY && *XUgeY; | ||||
2854 | } | ||||
2855 | break; | ||||
2856 | case Instruction::Add: { | ||||
2857 | // X + Y. | ||||
2858 | |||||
2859 | // If the add has the nuw flag, then the result is non-zero if either X or Y | ||||
2860 | // is non-zero. | ||||
2861 | auto *BO = cast<OverflowingBinaryOperator>(V); | ||||
2862 | if (Q.IIQ.hasNoUnsignedWrap(BO)) | ||||
2863 | return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) || | ||||
2864 | isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2865 | |||||
2866 | return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), | ||||
2867 | I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO)); | ||||
2868 | } | ||||
2869 | case Instruction::Mul: { | ||||
2870 | // If X and Y are non-zero then so is X * Y as long as the multiplication | ||||
2871 | // does not overflow. | ||||
2872 | const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); | ||||
2873 | if (Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) | ||||
2874 | return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q) && | ||||
2875 | isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2876 | |||||
2877 | // If either X or Y is odd, then if the other is non-zero the result can't | ||||
2878 | // be zero. | ||||
2879 | KnownBits XKnown = | ||||
2880 | computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2881 | if (XKnown.One[0]) | ||||
2882 | return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2883 | |||||
2884 | KnownBits YKnown = | ||||
2885 | computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2886 | if (YKnown.One[0]) | ||||
2887 | return XKnown.isNonZero() || | ||||
2888 | isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2889 | |||||
2890 | return KnownBits::mul(XKnown, YKnown).isNonZero(); | ||||
2891 | } | ||||
2892 | case Instruction::Select: | ||||
2893 | // (C ? X : Y) != 0 if X != 0 and Y != 0. | ||||
2894 | if (isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) && | ||||
2895 | isKnownNonZero(I->getOperand(2), DemandedElts, Depth, Q)) | ||||
2896 | return true; | ||||
2897 | break; | ||||
2898 | case Instruction::PHI: { | ||||
2899 | auto *PN = cast<PHINode>(I); | ||||
2900 | if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN)) | ||||
2901 | return true; | ||||
2902 | |||||
2903 | // Check if all incoming values are non-zero using recursion. | ||||
2904 | Query RecQ = Q; | ||||
2905 | unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); | ||||
2906 | return llvm::all_of(PN->operands(), [&](const Use &U) { | ||||
2907 | if (U.get() == PN) | ||||
2908 | return true; | ||||
2909 | RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); | ||||
2910 | return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ); | ||||
2911 | }); | ||||
2912 | } | ||||
2913 | case Instruction::ExtractElement: | ||||
2914 | if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) { | ||||
2915 | const Value *Vec = EEI->getVectorOperand(); | ||||
2916 | const Value *Idx = EEI->getIndexOperand(); | ||||
2917 | auto *CIdx = dyn_cast<ConstantInt>(Idx); | ||||
2918 | if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { | ||||
2919 | unsigned NumElts = VecTy->getNumElements(); | ||||
2920 | APInt DemandedVecElts = APInt::getAllOnes(NumElts); | ||||
2921 | if (CIdx && CIdx->getValue().ult(NumElts)) | ||||
2922 | DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); | ||||
2923 | return isKnownNonZero(Vec, DemandedVecElts, Depth, Q); | ||||
2924 | } | ||||
2925 | } | ||||
2926 | break; | ||||
2927 | case Instruction::Freeze: | ||||
2928 | return isKnownNonZero(I->getOperand(0), Depth, Q) && | ||||
2929 | isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, | ||||
2930 | Depth); | ||||
2931 | case Instruction::Call: | ||||
2932 | if (auto *II = dyn_cast<IntrinsicInst>(I)) { | ||||
2933 | switch (II->getIntrinsicID()) { | ||||
2934 | case Intrinsic::sshl_sat: | ||||
2935 | case Intrinsic::ushl_sat: | ||||
2936 | case Intrinsic::abs: | ||||
2937 | case Intrinsic::bitreverse: | ||||
2938 | case Intrinsic::bswap: | ||||
2939 | case Intrinsic::ctpop: | ||||
2940 | return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q); | ||||
2941 | case Intrinsic::ssub_sat: | ||||
2942 | return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, | ||||
2943 | II->getArgOperand(0), II->getArgOperand(1)); | ||||
2944 | case Intrinsic::sadd_sat: | ||||
2945 | return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, | ||||
2946 | II->getArgOperand(0), II->getArgOperand(1), | ||||
2947 | /*NSW*/ true); | ||||
2948 | case Intrinsic::umax: | ||||
2949 | case Intrinsic::uadd_sat: | ||||
2950 | return isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q) || | ||||
2951 | isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q); | ||||
2952 | case Intrinsic::smin: | ||||
2953 | case Intrinsic::smax: { | ||||
2954 | auto KnownOpImpliesNonZero = [&](const KnownBits &K) { | ||||
2955 | return II->getIntrinsicID() == Intrinsic::smin | ||||
2956 | ? K.isNegative() | ||||
2957 | : K.isStrictlyPositive(); | ||||
2958 | }; | ||||
2959 | KnownBits XKnown = | ||||
2960 | computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q); | ||||
2961 | if (KnownOpImpliesNonZero(XKnown)) | ||||
2962 | return true; | ||||
2963 | KnownBits YKnown = | ||||
2964 | computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q); | ||||
2965 | if (KnownOpImpliesNonZero(YKnown)) | ||||
2966 | return true; | ||||
2967 | |||||
2968 | if (XKnown.isNonZero() && YKnown.isNonZero()) | ||||
2969 | return true; | ||||
2970 | } | ||||
2971 | [[fallthrough]]; | ||||
2972 | case Intrinsic::umin: | ||||
2973 | return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q) && | ||||
2974 | isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q); | ||||
2975 | case Intrinsic::cttz: | ||||
2976 | return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) | ||||
2977 | .Zero[0]; | ||||
2978 | case Intrinsic::ctlz: | ||||
2979 | return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) | ||||
2980 | .isNonNegative(); | ||||
2981 | case Intrinsic::fshr: | ||||
2982 | case Intrinsic::fshl: | ||||
2983 | // If Op0 == Op1, this is a rotate. rotate(x, y) != 0 iff x != 0. | ||||
2984 | if (II->getArgOperand(0) == II->getArgOperand(1)) | ||||
2985 | return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q); | ||||
2986 | break; | ||||
2987 | case Intrinsic::vscale: | ||||
2988 | return true; | ||||
2989 | default: | ||||
2990 | break; | ||||
2991 | } | ||||
2992 | } | ||||
2993 | break; | ||||
2994 | } | ||||
2995 | |||||
2996 | KnownBits Known(BitWidth); | ||||
2997 | computeKnownBits(V, DemandedElts, Known, Depth, Q); | ||||
2998 | return Known.One != 0; | ||||
2999 | } | ||||
3000 | |||||
3001 | bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) { | ||||
3002 | auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
3003 | APInt DemandedElts = | ||||
3004 | FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); | ||||
3005 | return isKnownNonZero(V, DemandedElts, Depth, Q); | ||||
3006 | } | ||||
3007 | |||||
3008 | /// If the pair of operators are the same invertible function, return the | ||||
3009 | /// operands of the function corresponding to each input. Otherwise, | ||||
3010 | /// return std::nullopt. An invertible function is one that is 1-to-1 and maps | ||||
3011 | /// every input value to exactly one output value. This is equivalent to | ||||
3012 | /// saying that Op1 and Op2 are equal exactly when the specified pair of | ||||
3013 | /// operands are equal (except that Op1 and Op2 may be poison more often). | ||||
3014 | static std::optional<std::pair<Value*, Value*>> | ||||
3015 | getInvertibleOperands(const Operator *Op1, | ||||
3016 | const Operator *Op2) { | ||||
3017 | if (Op1->getOpcode() != Op2->getOpcode()) | ||||
3018 | return std::nullopt; | ||||
3019 | |||||
3020 | auto getOperands = [&](unsigned OpNum) -> auto { | ||||
3021 | return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum)); | ||||
3022 | }; | ||||
3023 | |||||
3024 | switch (Op1->getOpcode()) { | ||||
3025 | default: | ||||
3026 | break; | ||||
3027 | case Instruction::Add: | ||||
3028 | case Instruction::Sub: | ||||
3029 | if (Op1->getOperand(0) == Op2->getOperand(0)) | ||||
3030 | return getOperands(1); | ||||
3031 | if (Op1->getOperand(1) == Op2->getOperand(1)) | ||||
3032 | return getOperands(0); | ||||
3033 | break; | ||||
3034 | case Instruction::Mul: { | ||||
3035 | // Invertible if A * B == (A * B) mod 2^N, where A and B are integers | ||||
3036 | // and N is the bitwidth. The nsw case is non-obvious, but proven by | ||||
3037 | // alive2: https://alive2.llvm.org/ce/z/Z6D5qK | ||||
3038 | auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); | ||||
3039 | auto *OBO2 = cast<OverflowingBinaryOperator>(Op2); | ||||
3040 | if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && | ||||
3041 | (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) | ||||
3042 | break; | ||||
3043 | |||||
3044 | // Assume operand order has been canonicalized | ||||
3045 | if (Op1->getOperand(1) == Op2->getOperand(1) && | ||||
3046 | isa<ConstantInt>(Op1->getOperand(1)) && | ||||
3047 | !cast<ConstantInt>(Op1->getOperand(1))->isZero()) | ||||
3048 | return getOperands(0); | ||||
3049 | break; | ||||
3050 | } | ||||
3051 | case Instruction::Shl: { | ||||
3052 | // Same as multiplies, with the difference that we don't need to check | ||||
3053 | // for a non-zero multiply. Shifts always multiply by non-zero. | ||||
3054 | auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); | ||||
3055 | auto *OBO2 = cast<OverflowingBinaryOperator>(Op2); | ||||
3056 | if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && | ||||
3057 | (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) | ||||
3058 | break; | ||||
3059 | |||||
3060 | if (Op1->getOperand(1) == Op2->getOperand(1)) | ||||
3061 | return getOperands(0); | ||||
3062 | break; | ||||
3063 | } | ||||
3064 | case Instruction::AShr: | ||||
3065 | case Instruction::LShr: { | ||||
3066 | auto *PEO1 = cast<PossiblyExactOperator>(Op1); | ||||
3067 | auto *PEO2 = cast<PossiblyExactOperator>(Op2); | ||||
3068 | if (!PEO1->isExact() || !PEO2->isExact()) | ||||
3069 | break; | ||||
3070 | |||||
3071 | if (Op1->getOperand(1) == Op2->getOperand(1)) | ||||
3072 | return getOperands(0); | ||||
3073 | break; | ||||
3074 | } | ||||
3075 | case Instruction::SExt: | ||||
3076 | case Instruction::ZExt: | ||||
3077 | if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType()) | ||||
3078 | return getOperands(0); | ||||
3079 | break; | ||||
3080 | case Instruction::PHI: { | ||||
3081 | const PHINode *PN1 = cast<PHINode>(Op1); | ||||
3082 | const PHINode *PN2 = cast<PHINode>(Op2); | ||||
3083 | |||||
3084 | // If PN1 and PN2 are both recurrences, can we prove the entire recurrences | ||||
3085 | // are a single invertible function of the start values? Note that repeated | ||||
3086 | // application of an invertible function is also invertible. | ||||
3087 | BinaryOperator *BO1 = nullptr; | ||||
3088 | Value *Start1 = nullptr, *Step1 = nullptr; | ||||
3089 | BinaryOperator *BO2 = nullptr; | ||||
3090 | Value *Start2 = nullptr, *Step2 = nullptr; | ||||
3091 | if (PN1->getParent() != PN2->getParent() || | ||||
3092 | !matchSimpleRecurrence(PN1, BO1, Start1, Step1) || | ||||
3093 | !matchSimpleRecurrence(PN2, BO2, Start2, Step2)) | ||||
3094 | break; | ||||
3095 | |||||
3096 | auto Values = getInvertibleOperands(cast<Operator>(BO1), | ||||
3097 | cast<Operator>(BO2)); | ||||
3098 | if (!Values) | ||||
3099 | break; | ||||
3100 | |||||
3101 | // We have to be careful of mutually defined recurrences here. Ex: | ||||
3102 | // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V | ||||
3103 | // * X_i = Y_i = X_(i-1) OP Y_(i-1) | ||||
3104 | // The invertibility of these is complicated, and not worth reasoning | ||||
3105 | // about (yet?). | ||||
3106 | if (Values->first != PN1 || Values->second != PN2) | ||||
3107 | break; | ||||
3108 | |||||
3109 | return std::make_pair(Start1, Start2); | ||||
3110 | } | ||||
3111 | } | ||||
3112 | return std::nullopt; | ||||
3113 | } | ||||
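// A standalone sketch of why no-wrap multiplies are invertible (hypothetical
// demo, not LLVM API): when A * C and B * C do not wrap and C is non-zero,
// equal products force equal multiplicands, since (B - A) * C cannot be zero.
#include <cassert>
#include <cstdint>
int main() {
  const uint64_t C = 12;      // Non-zero constant multiplier.
  for (uint64_t A = 0; A < 500; ++A)
    for (uint64_t B = A + 1; B < 500; ++B)
      assert(A * C != B * C); // No wrap at these magnitudes.
}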
3114 | |||||
3115 | /// Return true if V2 == V1 + X, where X is known non-zero. | ||||
3116 | static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth, | ||||
3117 | const Query &Q) { | ||||
3118 | const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); | ||||
3119 | if (!BO || BO->getOpcode() != Instruction::Add) | ||||
3120 | return false; | ||||
3121 | Value *Op = nullptr; | ||||
3122 | if (V2 == BO->getOperand(0)) | ||||
3123 | Op = BO->getOperand(1); | ||||
3124 | else if (V2 == BO->getOperand(1)) | ||||
3125 | Op = BO->getOperand(0); | ||||
3126 | else | ||||
3127 | return false; | ||||
3128 | return isKnownNonZero(Op, Depth + 1, Q); | ||||
3129 | } | ||||
3130 | |||||
3131 | /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and | ||||
3132 | /// the multiplication is nuw or nsw. | ||||
3133 | static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth, | ||||
3134 | const Query &Q) { | ||||
3135 | if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) { | ||||
3136 | const APInt *C; | ||||
3137 | return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) && | ||||
3138 | (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && | ||||
3139 | !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q); | ||||
3140 | } | ||||
3141 | return false; | ||||
3142 | } | ||||
3143 | |||||
3144 | /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and | ||||
3145 | /// the shift is nuw or nsw. | ||||
3146 | static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth, | ||||
3147 | const Query &Q) { | ||||
3148 | if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) { | ||||
3149 | const APInt *C; | ||||
3150 | return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) && | ||||
3151 | (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && | ||||
3152 | !C->isZero() && isKnownNonZero(V1, Depth + 1, Q); | ||||
3153 | } | ||||
3154 | return false; | ||||
3155 | } | ||||
3156 | |||||
3157 | static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, | ||||
3158 | unsigned Depth, const Query &Q) { | ||||
3159 | // Check that the two PHIs are in the same block. | ||||
3160 | if (PN1->getParent() != PN2->getParent()) | ||||
3161 | return false; | ||||
3162 | |||||
3163 | SmallPtrSet<const BasicBlock *, 8> VisitedBBs; | ||||
3164 | bool UsedFullRecursion = false; | ||||
3165 | for (const BasicBlock *IncomBB : PN1->blocks()) { | ||||
3166 | if (!VisitedBBs.insert(IncomBB).second) | ||||
3167 | continue; // Don't reprocess blocks that we have dealt with already. | ||||
3168 | const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB); | ||||
3169 | const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB); | ||||
3170 | const APInt *C1, *C2; | ||||
3171 | if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2) | ||||
3172 | continue; | ||||
3173 | |||||
3174 | // Only one pair of phi operands is allowed for full recursion. | ||||
3175 | if (UsedFullRecursion) | ||||
3176 | return false; | ||||
3177 | |||||
3178 | Query RecQ = Q; | ||||
3179 | RecQ.CxtI = IncomBB->getTerminator(); | ||||
3180 | if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ)) | ||||
3181 | return false; | ||||
3182 | UsedFullRecursion = true; | ||||
3183 | } | ||||
3184 | return true; | ||||
3185 | } | ||||
3186 | |||||
3187 | /// Return true if it is known that V1 != V2. | ||||
3188 | static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth, | ||||
3189 | const Query &Q) { | ||||
3190 | if (V1 == V2) | ||||
3191 | return false; | ||||
3192 | if (V1->getType() != V2->getType()) | ||||
3193 | // We can't look through casts yet. | ||||
3194 | return false; | ||||
3195 | |||||
3196 | if (Depth >= MaxAnalysisRecursionDepth) | ||||
3197 | return false; | ||||
3198 | |||||
3199 | // See if we can recurse through (exactly one of) our operands. This | ||||
3200 | // requires our operation be 1-to-1 and map every input value to exactly | ||||
3201 | // one output value. Such an operation is invertible. | ||||
3202 | auto *O1 = dyn_cast<Operator>(V1); | ||||
3203 | auto *O2 = dyn_cast<Operator>(V2); | ||||
3204 | if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) { | ||||
3205 | if (auto Values = getInvertibleOperands(O1, O2)) | ||||
3206 | return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q); | ||||
3207 | |||||
3208 | if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) { | ||||
3209 | const PHINode *PN2 = cast<PHINode>(V2); | ||||
3210 | // FIXME: This is missing a generalization to handle the case where one is | ||||
3211 | // a PHI and the other isn't. | ||||
3212 | if (isNonEqualPHIs(PN1, PN2, Depth, Q)) | ||||
3213 | return true; | ||||
3214 | } | ||||
3215 | } | ||||
3216 | |||||
3217 | if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q)) | ||||
3218 | return true; | ||||
3219 | |||||
3220 | if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q)) | ||||
3221 | return true; | ||||
3222 | |||||
3223 | if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q)) | ||||
3224 | return true; | ||||
3225 | |||||
3226 | if (V1->getType()->isIntOrIntVectorTy()) { | ||||
3227 | // Are any known bits in V1 contradictory to known bits in V2? If V1 | ||||
3228 | // has a known zero where V2 has a known one, they must not be equal. | ||||
3229 | KnownBits Known1 = computeKnownBits(V1, Depth, Q); | ||||
3230 | KnownBits Known2 = computeKnownBits(V2, Depth, Q); | ||||
3231 | |||||
3232 | if (Known1.Zero.intersects(Known2.One) || | ||||
3233 | Known2.Zero.intersects(Known1.One)) | ||||
3234 | return true; | ||||
3235 | } | ||||
3236 | return false; | ||||
3237 | } | ||||
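// A standalone sketch of the known-bits contradiction above (hypothetical
// demo, not LLVM API): if V1 has a known zero in a position where V2 has a
// known one, the two values differ in that bit and so cannot be equal.
#include <cassert>
int main() {
  for (unsigned V1 = 0; V1 < 256; ++V1)
    for (unsigned V2 = 0; V2 < 256; ++V2)
      if ((V1 & 0x08) == 0 && (V2 & 0x08) != 0) // Bit 3: zero vs. one.
        assert(V1 != V2);                       // They disagree in bit 3.
}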
3238 | |||||
3239 | /// Return true if 'V & Mask' is known to be zero. We use this predicate to | ||||
3240 | /// simplify operations downstream. Mask is known to be zero for bits that V | ||||
3241 | /// cannot have. | ||||
3242 | /// | ||||
3243 | /// This function is defined on values with integer type, values with pointer | ||||
3244 | /// type, and vectors of integers. In the case | ||||
3245 | /// where V is a vector, the mask, known zero, and known one values are the | ||||
3246 | /// same width as the vector element, and the bit is set only if it is true | ||||
3247 | /// for all of the elements in the vector. | ||||
3248 | bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, | ||||
3249 | const Query &Q) { | ||||
3250 | KnownBits Known(Mask.getBitWidth()); | ||||
3251 | computeKnownBits(V, Known, Depth, Q); | ||||
3252 | return Mask.isSubsetOf(Known.Zero); | ||||
3253 | } | ||||
3254 | |||||
3255 | // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow). | ||||
3256 | // Returns the input and lower/upper bounds. | ||||
3257 | static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, | ||||
3258 | const APInt *&CLow, const APInt *&CHigh) { | ||||
3259 | assert(isa<Operator>(Select) && | ||||
3260 | cast<Operator>(Select)->getOpcode() == Instruction::Select && | ||||
3261 | "Input should be a Select!"); | ||||
3262 | |||||
3263 | const Value *LHS = nullptr, *RHS = nullptr; | ||||
3264 | SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor; | ||||
3265 | if (SPF != SPF_SMAX && SPF != SPF_SMIN) | ||||
3266 | return false; | ||||
3267 | |||||
3268 | if (!match(RHS, m_APInt(CLow))) | ||||
3269 | return false; | ||||
3270 | |||||
3271 | const Value *LHS2 = nullptr, *RHS2 = nullptr; | ||||
3272 | SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor; | ||||
3273 | if (getInverseMinMaxFlavor(SPF) != SPF2) | ||||
3274 | return false; | ||||
3275 | |||||
3276 | if (!match(RHS2, m_APInt(CHigh))) | ||||
3277 | return false; | ||||
3278 | |||||
3279 | if (SPF == SPF_SMIN) | ||||
3280 | std::swap(CLow, CHigh); | ||||
3281 | |||||
3282 | In = LHS2; | ||||
3283 | return CLow->sle(*CHigh); | ||||
3284 | } | ||||
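| // Example of the matched pattern (hypothetical IR sketch): | ||||
| //   %t = select (icmp slt %in, 100), %in, 100   ; smin(%in, 100) | ||||
| //   %v = select (icmp sgt %t, -50), %t, -50     ; smax(smin(%in, 100), -50) | ||||
| // matches with In = %in, CLow = -50, CHigh = 100, and -50 sle 100 holds. | ||||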
3285 | |||||
3286 | static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II, | ||||
3287 | const APInt *&CLow, | ||||
3288 | const APInt *&CHigh) { | ||||
3289 | assert((II->getIntrinsicID() == Intrinsic::smin || | ||||
3290 | II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax"); | ||||
3291 | |||||
3292 | Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID()); | ||||
3293 | auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0)); | ||||
3294 | if (!InnerII || InnerII->getIntrinsicID() != InverseID || | ||||
3295 | !match(II->getArgOperand(1), m_APInt(CLow)) || | ||||
3296 | !match(InnerII->getArgOperand(1), m_APInt(CHigh))) | ||||
3297 | return false; | ||||
3298 | |||||
3299 | if (II->getIntrinsicID() == Intrinsic::smin) | ||||
3300 | std::swap(CLow, CHigh); | ||||
3301 | return CLow->sle(*CHigh); | ||||
3302 | } | ||||
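| // The intrinsic form of the same clamp (illustrative): | ||||
| //   %t = call i32 @llvm.smin.i32(i32 %in, i32 100) | ||||
| //   %v = call i32 @llvm.smax.i32(i32 %t, i32 -50) | ||||
| // yields CLow = -50 and CHigh = 100. | ||||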
3303 | |||||
3304 | /// For vector constants, loop over the elements and find the constant with the | ||||
3305 | /// minimum number of sign bits. Return 0 if the value is not a vector constant | ||||
3306 | /// or if any element was not analyzed; otherwise, return the count for the | ||||
3307 | /// element with the minimum number of sign bits. | ||||
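| /// For example (made-up constant): for <2 x i16> <i16 4, i16 -1>, element | ||||
| /// 4 has 13 sign bits and element -1 has 16, so 13 is returned. | ||||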
3308 | static unsigned computeNumSignBitsVectorConstant(const Value *V, | ||||
3309 | const APInt &DemandedElts, | ||||
3310 | unsigned TyBits) { | ||||
3311 | const auto *CV = dyn_cast<Constant>(V); | ||||
3312 | if (!CV || !isa<FixedVectorType>(CV->getType())) | ||||
3313 | return 0; | ||||
3314 | |||||
3315 | unsigned MinSignBits = TyBits; | ||||
3316 | unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements(); | ||||
3317 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
3318 | if (!DemandedElts[i]) | ||||
3319 | continue; | ||||
3320 | // If we find a non-ConstantInt, bail out. | ||||
3321 | auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); | ||||
3322 | if (!Elt) | ||||
3323 | return 0; | ||||
3324 | |||||
3325 | MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); | ||||
3326 | } | ||||
3327 | |||||
3328 | return MinSignBits; | ||||
3329 | } | ||||
3330 | |||||
3331 | static unsigned ComputeNumSignBitsImpl(const Value *V, | ||||
3332 | const APInt &DemandedElts, | ||||
3333 | unsigned Depth, const Query &Q); | ||||
3334 | |||||
3335 | static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, | ||||
3336 | unsigned Depth, const Query &Q) { | ||||
3337 | unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q); | ||||
3338 | assert(Result > 0 && "At least one sign bit needs to be present!"); | ||||
3339 | return Result; | ||||
3340 | } | ||||
3341 | |||||
3342 | /// Return the number of times the sign bit of the register is replicated into | ||||
3343 | /// the other bits. We know that at least 1 bit is always equal to the sign bit | ||||
3344 | /// (itself), but other cases can give us information. For example, immediately | ||||
3345 | /// after an "ashr X, 2", we know that the top 3 bits are all equal to each | ||||
3346 | /// other, so we return 3. For vectors, return the number of sign bits for the | ||||
3347 | /// vector element with the minimum number of known sign bits of the demanded | ||||
3348 | /// elements in the vector specified by DemandedElts. | ||||
3349 | static unsigned ComputeNumSignBitsImpl(const Value *V, | ||||
3350 | const APInt &DemandedElts, | ||||
3351 | unsigned Depth, const Query &Q) { | ||||
3352 | Type *Ty = V->getType(); | ||||
3353 | #ifndef NDEBUG | ||||
3354 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); | ||||
3355 | |||||
3356 | if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { | ||||
3357 | assert( | ||||
3358 | FVTy->getNumElements() == DemandedElts.getBitWidth() && | ||||
3359 | "DemandedElt width should equal the fixed vector number of elements"); | ||||
3360 | } else { | ||||
3361 | assert(DemandedElts == APInt(1, 1) && | ||||
3362 | "DemandedElt width should be 1 for scalars"); | ||||
3363 | } | ||||
3364 | #endif | ||||
3365 | |||||
3366 | // We return the minimum number of sign bits that are guaranteed to be present | ||||
3367 | // in V, so for undef we have to conservatively return 1. We don't have the | ||||
3368 | // same behavior for poison though -- that's a FIXME today. | ||||
3369 | |||||
3370 | Type *ScalarTy = Ty->getScalarType(); | ||||
3371 | unsigned TyBits = ScalarTy->isPointerTy() ? | ||||
3372 | Q.DL.getPointerTypeSizeInBits(ScalarTy) : | ||||
3373 | Q.DL.getTypeSizeInBits(ScalarTy); | ||||
3374 | |||||
3375 | unsigned Tmp, Tmp2; | ||||
3376 | unsigned FirstAnswer = 1; | ||||
3377 | |||||
3378 | // Note that ConstantInt is handled by the general computeKnownBits case | ||||
3379 | // below. | ||||
3380 | |||||
3381 | if (Depth == MaxAnalysisRecursionDepth) | ||||
3382 | return 1; | ||||
3383 | |||||
3384 | if (auto *U = dyn_cast<Operator>(V)) { | ||||
3385 | switch (Operator::getOpcode(V)) { | ||||
3386 | default: break; | ||||
3387 | case Instruction::SExt: | ||||
3388 | Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
3389 | return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp; | ||||
3390 | |||||
3391 | case Instruction::SDiv: { | ||||
3392 | const APInt *Denominator; | ||||
3393 | // sdiv X, C -> adds log(C) sign bits. | ||||
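| // e.g. (illustrative): sdiv i16 %x, 16 where %x has 3 known sign bits | ||||
| // yields min(16, 3 + logBase2(16)) = 7 sign bits. | ||||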
3394 | if (match(U->getOperand(1), m_APInt(Denominator))) { | ||||
3395 | |||||
3396 | // Ignore non-positive denominator. | ||||
3397 | if (!Denominator->isStrictlyPositive()) | ||||
3398 | break; | ||||
3399 | |||||
3400 | // Calculate the incoming numerator bits. | ||||
3401 | unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3402 | |||||
3403 | // Add floor(log(C)) bits to the numerator bits. | ||||
3404 | return std::min(TyBits, NumBits + Denominator->logBase2()); | ||||
3405 | } | ||||
3406 | break; | ||||
3407 | } | ||||
3408 | |||||
3409 | case Instruction::SRem: { | ||||
3410 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3411 | |||||
3412 | const APInt *Denominator; | ||||
3413 | // srem X, C -> we know that the result is within [-C+1,C) when C is a | ||||
3414 | // positive constant. This lets us put a lower bound on the number of sign | ||||
3415 | // bits. | ||||
3416 | if (match(U->getOperand(1), m_APInt(Denominator))) { | ||||
3417 | |||||
3418 | // Ignore non-positive denominator. | ||||
3419 | if (Denominator->isStrictlyPositive()) { | ||||
3420 | // Calculate the leading sign bit constraints by examining the | ||||
3421 | // denominator. Given that the denominator is positive, there are two | ||||
3422 | // cases: | ||||
3423 | // | ||||
3424 | // 1. The numerator is positive. The result range is [0,C) and | ||||
3425 | // [0,C) u< (1 << ceilLogBase2(C)). | ||||
3426 | // | ||||
3427 | // 2. The numerator is negative. Then the result range is (-C,0] and | ||||
3428 | // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). | ||||
3429 | // | ||||
3430 | // Thus a lower bound on the number of sign bits is `TyBits - | ||||
3431 | // ceilLogBase2(C)`. | ||||
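| // Worked example (hypothetical): srem i8 %x, 4 has a result in (-4, 4), | ||||
| // so at least 8 - ceilLogBase2(4) = 6 sign bits are guaranteed. | ||||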
3432 | |||||
3433 | unsigned ResBits = TyBits - Denominator->ceilLogBase2(); | ||||
3434 | Tmp = std::max(Tmp, ResBits); | ||||
3435 | } | ||||
3436 | } | ||||
3437 | return Tmp; | ||||
3438 | } | ||||
3439 | |||||
3440 | case Instruction::AShr: { | ||||
3441 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3442 | // ashr X, C -> adds C sign bits. Vectors too. | ||||
3443 | const APInt *ShAmt; | ||||
3444 | if (match(U->getOperand(1), m_APInt(ShAmt))) { | ||||
3445 | if (ShAmt->uge(TyBits)) | ||||
3446 | break; // Bad shift. | ||||
3447 | unsigned ShAmtLimited = ShAmt->getZExtValue(); | ||||
3448 | Tmp += ShAmtLimited; | ||||
3449 | if (Tmp > TyBits) Tmp = TyBits; | ||||
3450 | } | ||||
3451 | return Tmp; | ||||
3452 | } | ||||
3453 | case Instruction::Shl: { | ||||
3454 | const APInt *ShAmt; | ||||
3455 | if (match(U->getOperand(1), m_APInt(ShAmt))) { | ||||
3456 | // shl destroys sign bits. | ||||
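| // e.g. (illustrative): shl i8 %x, 3 where %x has 5 sign bits leaves | ||||
| // 5 - 3 = 2 sign bits; a shift of 5 or more would discard them all. | ||||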
3457 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3458 | if (ShAmt->uge(TyBits) || // Bad shift. | ||||
3459 | ShAmt->uge(Tmp)) break; // Shifted all sign bits out. | ||||
3460 | Tmp2 = ShAmt->getZExtValue(); | ||||
3461 | return Tmp - Tmp2; | ||||
3462 | } | ||||
3463 | break; | ||||
3464 | } | ||||
3465 | case Instruction::And: | ||||
3466 | case Instruction::Or: | ||||
3467 | case Instruction::Xor: // NOT is handled here. | ||||
3468 | // Logical binary ops preserve the number of sign bits at the worst. | ||||
3469 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3470 | if (Tmp != 1) { | ||||
3471 | Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3472 | FirstAnswer = std::min(Tmp, Tmp2); | ||||
3473 | // We computed what we know about the sign bits as our first | ||||
3474 | // answer. Now proceed to the generic code that uses | ||||
3475 | // computeKnownBits, and pick whichever answer is better. | ||||
3476 | } | ||||
3477 | break; | ||||
3478 | |||||
3479 | case Instruction::Select: { | ||||
3480 | // If we have a clamp pattern, we know that the number of sign bits will | ||||
3481 | // be the minimum of the clamp min/max range. | ||||
3482 | const Value *X; | ||||
3483 | const APInt *CLow, *CHigh; | ||||
3484 | if (isSignedMinMaxClamp(U, X, CLow, CHigh)) | ||||
3485 | return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); | ||||
3486 | |||||
3487 | Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3488 | if (Tmp == 1) break; | ||||
3489 | Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); | ||||
3490 | return std::min(Tmp, Tmp2); | ||||
3491 | } | ||||
3492 | |||||
3493 | case Instruction::Add: | ||||
3494 | // Add can have at most one carry bit. Thus we know that the output | ||||
3495 | // is, at worst, one more bit than the inputs. | ||||
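| // e.g. (illustrative, i8): operands with 3 sign bits each lie in | ||||
| // [-32, 31], so the sum lies in [-64, 62] and keeps min(3, 3) - 1 = 2 | ||||
| // sign bits. | ||||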
3496 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3497 | if (Tmp == 1) break; | ||||
3498 | |||||
3499 | // Special case decrementing a value (ADD X, -1): | ||||
3500 | if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) | ||||
3501 | if (CRHS->isAllOnesValue()) { | ||||
3502 | KnownBits Known(TyBits); | ||||
3503 | computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); | ||||
3504 | |||||
3505 | // If the input is known to be 0 or 1, the output is 0/-1, which is | ||||
3506 | // all sign bits set. | ||||
3507 | if ((Known.Zero | 1).isAllOnes()) | ||||
3508 | return TyBits; | ||||
3509 | |||||
3510 | // If we are subtracting one from a positive number, there is no carry | ||||
3511 | // out of the result. | ||||
3512 | if (Known.isNonNegative()) | ||||
3513 | return Tmp; | ||||
3514 | } | ||||
3515 | |||||
3516 | Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3517 | if (Tmp2 == 1) break; | ||||
3518 | return std::min(Tmp, Tmp2) - 1; | ||||
3519 | |||||
3520 | case Instruction::Sub: | ||||
3521 | Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3522 | if (Tmp2 == 1) break; | ||||
3523 | |||||
3524 | // Handle NEG. | ||||
3525 | if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) | ||||
3526 | if (CLHS->isNullValue()) { | ||||
3527 | KnownBits Known(TyBits); | ||||
3528 | computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); | ||||
3529 | // If the input is known to be 0 or 1, the output is 0/-1, which is | ||||
3530 | // all sign bits set. | ||||
3531 | if ((Known.Zero | 1).isAllOnes()) | ||||
3532 | return TyBits; | ||||
3533 | |||||
3534 | // If the input is known to be positive (the sign bit is known clear), | ||||
3535 | // the output of the NEG has the same number of sign bits as the | ||||
3536 | // input. | ||||
3537 | if (Known.isNonNegative()) | ||||
3538 | return Tmp2; | ||||
3539 | |||||
3540 | // Otherwise, we treat this like a SUB. | ||||
3541 | } | ||||
3542 | |||||
3543 | // Sub can have at most one carry bit. Thus we know that the output | ||||
3544 | // is, at worst, one more bit than the inputs. | ||||
3545 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3546 | if (Tmp == 1) break; | ||||
3547 | return std::min(Tmp, Tmp2) - 1; | ||||
3548 | |||||
3549 | case Instruction::Mul: { | ||||
3550 | // The output of the Mul can be at most twice the valid bits in the | ||||
3551 | // inputs. | ||||
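| // e.g. (illustrative, i16): operands with 10 sign bits each carry | ||||
| // 16 - 10 + 1 = 7 valid bits, so the product needs at most 14 valid bits | ||||
| // and keeps 16 - 14 + 1 = 3 sign bits. | ||||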
3552 | unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3553 | if (SignBitsOp0 == 1) break; | ||||
3554 | unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3555 | if (SignBitsOp1 == 1) break; | ||||
3556 | unsigned OutValidBits = | ||||
3557 | (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); | ||||
3558 | return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; | ||||
3559 | } | ||||
3560 | |||||
3561 | case Instruction::PHI: { | ||||
3562 | const PHINode *PN = cast<PHINode>(U); | ||||
3563 | unsigned NumIncomingValues = PN->getNumIncomingValues(); | ||||
3564 | // Don't analyze large in-degree PHIs. | ||||
3565 | if (NumIncomingValues > 4) break; | ||||
3566 | // Unreachable blocks may have zero-operand PHI nodes. | ||||
3567 | if (NumIncomingValues == 0) break; | ||||
3568 | |||||
3569 | // Take the minimum of all incoming values. This can't infinitely loop | ||||
3570 | // because of our depth threshold. | ||||
3571 | Query RecQ = Q; | ||||
3572 | Tmp = TyBits; | ||||
3573 | for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) { | ||||
3574 | if (Tmp == 1) return Tmp; | ||||
3575 | RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator(); | ||||
3576 | Tmp = std::min( | ||||
3577 | Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ)); | ||||
3578 | } | ||||
3579 | return Tmp; | ||||
3580 | } | ||||
3581 | |||||
3582 | case Instruction::Trunc: { | ||||
3583 | // If the input contained enough sign bits that some remain after the | ||||
3584 | // truncation, then we can make use of that. Otherwise we don't know | ||||
3585 | // anything. | ||||
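| // e.g. (illustrative): trunc i32 %x to i16 where %x has 20 sign bits | ||||
| // keeps 20 - (32 - 16) = 4 of them. | ||||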
3586 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3587 | unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
3588 | if (Tmp > (OperandTyBits - TyBits)) | ||||
3589 | return Tmp - (OperandTyBits - TyBits); | ||||
3590 | |||||
3591 | return 1; | ||||
3592 | } | ||||
3593 | |||||
3594 | case Instruction::ExtractElement: | ||||
3595 | // Look through extract element. At the moment we keep this simple and | ||||
3596 | // skip tracking the specific element. But at least we might find | ||||
3597 | // information valid for all elements of the vector (for example if vector | ||||
3598 | // is sign extended, shifted, etc). | ||||
3599 | return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3600 | |||||
3601 | case Instruction::ShuffleVector: { | ||||
3602 | // Collect the minimum number of sign bits that are shared by every vector | ||||
3603 | // element referenced by the shuffle. | ||||
3604 | auto *Shuf = dyn_cast<ShuffleVectorInst>(U); | ||||
3605 | if (!Shuf) { | ||||
3606 | // FIXME: Add support for shufflevector constant expressions. | ||||
3607 | return 1; | ||||
3608 | } | ||||
3609 | APInt DemandedLHS, DemandedRHS; | ||||
3610 | // For undef elements, we don't know anything about the common state of | ||||
3611 | // the shuffle result. | ||||
3612 | if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) | ||||
3613 | return 1; | ||||
3614 | Tmp = std::numeric_limits<unsigned>::max(); | ||||
3615 | if (!!DemandedLHS) { | ||||
3616 | const Value *LHS = Shuf->getOperand(0); | ||||
3617 | Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q); | ||||
3618 | } | ||||
3619 | // If we don't know anything, early out and try computeKnownBits | ||||
3620 | // fall-back. | ||||
3621 | if (Tmp == 1) | ||||
3622 | break; | ||||
3623 | if (!!DemandedRHS) { | ||||
3624 | const Value *RHS = Shuf->getOperand(1); | ||||
3625 | Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q); | ||||
3626 | Tmp = std::min(Tmp, Tmp2); | ||||
3627 | } | ||||
3628 | // If we don't know anything, early out and try computeKnownBits | ||||
3629 | // fall-back. | ||||
3630 | if (Tmp == 1) | ||||
3631 | break; | ||||
3632 | assert(Tmp <= TyBits && "Failed to determine minimum sign bits"); | ||||
3633 | return Tmp; | ||||
3634 | } | ||||
3635 | case Instruction::Call: { | ||||
3636 | if (const auto *II = dyn_cast<IntrinsicInst>(U)) { | ||||
3637 | switch (II->getIntrinsicID()) { | ||||
3638 | default: break; | ||||
3639 | case Intrinsic::abs: | ||||
3640 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3641 | if (Tmp == 1) break; | ||||
3642 | |||||
3643 | // Absolute value reduces number of sign bits by at most 1. | ||||
3644 | return Tmp - 1; | ||||
3645 | case Intrinsic::smin: | ||||
3646 | case Intrinsic::smax: { | ||||
3647 | const APInt *CLow, *CHigh; | ||||
3648 | if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh)) | ||||
3649 | return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); | ||||
3650 | } | ||||
3651 | } | ||||
3652 | } | ||||
3653 | } | ||||
3654 | } | ||||
3655 | } | ||||
3656 | |||||
3657 | // Finally, if we can prove that the top bits of the result are 0's or 1's, | ||||
3658 | // use this information. | ||||
3659 | |||||
3660 | // If we can examine all elements of a vector constant successfully, we're | ||||
3661 | // done (we can't do any better than that). If not, keep trying. | ||||
3662 | if (unsigned VecSignBits = | ||||
3663 | computeNumSignBitsVectorConstant(V, DemandedElts, TyBits)) | ||||
3664 | return VecSignBits; | ||||
3665 | |||||
3666 | KnownBits Known(TyBits); | ||||
3667 | computeKnownBits(V, DemandedElts, Known, Depth, Q); | ||||
3668 | |||||
3669 | // If we know that the sign bit is either zero or one, determine the number of | ||||
3670 | // identical bits in the top of the input value. | ||||
3671 | return std::max(FirstAnswer, Known.countMinSignBits()); | ||||
3672 | } | ||||
3673 | |||||
3674 | Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB, | ||||
3675 | const TargetLibraryInfo *TLI) { | ||||
3676 | const Function *F = CB.getCalledFunction(); | ||||
3677 | if (!F) | ||||
3678 | return Intrinsic::not_intrinsic; | ||||
3679 | |||||
3680 | if (F->isIntrinsic()) | ||||
3681 | return F->getIntrinsicID(); | ||||
3682 | |||||
3683 | // We are going to infer semantics of a library function based on mapping it | ||||
3684 | // to an LLVM intrinsic. Check that the library function is available from | ||||
3685 | // this callbase and in this environment. | ||||
3686 | LibFunc Func; | ||||
3687 | if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) || | ||||
3688 | !CB.onlyReadsMemory()) | ||||
3689 | return Intrinsic::not_intrinsic; | ||||
3690 | |||||
3691 | switch (Func) { | ||||
3692 | default: | ||||
3693 | break; | ||||
3694 | case LibFunc_sin: | ||||
3695 | case LibFunc_sinf: | ||||
3696 | case LibFunc_sinl: | ||||
3697 | return Intrinsic::sin; | ||||
3698 | case LibFunc_cos: | ||||
3699 | case LibFunc_cosf: | ||||
3700 | case LibFunc_cosl: | ||||
3701 | return Intrinsic::cos; | ||||
3702 | case LibFunc_exp: | ||||
3703 | case LibFunc_expf: | ||||
3704 | case LibFunc_expl: | ||||
3705 | return Intrinsic::exp; | ||||
3706 | case LibFunc_exp2: | ||||
3707 | case LibFunc_exp2f: | ||||
3708 | case LibFunc_exp2l: | ||||
3709 | return Intrinsic::exp2; | ||||
3710 | case LibFunc_log: | ||||
3711 | case LibFunc_logf: | ||||
3712 | case LibFunc_logl: | ||||
3713 | return Intrinsic::log; | ||||
3714 | case LibFunc_log10: | ||||
3715 | case LibFunc_log10f: | ||||
3716 | case LibFunc_log10l: | ||||
3717 | return Intrinsic::log10; | ||||
3718 | case LibFunc_log2: | ||||
3719 | case LibFunc_log2f: | ||||
3720 | case LibFunc_log2l: | ||||
3721 | return Intrinsic::log2; | ||||
3722 | case LibFunc_fabs: | ||||
3723 | case LibFunc_fabsf: | ||||
3724 | case LibFunc_fabsl: | ||||
3725 | return Intrinsic::fabs; | ||||
3726 | case LibFunc_fmin: | ||||
3727 | case LibFunc_fminf: | ||||
3728 | case LibFunc_fminl: | ||||
3729 | return Intrinsic::minnum; | ||||
3730 | case LibFunc_fmax: | ||||
3731 | case LibFunc_fmaxf: | ||||
3732 | case LibFunc_fmaxl: | ||||
3733 | return Intrinsic::maxnum; | ||||
3734 | case LibFunc_copysign: | ||||
3735 | case LibFunc_copysignf: | ||||
3736 | case LibFunc_copysignl: | ||||
3737 | return Intrinsic::copysign; | ||||
3738 | case LibFunc_floor: | ||||
3739 | case LibFunc_floorf: | ||||
3740 | case LibFunc_floorl: | ||||
3741 | return Intrinsic::floor; | ||||
3742 | case LibFunc_ceil: | ||||
3743 | case LibFunc_ceilf: | ||||
3744 | case LibFunc_ceill: | ||||
3745 | return Intrinsic::ceil; | ||||
3746 | case LibFunc_trunc: | ||||
3747 | case LibFunc_truncf: | ||||
3748 | case LibFunc_truncl: | ||||
3749 | return Intrinsic::trunc; | ||||
3750 | case LibFunc_rint: | ||||
3751 | case LibFunc_rintf: | ||||
3752 | case LibFunc_rintl: | ||||
3753 | return Intrinsic::rint; | ||||
3754 | case LibFunc_nearbyint: | ||||
3755 | case LibFunc_nearbyintf: | ||||
3756 | case LibFunc_nearbyintl: | ||||
3757 | return Intrinsic::nearbyint; | ||||
3758 | case LibFunc_round: | ||||
3759 | case LibFunc_roundf: | ||||
3760 | case LibFunc_roundl: | ||||
3761 | return Intrinsic::round; | ||||
3762 | case LibFunc_roundeven: | ||||
3763 | case LibFunc_roundevenf: | ||||
3764 | case LibFunc_roundevenl: | ||||
3765 | return Intrinsic::roundeven; | ||||
3766 | case LibFunc_pow: | ||||
3767 | case LibFunc_powf: | ||||
3768 | case LibFunc_powl: | ||||
3769 | return Intrinsic::pow; | ||||
3770 | case LibFunc_sqrt: | ||||
3771 | case LibFunc_sqrtf: | ||||
3772 | case LibFunc_sqrtl: | ||||
3773 | return Intrinsic::sqrt; | ||||
3774 | } | ||||
3775 | |||||
3776 | return Intrinsic::not_intrinsic; | ||||
3777 | } | ||||
3778 | |||||
3779 | /// Return true if we can prove that the specified FP value is never equal to | ||||
3780 | /// -0.0. | ||||
3781 | /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee | ||||
3782 | /// that a value is not -0.0. It only guarantees that -0.0 may be treated | ||||
3783 | /// the same as +0.0 in floating-point ops. | ||||
3784 | bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, | ||||
3785 | unsigned Depth) { | ||||
3786 | if (auto *CFP = dyn_cast<ConstantFP>(V)) | ||||
3787 | return !CFP->getValueAPF().isNegZero(); | ||||
3788 | |||||
3789 | if (Depth == MaxAnalysisRecursionDepth) | ||||
3790 | return false; | ||||
3791 | |||||
3792 | auto *Op = dyn_cast<Operator>(V); | ||||
3793 | if (!Op) | ||||
3794 | return false; | ||||
3795 | |||||
3796 | // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. | ||||
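| // (Under IEEE 754, even -0.0 + +0.0 is +0.0 in the default | ||||
| // round-to-nearest mode, which is what makes this safe.) | ||||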
3797 | if (match(Op, m_FAdd(m_Value(), m_PosZeroFP()))) | ||||
3798 | return true; | ||||
3799 | |||||
3800 | // sitofp and uitofp turn into +0.0 for zero. | ||||
3801 | if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) | ||||
3802 | return true; | ||||
3803 | |||||
3804 | if (auto *Call = dyn_cast<CallInst>(Op)) { | ||||
3805 | Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI); | ||||
3806 | switch (IID) { | ||||
3807 | default: | ||||
3808 | break; | ||||
3809 | // sqrt(-0.0) = -0.0, no other negative results are possible. | ||||
3810 | case Intrinsic::sqrt: | ||||
3811 | case Intrinsic::canonicalize: | ||||
3812 | return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); | ||||
3813 | case Intrinsic::experimental_constrained_sqrt: { | ||||
3814 | // NOTE: This rounding mode restriction may be too strict. | ||||
3815 | const auto *CI = cast<ConstrainedFPIntrinsic>(Call); | ||||
3816 | if (CI->getRoundingMode() == RoundingMode::NearestTiesToEven) | ||||
3817 | return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); | ||||
3818 | else | ||||
3819 | return false; | ||||
3820 | } | ||||
3821 | // fabs(x) != -0.0 | ||||
3822 | case Intrinsic::fabs: | ||||
3823 | return true; | ||||
3824 | // sitofp and uitofp turn into +0.0 for zero. | ||||
3825 | case Intrinsic::experimental_constrained_sitofp: | ||||
3826 | case Intrinsic::experimental_constrained_uitofp: | ||||
3827 | return true; | ||||
3828 | } | ||||
3829 | } | ||||
3830 | |||||
3831 | return false; | ||||
3832 | } | ||||
3833 | |||||
3834 | /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a | ||||
3835 | /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign | ||||
3836 | /// bit despite comparing equal. | ||||
3837 | static bool cannotBeOrderedLessThanZeroImpl(const Value *V, | ||||
3838 | const TargetLibraryInfo *TLI, | ||||
3839 | bool SignBitOnly, | ||||
3840 | unsigned Depth) { | ||||
3841 | // TODO: This function does not do the right thing when SignBitOnly is true | ||||
3842 | // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform | ||||
3843 | // which flips the sign bits of NaNs. See | ||||
3844 | // https://llvm.org/bugs/show_bug.cgi?id=31702. | ||||
3845 | |||||
3846 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { | ||||
3847 | return !CFP->getValueAPF().isNegative() || | ||||
3848 | (!SignBitOnly && CFP->getValueAPF().isZero()); | ||||
3849 | } | ||||
3850 | |||||
3851 | // Handle vector of constants. | ||||
3852 | if (auto *CV = dyn_cast<Constant>(V)) { | ||||
3853 | if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) { | ||||
3854 | unsigned NumElts = CVFVTy->getNumElements(); | ||||
3855 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
3856 | auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i)); | ||||
3857 | if (!CFP) | ||||
3858 | return false; | ||||
3859 | if (CFP->getValueAPF().isNegative() && | ||||
3860 | (SignBitOnly || !CFP->getValueAPF().isZero())) | ||||
3861 | return false; | ||||
3862 | } | ||||
3863 | |||||
3864 | // All non-negative ConstantFPs. | ||||
3865 | return true; | ||||
3866 | } | ||||
3867 | } | ||||
3868 | |||||
3869 | if (Depth == MaxAnalysisRecursionDepth) | ||||
3870 | return false; | ||||
3871 | |||||
3872 | const Operator *I = dyn_cast<Operator>(V); | ||||
3873 | if (!I) | ||||
3874 | return false; | ||||
3875 | |||||
3876 | switch (I->getOpcode()) { | ||||
3877 | default: | ||||
3878 | break; | ||||
3879 | // Unsigned integers are always nonnegative. | ||||
3880 | case Instruction::UIToFP: | ||||
3881 | return true; | ||||
3882 | case Instruction::FDiv: | ||||
3883 | // X / X is always exactly 1.0 or a NaN. | ||||
3884 | if (I->getOperand(0) == I->getOperand(1) && | ||||
3885 | (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) | ||||
3886 | return true; | ||||
3887 | |||||
3888 | // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN). | ||||
3889 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3890 | Depth + 1) && | ||||
3891 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, | ||||
3892 | /*SignBitOnly*/ true, Depth + 1); | ||||
3893 | case Instruction::FMul: | ||||
3894 | // X * X is always non-negative or a NaN. | ||||
3895 | if (I->getOperand(0) == I->getOperand(1) && | ||||
3896 | (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) | ||||
3897 | return true; | ||||
3898 | |||||
3899 | [[fallthrough]]; | ||||
3900 | case Instruction::FAdd: | ||||
3901 | case Instruction::FRem: | ||||
3902 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3903 | Depth + 1) && | ||||
3904 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, | ||||
3905 | Depth + 1); | ||||
3906 | case Instruction::Select: | ||||
3907 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, | ||||
3908 | Depth + 1) && | ||||
3909 | cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, | ||||
3910 | Depth + 1); | ||||
3911 | case Instruction::FPExt: | ||||
3912 | case Instruction::FPTrunc: | ||||
3913 | // Widening/narrowing never change sign. | ||||
3914 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3915 | Depth + 1); | ||||
3916 | case Instruction::ExtractElement: | ||||
3917 | // Look through extract element. At the moment we keep this simple and skip | ||||
3918 | // tracking the specific element. But at least we might find information | ||||
3919 | // valid for all elements of the vector. | ||||
3920 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3921 | Depth + 1); | ||||
3922 | case Instruction::Call: | ||||
3923 | const auto *CI = cast<CallInst>(I); | ||||
3924 | Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI); | ||||
3925 | switch (IID) { | ||||
3926 | default: | ||||
3927 | break; | ||||
3928 | case Intrinsic::canonicalize: | ||||
3929 | case Intrinsic::arithmetic_fence: | ||||
3930 | case Intrinsic::floor: | ||||
3931 | case Intrinsic::ceil: | ||||
3932 | case Intrinsic::trunc: | ||||
3933 | case Intrinsic::rint: | ||||
3934 | case Intrinsic::nearbyint: | ||||
3935 | case Intrinsic::round: | ||||
3936 | case Intrinsic::roundeven: | ||||
3937 | case Intrinsic::fptrunc_round: | ||||
3938 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, Depth + 1); | ||||
3939 | case Intrinsic::maxnum: { | ||||
3940 | Value *V0 = I->getOperand(0), *V1 = I->getOperand(1); | ||||
3941 | auto isPositiveNum = [&](Value *V) { | ||||
3942 | if (SignBitOnly) { | ||||
3943 | // With SignBitOnly, this is tricky because the result of | ||||
3944 | // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is | ||||
3945 | // a constant strictly greater than 0.0. | ||||
3946 | const APFloat *C; | ||||
3947 | return match(V, m_APFloat(C)) && | ||||
3948 | *C > APFloat::getZero(C->getSemantics()); | ||||
3949 | } | ||||
3950 | |||||
3951 | // -0.0 compares equal to 0.0, so if this operand is at least -0.0, | ||||
3952 | // maxnum can't be ordered-less-than-zero. | ||||
3953 | return isKnownNeverNaN(V, TLI) && | ||||
3954 | cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1); | ||||
3955 | }; | ||||
3956 | |||||
3957 | // TODO: This could be improved. We could also check that neither operand | ||||
3958 | // has its sign bit set (and at least 1 is not-NAN?). | ||||
3959 | return isPositiveNum(V0) || isPositiveNum(V1); | ||||
3960 | } | ||||
3961 | |||||
3962 | case Intrinsic::maximum: | ||||
3963 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3964 | Depth + 1) || | ||||
3965 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, | ||||
3966 | Depth + 1); | ||||
3967 | case Intrinsic::minnum: | ||||
3968 | case Intrinsic::minimum: | ||||
3969 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3970 | Depth + 1) && | ||||
3971 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, | ||||
3972 | Depth + 1); | ||||
3973 | case Intrinsic::exp: | ||||
3974 | case Intrinsic::exp2: | ||||
3975 | case Intrinsic::fabs: | ||||
3976 | return true; | ||||
3977 | case Intrinsic::copysign: | ||||
3978 | // Only the sign operand matters. | ||||
3979 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, true, | ||||
3980 | Depth + 1); | ||||
3981 | case Intrinsic::sqrt: | ||||
3982 | // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. | ||||
3983 | if (!SignBitOnly) | ||||
3984 | return true; | ||||
3985 | return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || | ||||
3986 | CannotBeNegativeZero(CI->getOperand(0), TLI)); | ||||
3987 | |||||
3988 | case Intrinsic::powi: | ||||
3989 | if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { | ||||
3990 | // powi(x,n) is non-negative if n is even. | ||||
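| // e.g. powi(-3.0, 2) == 9.0 and powi(-0.5, 4) == 0.0625, both >= +0.0. | ||||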
3991 | if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) | ||||
3992 | return true; | ||||
3993 | } | ||||
3994 | // TODO: This is not correct. Given that exp is an integer, here are the | ||||
3995 | // ways that pow can return a negative value: | ||||
3996 | // | ||||
3997 | // pow(x, exp) --> negative if exp is odd and x is negative. | ||||
3998 | // pow(-0, exp) --> -inf if exp is negative odd. | ||||
3999 | // pow(-0, exp) --> -0 if exp is positive odd. | ||||
4000 | // pow(-inf, exp) --> -0 if exp is negative odd. | ||||
4001 | // pow(-inf, exp) --> -inf if exp is positive odd. | ||||
4002 | // | ||||
4003 | // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, | ||||
4004 | // but we must return false if x == -0. Unfortunately we do not currently | ||||
4005 | // have a way of expressing this constraint. See details in | ||||
4006 | // https://llvm.org/bugs/show_bug.cgi?id=31702. | ||||
4007 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
4008 | Depth + 1); | ||||
4009 | |||||
4010 | case Intrinsic::fma: | ||||
4011 | case Intrinsic::fmuladd: | ||||
4012 | // x*x+y is non-negative if y is non-negative. | ||||
4013 | return I->getOperand(0) == I->getOperand(1) && | ||||
4014 | (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && | ||||
4015 | cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, | ||||
4016 | Depth + 1); | ||||
4017 | } | ||||
4018 | break; | ||||
4019 | } | ||||
4020 | return false; | ||||
4021 | } | ||||
4022 | |||||
4023 | bool llvm::CannotBeOrderedLessThanZero(const Value *V, | ||||
4024 | const TargetLibraryInfo *TLI) { | ||||
4025 | return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); | ||||
4026 | } | ||||
4027 | |||||
4028 | bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { | ||||
4029 | return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); | ||||
4030 | } | ||||
4031 | |||||
4032 | bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI, | ||||
4033 | unsigned Depth) { | ||||
4034 | assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type"); | ||||
4035 | |||||
4036 | // If we're told that infinities won't happen, assume they won't. | ||||
4037 | if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) | ||||
4038 | if (FPMathOp->hasNoInfs()) | ||||
4039 | return true; | ||||
4040 | |||||
4041 | if (const auto *Arg = dyn_cast<Argument>(V)) { | ||||
4042 | if ((Arg->getNoFPClass() & fcInf) == fcInf) | ||||
4043 | return true; | ||||
4044 | } | ||||
4045 | |||||
4046 | // TODO: Use fpclass like API for isKnown queries and distinguish +inf from | ||||
4047 | // -inf. | ||||
4048 | if (const auto *CB = dyn_cast<CallBase>(V)) { | ||||
4049 | if ((CB->getRetNoFPClass() & fcInf) == fcInf) | ||||
4050 | return true; | ||||
4051 | } | ||||
4052 | |||||
4053 | // Handle scalar constants. | ||||
4054 | if (auto *CFP = dyn_cast<ConstantFP>(V)) | ||||
4055 | return !CFP->isInfinity(); | ||||
4056 | |||||
4057 | if (Depth == MaxAnalysisRecursionDepth) | ||||
4058 | return false; | ||||
4059 | |||||
4060 | if (auto *Inst = dyn_cast<Instruction>(V)) { | ||||
4061 | switch (Inst->getOpcode()) { | ||||
4062 | case Instruction::Select: { | ||||
4063 | return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) && | ||||
4064 | isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1); | ||||
4065 | } | ||||
4066 | case Instruction::SIToFP: | ||||
4067 | case Instruction::UIToFP: { | ||||
4068 | // Get width of largest magnitude integer (remove a bit if signed). | ||||
4069 | // This still works for a signed minimum value because the largest FP | ||||
4070 | // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx). | ||||
4071 | int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
4072 | if (Inst->getOpcode() == Instruction::SIToFP) | ||||
4073 | --IntSize; | ||||
4074 | |||||
4075 | // If the exponent of the largest finite FP value can hold the largest | ||||
4076 | // integer, the result of the cast must be finite. | ||||
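| // e.g. (illustrative): sitofp i32 to float is always finite because | ||||
| // ilogb(FLT_MAX) = 127 >= 31, while uitofp i128 to half may overflow to | ||||
| // infinity because ilogb of the largest half is 15 < 128. | ||||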
4077 | Type *FPTy = Inst->getType()->getScalarType(); | ||||
4078 | return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize; | ||||
4079 | } | ||||
4080 | case Instruction::FNeg: | ||||
4081 | case Instruction::FPExt: { | ||||
4082 | // Peek through to source op. If it is not infinity, this is not infinity. | ||||
4083 | return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1); | ||||
4084 | } | ||||
4085 | case Instruction::FPTrunc: { | ||||
4086 | // Need a range check. | ||||
4087 | return false; | ||||
4088 | } | ||||
4089 | default: | ||||
4090 | break; | ||||
4091 | } | ||||
4092 | |||||
4093 | if (const auto *II = dyn_cast<IntrinsicInst>(V)) { | ||||
4094 | switch (II->getIntrinsicID()) { | ||||
4095 | case Intrinsic::sin: | ||||
4096 | case Intrinsic::cos: | ||||
4097 | // Return NaN on infinite inputs. | ||||
4098 | return true; | ||||
4099 | case Intrinsic::fabs: | ||||
4100 | case Intrinsic::sqrt: | ||||
4101 | case Intrinsic::canonicalize: | ||||
4102 | case Intrinsic::copysign: | ||||
4103 | case Intrinsic::arithmetic_fence: | ||||
4104 | case Intrinsic::trunc: | ||||
4105 | return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1); | ||||
4106 | case Intrinsic::floor: | ||||
4107 | case Intrinsic::ceil: | ||||
4108 | case Intrinsic::rint: | ||||
4109 | case Intrinsic::nearbyint: | ||||
4110 | case Intrinsic::round: | ||||
4111 | case Intrinsic::roundeven: | ||||
4112 | // PPC_FP128 is a special case. | ||||
4113 | if (V->getType()->isMultiUnitFPType()) | ||||
4114 | return false; | ||||
4115 | return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1); | ||||
4116 | case Intrinsic::fptrunc_round: | ||||
4117 | // Requires knowing the value range. | ||||
4118 | return false; | ||||
4119 | case Intrinsic::minnum: | ||||
4120 | case Intrinsic::maxnum: | ||||
4121 | case Intrinsic::minimum: | ||||
4122 | case Intrinsic::maximum: | ||||
4123 | return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) && | ||||
4124 | isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1); | ||||
4125 | case Intrinsic::log: | ||||
4126 | case Intrinsic::log10: | ||||
4127 | case Intrinsic::log2: | ||||
4128 | // log(+inf) -> +inf | ||||
4129 | // log([+-]0.0) -> -inf | ||||
4130 | // log(-inf) -> nan | ||||
4131 | // log(-x) -> nan | ||||
4132 | // TODO: We lack API to check the == 0 case. | ||||
4133 | return false; | ||||
4134 | case Intrinsic::exp: | ||||
4135 | case Intrinsic::exp2: | ||||
4136 | case Intrinsic::pow: | ||||
4137 | case Intrinsic::powi: | ||||
4138 | case Intrinsic::fma: | ||||
4139 | case Intrinsic::fmuladd: | ||||
4140 | // These can return infinities on overflow cases, so it's hard to prove | ||||
4141 | // anything about it. | ||||
4142 | return false; | ||||
4143 | default: | ||||
4144 | break; | ||||
4145 | } | ||||
4146 | } | ||||
4147 | } | ||||
4148 | |||||
4149 | // Try to handle fixed-width vector constants. | ||||
4150 | auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
4151 | if (VFVTy && isa<Constant>(V)) { | ||||
4152 | // For vectors, verify that each element is not infinity. | ||||
4153 | unsigned NumElts = VFVTy->getNumElements(); | ||||
4154 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
4155 | Constant *Elt = cast<Constant>(V)->getAggregateElement(i); | ||||
4156 | if (!Elt) | ||||
4157 | return false; | ||||
4158 | if (isa<UndefValue>(Elt)) | ||||
4159 | continue; | ||||
4160 | auto *CElt = dyn_cast<ConstantFP>(Elt); | ||||
4161 | if (!CElt || CElt->isInfinity()) | ||||
4162 | return false; | ||||
4163 | } | ||||
4164 | // All elements were confirmed non-infinity or undefined. | ||||
4165 | return true; | ||||
4166 | } | ||||
4167 | |||||
4168 | // Was not able to prove that V never contains infinity. | ||||
4169 | return false; | ||||
4170 | } | ||||
4171 | |||||
4172 | bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, | ||||
4173 | unsigned Depth) { | ||||
4174 | assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type"); | ||||
4175 | |||||
4176 | // If we're told that NaNs won't happen, assume they won't. | ||||
4177 | if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) | ||||
4178 | if (FPMathOp->hasNoNaNs()) | ||||
4179 | return true; | ||||
4180 | |||||
4181 | if (const auto *Arg = dyn_cast<Argument>(V)) { | ||||
4182 | if ((Arg->getNoFPClass() & fcNan) == fcNan) | ||||
4183 | return true; | ||||
4184 | } | ||||
4185 | |||||
4186 | // TODO: Use fpclass like API for isKnown queries and distinguish snan from | ||||
4187 | // qnan. | ||||
4188 | if (const auto *CB = dyn_cast<CallBase>(V)) { | ||||
4189 | FPClassTest Mask = CB->getRetNoFPClass(); | ||||
4190 | if ((Mask & fcNan) == fcNan) | ||||
4191 | return true; | ||||
4192 | } | ||||
4193 | |||||
4194 | // Handle scalar constants. | ||||
4195 | if (auto *CFP = dyn_cast<ConstantFP>(V)) | ||||
4196 | return !CFP->isNaN(); | ||||
4197 | |||||
4198 | if (Depth == MaxAnalysisRecursionDepth) | ||||
4199 | return false; | ||||
4200 | |||||
4201 | if (auto *Inst = dyn_cast<Instruction>(V)) { | ||||
4202 | switch (Inst->getOpcode()) { | ||||
4203 | case Instruction::FAdd: | ||||
4204 | case Instruction::FSub: | ||||
4205 | // Adding positive and negative infinity produces NaN. | ||||
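| // e.g. (+inf) + (-inf) and (+inf) - (+inf) are NaN, so besides NaN-free | ||||
| // operands we also require at least one operand to be infinity-free. | ||||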
4206 | return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && | ||||
4207 | isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && | ||||
4208 | (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) || | ||||
4209 | isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1)); | ||||
4210 | |||||
4211 | case Instruction::FMul: | ||||
4212 | // Zero multiplied with infinity produces NaN. | ||||
4213 | // FIXME: If neither side can be zero fmul never produces NaN. | ||||
4214 | return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && | ||||
4215 | isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) && | ||||
4216 | isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && | ||||
4217 | isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1); | ||||
4218 | |||||
4219 | case Instruction::FDiv: | ||||
4220 | case Instruction::FRem: | ||||
4221 | // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN. | ||||
4222 | return false; | ||||
4223 | |||||
4224 | case Instruction::Select: { | ||||
4225 | return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && | ||||
4226 | isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1); | ||||
4227 | } | ||||
4228 | case Instruction::SIToFP: | ||||
4229 | case Instruction::UIToFP: | ||||
4230 | return true; | ||||
4231 | case Instruction::FPTrunc: | ||||
4232 | case Instruction::FPExt: | ||||
4233 | case Instruction::FNeg: | ||||
4234 | return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1); | ||||
4235 | default: | ||||
4236 | break; | ||||
4237 | } | ||||
4238 | } | ||||
4239 | |||||
4240 | if (const auto *II = dyn_cast<IntrinsicInst>(V)) { | ||||
4241 | switch (II->getIntrinsicID()) { | ||||
4242 | case Intrinsic::canonicalize: | ||||
4243 | case Intrinsic::fabs: | ||||
4244 | case Intrinsic::copysign: | ||||
4245 | case Intrinsic::exp: | ||||
4246 | case Intrinsic::exp2: | ||||
4247 | case Intrinsic::floor: | ||||
4248 | case Intrinsic::ceil: | ||||
4249 | case Intrinsic::trunc: | ||||
4250 | case Intrinsic::rint: | ||||
4251 | case Intrinsic::nearbyint: | ||||
4252 | case Intrinsic::round: | ||||
4253 | case Intrinsic::roundeven: | ||||
4254 | case Intrinsic::arithmetic_fence: | ||||
4255 | return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1); | ||||
4256 | case Intrinsic::sqrt: | ||||
4257 | return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) && | ||||
4258 | CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI); | ||||
4259 | case Intrinsic::minnum: | ||||
4260 | case Intrinsic::maxnum: | ||||
4261 | // If either operand is not NaN, the result is not NaN. | ||||
4262 | return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) || | ||||
4263 | isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1); | ||||
4264 | default: | ||||
4265 | return false; | ||||
4266 | } | ||||
4267 | } | ||||
4268 | |||||
4269 | // Try to handle fixed-width vector constants. | ||||
4270 | auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
4271 | if (VFVTy && isa<Constant>(V)) { | ||||
4272 | // For vectors, verify that each element is not NaN. | ||||
4273 | unsigned NumElts = VFVTy->getNumElements(); | ||||
4274 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
4275 | Constant *Elt = cast<Constant>(V)->getAggregateElement(i); | ||||
4276 | if (!Elt) | ||||
4277 | return false; | ||||
4278 | if (isa<UndefValue>(Elt)) | ||||
4279 | continue; | ||||
4280 | auto *CElt = dyn_cast<ConstantFP>(Elt); | ||||
4281 | if (!CElt || CElt->isNaN()) | ||||
4282 | return false; | ||||
4283 | } | ||||
4284 | // All elements were confirmed not-NaN or undefined. | ||||
4285 | return true; | ||||
4286 | } | ||||
4287 | |||||
4288 | // Was not able to prove that V never contains NaN. | ||||
4289 | return false; | ||||
4290 | } | ||||
4291 | |||||
4292 | /// Return true if it's possible to assume IEEE treatment of input denormals in | ||||
4293 | /// \p F for \p Val. | ||||
4294 | static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) { | ||||
4295 | Ty = Ty->getScalarType(); | ||||
4296 | return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE; | ||||
4297 | } | ||||
4298 | |||||
4299 | bool KnownFPClass::isKnownNeverLogicalZero(const Function &F, Type *Ty) const { | ||||
4300 | return isKnownNeverZero() && | ||||
4301 | (isKnownNeverSubnormal() || inputDenormalIsIEEE(F, Ty)); | ||||
4302 | } | ||||
4303 | |||||
4304 | /// Returns a pair of values, which if passed to llvm.is.fpclass, returns the | ||||
4305 | /// same result as an fcmp with the given operands. | ||||
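| /// For example, 'fcmp oge x, 0.0' corresponds to | ||||
| /// 'llvm.is.fpclass(x, fcPositive | fcNegZero)' and 'fcmp uno x, 0.0' to | ||||
| /// 'llvm.is.fpclass(x, fcNan)'. | ||||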
4306 | std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred, | ||||
4307 | const Function &F, | ||||
4308 | Value *LHS, Value *RHS, | ||||
4309 | bool LookThroughSrc) { | ||||
4310 | const APFloat *ConstRHS; | ||||
4311 | if (!match(RHS, m_APFloat(ConstRHS))) | ||||
4312 | return {nullptr, fcNone}; | ||||
4313 | |||||
4314 | if (ConstRHS->isZero()) { | ||||
4315 | // A compare with 0.0 corresponds exactly to fcZero only when input denormals | ||||
4316 | // are not flushed; under DAZ, subnormal inputs also compare equal to zero. | ||||
4317 | // TODO: Handle DAZ by expanding masks to cover subnormal cases. | ||||
4318 | if (Pred != FCmpInst::FCMP_ORD && Pred != FCmpInst::FCMP_UNO && | ||||
4319 | !inputDenormalIsIEEE(F, LHS->getType())) | ||||
4320 | return {nullptr, fcNone}; | ||||
4321 | |||||
4322 | switch (Pred) { | ||||
4323 | case FCmpInst::FCMP_OEQ: // Match x == 0.0 | ||||
4324 | return {LHS, fcZero}; | ||||
4325 | case FCmpInst::FCMP_UEQ: // Match isnan(x) || (x == 0.0) | ||||
4326 | return {LHS, fcZero | fcNan}; | ||||
4327 | case FCmpInst::FCMP_UNE: // Match (x != 0.0) | ||||
4328 | return {LHS, ~fcZero}; | ||||
4329 | case FCmpInst::FCMP_ONE: // Match !isnan(x) && x != 0.0 | ||||
4330 | return {LHS, ~fcNan & ~fcZero}; | ||||
4331 | case FCmpInst::FCMP_ORD: | ||||
4332 | // Canonical form of ord/uno is with a zero. We could also handle | ||||
4333 | // non-canonical other non-NaN constants or LHS == RHS. | ||||
4334 | return {LHS, ~fcNan}; | ||||
4335 | case FCmpInst::FCMP_UNO: | ||||
4336 | return {LHS, fcNan}; | ||||
4337 | case FCmpInst::FCMP_OGT: // x > 0 | ||||
4338 | return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf}; | ||||
4339 | case FCmpInst::FCMP_UGT: // isnan(x) || x > 0 | ||||
4340 | return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf | fcNan}; | ||||
4341 | case FCmpInst::FCMP_OGE: // x >= 0 | ||||
4342 | return {LHS, fcPositive | fcNegZero}; | ||||
4343 | case FCmpInst::FCMP_UGE: // isnan(x) || x >= 0 | ||||
4344 | return {LHS, fcPositive | fcNegZero | fcNan}; | ||||
4345 | case FCmpInst::FCMP_OLT: // x < 0 | ||||
4346 | return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf}; | ||||
4347 | case FCmpInst::FCMP_ULT: // isnan(x) || x < 0 | ||||
4348 | return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf | fcNan}; | ||||
4349 | case FCmpInst::FCMP_OLE: // x <= 0 | ||||
4350 | return {LHS, fcNegative | fcPosZero}; | ||||
4351 | case FCmpInst::FCMP_ULE: // isnan(x) || x <= 0 | ||||
4352 | return {LHS, fcNegative | fcPosZero | fcNan}; | ||||
4353 | default: | ||||
4354 | break; | ||||
4355 | } | ||||
4356 | |||||
4357 | return {nullptr, fcNone}; | ||||
4358 | } | ||||
4359 | |||||
4360 | Value *Src = LHS; | ||||
4361 | const bool IsFabs = LookThroughSrc && match(LHS, m_FAbs(m_Value(Src))); | ||||
4362 | |||||
4363 | // Compute the test mask that would return true for the ordered comparisons. | ||||
4364 | FPClassTest Mask; | ||||
4365 | |||||
4366 | if (ConstRHS->isInfinity()) { | ||||
4367 | switch (Pred) { | ||||
4368 | case FCmpInst::FCMP_OEQ: | ||||
4369 | case FCmpInst::FCMP_UNE: { | ||||
4370 | // Match __builtin_isinf patterns | ||||
4371 | // | ||||
4372 | // fcmp oeq x, +inf -> is_fpclass x, fcPosInf | ||||
4373 | // fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf | ||||
4374 | // fcmp oeq x, -inf -> is_fpclass x, fcNegInf | ||||
4375 | // fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false | ||||
4376 | // | ||||
4377 | // fcmp une x, +inf -> is_fpclass x, ~fcPosInf | ||||
4378 | // fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf | ||||
4379 | // fcmp une x, -inf -> is_fpclass x, ~fcNegInf | ||||
4380 | // fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true | ||||
4381 | |||||
4382 | if (ConstRHS->isNegative()) { | ||||
4383 | Mask = fcNegInf; | ||||
4384 | if (IsFabs) | ||||
4385 | Mask = fcNone; | ||||
4386 | } else { | ||||
4387 | Mask = fcPosInf; | ||||
4388 | if (IsFabs) | ||||
4389 | Mask |= fcNegInf; | ||||
4390 | } | ||||
4391 | |||||
4392 | break; | ||||
4393 | } | ||||
4394 | case FCmpInst::FCMP_ONE: | ||||
4395 | case FCmpInst::FCMP_UEQ: { | ||||
4396 | // Match __builtin_isinf patterns | ||||
4397 | // fcmp one x, -inf -> is_fpclass x, ~fcNegInf & ~fcNan | ||||
4398 | // fcmp one fabs(x), -inf -> is_fpclass x, ~fcNan | ||||
4399 | // fcmp one x, +inf -> is_fpclass x, ~fcPosInf & ~fcNan | ||||
4400 | // fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & ~fcNan | ||||
4401 | // | ||||
4402 | // fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan | ||||
4403 | // fcmp ueq (fabs x), +inf -> is_fpclass x, fcInf|fcNan | ||||
4404 | // fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan | ||||
4405 | // fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan | ||||
4406 | if (ConstRHS->isNegative()) { | ||||
4407 | Mask = ~fcNegInf & ~fcNan; | ||||
4408 | if (IsFabs) | ||||
4409 | Mask = ~fcNan; | ||||
4410 | } else { | ||||
4411 | Mask = ~fcPosInf & ~fcNan; | ||||
4412 | if (IsFabs) | ||||
4413 | Mask &= ~fcNegInf; | ||||
4414 | } | ||||
4415 | |||||
4416 | break; | ||||
4417 | } | ||||
4418 | case FCmpInst::FCMP_OLT: | ||||
4419 | case FCmpInst::FCMP_UGE: { | ||||
4420 | if (ConstRHS->isNegative()) // TODO | ||||
4421 | return {nullptr, fcNone}; | ||||
4422 | |||||
4423 | // fcmp olt fabs(x), +inf -> fcFinite | ||||
4424 | // fcmp uge fabs(x), +inf -> ~fcFinite | ||||
4425 | // fcmp olt x, +inf -> fcFinite|fcNegInf | ||||
4426 | // fcmp uge x, +inf -> ~(fcFinite|fcNegInf) | ||||
4427 | Mask = fcFinite; | ||||
4428 | if (!IsFabs) | ||||
4429 | Mask |= fcNegInf; | ||||
4430 | break; | ||||
4431 | } | ||||
4432 | case FCmpInst::FCMP_OGE: | ||||
4433 | case FCmpInst::FCMP_ULT: { | ||||
4434 | if (ConstRHS->isNegative()) // TODO | ||||
4435 | return {nullptr, fcNone}; | ||||
4436 | |||||
4437 | // fcmp oge fabs(x), +inf -> fcInf | ||||
4438 | // fcmp oge x, +inf -> fcPosInf | ||||
4439 | // fcmp ult fabs(x), +inf -> ~fcInf | ||||
4440 | // fcmp ult x, +inf -> ~fcPosInf | ||||
4441 | Mask = fcPosInf; | ||||
4442 | if (IsFabs) | ||||
4443 | Mask |= fcNegInf; | ||||
4444 | break; | ||||
4445 | } | ||||
4446 | default: | ||||
4447 | return {nullptr, fcNone}; | ||||
4448 | } | ||||
4449 | } else if (ConstRHS->isSmallestNormalized() && !ConstRHS->isNegative()) { | ||||
4450 | // Match pattern that's used in __builtin_isnormal. | ||||
4451 | switch (Pred) { | ||||
4452 | case FCmpInst::FCMP_OLT: | ||||
4453 | case FCmpInst::FCMP_UGE: { | ||||
4454 | // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero | ||||
4455 | // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero | ||||
4456 | // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf | ||||
4457 | // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero) | ||||
4458 | Mask = fcZero | fcSubnormal; | ||||
4459 | if (!IsFabs) | ||||
4460 | Mask |= fcNegNormal | fcNegInf; | ||||
4461 | |||||
4462 | break; | ||||
4463 | } | ||||
4464 | case FCmpInst::FCMP_OGE: | ||||
4465 | case FCmpInst::FCMP_ULT: { | ||||
4466 | // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf | ||||
4467 | // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal | ||||
4468 | // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf) | ||||
4469 | // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal) | ||||
4470 | Mask = fcPosInf | fcPosNormal; | ||||
4471 | if (IsFabs) | ||||
4472 | Mask |= fcNegInf | fcNegNormal; | ||||
4473 | break; | ||||
4474 | } | ||||
4475 | default: | ||||
4476 | return {nullptr, fcNone}; | ||||
4477 | } | ||||
4478 | } else | ||||
4479 | return {nullptr, fcNone}; | ||||
4480 | |||||
4481 | // Invert the comparison for the unordered cases. | ||||
4482 | if (FCmpInst::isUnordered(Pred)) | ||||
4483 | Mask = ~Mask; | ||||
4484 | |||||
4485 | return {Src, Mask}; | ||||
4486 | } | ||||
4487 | |||||
4488 | static FPClassTest computeKnownFPClassFromAssumes(const Value *V, | ||||
4489 | const Query &Q) { | ||||
4490 | FPClassTest KnownFromAssume = fcAllFlags; | ||||
4491 | |||||
4492 | // Try to restrict the floating-point classes based on information from | ||||
4493 | // assumptions. | ||||
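// Illustrative IR (hypothetical values):
//   %ord = fcmp ord double %x, 0.000000e+00
//   call void @llvm.assume(i1 %ord)
// Assuming fcmpToClassTest maps the ordered compare against zero to
// (%x, ~fcNan), the loop below intersects that mask into KnownFromAssume.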
4494 | for (auto &AssumeVH : Q.AC->assumptionsFor(V)) { | ||||
4495 | if (!AssumeVH) | ||||
4496 | continue; | ||||
4497 | CallInst *I = cast<CallInst>(AssumeVH); | ||||
4498 | const Function *F = I->getFunction(); | ||||
4499 | |||||
4500 | assert(F == Q.CxtI->getParent()->getParent() && | ||||
4501 | "Got assumption for the wrong function!"); | ||||
4502 | assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && | ||||
4503 | "must be an assume intrinsic"); | ||||
4504 | |||||
4505 | if (!isValidAssumeForContext(I, Q.CxtI, Q.DT)) | ||||
4506 | continue; | ||||
4507 | |||||
4508 | CmpInst::Predicate Pred; | ||||
4509 | Value *LHS, *RHS; | ||||
4510 | uint64_t ClassVal = 0; | ||||
4511 | if (match(I->getArgOperand(0), m_FCmp(Pred, m_Value(LHS), m_Value(RHS)))) { | ||||
4512 | auto [TestedValue, TestedMask] = | ||||
4513 | fcmpToClassTest(Pred, *F, LHS, RHS, true); | ||||
4514 | // First see if we can fold in fabs/fneg into the test. | ||||
4515 | if (TestedValue == V) | ||||
4516 | KnownFromAssume &= TestedMask; | ||||
4517 | else { | ||||
4518 | // Try again without the lookthrough if we found a different source | ||||
4519 | // value. | ||||
4520 | auto [TestedValue, TestedMask] = | ||||
4521 | fcmpToClassTest(Pred, *F, LHS, RHS, false); | ||||
4522 | if (TestedValue == V) | ||||
4523 | KnownFromAssume &= TestedMask; | ||||
4524 | } | ||||
4525 | } else if (match(I->getArgOperand(0), | ||||
4526 | m_Intrinsic<Intrinsic::is_fpclass>( | ||||
4527 | m_Value(LHS), m_ConstantInt(ClassVal)))) { | ||||
4528 | KnownFromAssume &= static_cast<FPClassTest>(ClassVal); | ||||
4529 | } | ||||
4530 | } | ||||
4531 | |||||
4532 | return KnownFromAssume; | ||||
4533 | } | ||||
4534 | |||||
4535 | void computeKnownFPClass(const Value *V, const APInt &DemandedElts, | ||||
4536 | FPClassTest InterestedClasses, KnownFPClass &Known, | ||||
4537 | unsigned Depth, const Query &Q, | ||||
4538 | const TargetLibraryInfo *TLI); | ||||
4539 | |||||
4540 | static void computeKnownFPClass(const Value *V, KnownFPClass &Known, | ||||
4541 | FPClassTest InterestedClasses, unsigned Depth, | ||||
4542 | const Query &Q, const TargetLibraryInfo *TLI) { | ||||
4543 | auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
4544 | APInt DemandedElts = | ||||
4545 | FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); | ||||
4546 | computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q, TLI); | ||||
4547 | } | ||||
4548 | |||||
4549 | static void computeKnownFPClassForFPTrunc(const Operator *Op, | ||||
4550 | const APInt &DemandedElts, | ||||
4551 | FPClassTest InterestedClasses, | ||||
4552 | KnownFPClass &Known, unsigned Depth, | ||||
4553 | const Query &Q, | ||||
4554 | const TargetLibraryInfo *TLI) { | ||||
4555 | if ((InterestedClasses & fcNan) == fcNone) | ||||
4556 | return; | ||||
4557 | |||||
4558 | KnownFPClass KnownSrc; | ||||
4559 | computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, | ||||
4560 | KnownSrc, Depth + 1, Q, TLI); | ||||
4561 | if (KnownSrc.isKnownNeverNaN()) | ||||
4562 | Known.knownNot(fcNan); | ||||
4563 | |||||
4564 | // Infinity needs a range check. | ||||
4565 | // TODO: Sign bit should be preserved | ||||
4566 | } | ||||
4567 | |||||
4568 | // TODO: Merge implementations of isKnownNeverNaN, isKnownNeverInfinity, | ||||
4569 | // CannotBeNegativeZero, cannotBeOrderedLessThanZero into here. | ||||
4570 | |||||
4571 | void computeKnownFPClass(const Value *V, const APInt &DemandedElts, | ||||
4572 | FPClassTest InterestedClasses, KnownFPClass &Known, | ||||
4573 | unsigned Depth, const Query &Q, | ||||
4574 | const TargetLibraryInfo *TLI) { | ||||
4575 | assert(Known.isUnknown() && "should not be called with known information"); | ||||
4576 | |||||
4577 | if (!DemandedElts) { | ||||
4578 | // No demanded elts, better to assume we don't know anything. | ||||
4579 | Known.resetAll(); | ||||
4580 | return; | ||||
4581 | } | ||||
4582 | |||||
4583 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); | ||||
4584 | |||||
4585 | if (auto *CFP = dyn_cast_or_null<ConstantFP>(V)) { | ||||
4586 | Known.KnownFPClasses = CFP->getValueAPF().classify(); | ||||
4587 | Known.SignBit = CFP->isNegative(); | ||||
4588 | return; | ||||
4589 | } | ||||
4590 | |||||
4591 | // Try to handle fixed width vector constants | ||||
4592 | auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
4593 | const Constant *CV = dyn_cast<Constant>(V); | ||||
4594 | if (VFVTy && CV) { | ||||
4595 | Known.KnownFPClasses = fcNone; | ||||
4596 | |||||
4597 | // For vectors, merge the known FP classes of every element. | ||||
4598 | unsigned NumElts = VFVTy->getNumElements(); | ||||
4599 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
4600 | Constant *Elt = CV->getAggregateElement(i); | ||||
4601 | if (!Elt) { | ||||
4602 | Known = KnownFPClass(); | ||||
4603 | return; | ||||
4604 | } | ||||
4605 | if (isa<UndefValue>(Elt)) | ||||
4606 | continue; | ||||
4607 | auto *CElt = dyn_cast<ConstantFP>(Elt); | ||||
4608 | if (!CElt) { | ||||
4609 | Known = KnownFPClass(); | ||||
4610 | return; | ||||
4611 | } | ||||
4612 | |||||
4613 | KnownFPClass KnownElt{CElt->getValueAPF().classify(), CElt->isNegative()}; | ||||
4614 | Known |= KnownElt; | ||||
4615 | } | ||||
4616 | |||||
4617 | return; | ||||
4618 | } | ||||
4619 | |||||
4620 | FPClassTest KnownNotFromFlags = fcNone; | ||||
4621 | if (const auto *CB = dyn_cast<CallBase>(V)) | ||||
4622 | KnownNotFromFlags |= CB->getRetNoFPClass(); | ||||
4623 | else if (const auto *Arg = dyn_cast<Argument>(V)) | ||||
4624 | KnownNotFromFlags |= Arg->getNoFPClass(); | ||||
4625 | |||||
4626 | const Operator *Op = dyn_cast<Operator>(V); | ||||
4627 | if (const FPMathOperator *FPOp = dyn_cast_or_null<FPMathOperator>(Op)) { | ||||
4628 | if (FPOp->hasNoNaNs()) | ||||
4629 | KnownNotFromFlags |= fcNan; | ||||
4630 | if (FPOp->hasNoInfs()) | ||||
4631 | KnownNotFromFlags |= fcInf; | ||||
4632 | } | ||||
4633 | |||||
4634 | if (Q.AC) { | ||||
4635 | FPClassTest AssumedClasses = computeKnownFPClassFromAssumes(V, Q); | ||||
4636 | KnownNotFromFlags |= ~AssumedClasses; | ||||
4637 | } | ||||
4638 | |||||
4639 | // We no longer need to find out about these bits from inputs if we can | ||||
4640 | // assume this from flags/attributes. | ||||
4641 | InterestedClasses &= ~KnownNotFromFlags; | ||||
4642 | |||||
4643 | auto ClearClassesFromFlags = make_scope_exit([=, &Known] { | ||||
4644 | Known.knownNot(KnownNotFromFlags); | ||||
4645 | }); | ||||
4646 | |||||
4647 | if (!Op) | ||||
4648 | return; | ||||
4649 | |||||
4650 | // All recursive calls that increase depth must come after this. | ||||
4651 | if (Depth == MaxAnalysisRecursionDepth) | ||||
4652 | return; | ||||
4653 | |||||
4654 | const unsigned Opc = Op->getOpcode(); | ||||
4655 | switch (Opc) { | ||||
4656 | case Instruction::FNeg: { | ||||
4657 | computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, | ||||
4658 | Known, Depth + 1, Q, TLI); | ||||
4659 | Known.fneg(); | ||||
4660 | break; | ||||
4661 | } | ||||
4662 | case Instruction::Select: { | ||||
4663 | KnownFPClass Known2; | ||||
4664 | computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedClasses, | ||||
4665 | Known, Depth + 1, Q, TLI); | ||||
4666 | computeKnownFPClass(Op->getOperand(2), DemandedElts, InterestedClasses, | ||||
4667 | Known2, Depth + 1, Q, TLI); | ||||
4668 | Known |= Known2; | ||||
4669 | break; | ||||
4670 | } | ||||
4671 | case Instruction::Call: { | ||||
4672 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op)) { | ||||
4673 | const Intrinsic::ID IID = II->getIntrinsicID(); | ||||
4674 | switch (IID) { | ||||
4675 | case Intrinsic::fabs: { | ||||
4676 | if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) { | ||||
4677 | // If we only care about the sign bit we don't need to inspect the | ||||
4678 | // operand. | ||||
4679 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4680 | InterestedClasses, Known, Depth + 1, Q, TLI); | ||||
4681 | } | ||||
4682 | |||||
4683 | Known.fabs(); | ||||
4684 | break; | ||||
4685 | } | ||||
4686 | case Intrinsic::copysign: { | ||||
4687 | KnownFPClass KnownSign; | ||||
4688 | |||||
4689 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4690 | InterestedClasses, Known, Depth + 1, Q, TLI); | ||||
4691 | computeKnownFPClass(II->getArgOperand(1), DemandedElts, | ||||
4692 | InterestedClasses, KnownSign, Depth + 1, Q, TLI); | ||||
4693 | Known.copysign(KnownSign); | ||||
4694 | break; | ||||
4695 | } | ||||
4696 | case Intrinsic::fma: | ||||
4697 | case Intrinsic::fmuladd: { | ||||
4698 | if ((InterestedClasses & fcNegative) == fcNone) | ||||
4699 | break; | ||||
4700 | |||||
4701 | if (II->getArgOperand(0) != II->getArgOperand(1)) | ||||
4702 | break; | ||||
4703 | |||||
4704 | // The multiply cannot be -0 and therefore the add can't be -0 | ||||
4705 | Known.knownNot(fcNegZero); | ||||
4706 | |||||
4707 | // x * x + y is non-negative if y is non-negative. | ||||
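// e.g. fma(-3.0, -3.0, 2.0) == 11.0, and even with y == +0.0 the sum is
// at worst +0.0; NaN results are not in fcNegative, so the deduction holds.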
4708 | KnownFPClass KnownAddend; | ||||
4709 | computeKnownFPClass(II->getArgOperand(2), DemandedElts, | ||||
4710 | InterestedClasses, KnownAddend, Depth + 1, Q, TLI); | ||||
4711 | |||||
4712 | // TODO: Known sign bit with no nans | ||||
4713 | if (KnownAddend.cannotBeOrderedLessThanZero()) | ||||
4714 | Known.knownNot(fcNegative); | ||||
4715 | break; | ||||
4716 | } | ||||
4717 | case Intrinsic::sin: | ||||
4718 | case Intrinsic::cos: { | ||||
4719 | // Return NaN on infinite inputs. | ||||
4720 | KnownFPClass KnownSrc; | ||||
4721 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4722 | InterestedClasses, KnownSrc, Depth + 1, Q, TLI); | ||||
4723 | Known.knownNot(fcInf); | ||||
4724 | if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity()) | ||||
4725 | Known.knownNot(fcNan); | ||||
4726 | break; | ||||
4727 | } | ||||
4728 | |||||
4729 | case Intrinsic::maxnum: | ||||
4730 | case Intrinsic::minnum: | ||||
4731 | case Intrinsic::minimum: | ||||
4732 | case Intrinsic::maximum: { | ||||
4733 | KnownFPClass KnownLHS, KnownRHS; | ||||
4734 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4735 | InterestedClasses, KnownLHS, Depth + 1, Q, TLI); | ||||
4736 | computeKnownFPClass(II->getArgOperand(1), DemandedElts, | ||||
4737 | InterestedClasses, KnownRHS, Depth + 1, Q, TLI); | ||||
4738 | |||||
4739 | bool NeverNaN = | ||||
4740 | KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN(); | ||||
4741 | Known = KnownLHS | KnownRHS; | ||||
4742 | |||||
4743 | // If either operand is not NaN, the result is not NaN. | ||||
4744 | if (NeverNaN && (IID == Intrinsic::minnum || IID == Intrinsic::maxnum)) | ||||
4745 | Known.knownNot(fcNan); | ||||
4746 | |||||
4747 | if (IID == Intrinsic::maxnum) { | ||||
4748 | // If at least one operand is known to be positive, the result must be | ||||
4749 | // positive. | ||||
4750 | if ((KnownLHS.cannotBeOrderedLessThanZero() && | ||||
4751 | KnownLHS.isKnownNeverNaN()) || | ||||
4752 | (KnownRHS.cannotBeOrderedLessThanZero() && | ||||
4753 | KnownRHS.isKnownNeverNaN())) | ||||
4754 | Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); | ||||
4755 | } else if (IID == Intrinsic::maximum) { | ||||
4756 | // If at least one operand is known to be positive, the result must be | ||||
4757 | // positive. | ||||
4758 | if (KnownLHS.cannotBeOrderedLessThanZero() || | ||||
4759 | KnownRHS.cannotBeOrderedLessThanZero()) | ||||
4760 | Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); | ||||
4761 | } else if (IID == Intrinsic::minnum) { | ||||
4762 | // If at least one operand is known to be negative, the result must be | ||||
4763 | // negative. | ||||
4764 | if ((KnownLHS.cannotBeOrderedGreaterThanZero() && | ||||
4765 | KnownLHS.isKnownNeverNaN()) || | ||||
4766 | (KnownRHS.cannotBeOrderedGreaterThanZero() && | ||||
4767 | KnownRHS.isKnownNeverNaN())) | ||||
4768 | Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask); | ||||
4769 | } else { | ||||
4770 | // If at least one operand is known to be negative, the result must be | ||||
4771 | // negative. | ||||
4772 | if (KnownLHS.cannotBeOrderedGreaterThanZero() || | ||||
4773 | KnownRHS.cannotBeOrderedGreaterThanZero()) | ||||
4774 | Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask); | ||||
4775 | } | ||||
4776 | |||||
4777 | // Fixup zero handling if denormals could be returned as a zero. | ||||
4778 | // | ||||
4779 | // As there's no spec for denormal flushing, be conservative with the | ||||
4780 | // treatment of denormals that could be flushed to zero. For older | ||||
4781 | // subtargets on AMDGPU the min/max instructions would not flush the | ||||
4782 | // output and return the original value. | ||||
4783 | // | ||||
4784 | // TODO: This could be refined based on the sign | ||||
4785 | if ((Known.KnownFPClasses & fcZero) != fcNone && | ||||
4786 | !Known.isKnownNeverSubnormal()) { | ||||
4787 | const Function *Parent = II->getFunction(); | ||||
4788 | DenormalMode Mode = Parent->getDenormalMode( | ||||
4789 | II->getType()->getScalarType()->getFltSemantics()); | ||||
4790 | if (Mode != DenormalMode::getIEEE()) | ||||
4791 | Known.KnownFPClasses |= fcZero; | ||||
4792 | } | ||||
4793 | |||||
4794 | break; | ||||
4795 | } | ||||
4796 | case Intrinsic::canonicalize: { | ||||
4797 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4798 | InterestedClasses, Known, Depth + 1, Q, TLI); | ||||
4799 | // Canonicalize is guaranteed to quiet signaling nans. | ||||
4800 | Known.knownNot(fcSNan); | ||||
4801 | |||||
4802 | // If the parent function flushes denormals, the canonical output cannot | ||||
4803 | // be a denormal. | ||||
4804 | const fltSemantics &FPType = II->getType()->getFltSemantics(); | ||||
4805 | DenormalMode DenormMode = II->getFunction()->getDenormalMode(FPType); | ||||
4806 | if (DenormMode.inputsAreZero() || DenormMode.outputsAreZero()) | ||||
4807 | Known.knownNot(fcSubnormal); | ||||
4808 | |||||
4809 | if (DenormMode.Input == DenormalMode::PositiveZero || | ||||
4810 | (DenormMode.Output == DenormalMode::PositiveZero && | ||||
4811 | DenormMode.Input == DenormalMode::IEEE)) | ||||
4812 | Known.knownNot(fcNegZero); | ||||
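// For example (illustrative attribute readings): under
// denormal-fp-math="preserve-sign,preserve-sign" subnormals are flushed,
// so the canonical result is never fcSubnormal; under a positive-zero
// input mode a flushed subnormal becomes +0.0, ruling out fcNegZero.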
4813 | |||||
4814 | break; | ||||
4815 | } | ||||
4816 | case Intrinsic::trunc: { | ||||
4817 | KnownFPClass KnownSrc; | ||||
4818 | |||||
4819 | FPClassTest InterestedSrcs = InterestedClasses; | ||||
4820 | if (InterestedClasses & fcZero) | ||||
4821 | InterestedSrcs |= fcNormal | fcSubnormal; | ||||
4822 | |||||
4823 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, | ||||
4824 | KnownSrc, Depth + 1, Q, TLI); | ||||
4825 | |||||
4826 | // Integer results cannot be subnormal. | ||||
4827 | Known.knownNot(fcSubnormal); | ||||
4828 | |||||
4829 | // trunc passes through infinities. | ||||
4830 | if (KnownSrc.isKnownNeverPosInfinity()) | ||||
4831 | Known.knownNot(fcPosInf); | ||||
4832 | if (KnownSrc.isKnownNeverNegInfinity()) | ||||
4833 | Known.knownNot(fcNegInf); | ||||
4834 | |||||
4835 | // Non-constrained intrinsics do not guarantee signaling nan quieting. | ||||
4836 | if (KnownSrc.isKnownNeverNaN()) | ||||
4837 | Known.knownNot(fcNan); | ||||
4838 | |||||
4839 | if (KnownSrc.isKnownNever(fcPosNormal)) | ||||
4840 | Known.knownNot(fcPosNormal); | ||||
4841 | |||||
4842 | if (KnownSrc.isKnownNever(fcNegNormal)) | ||||
4843 | Known.knownNot(fcNegNormal); | ||||
4844 | |||||
4845 | if (KnownSrc.isKnownNever(fcPosZero | fcPosSubnormal | fcPosNormal)) | ||||
4846 | Known.knownNot(fcPosZero); | ||||
4847 | |||||
4848 | if (KnownSrc.isKnownNever(fcNegZero | fcNegSubnormal | fcNegNormal)) | ||||
4849 | Known.knownNot(fcNegZero); | ||||
4850 | |||||
4851 | // Sign should be preserved | ||||
4852 | Known.SignBit = KnownSrc.SignBit; | ||||
4853 | break; | ||||
4854 | } | ||||
4855 | case Intrinsic::exp: | ||||
4856 | case Intrinsic::exp2: { | ||||
4857 | Known.knownNot(fcNegative); | ||||
4858 | if ((InterestedClasses & fcNan) == fcNone) | ||||
4859 | break; | ||||
4860 | |||||
4861 | KnownFPClass KnownSrc; | ||||
4862 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4863 | InterestedClasses, KnownSrc, Depth + 1, Q, TLI); | ||||
4864 | if (KnownSrc.isKnownNeverNaN()) { | ||||
4865 | Known.knownNot(fcNan); | ||||
4866 | Known.SignBit = false; | ||||
4867 | } | ||||
4868 | |||||
4869 | break; | ||||
4870 | } | ||||
4871 | case Intrinsic::fptrunc_round: { | ||||
4872 | computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, | ||||
4873 | Known, Depth, Q, TLI); | ||||
4874 | break; | ||||
4875 | } | ||||
4876 | case Intrinsic::log: | ||||
4877 | case Intrinsic::log10: | ||||
4878 | case Intrinsic::log2: | ||||
4879 | case Intrinsic::experimental_constrained_log: | ||||
4880 | case Intrinsic::experimental_constrained_log10: | ||||
4881 | case Intrinsic::experimental_constrained_log2: { | ||||
4882 | // log(+inf) -> +inf | ||||
4883 | // log([+-]0.0) -> -inf | ||||
4884 | // log(-inf) -> nan | ||||
4885 | // log(-x) -> nan | ||||
4886 | if ((InterestedClasses & (fcNan | fcInf)) == fcNone) | ||||
4887 | break; | ||||
4888 | |||||
4889 | FPClassTest InterestedSrcs = InterestedClasses; | ||||
4890 | if ((InterestedClasses & fcNegInf) != fcNone) | ||||
4891 | InterestedSrcs |= fcZero | fcSubnormal; | ||||
4892 | if ((InterestedClasses & fcNan) != fcNone) | ||||
4893 | InterestedSrcs |= fcNan | (fcNegative & ~fcNan); | ||||
4894 | |||||
4895 | KnownFPClass KnownSrc; | ||||
4896 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, | ||||
4897 | KnownSrc, Depth + 1, Q, TLI); | ||||
4898 | |||||
4899 | if (KnownSrc.isKnownNeverPosInfinity()) | ||||
4900 | Known.knownNot(fcPosInf); | ||||
4901 | |||||
4902 | if (KnownSrc.isKnownNeverNaN() && | ||||
4903 | KnownSrc.cannotBeOrderedLessThanZero()) | ||||
4904 | Known.knownNot(fcNan); | ||||
4905 | |||||
4906 | if (KnownSrc.isKnownNeverLogicalZero(*II->getFunction(), II->getType())) | ||||
4907 | Known.knownNot(fcNegInf); | ||||
4908 | |||||
4909 | break; | ||||
4910 | } | ||||
4911 | case Intrinsic::powi: { | ||||
4912 | if ((InterestedClasses & fcNegative) == fcNone) | ||||
4913 | break; | ||||
4914 | |||||
4915 | const Value *Exp = II->getArgOperand(1); | ||||
4916 | unsigned BitWidth = | ||||
4917 | Exp->getType()->getScalarType()->getIntegerBitWidth(); | ||||
4918 | KnownBits ExponentKnownBits(BitWidth); | ||||
4919 | computeKnownBits(Exp, DemandedElts, ExponentKnownBits, Depth + 1, Q); | ||||
4920 | |||||
4921 | if (ExponentKnownBits.Zero[0]) { // Is even | ||||
4922 | Known.knownNot(fcNegative); | ||||
4923 | break; | ||||
4924 | } | ||||
4925 | |||||
4926 | // Given that exp is an integer, here are the | ||||
4927 | // ways that pow can return a negative value: | ||||
4928 | // | ||||
4929 | // pow(-x, exp) --> negative if exp is odd and x is negative. | ||||
4930 | // pow(-0, exp) --> -inf if exp is negative odd. | ||||
4931 | // pow(-0, exp) --> -0 if exp is positive odd. | ||||
4932 | // pow(-inf, exp) --> -0 if exp is negative odd. | ||||
4933 | // pow(-inf, exp) --> -inf if exp is positive odd. | ||||
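// Numeric sanity check (illustrative): powi(-2.0, 3) == -8.0 but
// powi(-2.0, 2) == 4.0, which is why a known-even exponent was already
// enough to rule out fcNegative above.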
4934 | KnownFPClass KnownSrc; | ||||
4935 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative, | ||||
4936 | KnownSrc, Depth + 1, Q, TLI); | ||||
4937 | if (KnownSrc.isKnownNever(fcNegative)) | ||||
4938 | Known.knownNot(fcNegative); | ||||
4939 | break; | ||||
4940 | } | ||||
4941 | case Intrinsic::arithmetic_fence: { | ||||
4942 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4943 | InterestedClasses, Known, Depth + 1, Q, TLI); | ||||
4944 | break; | ||||
4945 | } | ||||
4946 | case Intrinsic::experimental_constrained_sitofp: | ||||
4947 | case Intrinsic::experimental_constrained_uitofp: | ||||
4948 | // Cannot produce nan | ||||
4949 | Known.knownNot(fcNan); | ||||
4950 | |||||
4951 | // sitofp and uitofp turn into +0.0 for zero. | ||||
4952 | Known.knownNot(fcNegZero); | ||||
4953 | |||||
4954 | // Integers cannot be subnormal | ||||
4955 | Known.knownNot(fcSubnormal); | ||||
4956 | |||||
4957 | if (IID == Intrinsic::experimental_constrained_uitofp) | ||||
4958 | Known.signBitMustBeZero(); | ||||
4959 | |||||
4960 | // TODO: Copy inf handling from instructions | ||||
4961 | break; | ||||
4962 | default: | ||||
4963 | break; | ||||
4964 | } | ||||
4965 | } | ||||
4966 | |||||
4967 | break; | ||||
4968 | } | ||||
4969 | case Instruction::FAdd: | ||||
4970 | case Instruction::FSub: { | ||||
4971 | KnownFPClass KnownLHS, KnownRHS; | ||||
4972 | computeKnownFPClass(Op->getOperand(1), DemandedElts, fcNan | fcInf, | ||||
4973 | KnownRHS, Depth + 1, Q, TLI); | ||||
4974 | if (KnownRHS.isKnownNeverNaN()) { | ||||
4975 | // RHS is canonically cheaper to compute. Skip inspecting the LHS if | ||||
4976 | // there's no point. | ||||
4977 | computeKnownFPClass(Op->getOperand(0), DemandedElts, fcNan | fcInf, | ||||
4978 | KnownLHS, Depth + 1, Q, TLI); | ||||
4979 | // Adding positive and negative infinity produces NaN. | ||||
4980 | // TODO: Check sign of infinities. | ||||
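// e.g. (+inf) + (-inf) == NaN while (+inf) + (+inf) == +inf; requiring at
// least one side to be finite sidesteps checking the infinity signs.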
4981 | if (KnownLHS.isKnownNeverNaN() && | ||||
4982 | (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity())) | ||||
4983 | Known.knownNot(fcNan); | ||||
4984 | } | ||||
4985 | |||||
4986 | break; | ||||
4987 | } | ||||
4988 | case Instruction::FMul: { | ||||
4989 | // X * X is always non-negative or a NaN. | ||||
4990 | if (Op->getOperand(0) == Op->getOperand(1)) | ||||
4991 | Known.knownNot(fcNegative); | ||||
4992 | |||||
4993 | if ((InterestedClasses & fcNan) != fcNan) | ||||
4994 | break; | ||||
4995 | |||||
4996 | KnownFPClass KnownLHS, KnownRHS; | ||||
4997 | computeKnownFPClass(Op->getOperand(1), DemandedElts, | ||||
4998 | fcNan | fcInf | fcZero | fcSubnormal, KnownRHS, | ||||
4999 | Depth + 1, Q, TLI); | ||||
5000 | if (KnownRHS.isKnownNeverNaN() && | ||||
5001 | (KnownRHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverZero())) { | ||||
5002 | computeKnownFPClass(Op->getOperand(0), DemandedElts, | ||||
5003 | fcNan | fcInf | fcZero, KnownLHS, Depth + 1, Q, TLI); | ||||
5004 | if (!KnownLHS.isKnownNeverNaN()) | ||||
5005 | break; | ||||
5006 | |||||
5007 | const Function *F = cast<Instruction>(Op)->getFunction(); | ||||
5008 | |||||
5009 | // If neither side can be zero (or nan), fmul never produces NaN. | ||||
5010 | // TODO: Check operand combinations. | ||||
5011 | // e.g. fmul nofpclass(inf nan zero), nofpclass(nan) -> nofpclass(nan) | ||||
5012 | if ((KnownLHS.isKnownNeverInfinity() || | ||||
5013 | KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) && | ||||
5014 | (KnownRHS.isKnownNeverInfinity() || | ||||
5015 | KnownRHS.isKnownNeverLogicalZero(*F, Op->getType()))) | ||||
5016 | Known.knownNot(fcNan); | ||||
5017 | } | ||||
5018 | |||||
5019 | break; | ||||
5020 | } | ||||
5021 | case Instruction::FDiv: | ||||
5022 | case Instruction::FRem: { | ||||
5023 | if (Op->getOperand(0) == Op->getOperand(1)) { | ||||
5024 | // TODO: Could filter out snan if we inspect the operand | ||||
5025 | if (Op->getOpcode() == Instruction::FDiv) { | ||||
5026 | // X / X is always exactly 1.0 or a NaN. | ||||
5027 | Known.KnownFPClasses = fcNan | fcPosNormal; | ||||
5028 | } else { | ||||
5029 | // X % X is always exactly [+-]0.0 or a NaN. | ||||
5030 | Known.KnownFPClasses = fcNan | fcZero; | ||||
5031 | } | ||||
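// Illustrative values: 5.0 / 5.0 == 1.0 and inf / inf == NaN; likewise
// frem(5.0, 5.0) == +0.0 and frem(inf, inf) == NaN.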
5032 | |||||
5033 | break; | ||||
5034 | } | ||||
5035 | |||||
5036 | const bool WantNan = (InterestedClasses & fcNan) != fcNone; | ||||
5037 | const bool WantNegative = (InterestedClasses & fcNegative) != fcNone; | ||||
5038 | const bool WantPositive = | ||||
5039 | Opc == Instruction::FRem && (InterestedClasses & fcPositive) != fcNone; | ||||
5040 | if (!WantNan && !WantNegative && !WantPositive) | ||||
5041 | break; | ||||
5042 | |||||
5043 | KnownFPClass KnownLHS, KnownRHS; | ||||
5044 | |||||
5045 | computeKnownFPClass(Op->getOperand(1), DemandedElts, | ||||
5046 | fcNan | fcInf | fcZero | fcNegative, KnownRHS, | ||||
5047 | Depth + 1, Q, TLI); | ||||
5048 | |||||
5049 | bool KnowSomethingUseful = | ||||
5050 | KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative); | ||||
5051 | |||||
5052 | if (KnowSomethingUseful || WantPositive) { | ||||
5053 | const FPClassTest InterestedLHS = | ||||
5054 | WantPositive ? fcAllFlags | ||||
5055 | : fcNan | fcInf | fcZero | fcSubnormal | fcNegative; | ||||
5056 | |||||
5057 | computeKnownFPClass(Op->getOperand(0), DemandedElts, | ||||
5058 | InterestedClasses & InterestedLHS, KnownLHS, | ||||
5059 | Depth + 1, Q, TLI); | ||||
5060 | } | ||||
5061 | |||||
5062 | const Function *F = cast<Instruction>(Op)->getFunction(); | ||||
5063 | |||||
5064 | if (Op->getOpcode() == Instruction::FDiv) { | ||||
5065 | // Only 0/0, Inf/Inf produce NaN. | ||||
5066 | if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() && | ||||
5067 | (KnownLHS.isKnownNeverInfinity() || | ||||
5068 | KnownRHS.isKnownNeverInfinity()) && | ||||
5069 | (KnownLHS.isKnownNeverLogicalZero(*F, Op->getType()) || | ||||
5070 | KnownRHS.isKnownNeverLogicalZero(*F, Op->getType()))) { | ||||
5071 | Known.knownNot(fcNan); | ||||
5072 | } | ||||
5073 | |||||
5074 | // X / -0.0 is -Inf (or NaN). | ||||
5075 | // +X / +X is +X | ||||
5076 | if (KnownLHS.isKnownNever(fcNegative) && KnownRHS.isKnownNever(fcNegative)) | ||||
5077 | Known.knownNot(fcNegative); | ||||
5078 | } else { | ||||
5079 | // Inf REM x and x REM 0 produce NaN. | ||||
5080 | if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() && | ||||
5081 | KnownLHS.isKnownNeverInfinity() && | ||||
5082 | KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())) { | ||||
5083 | Known.knownNot(fcNan); | ||||
5084 | } | ||||
5085 | |||||
5086 | // The sign for frem is the same as the first operand. | ||||
5087 | if (KnownLHS.isKnownNever(fcNegative)) | ||||
5088 | Known.knownNot(fcNegative); | ||||
5089 | if (KnownLHS.isKnownNever(fcPositive)) | ||||
5090 | Known.knownNot(fcPositive); | ||||
5091 | } | ||||
5092 | |||||
5093 | break; | ||||
5094 | } | ||||
5095 | case Instruction::FPExt: { | ||||
5096 | // Infinity, nan and zero propagate from source. | ||||
5097 | computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, | ||||
5098 | Known, Depth + 1, Q, TLI); | ||||
5099 | |||||
5100 | const fltSemantics &DstTy = | ||||
5101 | Op->getType()->getScalarType()->getFltSemantics(); | ||||
5102 | const fltSemantics &SrcTy = | ||||
5103 | Op->getOperand(0)->getType()->getScalarType()->getFltSemantics(); | ||||
5104 | |||||
5105 | // All subnormal inputs should be in the normal range in the result type. | ||||
5106 | if (APFloat::isRepresentableAsNormalIn(SrcTy, DstTy)) | ||||
5107 | Known.knownNot(fcSubnormal); | ||||
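// e.g. the smallest half subnormal, 2^-24, is a normal float, so an
// fpext from half to float can never produce an fcSubnormal result.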
5108 | |||||
5109 | // Sign bit of a nan isn't guaranteed. | ||||
5110 | if (!Known.isKnownNeverNaN()) | ||||
5111 | Known.SignBit = std::nullopt; | ||||
5112 | break; | ||||
5113 | } | ||||
5114 | case Instruction::FPTrunc: { | ||||
5115 | computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known, | ||||
5116 | Depth, Q, TLI); | ||||
5117 | break; | ||||
5118 | } | ||||
5119 | case Instruction::SIToFP: | ||||
5120 | case Instruction::UIToFP: { | ||||
5121 | // Cannot produce nan | ||||
5122 | Known.knownNot(fcNan); | ||||
5123 | |||||
5124 | // Integers cannot be subnormal | ||||
5125 | Known.knownNot(fcSubnormal); | ||||
5126 | |||||
5127 | // sitofp and uitofp turn into +0.0 for zero. | ||||
5128 | Known.knownNot(fcNegZero); | ||||
5129 | if (Op->getOpcode() == Instruction::UIToFP) | ||||
5130 | Known.signBitMustBeZero(); | ||||
5131 | |||||
5132 | if (InterestedClasses & fcInf) { | ||||
5133 | // Get width of largest magnitude integer (remove a bit if signed). | ||||
5134 | // This still works for a signed minimum value because the largest FP | ||||
5135 | // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx). | ||||
5136 | int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
5137 | if (Op->getOpcode() == Instruction::SIToFP) | ||||
5138 | --IntSize; | ||||
5139 | |||||
5140 | // If the exponent of the largest finite FP value can hold the largest | ||||
5141 | // integer, the result of the cast must be finite. | ||||
5142 | Type *FPTy = Op->getType()->getScalarType(); | ||||
5143 | if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize) | ||||
5144 | Known.knownNot(fcInf); | ||||
5145 | } | ||||
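// e.g. uitofp i32 -> float is always finite (127 >= 32), while
// uitofp i128 -> float is not: 2^128 - 1 rounds up to 2^128, one past the
// largest finite float (illustrative boundary case).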
5146 | |||||
5147 | break; | ||||
5148 | } | ||||
5149 | case Instruction::ExtractElement: { | ||||
5150 | // Look through extract element. If the index is non-constant or | ||||
5151 | // out-of-range demand all elements, otherwise just the extracted element. | ||||
5152 | const Value *Vec = Op->getOperand(0); | ||||
5153 | const Value *Idx = Op->getOperand(1); | ||||
5154 | auto *CIdx = dyn_cast<ConstantInt>(Idx); | ||||
5155 | |||||
5156 | if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { | ||||
5157 | unsigned NumElts = VecTy->getNumElements(); | ||||
5158 | APInt DemandedVecElts = APInt::getAllOnes(NumElts); | ||||
5159 | if (CIdx && CIdx->getValue().ult(NumElts)) | ||||
5160 | DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); | ||||
5161 | return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known, | ||||
5162 | Depth + 1, Q, TLI); | ||||
5163 | } | ||||
5164 | |||||
5165 | break; | ||||
5166 | } | ||||
5167 | case Instruction::InsertElement: { | ||||
5168 | if (isa<ScalableVectorType>(Op->getType())) | ||||
5169 | return; | ||||
5170 | |||||
5171 | const Value *Vec = Op->getOperand(0); | ||||
5172 | const Value *Elt = Op->getOperand(1); | ||||
5173 | auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2)); | ||||
5174 | // Early out if the index is non-constant or out-of-range. | ||||
5175 | unsigned NumElts = DemandedElts.getBitWidth(); | ||||
5176 | if (!CIdx || CIdx->getValue().uge(NumElts)) | ||||
5177 | return; | ||||
5178 | |||||
5179 | unsigned EltIdx = CIdx->getZExtValue(); | ||||
5180 | // Do we demand the inserted element? | ||||
5181 | if (DemandedElts[EltIdx]) { | ||||
5182 | computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q, TLI); | ||||
5183 | // If we don't know any bits, early out. | ||||
5184 | if (Known.isUnknown()) | ||||
5185 | break; | ||||
5186 | } else { | ||||
5187 | Known.KnownFPClasses = fcNone; | ||||
5188 | } | ||||
5189 | |||||
5190 | // We don't need the base vector element that has been inserted. | ||||
5191 | APInt DemandedVecElts = DemandedElts; | ||||
5192 | DemandedVecElts.clearBit(EltIdx); | ||||
5193 | if (!!DemandedVecElts) { | ||||
5194 | KnownFPClass Known2; | ||||
5195 | computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2, | ||||
5196 | Depth + 1, Q, TLI); | ||||
5197 | Known |= Known2; | ||||
5198 | } | ||||
5199 | |||||
5200 | break; | ||||
5201 | } | ||||
5202 | case Instruction::ShuffleVector: { | ||||
5203 | // For undef elements, we don't know anything about the common state of | ||||
5204 | // the shuffle result. | ||||
5205 | APInt DemandedLHS, DemandedRHS; | ||||
5206 | auto *Shuf = dyn_cast<ShuffleVectorInst>(Op); | ||||
5207 | if (!Shuf || !getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) | ||||
5208 | return; | ||||
5209 | |||||
5210 | if (!!DemandedLHS) { | ||||
5211 | const Value *LHS = Shuf->getOperand(0); | ||||
5212 | computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known, | ||||
5213 | Depth + 1, Q, TLI); | ||||
5214 | |||||
5215 | // If we don't know any bits, early out. | ||||
5216 | if (Known.isUnknown()) | ||||
5217 | break; | ||||
5218 | } else { | ||||
5219 | Known.KnownFPClasses = fcNone; | ||||
5220 | } | ||||
5221 | |||||
5222 | if (!!DemandedRHS) { | ||||
5223 | KnownFPClass Known2; | ||||
5224 | const Value *RHS = Shuf->getOperand(1); | ||||
5225 | computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2, | ||||
5226 | Depth + 1, Q, TLI); | ||||
5227 | Known |= Known2; | ||||
5228 | } | ||||
5229 | |||||
5230 | break; | ||||
5231 | } | ||||
5232 | case Instruction::ExtractValue: { | ||||
5233 | computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, | ||||
5234 | Known, Depth + 1, Q, TLI); | ||||
5235 | break; | ||||
5236 | } | ||||
5237 | default: | ||||
5238 | break; | ||||
5239 | } | ||||
5240 | } | ||||
5241 | |||||
5242 | KnownFPClass llvm::computeKnownFPClass( | ||||
5243 | const Value *V, const APInt &DemandedElts, const DataLayout &DL, | ||||
5244 | FPClassTest InterestedClasses, unsigned Depth, const TargetLibraryInfo *TLI, | ||||
5245 | AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, | ||||
5246 | OptimizationRemarkEmitter *ORE, bool UseInstrInfo) { | ||||
5247 | KnownFPClass KnownClasses; | ||||
5248 | ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth, | ||||
5249 | Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE), | ||||
5250 | TLI); | ||||
5251 | return KnownClasses; | ||||
5252 | } | ||||
5253 | |||||
5254 | KnownFPClass | ||||
5255 | llvm::computeKnownFPClass(const Value *V, const DataLayout &DL, | ||||
5256 | FPClassTest InterestedClasses, unsigned Depth, | ||||
5257 | const TargetLibraryInfo *TLI, AssumptionCache *AC, | ||||
5258 | const Instruction *CxtI, const DominatorTree *DT, | ||||
5259 | OptimizationRemarkEmitter *ORE, bool UseInstrInfo) { | ||||
5260 | KnownFPClass Known; | ||||
5261 | ::computeKnownFPClass(V, Known, InterestedClasses, Depth, | ||||
5262 | Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE), | ||||
5263 | TLI); | ||||
5264 | return Known; | ||||
5265 | } | ||||
5266 | |||||
5267 | Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) { | ||||
5268 | |||||
5269 | // All byte-wide stores are splatable, even of arbitrary variables. | ||||
5270 | if (V->getType()->isIntegerTy(8)) | ||||
5271 | return V; | ||||
5272 | |||||
5273 | LLVMContext &Ctx = V->getContext(); | ||||
5274 | |||||
5275 | // Undefs don't care. | ||||
5276 | auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx)); | ||||
5277 | if (isa<UndefValue>(V)) | ||||
5278 | return UndefInt8; | ||||
5279 | |||||
5280 | // Return Undef for zero-sized type. | ||||
5281 | if (!DL.getTypeStoreSize(V->getType()).isNonZero()) | ||||
5282 | return UndefInt8; | ||||
5283 | |||||
5284 | Constant *C = dyn_cast<Constant>(V); | ||||
5285 | if (!C) { | ||||
5286 | // Conceptually, we could handle things like: | ||||
5287 | // %a = zext i8 %X to i16 | ||||
5288 | // %b = shl i16 %a, 8 | ||||
5289 | // %c = or i16 %a, %b | ||||
5290 | // but until there is an example that actually needs this, it doesn't seem | ||||
5291 | // worth worrying about. | ||||
5292 | return nullptr; | ||||
5293 | } | ||||
5294 | |||||
5295 | // Handle 'null' ConstantArrayZero etc. | ||||
5296 | if (C->isNullValue()) | ||||
5297 | return Constant::getNullValue(Type::getInt8Ty(Ctx)); | ||||
5298 | |||||
5299 | // Constant floating-point values can be handled as integer values if the | ||||
5300 | // corresponding integer value is "byteable". An important case is 0.0. | ||||
5301 | if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { | ||||
5302 | Type *Ty = nullptr; | ||||
5303 | if (CFP->getType()->isHalfTy()) | ||||
5304 | Ty = Type::getInt16Ty(Ctx); | ||||
5305 | else if (CFP->getType()->isFloatTy()) | ||||
5306 | Ty = Type::getInt32Ty(Ctx); | ||||
5307 | else if (CFP->getType()->isDoubleTy()) | ||||
5308 | Ty = Type::getInt64Ty(Ctx); | ||||
5309 | // Don't handle long double formats, which have strange constraints. | ||||
5310 | return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL) | ||||
5311 | : nullptr; | ||||
5312 | } | ||||
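// Illustrative cases for the bitcast path above: double 0.0 becomes i64 0
// and splats to the byte 0, while -0.0 becomes 0x8000000000000000, whose
// bytes are not all equal, so it yields nullptr.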
5313 | |||||
5314 | // We can handle constant integers that are multiple of 8 bits. | ||||
5315 | if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { | ||||
5316 | if (CI->getBitWidth() % 8 == 0) { | ||||
5317 | assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); | ||||
5318 | if (!CI->getValue().isSplat(8)) | ||||
5319 | return nullptr; | ||||
5320 | return ConstantInt::get(Ctx, CI->getValue().trunc(8)); | ||||
5321 | } | ||||
5322 | } | ||||
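// e.g. i32 0xAAAAAAAA splats to the byte 0xAA, while i32 0x12345678 has
// no common byte and yields nullptr (illustrative values).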
5323 | |||||
5324 | if (auto *CE = dyn_cast<ConstantExpr>(C)) { | ||||
5325 | if (CE->getOpcode() == Instruction::IntToPtr) { | ||||
5326 | if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) { | ||||
5327 | unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace()); | ||||
5328 | return isBytewiseValue( | ||||
5329 | ConstantExpr::getIntegerCast(CE->getOperand(0), | ||||
5330 | Type::getIntNTy(Ctx, BitWidth), false), | ||||
5331 | DL); | ||||
5332 | } | ||||
5333 | } | ||||
5334 | } | ||||
5335 | |||||
5336 | auto Merge = [&](Value *LHS, Value *RHS) -> Value * { | ||||
5337 | if (LHS == RHS) | ||||
5338 | return LHS; | ||||
5339 | if (!LHS || !RHS) | ||||
5340 | return nullptr; | ||||
5341 | if (LHS == UndefInt8) | ||||
5342 | return RHS; | ||||
5343 | if (RHS == UndefInt8) | ||||
5344 | return LHS; | ||||
5345 | return nullptr; | ||||
5346 | }; | ||||
5347 | |||||
5348 | if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) { | ||||
5349 | Value *Val = UndefInt8; | ||||
5350 | for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I) | ||||
5351 | if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL)))) | ||||
5352 | return nullptr; | ||||
5353 | return Val; | ||||
5354 | } | ||||
5355 | |||||
5356 | if (isa<ConstantAggregate>(C)) { | ||||
5357 | Value *Val = UndefInt8; | ||||
5358 | for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) | ||||
5359 | if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL)))) | ||||
5360 | return nullptr; | ||||
5361 | return Val; | ||||
5362 | } | ||||
5363 | |||||
5364 | // Don't try to handle the handful of other constants. | ||||
5365 | return nullptr; | ||||
5366 | } | ||||
5367 | |||||
5368 | // This is the recursive version of BuildSubAggregate. It takes a few different | ||||
5369 | // arguments. Idxs is the index within the nested struct From that we are | ||||
5370 | // looking at now (which is of type IndexedType). IdxSkip is the number of | ||||
5371 | // indices from Idxs that should be left out when inserting into the resulting | ||||
5372 | // struct. To is the result struct built so far, new insertvalue instructions | ||||
5373 | // build on that. | ||||
5374 | static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, | ||||
5375 | SmallVectorImpl<unsigned> &Idxs, | ||||
5376 | unsigned IdxSkip, | ||||
5377 | Instruction *InsertBefore) { | ||||
5378 | StructType *STy = dyn_cast<StructType>(IndexedType); | ||||
| |||||
5379 | if (STy
| ||||
5380 | // Save the original To argument so we can modify it | ||||
5381 | Value *OrigTo = To; | ||||
5382 | // General case, the type indexed by Idxs is a struct | ||||
5383 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { | ||||
5384 | // Process each struct element recursively | ||||
5385 | Idxs.push_back(i); | ||||
5386 | Value *PrevTo = To; | ||||
5387 | To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, | ||||
5388 | InsertBefore); | ||||
5389 | Idxs.pop_back(); | ||||
5390 | if (!To) { | ||||
5391 | // Couldn't find any inserted value for this index? Cleanup | ||||
5392 | while (PrevTo != OrigTo) { | ||||
5393 | InsertValueInst* Del = cast<InsertValueInst>(PrevTo); | ||||
5394 | PrevTo = Del->getAggregateOperand(); | ||||
5395 | Del->eraseFromParent(); | ||||
5396 | } | ||||
5397 | // Stop processing elements | ||||
5398 | break; | ||||
5399 | } | ||||
5400 | } | ||||
5401 | // If we successfully found a value for each of our subaggregates | ||||
5402 | if (To) | ||||
5403 | return To; | ||||
5404 | } | ||||
5405 | // Base case, the type indexed by SourceIdxs is not a struct, or not all of | ||||
5406 | // the struct's elements had a value that was inserted directly. In the latter | ||||
5407 | // case, perhaps we can't determine each of the subelements individually, but | ||||
5408 | // we might be able to find the complete struct somewhere. | ||||
5409 | |||||
5410 | // Find the value that is at that particular spot | ||||
5411 | Value *V = FindInsertedValue(From, Idxs); | ||||
5412 | |||||
5413 | if (!V) | ||||
5414 | return nullptr; | ||||
5415 | |||||
5416 | // Insert the value in the new (sub) aggregate | ||||
5417 | return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp", | ||||
5418 | InsertBefore); | ||||
5419 | } | ||||
5420 | |||||
5421 | // This helper takes a nested struct and extracts a part of it (which is again a | ||||
5422 | // struct) into a new value. For example, given the struct: | ||||
5423 | // { a, { b, { c, d }, e } } | ||||
5424 | // and the indices "1, 1" this returns | ||||
5425 | // { c, d }. | ||||
5426 | // | ||||
5427 | // It does this by inserting an insertvalue for each element in the resulting | ||||
5428 | // struct, as opposed to just inserting a single struct. This will only work if | ||||
5429 | // each of the elements of the substruct is known (i.e., inserted into From by an | ||||
5430 | // insertvalue instruction somewhere). | ||||
5431 | // | ||||
5432 | // All inserted insertvalue instructions are inserted before InsertBefore | ||||
5433 | static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, | ||||
5434 | Instruction *InsertBefore) { | ||||
5435 | assert(InsertBefore && "Must have someplace to insert!"); | ||||
5436 | Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), | ||||
5437 | idx_range); | ||||
5438 | Value *To = PoisonValue::get(IndexedType); | ||||
5439 | SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); | ||||
5440 | unsigned IdxSkip = Idxs.size(); | ||||
5441 | |||||
5442 | return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); | ||||
5443 | } | ||||
5444 | |||||
5445 | /// Given an aggregate and a sequence of indices, see if the scalar value | ||||
5446 | /// indexed is already around as a register, for example if it was inserted | ||||
5447 | /// directly into the aggregate. | ||||
5448 | /// | ||||
5449 | /// If InsertBefore is not null, this function will duplicate (modified) | ||||
5450 | /// insertvalues when a part of a nested struct is extracted. | ||||
5451 | Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, | ||||
5452 | Instruction *InsertBefore) { | ||||
5453 | // Nothing to index? Just return V then (this is useful at the end of our | ||||
5454 | // recursion). | ||||
5455 | if (idx_range.empty()) | ||||
5456 | return V; | ||||
5457 | // We have indices, so V should have an indexable type. | ||||
5458 | assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && | ||||
5459 | "Not looking at a struct or array?"); | ||||
5460 | assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && | ||||
5461 | "Invalid indices for type?"); | ||||
5462 | |||||
5463 | if (Constant *C = dyn_cast<Constant>(V)) { | ||||
5464 | C = C->getAggregateElement(idx_range[0]); | ||||
5465 | if (!C) return nullptr; | ||||
5466 | return FindInsertedValue(C, idx_range.slice(1), InsertBefore); | ||||
5467 | } | ||||
5468 | |||||
5469 | if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { | ||||
5470 | // Loop the indices for the insertvalue instruction in parallel with the | ||||
5471 | // requested indices | ||||
5472 | const unsigned *req_idx = idx_range.begin(); | ||||
5473 | for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); | ||||
5474 | i != e; ++i, ++req_idx) { | ||||
5475 | if (req_idx == idx_range.end()) { | ||||
5476 | // We can't handle this without inserting insertvalues | ||||
5477 | if (!InsertBefore) | ||||
5478 | return nullptr; | ||||
5479 | |||||
5480 | // The requested index identifies a part of a nested aggregate. Handle | ||||
5481 | // this specially. For example, | ||||
5482 | // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0 | ||||
5483 | // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1 | ||||
5484 | // %C = extractvalue {i32, { i32, i32 } } %B, 1 | ||||
5485 | // This can be changed into | ||||
5486 | // %A = insertvalue {i32, i32 } undef, i32 10, 0 | ||||
5487 | // %C = insertvalue {i32, i32 } %A, i32 11, 1 | ||||
5488 | // which allows the unused 0,0 element from the nested struct to be | ||||
5489 | // removed. | ||||
5490 | return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx), | ||||
5491 | InsertBefore); | ||||
5492 | } | ||||
5493 | |||||
5494 | // This insertvalue inserts something other than what we are looking for. | ||||
5495 | // See if the (aggregate) value inserted into has the value we are | ||||
5496 | // looking for, then. | ||||
5497 | if (*req_idx != *i) | ||||
5498 | return FindInsertedValue(I->getAggregateOperand(), idx_range, | ||||
5499 | InsertBefore); | ||||
5500 | } | ||||
5501 | // If we end up here, the indices of the insertvalue match with those | ||||
5502 | // requested (though possibly only partially). Now we recursively look at | ||||
5503 | // the inserted value, passing any remaining indices. | ||||
5504 | return FindInsertedValue(I->getInsertedValueOperand(), | ||||
5505 | ArrayRef(req_idx, idx_range.end()), InsertBefore); | ||||
5506 | } | ||||
5507 | |||||
5508 | if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { | ||||
5509 | // If we're extracting a value from an aggregate that was extracted from | ||||
5510 | // something else, we can extract from that something else directly instead. | ||||
5511 | // However, we will need to chain I's indices with the requested indices. | ||||
5512 | |||||
5513 | // Calculate the number of indices required | ||||
5514 | unsigned size = I->getNumIndices() + idx_range.size(); | ||||
5515 | // Allocate some space to put the new indices in | ||||
5516 | SmallVector<unsigned, 5> Idxs; | ||||
5517 | Idxs.reserve(size); | ||||
5518 | // Add indices from the extract value instruction | ||||
5519 | Idxs.append(I->idx_begin(), I->idx_end()); | ||||
5520 | |||||
5521 | // Add requested indices | ||||
5522 | Idxs.append(idx_range.begin(), idx_range.end()); | ||||
5523 | |||||
5524 | assert(Idxs.size() == size && | ||||
5525 | "Number of indices added not correct?"); | ||||
5526 | |||||
5527 | return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); | ||||
5528 | } | ||||
5529 | // Otherwise, we don't know (such as, extracting from a function return value | ||||
5530 | // or load instruction) | ||||
5531 | return nullptr; | ||||
5532 | } | ||||
5533 | |||||
5534 | bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP, | ||||
5535 | unsigned CharSize) { | ||||
5536 | // Make sure the GEP has exactly three arguments. | ||||
5537 | if (GEP->getNumOperands() != 3) | ||||
5538 | return false; | ||||
5539 | |||||
5540 | // Make sure the index-ee is a pointer to an array of \p CharSize | ||||
5541 | // integers. | ||||
5542 | ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType()); | ||||
5543 | if (!AT || !AT->getElementType()->isIntegerTy(CharSize)) | ||||
5544 | return false; | ||||
5545 | |||||
5546 | // Check to make sure that the first operand of the GEP is an integer and | ||||
5547 | // has value 0 so that we are sure we're indexing into the initializer. | ||||
5548 | const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); | ||||
5549 | if (!FirstIdx || !FirstIdx->isZero()) | ||||
5550 | return false; | ||||
5551 | |||||
5552 | return true; | ||||
5553 | } | ||||
5554 | |||||
5555 | // If V refers to an initialized global constant, set Slice either to | ||||
5556 | // its initializer if the size of its elements equals ElementSize, or, | ||||
5557 | // for ElementSize == 8, to its representation as an array of unsigned | ||||
5558 | // char. Return true on success. | ||||
5559 | // Offset is measured in units of ElementSize-sized elements. | ||||
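// Illustrative use (hypothetical global): given
//   @s = private constant [6 x i8] c"hello\00"
// a call getConstantDataArrayInfo(@s, Slice, 8, /*Offset=*/1) succeeds
// with Slice.Array = the initializer, Slice.Offset = 1, Slice.Length = 5.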
5560 | bool llvm::getConstantDataArrayInfo(const Value *V, | ||||
5561 | ConstantDataArraySlice &Slice, | ||||
5562 | unsigned ElementSize, uint64_t Offset) { | ||||
5563 | assert(V && "V should not be null.")(static_cast <bool> (V && "V should not be null." ) ? void (0) : __assert_fail ("V && \"V should not be null.\"" , "llvm/lib/Analysis/ValueTracking.cpp", 5563, __extension__ __PRETTY_FUNCTION__ )); | ||||
5564 | assert((ElementSize % 8) == 0 &&(static_cast <bool> ((ElementSize % 8) == 0 && "ElementSize expected to be a multiple of the size of a byte." ) ? void (0) : __assert_fail ("(ElementSize % 8) == 0 && \"ElementSize expected to be a multiple of the size of a byte.\"" , "llvm/lib/Analysis/ValueTracking.cpp", 5565, __extension__ __PRETTY_FUNCTION__ )) | ||||
5565 | "ElementSize expected to be a multiple of the size of a byte.")(static_cast <bool> ((ElementSize % 8) == 0 && "ElementSize expected to be a multiple of the size of a byte." ) ? void (0) : __assert_fail ("(ElementSize % 8) == 0 && \"ElementSize expected to be a multiple of the size of a byte.\"" , "llvm/lib/Analysis/ValueTracking.cpp", 5565, __extension__ __PRETTY_FUNCTION__ )); | ||||
5566 | unsigned ElementSizeInBytes = ElementSize / 8; | ||||
5567 | |||||
5568 | // Drill down into the pointer expression V, ignoring any intervening | ||||
5569 | // casts, and determine the identity of the object it references along | ||||
5570 | // with the cumulative byte offset into it. | ||||
5571 | const GlobalVariable *GV = | ||||
5572 | dyn_cast<GlobalVariable>(getUnderlyingObject(V)); | ||||
5573 | if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) | ||||
5574 | // Fail if V is not based on constant global object. | ||||
5575 | return false; | ||||
5576 | |||||
5577 | const DataLayout &DL = GV->getParent()->getDataLayout(); | ||||
5578 | APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0); | ||||
5579 | |||||
5580 | if (GV != V->stripAndAccumulateConstantOffsets(DL, Off, | ||||
5581 | /*AllowNonInbounds*/ true)) | ||||
5582 | // Fail if a constant offset could not be determined. | ||||
5583 | return false; | ||||
5584 | |||||
5585 | uint64_t StartIdx = Off.getLimitedValue(); | ||||
5586 | if (StartIdx == UINT64_MAX) | ||||
5587 | // Fail if the constant offset is excessive. | ||||
5588 | return false; | ||||
5589 | |||||
5590 | // Off/StartIdx is in the unit of bytes. So we need to convert to number of | ||||
5591 | // elements. Simply bail out if that isn't possible. | ||||
5592 | if ((StartIdx % ElementSizeInBytes) != 0) | ||||
5593 | return false; | ||||
5594 | |||||
5595 | Offset += StartIdx / ElementSizeInBytes; | ||||
5596 | ConstantDataArray *Array = nullptr; | ||||
5597 | ArrayType *ArrayTy = nullptr; | ||||
5598 | |||||
5599 | if (GV->getInitializer()->isNullValue()) { | ||||
5600 | Type *GVTy = GV->getValueType(); | ||||
5601 | uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue(); | ||||
5602 | uint64_t Length = SizeInBytes / ElementSizeInBytes; | ||||
5603 | |||||
5604 | Slice.Array = nullptr; | ||||
5605 | Slice.Offset = 0; | ||||
5606 | // Return an empty Slice for undersized constants to let callers | ||||
5607 | // transform even undefined library calls into simpler, well-defined | ||||
5608 | // expressions. This is preferable to making the calls, although it | ||||
5609 | // prevents sanitizers from detecting such calls. | ||||
5610 | Slice.Length = Length < Offset ? 0 : Length - Offset; | ||||
5611 | return true; | ||||
5612 | } | ||||

  auto *Init = const_cast<Constant *>(GV->getInitializer());
  if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
    Type *InitElTy = ArrayInit->getElementType();
    if (InitElTy->isIntegerTy(ElementSize)) {
      // If Init is an initializer for an array of the expected type
      // and size, use it as is.
      Array = ArrayInit;
      ArrayTy = ArrayInit->getType();
    }
  }

  if (!Array) {
    if (ElementSize != 8)
      // TODO: Handle conversions to larger integral types.
      return false;

    // Otherwise extract the portion of the initializer starting
    // at Offset as an array of bytes, and reset Offset.
    Init = ReadByteArrayFromGlobal(GV, Offset);
    if (!Init)
      return false;

    Offset = 0;
    Array = dyn_cast<ConstantDataArray>(Init);
    ArrayTy = dyn_cast<ArrayType>(Init->getType());
  }

  // Guard against an initializer whose type is not an ArrayType; without
  // this check the dereference below is flagged by the static analyzer as a
  // call through a potentially null pointer.
  if (!ArrayTy)
    return false;

  uint64_t NumElts = ArrayTy->getArrayNumElements();
  if (Offset > NumElts)
    return false;

  Slice.Array = Array;
  Slice.Offset = Offset;
  Slice.Length = NumElts - Offset;
  return true;
}
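
// Illustrative usage sketch (editorial addition, compiled out; not part of
// the original ValueTracking.cpp). It shows how a caller might query the
// slice for a constant global string. The module setup and the helper name
// `exampleQuerySlice` are assumptions for illustration only.
#if 0
static void exampleQuerySlice(llvm::LLVMContext &Ctx) {
  llvm::Module M("slice-demo", Ctx);
  // @str = private constant [6 x i8] c"hello\00"
  llvm::Constant *Init =
      llvm::ConstantDataArray::getString(Ctx, "hello", /*AddNull=*/true);
  auto *GV = new llvm::GlobalVariable(M, Init->getType(), /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Init,
                                      "str");
  llvm::ConstantDataArraySlice Slice;
  // For i8 data ElementSize is 8 bits; on success the slice spans all six
  // bytes of the initializer starting at offset 0.
  if (llvm::getConstantDataArrayInfo(GV, Slice, /*ElementSize=*/8))
    assert(Slice.Offset == 0 && Slice.Length == 6);
}
#endif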

/// Extract bytes from the initializer of the constant array V, which need
/// not be a nul-terminated string. On success, store the bytes in Str and
/// return true. When TrimAtNul is set, Str will contain only the bytes up
/// to but not including the first nul. Return false on failure.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 bool TrimAtNul) {
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, 8))
    return false;

  if (Slice.Array == nullptr) {
    if (TrimAtNul) {
      // Return a nul-terminated string even for an empty Slice. This is
      // safe because all existing SimplifyLibcalls callers require string
      // arguments and the behavior of the functions they fold is undefined
      // otherwise. Folding the calls this way is preferable to making
      // the undefined library calls, even though it prevents sanitizers
      // from reporting such calls.
      Str = StringRef();
      return true;
    }
    if (Slice.Length == 1) {
      Str = StringRef("", 1);
      return true;
    }
    // We cannot instantiate a StringRef as we do not have an appropriate
    // string of 0s at hand.
    return false;
  }

  // Start out with the entire array in the StringRef.
  Str = Slice.Array->getAsString();
  // Skip over 'offset' bytes.
  Str = Str.substr(Slice.Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we just return the whole end of string. The client may know
    // some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}
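
// Illustrative usage sketch (editorial addition, compiled out). A folding
// pass might read a constant string argument like this; `Ptr` stands for a
// pointer operand based on a constant global such as c"hello\00world\00",
// and the helper name `exampleReadString` is an assumption.
#if 0
static bool exampleReadString(const llvm::Value *Ptr) {
  llvm::StringRef S;
  // With TrimAtNul left at its default of true, S holds the bytes up to but
  // not including the first nul, i.e. "hello" for the initializer above.
  if (!llvm::getConstantStringInfo(Ptr, S))
    return false;
  return S == "hello";
}
#endif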

// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(const Value *V,
                                 SmallPtrSetImpl<const PHINode*> &PHIs,
                                 unsigned CharSize) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN).second)
      return ~0ULL; // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (Value *IncValue : PN->incoming_values()) {
      uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0; // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, CharSize))
    return 0;

  if (Slice.Array == nullptr)
    // Zeroinitializer (including an empty one).
    return 1;

  // Search for the first nul character. Return a conservative result even
  // when there is no nul. This is safe since otherwise the string function
  // being folded, such as strlen, is undefined, and folding can be preferable
  // to emitting the undefined library call.
  unsigned NullIndex = 0;
  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
    if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
      break;
  }

  return NullIndex + 1;
}

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
  if (!V->getType()->isPointerTy())
    return 0;

  SmallPtrSet<const PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
  // return the length of an empty string (1) instead.
  return Len == ~0ULL ? 1 : Len;
}
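
// Illustrative usage sketch (editorial addition, compiled out). Because
// GetStringLength returns strlen+1 (and 0 for "unknown"), a caller recovers
// strlen by subtracting one; the helper name `exampleStrLen` and the assumed
// c"hi\00" operand are illustrations only.
#if 0
static uint64_t exampleStrLen(const llvm::Value *Ptr) {
  if (uint64_t LenPlusNul = llvm::GetStringLength(Ptr))
    return LenPlusNul - 1; // 2 for a pointer to c"hi\00"
  return ~0ULL;            // length could not be computed
}
#endif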

const Value *
llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                           bool MustPreserveNullness) {
  assert(Call &&
         "getArgumentAliasingToReturnedPointer only works on nonnull calls");
  if (const Value *RV = Call->getReturnedArgOperand())
    return RV;
  // This can be used only as an aliasing property.
  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
          Call, MustPreserveNullness))
    return Call->getArgOperand(0);
  return nullptr;
}

bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness) {
  switch (Call->getIntrinsicID()) {
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::aarch64_irg:
  case Intrinsic::aarch64_tagp:
    return true;
  case Intrinsic::ptrmask:
    return !MustPreserveNullness;
  default:
    return false;
  }
}
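
// Illustrative usage sketch (editorial addition, compiled out). It shows the
// kind of returned-argument walk a caller such as an underlying-object
// analysis might perform; the loop shape and the helper name
// `exampleStripAliasingCalls` are simplified assumptions, not the actual
// getUnderlyingObject implementation.
#if 0
static const llvm::Value *exampleStripAliasingCalls(const llvm::Value *V) {
  while (auto *Call = llvm::dyn_cast<llvm::CallBase>(V)) {
    // For a pure aliasing walk nullness need not be preserved, so
    // llvm.ptrmask is looked through as well.
    const llvm::Value *Arg = llvm::getArgumentAliasingToReturnedPointer(
        Call, /*MustPreserveNullness=*/false);
    if (!Arg)
      break;
    V = Arg;
  }
  return V;
}
#endif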

/// \p PN defines a loop-variant pointer to an object. Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
                                         const LoopInfo *LI) {
  // Find the loop-defined value.
  Loop *L = LI->getLoopFor(PN->getParent());
  if (PN->getNumIncomingValues() != 2)
    return true;

  // Find the value from the previous iteration.
  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    return true;

  // If a new pointer is loaded in the loop, the pointer references a different
  // object in every iteration. E.g.:
  //    for (i)
  //       int *p = a[i];
  //       ...
  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
    if (!L->isLoopInvariant(Load->getPointerOperand()))
      return