File: build/source/llvm/include/llvm/IR/Instructions.h
Warning: line 2712, column 17: Called C++ object pointer is null
//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}
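
// Worked example (illustrative; not part of the original source): for a plain
// i32, getScalarSizeInBits() is 32 and is returned directly; for <8 x i16> it
// is the element width, 16; for a pointer it is 0, so we fall back to
// DL.getPointerTypeSizeInBits(Ty), e.g. 64 on a typical 64-bit target.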

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  if (isa<ScalableVectorType>(Shuf->getType())) {
    assert(DemandedElts == APInt(1, 1));
    DemandedLHS = DemandedRHS = DemandedElts;
    return true;
  }

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(),
                                      DemandedElts, DemandedLHS, DemandedRHS);
}
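
// Worked example (illustrative; not part of the original source): for a
// shufflevector of two <4 x i32> operands with mask <0, 5, 2, 7>, querying
// DemandedElts = 0b0011 (result lanes 0 and 1) demands lane 0 of the LHS
// (mask element 0) and lane 1 of the RHS (mask element 5 selects RHS lane
// 5 - 4 = 1), giving DemandedLHS = 0b0001 and DemandedRHS = 0b0010.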

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes. This means
  // that all lanes in a scalable vector are considered demanded.
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  {
    Value *M;
    if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(RHS, m_c_And(m_Specific(M), m_Value())))
      return true;
    if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(LHS, m_c_And(m_Specific(M), m_Value())))
      return true;
  }

  // X op (Y & ~X)
  if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
      match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
    return true;

  // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
  // for constant Y.
  Value *Y;
  if (match(RHS,
            m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
      match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
    return true;

  // Peek through extends to find a 'not' of the other side:
  // (ext Y) op ext(~Y)
  // (ext ~Y) op ext(Y)
  if ((match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))) ||
      (match(RHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(LHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))))
    return true;

  // Look for: (A & B) op ~(A | B)
  {
    Value *A, *B;
    if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
        match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
    if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
        match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
  }
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}
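
// Illustrative example (not part of the original source): given
//   %lhs = and i32 %x, -16    ; low 4 bits known zero
//   %rhs = and i32 %y, 15     ; high 28 bits known zero
// the known-bits fallback sees LHSKnown.Zero | RHSKnown.Zero == -1, so the
// two values can never have a set bit in common; a caller can then rewrite
// `add i32 %lhs, %rhs` as `or i32 %lhs, %rhs`.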

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
  return !I->user_empty() && all_of(I->users(), [](const User *U) {
    ICmpInst::Predicate P;
    return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
  });
}
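
// Illustrative example (not part of the original source): this returns true
// for the common memcmp pattern
//   %c = call i32 @memcmp(ptr %p, ptr %q, i64 %n)
//   %eq = icmp eq i32 %c, 0
// where every user of %c is an equality compare against zero, and false as
// soon as %c also feeds a relational compare such as `icmp slt`.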

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
                                         unsigned Depth, AssumptionCache *AC,
                                         const Instruction *CxtI,
                                         const DominatorTree *DT) {
  unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
  return V->getType()->getScalarSizeInBits() - SignBits + 1;
}
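
// Worked example (illustrative; not part of the original source): for an i32
// value known to lie in [-8, 7], ComputeNumSignBits reports 29 redundant
// copies of the sign bit, so ComputeMaxSignificantBits returns
// 32 - 29 + 1 = 4: the value fits in a 4-bit signed integer.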

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}
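
// Worked example (illustrative; not part of the original source): if both
// operands are known to have their low two bits clear (e.g. both are
// multiples of 4), no carry can be produced below bit 2, so
// KnownBits::computeForAddSub reports the sum's low two bits as zero even
// when every other bit is unknown.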

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  bool SelfMultiply = Op0 == Op1;
  // TODO: SelfMultiply can be poison, but not undef.
  if (SelfMultiply)
    SelfMultiply &=
        isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
  Known = KnownBits::mul(Known, Known2, SelfMultiply);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}
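
// Illustrative example (not part of the original source): for
// `mul nsw i32 %a, %b` with both operands known negative, the product is
// known non-negative; with %a known negative and %b known non-negative and
// non-zero, the product is known negative. The nsw flag matters because a
// wrapping product could otherwise land on either sign.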

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}
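
// Worked example (illustrative; not part of the original source): a single
// !range [32, 64) on an i8 value covers exactly the patterns 0b001xxxxx:
// max ^ min = 63 ^ 32 = 31 has three leading zeros, so CommonPrefixBits = 3
// and the top three bits become known (Known.Zero gets bits 7 and 6,
// Known.One gets bit 5).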

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || (isa<Instruction>(V) &&
                     !cast<Instruction>(V)->mayHaveSideEffects() &&
                     !cast<Instruction>(V)->isTerminator())) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) comes
    // before the context instruction.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
    if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
      return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
}
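
// Illustrative example (not part of the original source): for an assumed
// `icmp uge %v, 16`, makeExactICmpRegion(UGE, 16) yields the wrapped range
// [16, 0), which does not contain zero, so the compare proves %v != 0. For
// `icmp ult %v, 16` the region is [0, 16), which does contain zero, so
// nothing is excluded.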

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromCmp(const Value *V, const ICmpInst *Cmp,
                                    KnownBits &Known, unsigned Depth,
                                    const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  // We are attempting to compute known bits for the operands of an assume.
  // Do not try to use other assumptions for those recursive calls because
  // that can lead to mutual recursion and a compile-time explosion.
  // An example of the mutual recursion: computeKnownBits can call
  // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
  // and so on.
  Query QueryNoAC = Q;
  QueryNoAC.AC = nullptr;

  // Note that ptrtoint may change the bitwidth.
  Value *A, *B;
  auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

  CmpInst::Predicate Pred;
  uint64_t C;
  switch (Cmp->getPredicate()) {
  default:
    break;
  case ICmpInst::ICMP_EQ:
    // assume(v = a)
    if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
      // assume(v & b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits MaskKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
      // assume(~(v & b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits MaskKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
      // assume(v | b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      // assume(~(v | b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      // assume(v ^ b = a)
    } else if (match(Cmp,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
      // assume(~(v ^ b) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      KnownBits BKnown =
          computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
      // assume(v << c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // For those bits in RHS that are known, we can propagate them, shifted
      // to the right by C, to known bits in V.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One |= RHSKnown.One;
      // assume(~(v << c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      // For those bits in RHS that are known, we can propagate them, inverted
      // and shifted to the right by C, to known bits in V.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
      // assume(v >> c = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      // For those bits in RHS that are known, we can propagate them, shifted
      // to the left by C, to known bits in V.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
      // assume(~(v >> c) = a)
    } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               C < BitWidth) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
      // For those bits in RHS that are known, we can propagate them, inverted
      // and shifted to the left by C, to known bits in V.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    }
    break;
  case ICmpInst::ICMP_SGE:
    // assume(v >=_s c) where c is non-negative
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    }
    break;
  case ICmpInst::ICMP_SGT:
    // assume(v >_s c) where c is at least -1.
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    }
    break;
  case ICmpInst::ICMP_SLE:
    // assume(v <=_s c) where c is negative
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    }
    break;
  case ICmpInst::ICMP_SLT:
    // assume(v <_s c) where c is non-positive
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    }
    break;
  case ICmpInst::ICMP_ULE:
    // assume(v <=_u c)
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
    break;
  case ICmpInst::ICMP_ULT:
    // assume(v <_u c)
    if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A)))) {
      KnownBits RHSKnown =
          computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
    break;
  case ICmpInst::ICMP_NE: {
    // assume (v & b != 0) where b is a power of 2
    const APInt *BPow2;
    if (match(Cmp, m_ICmp(Pred, m_c_And(m_V, m_Power2(BPow2)), m_Zero()))) {
      Known.One |= BPow2->zextOrTrunc(BitWidth);
    }
  } break;
  }
}
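
// Worked example (illustrative; not part of the original source): for
//   call void @llvm.assume(i1 %cond) with %cond = icmp eq (and i8 %v, 240), 48
// the ICMP_EQ "assume(v & b = a)" arm computes MaskKnown.One = 0xF0 and a
// fully-known RHS of 0x30, so the high nibble of %v becomes known:
// Known.One gains 0x30 and Known.Zero gains 0xC0.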

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      if (isPowerOf2_64(RK.ArgValue))
        Known.Zero.setLowBits(Log2_64(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      (void)BitWidth;
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
      continue;

    computeKnownBitsFromCmp(V, Cmp, Known, Depth, Q);
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known bits and a shift amount,
/// computes the implied known bits of the shift operator's result for that
/// shift amount. The results from calling KF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
  bool ShiftAmtIsConstant = Known.isConstant();
  bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);

  if (ShiftAmtIsConstant) {
    Known = KF(Known2, Known);

    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (MaxShiftAmtIsOutOfRange) {
    Known.resetAll();
    return;
  }

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  std::optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero)
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known = KnownBits::commonBits(
        Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
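
// Worked example (illustrative; not part of the original source): for
// `shl i8 %x, %amt` where %amt has Known.Zero = 0b11111001 (so it must be
// one of 0, 2, 4 or 6), the amount is neither constant nor possibly
// out-of-range, and the loop folds in KF's result only for those four shift
// amounts; odd amounts fail the `(ShiftAmt & ~ShiftAmtKZ) != ShiftAmt` test.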
1065 | |||||
1066 | static KnownBits getKnownBitsFromAndXorOr(const Operator *I, | ||||
1067 | const APInt &DemandedElts, | ||||
1068 | const KnownBits &KnownLHS, | ||||
1069 | const KnownBits &KnownRHS, | ||||
1070 | unsigned Depth, const Query &Q) { | ||||
1071 | unsigned BitWidth = KnownLHS.getBitWidth(); | ||||
1072 | KnownBits KnownOut(BitWidth); | ||||
1073 | bool IsAnd = false; | ||||
1074 | bool HasKnownOne = !KnownLHS.One.isZero() || !KnownRHS.One.isZero(); | ||||
1075 | Value *X = nullptr, *Y = nullptr; | ||||
1076 | |||||
1077 | switch (I->getOpcode()) { | ||||
1078 | case Instruction::And: | ||||
1079 | KnownOut = KnownLHS & KnownRHS; | ||||
1080 | IsAnd = true; | ||||
1081 | // and(x, -x) is common idioms that will clear all but lowest set | ||||
1082 | // bit. If we have a single known bit in x, we can clear all bits | ||||
1083 | // above it. | ||||
1084 | // TODO: instcombine often reassociates independent `and` which can hide | ||||
1085 | // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x). | ||||
1086 | if (HasKnownOne && match(I, m_c_And(m_Value(X), m_Neg(m_Deferred(X))))) { | ||||
1087 | // -(-x) == x so using whichever (LHS/RHS) gets us a better result. | ||||
1088 | if (KnownLHS.countMaxTrailingZeros() <= KnownRHS.countMaxTrailingZeros()) | ||||
1089 | KnownOut = KnownLHS.blsi(); | ||||
1090 | else | ||||
1091 | KnownOut = KnownRHS.blsi(); | ||||
1092 | } | ||||
1093 | break; | ||||
1094 | case Instruction::Or: | ||||
1095 | KnownOut = KnownLHS | KnownRHS; | ||||
1096 | break; | ||||
1097 | case Instruction::Xor: | ||||
1098 | KnownOut = KnownLHS ^ KnownRHS; | ||||
1099 | // xor(x, x-1) is a common idiom that will clear all but the lowest set | ||||
1100 | // bit. If we have a single known bit in x, we can clear all bits | ||||
1101 | // above it. | ||||
1102 | // TODO: xor(x, x-1) is often rewritten as xor(x, x-C) where C != | ||||
1103 | // -1, but for the purpose of demanded bits (xor(x, x-C) & | ||||
1104 | // Demanded) == (xor(x, x-1) & Demanded). Extend the xor pattern | ||||
1105 | // to use an arbitrary C when xor(x, x-C) is the same as xor(x, x-1). | ||||
1106 | if (HasKnownOne && | ||||
1107 | match(I, m_c_Xor(m_Value(X), m_c_Add(m_Deferred(X), m_AllOnes())))) { | ||||
1108 | const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS; | ||||
1109 | KnownOut = XBits.blsmsk(); | ||||
1110 | } | ||||
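| // E.g. if x is known to end in 0b?..?1000, then x ^ (x - 1) is exactly | ||||
| // 0b1111: blsmsk keeps the lowest set bit and smears it downward. | ||||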
1111 | break; | ||||
1112 | default: | ||||
1113 | llvm_unreachable("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'"); | ||||
1114 | } | ||||
1115 | |||||
1116 | // and(x, add (x, -1)) is a common idiom that always clears the low bit; | ||||
1117 | // xor/or(x, add (x, -1)) is an idiom that will always set the low bit. | ||||
1118 | // Here we handle the more general case of adding any odd number by | ||||
1119 | // matching the form and/xor/or(x, add(x, y)) where y is odd. | ||||
1120 | // TODO: This could be generalized to clearing any bit set in y where the | ||||
1121 | // following bit is known to be unset in y. | ||||
1122 | if (!KnownOut.Zero[0] && !KnownOut.One[0] && | ||||
1123 | (match(I, m_c_BinOp(m_Value(X), m_c_Add(m_Deferred(X), m_Value(Y)))) || | ||||
1124 | match(I, m_c_BinOp(m_Value(X), m_Sub(m_Deferred(X), m_Value(Y)))) || | ||||
1125 | match(I, m_c_BinOp(m_Value(X), m_Sub(m_Value(Y), m_Deferred(X)))))) { | ||||
1126 | KnownBits KnownY(BitWidth); | ||||
1127 | computeKnownBits(Y, DemandedElts, KnownY, Depth + 1, Q); | ||||
1128 | if (KnownY.countMinTrailingOnes() > 0) { | ||||
1129 | if (IsAnd) | ||||
1130 | KnownOut.Zero.setBit(0); | ||||
1131 | else | ||||
1132 | KnownOut.One.setBit(0); | ||||
1133 | } | ||||
1134 | } | ||||
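| // E.g. x and x + 1 always differ in bit 0 (y == 1 is odd), so | ||||
| // and(6, 7) == 6 clears the low bit while or(6, 7) == 7 sets it. | ||||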
1135 | return KnownOut; | ||||
1136 | } | ||||
1137 | |||||
1138 | // Public so this can be used in `SimplifyDemandedUseBits`. | ||||
1139 | KnownBits llvm::analyzeKnownBitsFromAndXorOr( | ||||
1140 | const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, | ||||
1141 | unsigned Depth, const DataLayout &DL, AssumptionCache *AC, | ||||
1142 | const Instruction *CxtI, const DominatorTree *DT, | ||||
1143 | OptimizationRemarkEmitter *ORE, bool UseInstrInfo) { | ||||
1144 | auto *FVTy = dyn_cast<FixedVectorType>(I->getType()); | ||||
1145 | APInt DemandedElts = | ||||
1146 | FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); | ||||
1147 | |||||
1148 | return getKnownBitsFromAndXorOr( | ||||
1149 | I, DemandedElts, KnownLHS, KnownRHS, Depth, | ||||
1150 | Query(DL, AC, safeCxtI(I, CxtI), DT, UseInstrInfo, ORE)); | ||||
1151 | } | ||||
1152 | |||||
1153 | ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) { | ||||
1154 | Attribute Attr = F->getFnAttribute(Attribute::VScaleRange); | ||||
1155 | // Without vscale_range, we only know that vscale is non-zero. | ||||
1156 | if (!Attr.isValid()) | ||||
1157 | return ConstantRange(APInt(BitWidth, 1), APInt::getZero(BitWidth)); | ||||
1158 | |||||
1159 | unsigned AttrMin = Attr.getVScaleRangeMin(); | ||||
1160 | // Minimum is larger than vscale width, result is always poison. | ||||
1161 | if ((unsigned)llvm::bit_width(AttrMin) > BitWidth) | ||||
1162 | return ConstantRange::getEmpty(BitWidth); | ||||
1163 | |||||
1164 | APInt Min(BitWidth, AttrMin); | ||||
1165 | std::optional<unsigned> AttrMax = Attr.getVScaleRangeMax(); | ||||
1166 | if (!AttrMax || (unsigned)llvm::bit_width(*AttrMax) > BitWidth) | ||||
1167 | return ConstantRange(Min, APInt::getZero(BitWidth)); | ||||
1168 | |||||
1169 | return ConstantRange(Min, APInt(BitWidth, *AttrMax) + 1); | ||||
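| // E.g. a function with vscale_range(2,4) and BitWidth 64 returns the | ||||
| // range [2, 5) here, i.e. vscale is known to be 2, 3 or 4. | ||||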
1170 | } | ||||
1171 | |||||
1172 | static void computeKnownBitsFromOperator(const Operator *I, | ||||
1173 | const APInt &DemandedElts, | ||||
1174 | KnownBits &Known, unsigned Depth, | ||||
1175 | const Query &Q) { | ||||
1176 | unsigned BitWidth = Known.getBitWidth(); | ||||
1177 | |||||
1178 | KnownBits Known2(BitWidth); | ||||
1179 | switch (I->getOpcode()) { | ||||
1180 | default: break; | ||||
1181 | case Instruction::Load: | ||||
1182 | if (MDNode *MD = | ||||
1183 | Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range)) | ||||
1184 | computeKnownBitsFromRangeMetadata(*MD, Known); | ||||
1185 | break; | ||||
1186 | case Instruction::And: | ||||
1187 | computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); | ||||
1188 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); | ||||
1189 | |||||
1190 | Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q); | ||||
1191 | break; | ||||
1192 | case Instruction::Or: | ||||
1193 | computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); | ||||
1194 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); | ||||
1195 | |||||
1196 | Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q); | ||||
1197 | break; | ||||
1198 | case Instruction::Xor: | ||||
1199 | computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); | ||||
1200 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); | ||||
1201 | |||||
1202 | Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q); | ||||
1203 | break; | ||||
1204 | case Instruction::Mul: { | ||||
1205 | bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); | ||||
1206 | computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts, | ||||
1207 | Known, Known2, Depth, Q); | ||||
1208 | break; | ||||
1209 | } | ||||
1210 | case Instruction::UDiv: { | ||||
1211 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1212 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); | ||||
1213 | Known = KnownBits::udiv(Known, Known2); | ||||
1214 | break; | ||||
1215 | } | ||||
1216 | case Instruction::Select: { | ||||
1217 | const Value *LHS = nullptr, *RHS = nullptr; | ||||
1218 | SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor; | ||||
1219 | if (SelectPatternResult::isMinOrMax(SPF)) { | ||||
1220 | computeKnownBits(RHS, Known, Depth + 1, Q); | ||||
1221 | computeKnownBits(LHS, Known2, Depth + 1, Q); | ||||
1222 | switch (SPF) { | ||||
1223 | default: | ||||
1224 | llvm_unreachable("Unhandled select pattern flavor!"); | ||||
1225 | case SPF_SMAX: | ||||
1226 | Known = KnownBits::smax(Known, Known2); | ||||
1227 | break; | ||||
1228 | case SPF_SMIN: | ||||
1229 | Known = KnownBits::smin(Known, Known2); | ||||
1230 | break; | ||||
1231 | case SPF_UMAX: | ||||
1232 | Known = KnownBits::umax(Known, Known2); | ||||
1233 | break; | ||||
1234 | case SPF_UMIN: | ||||
1235 | Known = KnownBits::umin(Known, Known2); | ||||
1236 | break; | ||||
1237 | } | ||||
1238 | break; | ||||
1239 | } | ||||
1240 | |||||
1241 | computeKnownBits(I->getOperand(2), Known, Depth + 1, Q); | ||||
1242 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); | ||||
1243 | |||||
1244 | // Only known if known in both the LHS and RHS. | ||||
1245 | Known = KnownBits::commonBits(Known, Known2); | ||||
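| // E.g. select i1 %c, i8 1, i8 3: both arms agree that bit 0 is one and | ||||
| // bits 2..7 are zero, so only bit 1 remains unknown. | ||||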
1246 | |||||
1247 | if (SPF == SPF_ABS) { | ||||
1248 | // RHS from matchSelectPattern returns the negation part of the abs pattern. | ||||
1249 | // If the negate has an NSW flag we can assume the sign bit of the result | ||||
1250 | // will be 0 because that makes abs(INT_MIN) undefined. | ||||
1251 | if (match(RHS, m_Neg(m_Specific(LHS))) && | ||||
1252 | Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS))) | ||||
1253 | Known.Zero.setSignBit(); | ||||
1254 | } | ||||
1255 | |||||
1256 | break; | ||||
1257 | } | ||||
1258 | case Instruction::FPTrunc: | ||||
1259 | case Instruction::FPExt: | ||||
1260 | case Instruction::FPToUI: | ||||
1261 | case Instruction::FPToSI: | ||||
1262 | case Instruction::SIToFP: | ||||
1263 | case Instruction::UIToFP: | ||||
1264 | break; // Can't work with floating point. | ||||
1265 | case Instruction::PtrToInt: | ||||
1266 | case Instruction::IntToPtr: | ||||
1267 | // Fall through and handle them the same as zext/trunc. | ||||
1268 | [[fallthrough]]; | ||||
1269 | case Instruction::ZExt: | ||||
1270 | case Instruction::Trunc: { | ||||
1271 | Type *SrcTy = I->getOperand(0)->getType(); | ||||
1272 | |||||
1273 | unsigned SrcBitWidth; | ||||
1274 | // Note that we handle pointer operands here because of inttoptr/ptrtoint | ||||
1275 | // which fall through here. | ||||
1276 | Type *ScalarTy = SrcTy->getScalarType(); | ||||
1277 | SrcBitWidth = ScalarTy->isPointerTy() ? | ||||
1278 | Q.DL.getPointerTypeSizeInBits(ScalarTy) : | ||||
1279 | Q.DL.getTypeSizeInBits(ScalarTy); | ||||
1280 | |||||
1281 | assert(SrcBitWidth && "SrcBitWidth can't be zero"); | ||||
1282 | Known = Known.anyextOrTrunc(SrcBitWidth); | ||||
1283 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1284 | Known = Known.zextOrTrunc(BitWidth); | ||||
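| // E.g. for zext i8 %x to i32 with %x known to be 0b000000?1, the result | ||||
| // keeps bit 0 known one and gains 24 known-zero extension bits. | ||||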
1285 | break; | ||||
1286 | } | ||||
1287 | case Instruction::BitCast: { | ||||
1288 | Type *SrcTy = I->getOperand(0)->getType(); | ||||
1289 | if (SrcTy->isIntOrPtrTy() && | ||||
1290 | // TODO: For now, not handling conversions like: | ||||
1291 | // (bitcast i64 %x to <2 x i32>) | ||||
1292 | !I->getType()->isVectorTy()) { | ||||
1293 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1294 | break; | ||||
1295 | } | ||||
1296 | |||||
1297 | // Handle cast from vector integer type to scalar or vector integer. | ||||
1298 | auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy); | ||||
1299 | if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() || | ||||
1300 | !I->getType()->isIntOrIntVectorTy() || | ||||
1301 | isa<ScalableVectorType>(I->getType())) | ||||
1302 | break; | ||||
1303 | |||||
1304 | // Look through a cast from narrow vector elements to wider type. | ||||
1305 | // Examples: v4i32 -> v2i64, v3i8 -> i24 | ||||
1306 | unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits(); | ||||
1307 | if (BitWidth % SubBitWidth == 0) { | ||||
1308 | // Known bits are automatically intersected across demanded elements of a | ||||
1309 | // vector. So for example, if a bit is computed as known zero, it must be | ||||
1310 | // zero across all demanded elements of the vector. | ||||
1311 | // | ||||
1312 | // For this bitcast, each demanded element of the output is sub-divided | ||||
1313 | // across a set of smaller vector elements in the source vector. To get | ||||
1314 | // the known bits for an entire element of the output, compute the known | ||||
1315 | // bits for each sub-element sequentially. This is done by shifting the | ||||
1316 | // one-set-bit demanded elements parameter across the sub-elements for | ||||
1317 | // consecutive calls to computeKnownBits. We are using the demanded | ||||
1318 | // elements parameter as a mask operator. | ||||
1319 | // | ||||
1320 | // The known bits of each sub-element are then inserted into place | ||||
1321 | // (dependent on endian) to form the full result of known bits. | ||||
1322 | unsigned NumElts = DemandedElts.getBitWidth(); | ||||
1323 | unsigned SubScale = BitWidth / SubBitWidth; | ||||
1324 | APInt SubDemandedElts = APInt::getZero(NumElts * SubScale); | ||||
1325 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
1326 | if (DemandedElts[i]) | ||||
1327 | SubDemandedElts.setBit(i * SubScale); | ||||
1328 | } | ||||
1329 | |||||
1330 | KnownBits KnownSrc(SubBitWidth); | ||||
1331 | for (unsigned i = 0; i != SubScale; ++i) { | ||||
1332 | computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc, | ||||
1333 | Depth + 1, Q); | ||||
1334 | unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i; | ||||
1335 | Known.insertBits(KnownSrc, ShiftElt * SubBitWidth); | ||||
1336 | } | ||||
1337 | } | ||||
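| // E.g. bitcast <2 x i8> %v to i16 on a little-endian target: SubScale is | ||||
| // 2, so element 0 of %v supplies result bits [0,8) and element 1 supplies | ||||
| // bits [8,16). | ||||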
1338 | break; | ||||
1339 | } | ||||
1340 | case Instruction::SExt: { | ||||
1341 | // Compute the bits in the result that are not present in the input. | ||||
1342 | unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
1343 | |||||
1344 | Known = Known.trunc(SrcBitWidth); | ||||
1345 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1346 | // If the sign bit of the input is known set or clear, then we know the | ||||
1347 | // top bits of the result. | ||||
1348 | Known = Known.sext(BitWidth); | ||||
1349 | break; | ||||
1350 | } | ||||
1351 | case Instruction::Shl: { | ||||
1352 | bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); | ||||
1353 | auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) { | ||||
1354 | KnownBits Result = KnownBits::shl(KnownVal, KnownAmt); | ||||
1355 | // If this shift has "nsw" keyword, then the result is either a poison | ||||
1356 | // value or has the same sign bit as the first operand. | ||||
1357 | if (NSW) { | ||||
1358 | if (KnownVal.Zero.isSignBitSet()) | ||||
1359 | Result.Zero.setSignBit(); | ||||
1360 | if (KnownVal.One.isSignBitSet()) | ||||
1361 | Result.One.setSignBit(); | ||||
1362 | } | ||||
1363 | return Result; | ||||
1364 | }; | ||||
1365 | computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q, | ||||
1366 | KF); | ||||
1367 | // Trailing zeros of a left-shifted constant never decrease. | ||||
1368 | const APInt *C; | ||||
1369 | if (match(I->getOperand(0), m_APInt(C))) | ||||
1370 | Known.Zero.setLowBits(C->countr_zero()); | ||||
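| // E.g. for shl i8 12, %n: 12 == 0b00001100 has two trailing zeros, and a | ||||
| // left shift can only add more, so the low 2 bits stay known zero. | ||||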
1371 | break; | ||||
1372 | } | ||||
1373 | case Instruction::LShr: { | ||||
1374 | auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) { | ||||
1375 | return KnownBits::lshr(KnownVal, KnownAmt); | ||||
1376 | }; | ||||
1377 | computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q, | ||||
1378 | KF); | ||||
1379 | // Leading zeros of a right-shifted constant never decrease. | ||||
1380 | const APInt *C; | ||||
1381 | if (match(I->getOperand(0), m_APInt(C))) | ||||
1382 | Known.Zero.setHighBits(C->countl_zero()); | ||||
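| // E.g. for lshr i8 12, %n: 12 == 0b00001100 has four leading zeros, and | ||||
| // a logical right shift can only add more, so the high 4 bits stay zero. | ||||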
1383 | break; | ||||
1384 | } | ||||
1385 | case Instruction::AShr: { | ||||
1386 | auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) { | ||||
1387 | return KnownBits::ashr(KnownVal, KnownAmt); | ||||
1388 | }; | ||||
1389 | computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q, | ||||
1390 | KF); | ||||
1391 | break; | ||||
1392 | } | ||||
1393 | case Instruction::Sub: { | ||||
1394 | bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); | ||||
1395 | computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, | ||||
1396 | DemandedElts, Known, Known2, Depth, Q); | ||||
1397 | break; | ||||
1398 | } | ||||
1399 | case Instruction::Add: { | ||||
1400 | bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); | ||||
1401 | computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, | ||||
1402 | DemandedElts, Known, Known2, Depth, Q); | ||||
1403 | break; | ||||
1404 | } | ||||
1405 | case Instruction::SRem: | ||||
1406 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1407 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); | ||||
1408 | Known = KnownBits::srem(Known, Known2); | ||||
1409 | break; | ||||
1410 | |||||
1411 | case Instruction::URem: | ||||
1412 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1413 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); | ||||
1414 | Known = KnownBits::urem(Known, Known2); | ||||
1415 | break; | ||||
1416 | case Instruction::Alloca: | ||||
1417 | Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign())); | ||||
1418 | break; | ||||
1419 | case Instruction::GetElementPtr: { | ||||
1420 | // Analyze all of the subscripts of this getelementptr instruction | ||||
1421 | // to determine if we can prove known low zero bits. | ||||
1422 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1423 | // Accumulate the constant indices in a separate variable | ||||
1424 | // to minimize the number of calls to computeForAddSub. | ||||
1425 | APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true); | ||||
1426 | |||||
1427 | gep_type_iterator GTI = gep_type_begin(I); | ||||
1428 | for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) { | ||||
1429 | // TrailZ can only become smaller, short-circuit if we hit zero. | ||||
1430 | if (Known.isUnknown()) | ||||
1431 | break; | ||||
1432 | |||||
1433 | Value *Index = I->getOperand(i); | ||||
1434 | |||||
1435 | // Handle case when index is zero. | ||||
1436 | Constant *CIndex = dyn_cast<Constant>(Index); | ||||
1437 | if (CIndex && CIndex->isZeroValue()) | ||||
1438 | continue; | ||||
1439 | |||||
1440 | if (StructType *STy = GTI.getStructTypeOrNull()) { | ||||
1441 | // Handle struct member offset arithmetic. | ||||
1442 | |||||
1443 | assert(CIndex && | ||||
1444 | "Access to structure field must be known at compile time"); | ||||
1445 | |||||
1446 | if (CIndex->getType()->isVectorTy()) | ||||
1447 | Index = CIndex->getSplatValue(); | ||||
1448 | |||||
1449 | unsigned Idx = cast<ConstantInt>(Index)->getZExtValue(); | ||||
1450 | const StructLayout *SL = Q.DL.getStructLayout(STy); | ||||
1451 | uint64_t Offset = SL->getElementOffset(Idx); | ||||
1452 | AccConstIndices += Offset; | ||||
1453 | continue; | ||||
1454 | } | ||||
1455 | |||||
1456 | // Handle array index arithmetic. | ||||
1457 | Type *IndexedTy = GTI.getIndexedType(); | ||||
1458 | if (!IndexedTy->isSized()) { | ||||
1459 | Known.resetAll(); | ||||
1460 | break; | ||||
1461 | } | ||||
1462 | |||||
1463 | unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits(); | ||||
1464 | KnownBits IndexBits(IndexBitWidth); | ||||
1465 | computeKnownBits(Index, IndexBits, Depth + 1, Q); | ||||
1466 | TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy); | ||||
1467 | uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue(); | ||||
1468 | KnownBits ScalingFactor(IndexBitWidth); | ||||
1469 | // Multiply by current sizeof type. | ||||
1470 | // &A[i] == A + i * sizeof(*A[i]). | ||||
1471 | if (IndexTypeSize.isScalable()) { | ||||
1472 | // For scalable types the only thing we know about sizeof is | ||||
1473 | // that this is a multiple of the minimum size. | ||||
1474 | ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes)); | ||||
1475 | } else if (IndexBits.isConstant()) { | ||||
1476 | APInt IndexConst = IndexBits.getConstant(); | ||||
1477 | APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes); | ||||
1478 | IndexConst *= ScalingFactor; | ||||
1479 | AccConstIndices += IndexConst.sextOrTrunc(BitWidth); | ||||
1480 | continue; | ||||
1481 | } else { | ||||
1482 | ScalingFactor = | ||||
1483 | KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes)); | ||||
1484 | } | ||||
1485 | IndexBits = KnownBits::mul(IndexBits, ScalingFactor); | ||||
1486 | |||||
1487 | // If the offsets have a different width from the pointer, according | ||||
1488 | // to the language reference we need to sign-extend or truncate them | ||||
1489 | // to the width of the pointer. | ||||
1490 | IndexBits = IndexBits.sextOrTrunc(BitWidth); | ||||
1491 | |||||
1492 | // Note that inbounds does *not* guarantee nsw for the addition, as only | ||||
1493 | // the offset is signed, while the base address is unsigned. | ||||
1494 | Known = KnownBits::computeForAddSub( | ||||
1495 | /*Add=*/true, /*NSW=*/false, Known, IndexBits); | ||||
1496 | } | ||||
1497 | if (!Known.isUnknown() && !AccConstIndices.isZero()) { | ||||
1498 | KnownBits Index = KnownBits::makeConstant(AccConstIndices); | ||||
1499 | Known = KnownBits::computeForAddSub( | ||||
1500 | /*Add=*/true, /*NSW=*/false, Known, Index); | ||||
1501 | } | ||||
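| // E.g. for getelementptr i32, ptr %p, i64 %i with %p known 16-byte | ||||
| // aligned: the variable offset is 4 * %i, so the sum keeps at least the | ||||
| // low 2 bits known zero and the result stays 4-byte aligned. | ||||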
1502 | break; | ||||
1503 | } | ||||
1504 | case Instruction::PHI: { | ||||
1505 | const PHINode *P = cast<PHINode>(I); | ||||
1506 | BinaryOperator *BO = nullptr; | ||||
1507 | Value *R = nullptr, *L = nullptr; | ||||
1508 | if (matchSimpleRecurrence(P, BO, R, L)) { | ||||
1509 | // Handle the case of a simple two-predecessor recurrence PHI. | ||||
1510 | // There's a lot more that could theoretically be done here, but | ||||
1511 | // this is sufficient to catch some interesting cases. | ||||
1512 | unsigned Opcode = BO->getOpcode(); | ||||
1513 | |||||
1514 | // If this is a shift recurrence, we know the bits being shifted in. | ||||
1515 | // We can combine that with information about the start value of the | ||||
1516 | // recurrence to conclude facts about the result. | ||||
1517 | if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr || | ||||
1518 | Opcode == Instruction::Shl) && | ||||
1519 | BO->getOperand(0) == I) { | ||||
1520 | |||||
1521 | // We have matched a recurrence of the form: | ||||
1522 | // %iv = [R, %entry], [%iv.next, %backedge] | ||||
1523 | // %iv.next = shift_op %iv, L | ||||
1524 | |||||
1525 | // Recurse with the phi context to avoid concern about whether facts | ||||
1526 | // inferred hold at original context instruction. TODO: It may be | ||||
1527 | // correct to use the original context. If warranted, explore and | ||||
1528 | // add sufficient tests to cover. | ||||
1529 | Query RecQ = Q; | ||||
1530 | RecQ.CxtI = P; | ||||
1531 | computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ); | ||||
1532 | switch (Opcode) { | ||||
1533 | case Instruction::Shl: | ||||
1534 | // A shl recurrence will only increase the trailing zeros. | ||||
1535 | Known.Zero.setLowBits(Known2.countMinTrailingZeros()); | ||||
1536 | break; | ||||
1537 | case Instruction::LShr: | ||||
1538 | // An lshr recurrence will preserve the leading zeros of the | ||||
1539 | // start value. | ||||
1540 | Known.Zero.setHighBits(Known2.countMinLeadingZeros()); | ||||
1541 | break; | ||||
1542 | case Instruction::AShr: | ||||
1543 | // An ashr recurrence will extend the initial sign bit. | ||||
1544 | Known.Zero.setHighBits(Known2.countMinLeadingZeros()); | ||||
1545 | Known.One.setHighBits(Known2.countMinLeadingOnes()); | ||||
1546 | break; | ||||
1547 | } | ||||
1548 | } | ||||
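| // E.g. %iv = phi i8 [ 8, %entry ], [ %iv.next, %loop ] with | ||||
| // %iv.next = lshr i8 %iv, 1: the start value 8 has four leading zeros, | ||||
| // so every value of %iv keeps the high 4 bits known zero. | ||||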
1549 | |||||
1550 | // Check for operations that have the property that if | ||||
1551 | // both their operands have low zero bits, the result | ||||
1552 | // will have low zero bits. | ||||
1553 | if (Opcode == Instruction::Add || | ||||
1554 | Opcode == Instruction::Sub || | ||||
1555 | Opcode == Instruction::And || | ||||
1556 | Opcode == Instruction::Or || | ||||
1557 | Opcode == Instruction::Mul) { | ||||
1558 | // Change the context instruction to the "edge" that flows into the | ||||
1559 | // phi. This is important because that is where the value is actually | ||||
1560 | // "evaluated" even though it is used later somewhere else. (see also | ||||
1561 | // D69571). | ||||
1562 | Query RecQ = Q; | ||||
1563 | |||||
1564 | unsigned OpNum = P->getOperand(0) == R ? 0 : 1; | ||||
1565 | Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator(); | ||||
1566 | Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator(); | ||||
1567 | |||||
1568 | // Ok, we have a PHI of the form L op= R. Check for low | ||||
1569 | // zero bits. | ||||
1570 | RecQ.CxtI = RInst; | ||||
1571 | computeKnownBits(R, Known2, Depth + 1, RecQ); | ||||
1572 | |||||
1573 | // We need to take the minimum number of known bits | ||||
1574 | KnownBits Known3(BitWidth); | ||||
1575 | RecQ.CxtI = LInst; | ||||
1576 | computeKnownBits(L, Known3, Depth + 1, RecQ); | ||||
1577 | |||||
1578 | Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(), | ||||
1579 | Known3.countMinTrailingZeros())); | ||||
1580 | |||||
1581 | auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO); | ||||
1582 | if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) { | ||||
1583 | // If initial value of recurrence is nonnegative, and we are adding | ||||
1584 | // a nonnegative number with nsw, the result can only be nonnegative | ||||
1585 | // or poison value regardless of the number of times we execute the | ||||
1586 | // add in phi recurrence. If initial value is negative and we are | ||||
1587 | // adding a negative number with nsw, the result can only be | ||||
1588 | // negative or poison value. Similar arguments apply to sub and mul. | ||||
1589 | // | ||||
1590 | // (add non-negative, non-negative) --> non-negative | ||||
1591 | // (add negative, negative) --> negative | ||||
1592 | if (Opcode == Instruction::Add) { | ||||
1593 | if (Known2.isNonNegative() && Known3.isNonNegative()) | ||||
1594 | Known.makeNonNegative(); | ||||
1595 | else if (Known2.isNegative() && Known3.isNegative()) | ||||
1596 | Known.makeNegative(); | ||||
1597 | } | ||||
1598 | |||||
1599 | // (sub nsw non-negative, negative) --> non-negative | ||||
1600 | // (sub nsw negative, non-negative) --> negative | ||||
1601 | else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) { | ||||
1602 | if (Known2.isNonNegative() && Known3.isNegative()) | ||||
1603 | Known.makeNonNegative(); | ||||
1604 | else if (Known2.isNegative() && Known3.isNonNegative()) | ||||
1605 | Known.makeNegative(); | ||||
1606 | } | ||||
1607 | |||||
1608 | // (mul nsw non-negative, non-negative) --> non-negative | ||||
1609 | else if (Opcode == Instruction::Mul && Known2.isNonNegative() && | ||||
1610 | Known3.isNonNegative()) | ||||
1611 | Known.makeNonNegative(); | ||||
1612 | } | ||||
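| // E.g. %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] with | ||||
| // %iv.next = add nsw i32 %iv, 1: start and step are both non-negative, | ||||
| // so %iv is known non-negative (or poison on overflow). | ||||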
1613 | |||||
1614 | break; | ||||
1615 | } | ||||
1616 | } | ||||
1617 | |||||
1618 | // Unreachable blocks may have zero-operand PHI nodes. | ||||
1619 | if (P->getNumIncomingValues() == 0) | ||||
1620 | break; | ||||
1621 | |||||
1622 | // Otherwise take the intersection of the known bit sets of the operands, | ||||
1623 | // taking conservative care to avoid excessive recursion. | ||||
1624 | if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) { | ||||
1625 | // Skip if every incoming value references the PHI itself. | ||||
1626 | if (isa_and_nonnull<UndefValue>(P->hasConstantValue())) | ||||
1627 | break; | ||||
1628 | |||||
1629 | Known.Zero.setAllBits(); | ||||
1630 | Known.One.setAllBits(); | ||||
1631 | for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) { | ||||
1632 | Value *IncValue = P->getIncomingValue(u); | ||||
1633 | // Skip direct self references. | ||||
1634 | if (IncValue == P) continue; | ||||
1635 | |||||
1636 | // Change the context instruction to the "edge" that flows into the | ||||
1637 | // phi. This is important because that is where the value is actually | ||||
1638 | // "evaluated" even though it is used later somewhere else. (see also | ||||
1639 | // D69571). | ||||
1640 | Query RecQ = Q; | ||||
1641 | RecQ.CxtI = P->getIncomingBlock(u)->getTerminator(); | ||||
1642 | |||||
1643 | Known2 = KnownBits(BitWidth); | ||||
1644 | |||||
1645 | // Recurse, but cap the recursion to one level, because we don't | ||||
1646 | // want to waste time spinning around in loops. | ||||
1647 | computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ); | ||||
1648 | |||||
1649 | // If this failed, see if we can use a conditional branch into the phi | ||||
1650 | // to help us determine the range of the value. | ||||
1651 | if (Known2.isUnknown()) { | ||||
1652 | ICmpInst::Predicate Pred; | ||||
1653 | const APInt *RHSC; | ||||
1654 | BasicBlock *TrueSucc, *FalseSucc; | ||||
1655 | // TODO: Use RHS Value and compute range from its known bits. | ||||
1656 | if (match(RecQ.CxtI, | ||||
1657 | m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)), | ||||
1658 | m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) { | ||||
1659 | // Check for cases of duplicate successors. | ||||
1660 | if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) { | ||||
1661 | // If we're using the false successor, invert the predicate. | ||||
1662 | if (FalseSucc == P->getParent()) | ||||
1663 | Pred = CmpInst::getInversePredicate(Pred); | ||||
1664 | |||||
1665 | switch (Pred) { | ||||
1666 | case CmpInst::Predicate::ICMP_EQ: | ||||
1667 | Known2 = KnownBits::makeConstant(*RHSC); | ||||
1668 | break; | ||||
1669 | case CmpInst::Predicate::ICMP_ULE: | ||||
1670 | Known2.Zero.setHighBits(RHSC->countl_zero()); | ||||
1671 | break; | ||||
1672 | case CmpInst::Predicate::ICMP_ULT: | ||||
1673 | Known2.Zero.setHighBits((*RHSC - 1).countl_zero()); | ||||
1674 | break; | ||||
1675 | default: | ||||
1676 | // TODO - add additional integer predicate handling. | ||||
1677 | break; | ||||
1678 | } | ||||
1679 | } | ||||
1680 | } | ||||
1681 | } | ||||
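| // E.g. if the edge into the phi is br (icmp ult i32 %v, 8), the ULT case | ||||
| // above proves %v <= 7 on that edge, i.e. bits [3,32) are known zero. | ||||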
1682 | |||||
1683 | Known = KnownBits::commonBits(Known, Known2); | ||||
1684 | // If all bits have been ruled out, there's no need to check | ||||
1685 | // more operands. | ||||
1686 | if (Known.isUnknown()) | ||||
1687 | break; | ||||
1688 | } | ||||
1689 | } | ||||
1690 | break; | ||||
1691 | } | ||||
1692 | case Instruction::Call: | ||||
1693 | case Instruction::Invoke: | ||||
1694 | // If range metadata is attached to this call, set known bits from that, | ||||
1695 | // and then intersect with known bits based on other properties of the | ||||
1696 | // function. | ||||
1697 | if (MDNode *MD = | ||||
1698 | Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range)) | ||||
1699 | computeKnownBitsFromRangeMetadata(*MD, Known); | ||||
1700 | if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) { | ||||
1701 | computeKnownBits(RV, Known2, Depth + 1, Q); | ||||
1702 | Known.Zero |= Known2.Zero; | ||||
1703 | Known.One |= Known2.One; | ||||
1704 | } | ||||
1705 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { | ||||
1706 | switch (II->getIntrinsicID()) { | ||||
1707 | default: break; | ||||
1708 | case Intrinsic::abs: { | ||||
1709 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); | ||||
1710 | bool IntMinIsPoison = match(II->getArgOperand(1), m_One()); | ||||
1711 | Known = Known2.abs(IntMinIsPoison); | ||||
1712 | break; | ||||
1713 | } | ||||
1714 | case Intrinsic::bitreverse: | ||||
1715 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); | ||||
1716 | Known.Zero |= Known2.Zero.reverseBits(); | ||||
1717 | Known.One |= Known2.One.reverseBits(); | ||||
1718 | break; | ||||
1719 | case Intrinsic::bswap: | ||||
1720 | computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); | ||||
1721 | Known.Zero |= Known2.Zero.byteSwap(); | ||||
1722 | Known.One |= Known2.One.byteSwap(); | ||||
1723 | break; | ||||
1724 | case Intrinsic::ctlz: { | ||||
1725 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); | ||||
1726 | // If we have a known 1, its position is our upper bound. | ||||
1727 | unsigned PossibleLZ = Known2.countMaxLeadingZeros(); | ||||
1728 | // If this call is poison for 0 input, the result will be less than 2^n. | ||||
1729 | if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) | ||||
1730 | PossibleLZ = std::min(PossibleLZ, BitWidth - 1); | ||||
1731 | unsigned LowBits = llvm::bit_width(PossibleLZ); | ||||
1732 | Known.Zero.setBitsFrom(LowBits); | ||||
1733 | break; | ||||
1734 | } | ||||
1735 | case Intrinsic::cttz: { | ||||
1736 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); | ||||
1737 | // If we have a known 1, its position is our upper bound. | ||||
1738 | unsigned PossibleTZ = Known2.countMaxTrailingZeros(); | ||||
1739 | // If this call is poison for 0 input, the result will be less than 2^n. | ||||
1740 | if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) | ||||
1741 | PossibleTZ = std::min(PossibleTZ, BitWidth - 1); | ||||
1742 | unsigned LowBits = llvm::bit_width(PossibleTZ); | ||||
1743 | Known.Zero.setBitsFrom(LowBits); | ||||
1744 | break; | ||||
1745 | } | ||||
1746 | case Intrinsic::ctpop: { | ||||
1747 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); | ||||
1748 | // We can bound the space the count needs. Also, bits known to be zero | ||||
1749 | // can't contribute to the population. | ||||
1750 | unsigned BitsPossiblySet = Known2.countMaxPopulation(); | ||||
1751 | unsigned LowBits = llvm::bit_width(BitsPossiblySet); | ||||
1752 | Known.Zero.setBitsFrom(LowBits); | ||||
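| // E.g. an i32 ctpop result is at most 32, so bit_width(32) == 6 and bits | ||||
| // [6,32) are always known zero here. | ||||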
1753 | // TODO: we could bound KnownOne using the lower bound on the number | ||||
1754 | // of bits which might be set provided by popcnt KnownOne2. | ||||
1755 | break; | ||||
1756 | } | ||||
1757 | case Intrinsic::fshr: | ||||
1758 | case Intrinsic::fshl: { | ||||
1759 | const APInt *SA; | ||||
1760 | if (!match(I->getOperand(2), m_APInt(SA))) | ||||
1761 | break; | ||||
1762 | |||||
1763 | // Normalize to funnel shift left. | ||||
1764 | uint64_t ShiftAmt = SA->urem(BitWidth); | ||||
1765 | if (II->getIntrinsicID() == Intrinsic::fshr) | ||||
1766 | ShiftAmt = BitWidth - ShiftAmt; | ||||
1767 | |||||
1768 | KnownBits Known3(BitWidth); | ||||
1769 | computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); | ||||
1770 | computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q); | ||||
1771 | |||||
1772 | Known.Zero = | ||||
1773 | Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt); | ||||
1774 | Known.One = | ||||
1775 | Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt); | ||||
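| // E.g. fshl(i8 %a, i8 %b, 5) == (%a << 5) | (%b lshr 3), which is | ||||
| // exactly how the two shifted known-bit sets are combined above. | ||||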
1776 | break; | ||||
1777 | } | ||||
1778 | case Intrinsic::uadd_sat: | ||||
1779 | case Intrinsic::usub_sat: { | ||||
1780 | bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat; | ||||
1781 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1782 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); | ||||
1783 | |||||
1784 | // Add: Leading ones of either operand are preserved. | ||||
1785 | // Sub: Leading zeros of LHS and leading ones of RHS are preserved | ||||
1786 | // as leading zeros in the result. | ||||
1787 | unsigned LeadingKnown; | ||||
1788 | if (IsAdd) | ||||
1789 | LeadingKnown = std::max(Known.countMinLeadingOnes(), | ||||
1790 | Known2.countMinLeadingOnes()); | ||||
1791 | else | ||||
1792 | LeadingKnown = std::max(Known.countMinLeadingZeros(), | ||||
1793 | Known2.countMinLeadingOnes()); | ||||
1794 | |||||
1795 | Known = KnownBits::computeForAddSub( | ||||
1796 | IsAdd, /* NSW */ false, Known, Known2); | ||||
1797 | |||||
1798 | // We select between the operation result and all-ones/zero | ||||
1799 | // respectively, so we can preserve known ones/zeros. | ||||
1800 | if (IsAdd) { | ||||
1801 | Known.One.setHighBits(LeadingKnown); | ||||
1802 | Known.Zero.clearAllBits(); | ||||
1803 | } else { | ||||
1804 | Known.Zero.setHighBits(LeadingKnown); | ||||
1805 | Known.One.clearAllBits(); | ||||
1806 | } | ||||
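| // E.g. for uadd.sat(i8 %a, i8 %b) with %a >= 0xE0: the result is at | ||||
| // least %a whether or not it saturates, so the top 3 bits stay ones. | ||||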
1807 | break; | ||||
1808 | } | ||||
1809 | case Intrinsic::umin: | ||||
1810 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1811 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); | ||||
1812 | Known = KnownBits::umin(Known, Known2); | ||||
1813 | break; | ||||
1814 | case Intrinsic::umax: | ||||
1815 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1816 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); | ||||
1817 | Known = KnownBits::umax(Known, Known2); | ||||
1818 | break; | ||||
1819 | case Intrinsic::smin: | ||||
1820 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1821 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); | ||||
1822 | Known = KnownBits::smin(Known, Known2); | ||||
1823 | break; | ||||
1824 | case Intrinsic::smax: | ||||
1825 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1826 | computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); | ||||
1827 | Known = KnownBits::smax(Known, Known2); | ||||
1828 | break; | ||||
1829 | case Intrinsic::x86_sse42_crc32_64_64: | ||||
1830 | Known.Zero.setBitsFrom(32); | ||||
1831 | break; | ||||
1832 | case Intrinsic::riscv_vsetvli: | ||||
1833 | case Intrinsic::riscv_vsetvlimax: | ||||
1834 | // Assume that VL output is <= 65536. | ||||
1835 | // TODO: Take SEW and LMUL into account. | ||||
1836 | if (BitWidth > 17) | ||||
1837 | Known.Zero.setBitsFrom(17); | ||||
1838 | break; | ||||
1839 | case Intrinsic::vscale: { | ||||
1840 | if (!II->getParent() || !II->getFunction()) | ||||
1841 | break; | ||||
1842 | |||||
1843 | Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits(); | ||||
1844 | break; | ||||
1845 | } | ||||
1846 | } | ||||
1847 | } | ||||
1848 | break; | ||||
1849 | case Instruction::ShuffleVector: { | ||||
1850 | auto *Shuf = dyn_cast<ShuffleVectorInst>(I); | ||||
1851 | // FIXME: Do we need to handle ConstantExpr involving shufflevectors? | ||||
1852 | if (!Shuf) { | ||||
1853 | Known.resetAll(); | ||||
1854 | return; | ||||
1855 | } | ||||
1856 | // For undef elements, we don't know anything about the common state of | ||||
1857 | // the shuffle result. | ||||
1858 | APInt DemandedLHS, DemandedRHS; | ||||
1859 | if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) { | ||||
1860 | Known.resetAll(); | ||||
1861 | return; | ||||
1862 | } | ||||
1863 | Known.One.setAllBits(); | ||||
1864 | Known.Zero.setAllBits(); | ||||
1865 | if (!!DemandedLHS) { | ||||
1866 | const Value *LHS = Shuf->getOperand(0); | ||||
1867 | computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q); | ||||
1868 | // If we don't know any bits, early out. | ||||
1869 | if (Known.isUnknown()) | ||||
1870 | break; | ||||
1871 | } | ||||
1872 | if (!!DemandedRHS) { | ||||
1873 | const Value *RHS = Shuf->getOperand(1); | ||||
1874 | computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q); | ||||
1875 | Known = KnownBits::commonBits(Known, Known2); | ||||
1876 | } | ||||
1877 | break; | ||||
1878 | } | ||||
1879 | case Instruction::InsertElement: { | ||||
1880 | if (isa<ScalableVectorType>(I->getType())) { | ||||
1881 | Known.resetAll(); | ||||
1882 | return; | ||||
1883 | } | ||||
1884 | const Value *Vec = I->getOperand(0); | ||||
1885 | const Value *Elt = I->getOperand(1); | ||||
1886 | auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2)); | ||||
1887 | // Early out if the index is non-constant or out-of-range. | ||||
1888 | unsigned NumElts = DemandedElts.getBitWidth(); | ||||
1889 | if (!CIdx || CIdx->getValue().uge(NumElts)) { | ||||
1890 | Known.resetAll(); | ||||
1891 | return; | ||||
1892 | } | ||||
1893 | Known.One.setAllBits(); | ||||
1894 | Known.Zero.setAllBits(); | ||||
1895 | unsigned EltIdx = CIdx->getZExtValue(); | ||||
1896 | // Do we demand the inserted element? | ||||
1897 | if (DemandedElts[EltIdx]) { | ||||
1898 | computeKnownBits(Elt, Known, Depth + 1, Q); | ||||
1899 | // If we don't know any bits, early out. | ||||
1900 | if (Known.isUnknown()) | ||||
1901 | break; | ||||
1902 | } | ||||
1903 | // We don't need the base vector element that has been inserted. | ||||
1904 | APInt DemandedVecElts = DemandedElts; | ||||
1905 | DemandedVecElts.clearBit(EltIdx); | ||||
1906 | if (!!DemandedVecElts) { | ||||
1907 | computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q); | ||||
1908 | Known = KnownBits::commonBits(Known, Known2); | ||||
1909 | } | ||||
1910 | break; | ||||
1911 | } | ||||
1912 | case Instruction::ExtractElement: { | ||||
1913 | // Look through extract element. If the index is non-constant or | ||||
1914 | // out-of-range demand all elements, otherwise just the extracted element. | ||||
1915 | const Value *Vec = I->getOperand(0); | ||||
1916 | const Value *Idx = I->getOperand(1); | ||||
1917 | auto *CIdx = dyn_cast<ConstantInt>(Idx); | ||||
1918 | if (isa<ScalableVectorType>(Vec->getType())) { | ||||
1919 | // FIXME: there's probably *something* we can do with scalable vectors | ||||
1920 | Known.resetAll(); | ||||
1921 | break; | ||||
1922 | } | ||||
1923 | unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); | ||||
1924 | APInt DemandedVecElts = APInt::getAllOnes(NumElts); | ||||
1925 | if (CIdx && CIdx->getValue().ult(NumElts)) | ||||
1926 | DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); | ||||
1927 | computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q); | ||||
1928 | break; | ||||
1929 | } | ||||
1930 | case Instruction::ExtractValue: | ||||
1931 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) { | ||||
1932 | const ExtractValueInst *EVI = cast<ExtractValueInst>(I); | ||||
1933 | if (EVI->getNumIndices() != 1) break; | ||||
1934 | if (EVI->getIndices()[0] == 0) { | ||||
1935 | switch (II->getIntrinsicID()) { | ||||
1936 | default: break; | ||||
1937 | case Intrinsic::uadd_with_overflow: | ||||
1938 | case Intrinsic::sadd_with_overflow: | ||||
1939 | computeKnownBitsAddSub(true, II->getArgOperand(0), | ||||
1940 | II->getArgOperand(1), false, DemandedElts, | ||||
1941 | Known, Known2, Depth, Q); | ||||
1942 | break; | ||||
1943 | case Intrinsic::usub_with_overflow: | ||||
1944 | case Intrinsic::ssub_with_overflow: | ||||
1945 | computeKnownBitsAddSub(false, II->getArgOperand(0), | ||||
1946 | II->getArgOperand(1), false, DemandedElts, | ||||
1947 | Known, Known2, Depth, Q); | ||||
1948 | break; | ||||
1949 | case Intrinsic::umul_with_overflow: | ||||
1950 | case Intrinsic::smul_with_overflow: | ||||
1951 | computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false, | ||||
1952 | DemandedElts, Known, Known2, Depth, Q); | ||||
1953 | break; | ||||
1954 | } | ||||
1955 | } | ||||
1956 | } | ||||
1957 | break; | ||||
1958 | case Instruction::Freeze: | ||||
1959 | if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, | ||||
1960 | Depth + 1)) | ||||
1961 | computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); | ||||
1962 | break; | ||||
1963 | } | ||||
1964 | } | ||||
1965 | |||||
1966 | /// Determine which bits of V are known to be either zero or one and return | ||||
1967 | /// them. | ||||
1968 | KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, | ||||
1969 | unsigned Depth, const Query &Q) { | ||||
1970 | KnownBits Known(getBitWidth(V->getType(), Q.DL)); | ||||
1971 | computeKnownBits(V, DemandedElts, Known, Depth, Q); | ||||
1972 | return Known; | ||||
1973 | } | ||||
1974 | |||||
1975 | /// Determine which bits of V are known to be either zero or one and return | ||||
1976 | /// them. | ||||
1977 | KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) { | ||||
1978 | KnownBits Known(getBitWidth(V->getType(), Q.DL)); | ||||
1979 | computeKnownBits(V, Known, Depth, Q); | ||||
1980 | return Known; | ||||
1981 | } | ||||
1982 | |||||
1983 | /// Determine which bits of V are known to be either zero or one and return | ||||
1984 | /// them in the Known bit set. | ||||
1985 | /// | ||||
1986 | /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that | ||||
1987 | /// we cannot optimize based on the assumption that it is zero without changing | ||||
1988 | /// it to be an explicit zero. If we don't change it to zero, other code could | ||||
1989 | /// be optimized based on the contradictory assumption that it is non-zero. | ||||
1990 | /// Because instcombine aggressively folds operations with undef args anyway, | ||||
1991 | /// this won't lose us code quality. | ||||
1992 | /// | ||||
1993 | /// This function is defined on values with integer type, values with pointer | ||||
1994 | /// type, and vectors of integers. In the case where V is a vector, the known | ||||
1995 | /// zero and known one values are the same width as the vector element, and | ||||
1996 | /// the bit is set only if it is true for all of the demanded elements in the | ||||
1997 | /// vector specified by DemandedElts. | ||||
1998 | void computeKnownBits(const Value *V, const APInt &DemandedElts, | ||||
1999 | KnownBits &Known, unsigned Depth, const Query &Q) { | ||||
2000 | if (!DemandedElts) { | ||||
2001 | // No demanded elts, better to assume we don't know anything. | ||||
2002 | Known.resetAll(); | ||||
2003 | return; | ||||
2004 | } | ||||
2005 | |||||
2006 | assert(V && "No Value?"); | ||||
2007 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); | ||||
2008 | |||||
2009 | #ifndef NDEBUG | ||||
2010 | Type *Ty = V->getType(); | ||||
2011 | unsigned BitWidth = Known.getBitWidth(); | ||||
2012 | |||||
2013 | assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) && | ||||
2014 | "Not integer or pointer type!"); | ||||
2015 | |||||
2016 | if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { | ||||
2017 | assert( | ||||
2018 | FVTy->getNumElements() == DemandedElts.getBitWidth() && | ||||
2019 | "DemandedElt width should equal the fixed vector number of elements"); | ||||
2020 | } else { | ||||
2021 | assert(DemandedElts == APInt(1, 1) && | ||||
2022 | "DemandedElt width should be 1 for scalars or scalable vectors"); | ||||
2023 | } | ||||
2024 | |||||
2025 | Type *ScalarTy = Ty->getScalarType(); | ||||
2026 | if (ScalarTy->isPointerTy()) { | ||||
2027 | assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) && | ||||
2028 | "V and Known should have same BitWidth"); | ||||
2029 | } else { | ||||
2030 | assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) && | ||||
2031 | "V and Known should have same BitWidth"); | ||||
2032 | } | ||||
2033 | #endif | ||||
2034 | |||||
2035 | const APInt *C; | ||||
2036 | if (match(V, m_APInt(C))) { | ||||
2037 | // We know all of the bits for a scalar constant or a splat vector constant! | ||||
2038 | Known = KnownBits::makeConstant(*C); | ||||
2039 | return; | ||||
2040 | } | ||||
2041 | // Null and aggregate-zero are all-zeros. | ||||
2042 | if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) { | ||||
2043 | Known.setAllZero(); | ||||
2044 | return; | ||||
2045 | } | ||||
2046 | // Handle a constant vector by taking the intersection of the known bits of | ||||
2047 | // each element. | ||||
2048 | if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) { | ||||
2049 | assert(!isa<ScalableVectorType>(V->getType())); | ||||
2050 | // We know that CDV must be a vector of integers. Take the intersection of | ||||
2051 | // each element. | ||||
2052 | Known.Zero.setAllBits(); Known.One.setAllBits(); | ||||
2053 | for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) { | ||||
2054 | if (!DemandedElts[i]) | ||||
2055 | continue; | ||||
2056 | APInt Elt = CDV->getElementAsAPInt(i); | ||||
2057 | Known.Zero &= ~Elt; | ||||
2058 | Known.One &= Elt; | ||||
2059 | } | ||||
2060 | return; | ||||
2061 | } | ||||
2062 | |||||
2063 | if (const auto *CV = dyn_cast<ConstantVector>(V)) { | ||||
2064 | assert(!isa<ScalableVectorType>(V->getType())); | ||||
2065 | // We know that CV must be a vector of integers. Take the intersection of | ||||
2066 | // each element. | ||||
2067 | Known.Zero.setAllBits(); Known.One.setAllBits(); | ||||
2068 | for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { | ||||
2069 | if (!DemandedElts[i]) | ||||
2070 | continue; | ||||
2071 | Constant *Element = CV->getAggregateElement(i); | ||||
2072 | auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); | ||||
2073 | if (!ElementCI) { | ||||
2074 | Known.resetAll(); | ||||
2075 | return; | ||||
2076 | } | ||||
2077 | const APInt &Elt = ElementCI->getValue(); | ||||
2078 | Known.Zero &= ~Elt; | ||||
2079 | Known.One &= Elt; | ||||
2080 | } | ||||
2081 | return; | ||||
2082 | } | ||||
2083 | |||||
2084 | // Start out not knowing anything. | ||||
2085 | Known.resetAll(); | ||||
2086 | |||||
2087 | // We can't imply anything about undefs. | ||||
2088 | if (isa<UndefValue>(V)) | ||||
2089 | return; | ||||
2090 | |||||
2091 | // There's no point in looking through other users of ConstantData for | ||||
2092 | // assumptions. Confirm that we've handled them all. | ||||
2093 | assert(!isa<ConstantData>(V) && "Unhandled constant data!"); | ||||
2094 | |||||
2095 | // All recursive calls that increase depth must come after this. | ||||
2096 | if (Depth == MaxAnalysisRecursionDepth) | ||||
2097 | return; | ||||
2098 | |||||
2099 | // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has | ||||
2100 | // the bits of its aliasee. | ||||
2101 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { | ||||
2102 | if (!GA->isInterposable()) | ||||
2103 | computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q); | ||||
2104 | return; | ||||
2105 | } | ||||
2106 | |||||
2107 | if (const Operator *I = dyn_cast<Operator>(V)) | ||||
2108 | computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q); | ||||
2109 | |||||
2110 | // Aligned pointers have trailing zeros - refine Known.Zero set | ||||
2111 | if (isa<PointerType>(V->getType())) { | ||||
2112 | Align Alignment = V->getPointerAlignment(Q.DL); | ||||
2113 | Known.Zero.setLowBits(Log2(Alignment)); | ||||
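| // E.g. a pointer with align 16 contributes Log2(16) == 4 known-zero low | ||||
| // bits. | ||||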
2114 | } | ||||
2115 | |||||
2116 | // computeKnownBitsFromAssume strictly refines Known. | ||||
2117 | // Therefore, we run them after computeKnownBitsFromOperator. | ||||
2118 | |||||
2119 | // Check whether a nearby assume intrinsic can determine some known bits. | ||||
2120 | computeKnownBitsFromAssume(V, Known, Depth, Q); | ||||
2121 | |||||
2122 | assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); | ||||
2123 | } | ||||
2124 | |||||
2125 | /// Try to detect a recurrence in which the value of the induction variable | ||||
2126 | /// is always a power of two (or zero). | ||||
2127 | static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, | ||||
2128 | unsigned Depth, Query &Q) { | ||||
2129 | BinaryOperator *BO = nullptr; | ||||
2130 | Value *Start = nullptr, *Step = nullptr; | ||||
2131 | if (!matchSimpleRecurrence(PN, BO, Start, Step)) | ||||
2132 | return false; | ||||
2133 | |||||
2134 | // Initial value must be a power of two. | ||||
2135 | for (const Use &U : PN->operands()) { | ||||
2136 | if (U.get() == Start) { | ||||
2137 | // Initial value comes from a different BB, need to adjust context | ||||
2138 | // instruction for analysis. | ||||
2139 | Q.CxtI = PN->getIncomingBlock(U)->getTerminator(); | ||||
2140 | if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q)) | ||||
2141 | return false; | ||||
2142 | } | ||||
2143 | } | ||||
2144 | |||||
2145 | // Except for Mul, the induction variable must be on the left side of the | ||||
2146 | // increment expression, otherwise its value can be arbitrary. | ||||
2147 | if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step) | ||||
2148 | return false; | ||||
2149 | |||||
2150 | Q.CxtI = BO->getParent()->getTerminator(); | ||||
2151 | switch (BO->getOpcode()) { | ||||
2152 | case Instruction::Mul: | ||||
2153 | // Power of two is closed under multiplication. | ||||
2154 | return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || | ||||
2155 | Q.IIQ.hasNoSignedWrap(BO)) && | ||||
2156 | isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q); | ||||
2157 | case Instruction::SDiv: | ||||
2158 | // Start value must not be signmask for signed division, so simply being a | ||||
2159 | // power of two is not sufficient, and it has to be a constant. | ||||
2160 | if (!match(Start, m_Power2()) || match(Start, m_SignMask())) | ||||
2161 | return false; | ||||
2162 | [[fallthrough]]; | ||||
2163 | case Instruction::UDiv: | ||||
2164 | // Divisor must be a power of two. | ||||
2165 | // If OrZero is false, we cannot guarantee that the induction variable is | ||||
2166 | // non-zero after the division (same for Shr), unless it is an exact division. | ||||
2167 | return (OrZero || Q.IIQ.isExact(BO)) && | ||||
2168 | isKnownToBeAPowerOfTwo(Step, false, Depth, Q); | ||||
2169 | case Instruction::Shl: | ||||
2170 | return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO); | ||||
2171 | case Instruction::AShr: | ||||
2172 | if (!match(Start, m_Power2()) || match(Start, m_SignMask())) | ||||
2173 | return false; | ||||
2174 | [[fallthrough]]; | ||||
2175 | case Instruction::LShr: | ||||
2176 | return OrZero || Q.IIQ.isExact(BO); | ||||
2177 | default: | ||||
2178 | return false; | ||||
2179 | } | ||||
2180 | } | ||||
2181 | |||||
2182 | /// Return true if the given value is known to have exactly one | ||||
2183 | /// bit set when defined. For vectors return true if every element is known to | ||||
2184 | /// be a power of two when defined. Supports values with integer or pointer | ||||
2185 | /// types and vectors of integers. | ||||
2186 | bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, | ||||
2187 | const Query &Q) { | ||||
2188 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); | ||||
2189 | |||||
2190 | // Attempt to match against constants. | ||||
2191 | if (OrZero && match(V, m_Power2OrZero())) | ||||
2192 | return true; | ||||
2193 | if (match(V, m_Power2())) | ||||
2194 | return true; | ||||
2195 | |||||
2196 | // 1 << X is clearly a power of two if the one is not shifted off the end. If | ||||
2197 | // it is shifted off the end then the result is undefined. | ||||
2198 | if (match(V, m_Shl(m_One(), m_Value()))) | ||||
2199 | return true; | ||||
2200 | |||||
2201 | // (signmask) >>l X is clearly a power of two if the one is not shifted off | ||||
2202 | // the bottom. If it is shifted off the bottom then the result is undefined. | ||||
2203 | if (match(V, m_LShr(m_SignMask(), m_Value()))) | ||||
2204 | return true; | ||||
2205 | |||||
2206 | // The remaining tests are all recursive, so bail out if we hit the limit. | ||||
2207 | if (Depth++ == MaxAnalysisRecursionDepth) | ||||
2208 | return false; | ||||
2209 | |||||
2210 | Value *X = nullptr, *Y = nullptr; | ||||
2211 | // A shift left or a logical shift right of a power of two is a power of two | ||||
2212 | // or zero. | ||||
2213 | if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || | ||||
2214 | match(V, m_LShr(m_Value(X), m_Value())))) | ||||
2215 | return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); | ||||
2216 | |||||
2217 | if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) | ||||
2218 | return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); | ||||
2219 | |||||
2220 | if (const SelectInst *SI = dyn_cast<SelectInst>(V)) | ||||
2221 | return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && | ||||
2222 | isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); | ||||
2223 | |||||
2224 | // Peek through min/max. | ||||
2225 | if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) { | ||||
2226 | return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) && | ||||
2227 | isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q); | ||||
2228 | } | ||||
2229 | |||||
2230 | if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { | ||||
2231 | // A power of two and'd with anything is a power of two or zero. | ||||
2232 | if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || | ||||
2233 | isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) | ||||
2234 | return true; | ||||
2235 | // X & (-X) is always a power of two or zero. | ||||
2236 | if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) | ||||
2237 | return true; | ||||
2238 | return false; | ||||
2239 | } | ||||
2240 | |||||
2241 | // Adding a power-of-two or zero to the same power-of-two or zero yields | ||||
2242 | // either the original power-of-two, a larger power-of-two or zero. | ||||
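| // For example, in i8 (illustrative): 8 + 8 == 16, 8 + 0 == 8, and with | ||||
| // wrapping 128 + 128 == 0; hence the nuw/nsw check below when !OrZero. | ||||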
2243 | if (match(V, m_Add(m_Value(X), m_Value(Y)))) { | ||||
2244 | const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); | ||||
2245 | if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) || | ||||
2246 | Q.IIQ.hasNoSignedWrap(VOBO)) { | ||||
2247 | if (match(X, m_And(m_Specific(Y), m_Value())) || | ||||
2248 | match(X, m_And(m_Value(), m_Specific(Y)))) | ||||
2249 | if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) | ||||
2250 | return true; | ||||
2251 | if (match(Y, m_And(m_Specific(X), m_Value())) || | ||||
2252 | match(Y, m_And(m_Value(), m_Specific(X)))) | ||||
2253 | if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) | ||||
2254 | return true; | ||||
2255 | |||||
2256 | unsigned BitWidth = V->getType()->getScalarSizeInBits(); | ||||
2257 | KnownBits LHSBits(BitWidth); | ||||
2258 | computeKnownBits(X, LHSBits, Depth, Q); | ||||
2259 | |||||
2260 | KnownBits RHSBits(BitWidth); | ||||
2261 | computeKnownBits(Y, RHSBits, Depth, Q); | ||||
2262 | // If i8 V is a power of two or zero: | ||||
2263 | // ZeroBits: 1 1 1 0 1 1 1 1 | ||||
2264 | // ~ZeroBits: 0 0 0 1 0 0 0 0 | ||||
2265 | if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) | ||||
2266 | // If OrZero isn't set, we cannot give back a zero result. | ||||
2267 | // Make sure either the LHS or RHS has a bit set. | ||||
2268 | if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) | ||||
2269 | return true; | ||||
2270 | } | ||||
2271 | } | ||||
2272 | |||||
2273 | // A PHI node is a power of two if all incoming values are powers of two, or | ||||
2274 | // if it is an induction variable whose value is a power of two at each step. | ||||
2275 | if (const PHINode *PN = dyn_cast<PHINode>(V)) { | ||||
2276 | Query RecQ = Q; | ||||
2277 | |||||
2278 | // Check if it is an induction variable and always power of two. | ||||
2279 | if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ)) | ||||
2280 | return true; | ||||
2281 | |||||
2282 | // Recursively check all incoming values. Limit recursion to 2 levels, so | ||||
2283 | // that search complexity is limited to number of operands^2. | ||||
2284 | unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); | ||||
2285 | return llvm::all_of(PN->operands(), [&](const Use &U) { | ||||
2286 | // The value is a power of 2 if it comes from the PHI node itself, by induction. | ||||
2287 | if (U.get() == PN) | ||||
2288 | return true; | ||||
2289 | |||||
2290 | // Change the context instruction to the incoming block where it is | ||||
2291 | // evaluated. | ||||
2292 | RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); | ||||
2293 | return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ); | ||||
2294 | }); | ||||
2295 | } | ||||
2296 | |||||
2297 | // An exact divide or right shift can only shift off zero bits, so the result | ||||
2298 | // is a power of two only if the first operand is a power of two and not | ||||
2299 | // copying a sign bit (sdiv int_min, 2). | ||||
2300 | if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || | ||||
2301 | match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { | ||||
2302 | return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, | ||||
2303 | Depth, Q); | ||||
2304 | } | ||||
2305 | |||||
2306 | return false; | ||||
2307 | } | ||||
2308 | |||||
2309 | /// Test whether a GEP's result is known to be non-null. | ||||
2310 | /// | ||||
2311 | /// Uses properties inherent in a GEP to try to determine whether it is known | ||||
2312 | /// to be non-null. | ||||
2313 | /// | ||||
2314 | /// Currently this routine does not support vector GEPs. | ||||
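| /// For example (hypothetical IR): | ||||
| ///   %p = getelementptr inbounds {i32, i32}, ptr %base, i64 0, i32 1 | ||||
| /// indexes a field at a non-zero offset, so %p cannot be null in address | ||||
| /// space 0 even when nothing is known about %base. | ||||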
2315 | static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, | ||||
2316 | const Query &Q) { | ||||
2317 | const Function *F = nullptr; | ||||
2318 | if (const Instruction *I = dyn_cast<Instruction>(GEP)) | ||||
2319 | F = I->getFunction(); | ||||
2320 | |||||
2321 | if (!GEP->isInBounds() || | ||||
2322 | NullPointerIsDefined(F, GEP->getPointerAddressSpace())) | ||||
2323 | return false; | ||||
2324 | |||||
2325 | // FIXME: Support vector-GEPs. | ||||
2326 | assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); | ||||
2327 | |||||
2328 | // If the base pointer is non-null, we cannot walk to a null address with an | ||||
2329 | // inbounds GEP in address space zero. | ||||
2330 | if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) | ||||
2331 | return true; | ||||
2332 | |||||
2333 | // Walk the GEP operands and see if any operand introduces a non-zero offset. | ||||
2334 | // If so, then the GEP cannot produce a null pointer, as doing so would | ||||
2335 | // inherently violate the inbounds contract within address space zero. | ||||
2336 | for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); | ||||
2337 | GTI != GTE; ++GTI) { | ||||
2338 | // Struct types are easy -- they must always be indexed by a constant. | ||||
2339 | if (StructType *STy = GTI.getStructTypeOrNull()) { | ||||
2340 | ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); | ||||
2341 | unsigned ElementIdx = OpC->getZExtValue(); | ||||
2342 | const StructLayout *SL = Q.DL.getStructLayout(STy); | ||||
2343 | uint64_t ElementOffset = SL->getElementOffset(ElementIdx); | ||||
2344 | if (ElementOffset > 0) | ||||
2345 | return true; | ||||
2346 | continue; | ||||
2347 | } | ||||
2348 | |||||
2349 | // If we have a zero-sized type, the index doesn't matter. Keep looping. | ||||
2350 | if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).isZero()) | ||||
2351 | continue; | ||||
2352 | |||||
2353 | // Fast path the constant operand case both for efficiency and so we don't | ||||
2354 | // increment Depth when just zipping down an all-constant GEP. | ||||
2355 | if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { | ||||
2356 | if (!OpC->isZero()) | ||||
2357 | return true; | ||||
2358 | continue; | ||||
2359 | } | ||||
2360 | |||||
2361 | // We post-increment Depth here because while isKnownNonZero increments it | ||||
2362 | // as well, when we pop back up that increment won't persist. We don't want | ||||
2363 | // to recurse 10k times just because we have 10k GEP operands. We don't | ||||
2364 | // bail completely out because we want to handle constant GEPs regardless | ||||
2365 | // of depth. | ||||
2366 | if (Depth++ >= MaxAnalysisRecursionDepth) | ||||
2367 | continue; | ||||
2368 | |||||
2369 | if (isKnownNonZero(GTI.getOperand(), Depth, Q)) | ||||
2370 | return true; | ||||
2371 | } | ||||
2372 | |||||
2373 | return false; | ||||
2374 | } | ||||
2375 | |||||
2376 | static bool isKnownNonNullFromDominatingCondition(const Value *V, | ||||
2377 | const Instruction *CtxI, | ||||
2378 | const DominatorTree *DT) { | ||||
2379 | assert(!isa<Constant>(V) && "Called for constant?"); | ||||
2380 | |||||
2381 | if (!CtxI || !DT) | ||||
2382 | return false; | ||||
2383 | |||||
2384 | unsigned NumUsesExplored = 0; | ||||
2385 | for (const auto *U : V->users()) { | ||||
2386 | // Avoid massive lists | ||||
2387 | if (NumUsesExplored >= DomConditionsMaxUses) | ||||
2388 | break; | ||||
2389 | NumUsesExplored++; | ||||
2390 | |||||
2391 | // If the value is used as an argument to a call or invoke, then argument | ||||
2392 | // attributes may provide an answer about null-ness. | ||||
2393 | if (const auto *CB = dyn_cast<CallBase>(U)) | ||||
2394 | if (auto *CalledFunc = CB->getCalledFunction()) | ||||
2395 | for (const Argument &Arg : CalledFunc->args()) | ||||
2396 | if (CB->getArgOperand(Arg.getArgNo()) == V && | ||||
2397 | Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) && | ||||
2398 | DT->dominates(CB, CtxI)) | ||||
2399 | return true; | ||||
2400 | |||||
2401 | // If the value is used as the pointer of a load/store, it must be non-null. | ||||
2402 | if (V == getLoadStorePointerOperand(U)) { | ||||
2403 | const Instruction *I = cast<Instruction>(U); | ||||
2404 | if (!NullPointerIsDefined(I->getFunction(), | ||||
2405 | V->getType()->getPointerAddressSpace()) && | ||||
2406 | DT->dominates(I, CtxI)) | ||||
2407 | return true; | ||||
2408 | } | ||||
2409 | |||||
2410 | // Consider only compare instructions uniquely controlling a branch | ||||
2411 | Value *RHS; | ||||
2412 | CmpInst::Predicate Pred; | ||||
2413 | if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS)))) | ||||
2414 | continue; | ||||
2415 | |||||
2416 | bool NonNullIfTrue; | ||||
2417 | if (cmpExcludesZero(Pred, RHS)) | ||||
2418 | NonNullIfTrue = true; | ||||
2419 | else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS)) | ||||
2420 | NonNullIfTrue = false; | ||||
2421 | else | ||||
2422 | continue; | ||||
2423 | |||||
2424 | SmallVector<const User *, 4> WorkList; | ||||
2425 | SmallPtrSet<const User *, 4> Visited; | ||||
2426 | for (const auto *CmpU : U->users()) { | ||||
2427 | assert(WorkList.empty() && "Should be!"); | ||||
2428 | if (Visited.insert(CmpU).second) | ||||
2429 | WorkList.push_back(CmpU); | ||||
2430 | |||||
2431 | while (!WorkList.empty()) { | ||||
2432 | auto *Curr = WorkList.pop_back_val(); | ||||
2433 | |||||
2434 | // If a user is an AND, add all its users to the work list. We only | ||||
2435 | // propagate the "pred != null" condition through AND because it is only | ||||
2436 | // correct to assume that all conditions of an AND hold in the true branch. | ||||
2437 | // TODO: Support similar logic for OR and the EQ predicate? | ||||
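| // For example (illustrative IR): | ||||
| //   %ne = icmp ne ptr %p, null | ||||
| //   %c = and i1 %ne, %other | ||||
| //   br i1 %c, label %taken, label %skip | ||||
| // Both conditions hold on entry to %taken, so %p is known non-null there. | ||||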
2438 | if (NonNullIfTrue) | ||||
2439 | if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) { | ||||
2440 | for (const auto *CurrU : Curr->users()) | ||||
2441 | if (Visited.insert(CurrU).second) | ||||
2442 | WorkList.push_back(CurrU); | ||||
2443 | continue; | ||||
2444 | } | ||||
2445 | |||||
2446 | if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) { | ||||
2447 | assert(BI->isConditional() && "uses a comparison!"); | ||||
2448 | |||||
2449 | BasicBlock *NonNullSuccessor = | ||||
2450 | BI->getSuccessor(NonNullIfTrue ? 0 : 1); | ||||
2451 | BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); | ||||
2452 | if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) | ||||
2453 | return true; | ||||
2454 | } else if (NonNullIfTrue && isGuard(Curr) && | ||||
2455 | DT->dominates(cast<Instruction>(Curr), CtxI)) { | ||||
2456 | return true; | ||||
2457 | } | ||||
2458 | } | ||||
2459 | } | ||||
2460 | } | ||||
2461 | |||||
2462 | return false; | ||||
2463 | } | ||||
2464 | |||||
2465 | /// Does the 'Range' metadata (which must be a valid MD_range operand list) | ||||
2466 | /// ensure that the value it's attached to is never equal to 'Value'? The | ||||
2467 | /// ranges describe values of the same bit width as 'Value'. | ||||
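| /// For example (hypothetical metadata), a load annotated with | ||||
| ///   !range !{i32 1, i32 256} | ||||
| /// produces values in the half-open range [1, 256), which never contains | ||||
| /// 0, so this returns true for Value == 0. | ||||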
2468 | static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) { | ||||
2469 | const unsigned NumRanges = Ranges->getNumOperands() / 2; | ||||
2470 | assert(NumRanges >= 1); | ||||
2471 | for (unsigned i = 0; i < NumRanges; ++i) { | ||||
2472 | ConstantInt *Lower = | ||||
2473 | mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0)); | ||||
2474 | ConstantInt *Upper = | ||||
2475 | mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1)); | ||||
2476 | ConstantRange Range(Lower->getValue(), Upper->getValue()); | ||||
2477 | if (Range.contains(Value)) | ||||
2478 | return false; | ||||
2479 | } | ||||
2480 | return true; | ||||
2481 | } | ||||
2482 | |||||
2483 | /// Try to detect a recurrence that monotonically increases/decreases from a | ||||
2484 | /// non-zero starting value. These are common as induction variables. | ||||
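| /// For example (illustrative IR): | ||||
| ///   %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop ] | ||||
| ///   %iv.next = add nuw i64 %iv, 2 | ||||
| /// starts non-zero and 'add nuw' cannot wrap back to zero, so %iv is | ||||
| /// known non-zero. | ||||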
2485 | static bool isNonZeroRecurrence(const PHINode *PN) { | ||||
2486 | BinaryOperator *BO = nullptr; | ||||
2487 | Value *Start = nullptr, *Step = nullptr; | ||||
2488 | const APInt *StartC, *StepC; | ||||
2489 | if (!matchSimpleRecurrence(PN, BO, Start, Step) || | ||||
2490 | !match(Start, m_APInt(StartC)) || StartC->isZero()) | ||||
2491 | return false; | ||||
2492 | |||||
2493 | switch (BO->getOpcode()) { | ||||
2494 | case Instruction::Add: | ||||
2495 | // Starting from non-zero and stepping away from zero can never wrap back | ||||
2496 | // to zero. | ||||
2497 | return BO->hasNoUnsignedWrap() || | ||||
2498 | (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) && | ||||
2499 | StartC->isNegative() == StepC->isNegative()); | ||||
2500 | case Instruction::Mul: | ||||
2501 | return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) && | ||||
2502 | match(Step, m_APInt(StepC)) && !StepC->isZero(); | ||||
2503 | case Instruction::Shl: | ||||
2504 | return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap(); | ||||
2505 | case Instruction::AShr: | ||||
2506 | case Instruction::LShr: | ||||
2507 | return BO->isExact(); | ||||
2508 | default: | ||||
2509 | return false; | ||||
2510 | } | ||||
2511 | } | ||||
2512 | |||||
2513 | static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth, | ||||
2514 | const Query &Q, unsigned BitWidth, Value *X, Value *Y, | ||||
2515 | bool NSW) { | ||||
2516 | KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); | ||||
2517 | KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); | ||||
2518 | |||||
2519 | // If X and Y are both non-negative (as signed values) then their sum is not | ||||
2520 | // zero unless both X and Y are zero. | ||||
2521 | if (XKnown.isNonNegative() && YKnown.isNonNegative()) | ||||
2522 | if (isKnownNonZero(Y, DemandedElts, Depth, Q) || | ||||
2523 | isKnownNonZero(X, DemandedElts, Depth, Q)) | ||||
2524 | return true; | ||||
2525 | |||||
2526 | // If X and Y are both negative (as signed values) then their sum is not | ||||
2527 | // zero unless both X and Y equal INT_MIN. | ||||
2528 | if (XKnown.isNegative() && YKnown.isNegative()) { | ||||
2529 | APInt Mask = APInt::getSignedMaxValue(BitWidth); | ||||
2530 | // The sign bit of X is set. If some other bit is set then X is not equal | ||||
2531 | // to INT_MIN. | ||||
2532 | if (XKnown.One.intersects(Mask)) | ||||
2533 | return true; | ||||
2534 | // The sign bit of Y is set. If some other bit is set then Y is not equal | ||||
2535 | // to INT_MIN. | ||||
2536 | if (YKnown.One.intersects(Mask)) | ||||
2537 | return true; | ||||
2538 | } | ||||
2539 | |||||
2540 | // The sum of a non-negative number and a power of two is not zero. | ||||
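| // For example, in i8 (illustrative): a non-negative X plus 16 could only | ||||
| // wrap to zero if X were 240, which is negative as a signed value. | ||||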
2541 | if (XKnown.isNonNegative() && | ||||
2542 | isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) | ||||
2543 | return true; | ||||
2544 | if (YKnown.isNonNegative() && | ||||
2545 | isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) | ||||
2546 | return true; | ||||
2547 | |||||
2548 | return KnownBits::computeForAddSub(/*Add*/ true, NSW, XKnown, YKnown) | ||||
2549 | .isNonZero(); | ||||
2550 | } | ||||
2551 | |||||
2552 | static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth, | ||||
2553 | const Query &Q, unsigned BitWidth, Value *X, | ||||
2554 | Value *Y) { | ||||
2555 | if (auto *C = dyn_cast<Constant>(X)) | ||||
2556 | if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Depth, Q)) | ||||
2557 | return true; | ||||
2558 | |||||
2559 | KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); | ||||
2560 | if (XKnown.isUnknown()) | ||||
2561 | return false; | ||||
2562 | KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); | ||||
2563 | // If X != Y then X - Y is non-zero. | ||||
2564 | std::optional<bool> ne = KnownBits::ne(XKnown, YKnown); | ||||
2565 | // If we cannot determine whether X != Y, computing the known bits of the | ||||
2566 | // sub expression will not help either, so just return here. | ||||
2567 | return ne && *ne; | ||||
2568 | } | ||||
2569 | |||||
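| // Return true if shift instruction I, whose shifted operand has known bits | ||||
| // KnownVal, is known to produce a non-zero result. For example | ||||
| // (illustrative): an odd value shifted left by less than the bit width | ||||
| // always keeps a set bit, so the result cannot be zero. | ||||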
2570 | static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts, | ||||
2571 | unsigned Depth, const Query &Q, | ||||
2572 | const KnownBits &KnownVal) { | ||||
2573 | auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) { | ||||
2574 | switch (I->getOpcode()) { | ||||
2575 | case Instruction::Shl: | ||||
2576 | return Lhs.shl(Rhs); | ||||
2577 | case Instruction::LShr: | ||||
2578 | return Lhs.lshr(Rhs); | ||||
2579 | case Instruction::AShr: | ||||
2580 | return Lhs.ashr(Rhs); | ||||
2581 | default: | ||||
2582 | llvm_unreachable("Unknown Shift Opcode")::llvm::llvm_unreachable_internal("Unknown Shift Opcode", "llvm/lib/Analysis/ValueTracking.cpp" , 2582); | ||||
2583 | } | ||||
2584 | }; | ||||
2585 | |||||
2586 | auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) { | ||||
2587 | switch (I->getOpcode()) { | ||||
2588 | case Instruction::Shl: | ||||
2589 | return Lhs.lshr(Rhs); | ||||
2590 | case Instruction::LShr: | ||||
2591 | case Instruction::AShr: | ||||
2592 | return Lhs.shl(Rhs); | ||||
2593 | default: | ||||
2594 | llvm_unreachable("Unknown Shift Opcode")::llvm::llvm_unreachable_internal("Unknown Shift Opcode", "llvm/lib/Analysis/ValueTracking.cpp" , 2594); | ||||
2595 | } | ||||
2596 | }; | ||||
2597 | |||||
2598 | if (KnownVal.isUnknown()) | ||||
2599 | return false; | ||||
2600 | |||||
2601 | KnownBits KnownCnt = | ||||
2602 | computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2603 | APInt MaxShift = KnownCnt.getMaxValue(); | ||||
2604 | unsigned NumBits = KnownVal.getBitWidth(); | ||||
2605 | if (MaxShift.uge(NumBits)) | ||||
2606 | return false; | ||||
2607 | |||||
2608 | if (!ShiftOp(KnownVal.One, MaxShift).isZero()) | ||||
2609 | return true; | ||||
2610 | |||||
2611 | // If all of the bits shifted out are known to be zero, and Val is known | ||||
2612 | // non-zero then at least one non-zero bit must remain. | ||||
2613 | if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift) | ||||
2614 | .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) && | ||||
2615 | isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q)) | ||||
2616 | return true; | ||||
2617 | |||||
2618 | return false; | ||||
2619 | } | ||||
2620 | |||||
2621 | /// Return true if the given value is known to be non-zero when defined. For | ||||
2622 | /// vectors, return true if every demanded element is known to be non-zero when | ||||
2623 | /// defined. For pointers, if the context instruction and dominator tree are | ||||
2624 | /// specified, perform context-sensitive analysis and return true if the | ||||
2625 | /// pointer couldn't possibly be null at the specified instruction. | ||||
2626 | /// Supports values with integer or pointer type and vectors of integers. | ||||
2627 | bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth, | ||||
2628 | const Query &Q) { | ||||
2629 | |||||
2630 | #ifndef NDEBUG | ||||
2631 | Type *Ty = V->getType(); | ||||
2632 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); | ||||
2633 | |||||
2634 | if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { | ||||
2635 | assert( | ||||
2636 | FVTy->getNumElements() == DemandedElts.getBitWidth() && | ||||
2637 | "DemandedElt width should equal the fixed vector number of elements"); | ||||
2638 | } else { | ||||
2639 | assert(DemandedElts == APInt(1, 1) && | ||||
2640 | "DemandedElt width should be 1 for scalars"); | ||||
2641 | } | ||||
2642 | #endif | ||||
2643 | |||||
2644 | if (auto *C = dyn_cast<Constant>(V)) { | ||||
2645 | if (C->isNullValue()) | ||||
2646 | return false; | ||||
2647 | if (isa<ConstantInt>(C)) | ||||
2648 | // Must be non-zero due to null test above. | ||||
2649 | return true; | ||||
2650 | |||||
2651 | // For constant vectors, check that all elements are undefined or known | ||||
2652 | // non-zero to determine that the whole vector is known non-zero. | ||||
2653 | if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) { | ||||
2654 | for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { | ||||
2655 | if (!DemandedElts[i]) | ||||
2656 | continue; | ||||
2657 | Constant *Elt = C->getAggregateElement(i); | ||||
2658 | if (!Elt || Elt->isNullValue()) | ||||
2659 | return false; | ||||
2660 | if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt)) | ||||
2661 | return false; | ||||
2662 | } | ||||
2663 | return true; | ||||
2664 | } | ||||
2665 | |||||
2666 | // A global variable in address space 0 is non-null unless extern weak | ||||
2667 | // or an absolute symbol reference. Other address spaces may have null as a | ||||
2668 | // valid address for a global, so we can't assume anything. | ||||
2669 | if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { | ||||
2670 | if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && | ||||
2671 | GV->getType()->getAddressSpace() == 0) | ||||
2672 | return true; | ||||
2673 | } | ||||
2674 | |||||
2675 | // For constant expressions, fall through to the Operator code below. | ||||
2676 | if (!isa<ConstantExpr>(V)) | ||||
2677 | return false; | ||||
2678 | } | ||||
2679 | |||||
2680 | if (auto *I = dyn_cast<Instruction>(V)) { | ||||
2681 | if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) { | ||||
2682 | // If the possible ranges don't contain zero, then the value is | ||||
2683 | // definitely non-zero. | ||||
2684 | if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { | ||||
2685 | const APInt ZeroValue(Ty->getBitWidth(), 0); | ||||
2686 | if (rangeMetadataExcludesValue(Ranges, ZeroValue)) | ||||
2687 | return true; | ||||
2688 | } | ||||
2689 | } | ||||
2690 | } | ||||
2691 | |||||
2692 | if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q)) | ||||
2693 | return true; | ||||
2694 | |||||
2695 | // Some of the tests below are recursive, so bail out if we hit the limit. | ||||
2696 | if (Depth++ >= MaxAnalysisRecursionDepth) | ||||
2697 | return false; | ||||
2698 | |||||
2699 | // Check for pointer simplifications. | ||||
2700 | |||||
2701 | if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) { | ||||
2702 | // Alloca never returns null, malloc might. | ||||
2703 | if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0) | ||||
2704 | return true; | ||||
2705 | |||||
2706 | // A byval or inalloca argument may still be null in an address space where | ||||
2707 | // null is a defined address; a nonnull argument is assumed never null. | ||||
2708 | if (const Argument *A = dyn_cast<Argument>(V)) { | ||||
2709 | if (((A->hasPassPointeeByValueCopyAttr() && | ||||
2710 | !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) || | ||||
2711 | A->hasNonNullAttr())) | ||||
2712 | return true; | ||||
2713 | } | ||||
2714 | |||||
2715 | // A Load tagged with nonnull metadata is never null. | ||||
2716 | if (const LoadInst *LI = dyn_cast<LoadInst>(V)) | ||||
2717 | if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull)) | ||||
2718 | return true; | ||||
2719 | |||||
2720 | if (const auto *Call = dyn_cast<CallBase>(V)) { | ||||
2721 | if (Call->isReturnNonNull()) | ||||
2722 | return true; | ||||
2723 | if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true)) | ||||
2724 | return isKnownNonZero(RP, Depth, Q); | ||||
2725 | } | ||||
2726 | } | ||||
2727 | |||||
2728 | if (!isa<Constant>(V) && | ||||
2729 | isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) | ||||
2730 | return true; | ||||
2731 | |||||
2732 | const Operator *I = dyn_cast<Operator>(V); | ||||
2733 | if (!I) | ||||
2734 | return false; | ||||
2735 | |||||
2736 | unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); | ||||
2737 | switch (I->getOpcode()) { | ||||
2738 | case Instruction::GetElementPtr: | ||||
2739 | if (I->getType()->isPointerTy()) | ||||
2740 | return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q); | ||||
2741 | break; | ||||
2742 | case Instruction::BitCast: { | ||||
2743 | // We need to be a bit careful here. We can only peek through the bitcast | ||||
2744 | // if the scalar size of the operand's elements is smaller than the scalar | ||||
2745 | // size they are casting to, and evenly divides it. Take three cases: | ||||
2746 | // | ||||
2747 | // 1) Unsafe: | ||||
2748 | // bitcast <2 x i16> %NonZero to <4 x i8> | ||||
2749 | // | ||||
2750 | // %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a | ||||
2751 | // <4 x i8> requires that all 4 i8 elements be non-zero, which isn't | ||||
2752 | // guaranteed (imagine just the sign bit set in the 2 i16 elements). | ||||
2753 | // | ||||
2754 | // 2) Unsafe: | ||||
2755 | // bitcast <4 x i3> %NonZero to <3 x i4> | ||||
2756 | // | ||||
2757 | // Even though the scalar size of the src (`i3`) is smaller than the | ||||
2758 | // scalar size of the dst (`i4`), because `i4` is not a multiple of `i3` | ||||
2759 | // it's possible for the `3 x i4` elements to be zero, because there are | ||||
2760 | // some elements in the destination that don't contain any full src | ||||
2761 | // element. | ||||
2762 | // | ||||
2763 | // 3) Safe: | ||||
2764 | // bitcast <4 x i8> %NonZero to <2 x i16> | ||||
2765 | // | ||||
2766 | // This is always safe as non-zero in the 4 i8 elements implies | ||||
2767 | // non-zero in the combination of any two adjacent ones. Since i16 is a | ||||
2768 | // multiple of i8, each i16 is guaranteed to contain 2 full i8 elements. | ||||
2769 | // This all implies the 2 i16 elements are non-zero. | ||||
2770 | Type *FromTy = I->getOperand(0)->getType(); | ||||
2771 | if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) && | ||||
2772 | (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0) | ||||
2773 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2774 | } break; | ||||
2775 | case Instruction::IntToPtr: | ||||
2776 | // Note that we have to take special care to avoid looking through | ||||
2777 | // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well | ||||
2778 | // as casts that can alter the value, e.g., AddrSpaceCasts. | ||||
2779 | if (!isa<ScalableVectorType>(I->getType()) && | ||||
2780 | Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= | ||||
2781 | Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) | ||||
2782 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2783 | break; | ||||
2784 | case Instruction::PtrToInt: | ||||
2785 | // Similar to int2ptr above, we can look through ptr2int here if the cast | ||||
2786 | // is a no-op or an extend and not a truncate. | ||||
2787 | if (!isa<ScalableVectorType>(I->getType()) && | ||||
2788 | Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= | ||||
2789 | Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) | ||||
2790 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2791 | break; | ||||
2792 | case Instruction::Sub: | ||||
2793 | return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), | ||||
2794 | I->getOperand(1)); | ||||
2795 | case Instruction::Or: | ||||
2796 | // X | Y != 0 if X != 0 or Y != 0. | ||||
2797 | return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) || | ||||
2798 | isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2799 | case Instruction::SExt: | ||||
2800 | case Instruction::ZExt: | ||||
2801 | // ext X != 0 if X != 0. | ||||
2802 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2803 | |||||
2804 | case Instruction::Shl: { | ||||
2805 | // shl nsw/nuw can't remove any non-zero bits. | ||||
2806 | const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); | ||||
2807 | if (Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO)) | ||||
2808 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2809 | |||||
2810 | // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined | ||||
2811 | // if the lowest bit is shifted off the end. | ||||
2812 | KnownBits Known(BitWidth); | ||||
2813 | computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q); | ||||
2814 | if (Known.One[0]) | ||||
2815 | return true; | ||||
2816 | |||||
2817 | return isNonZeroShift(I, DemandedElts, Depth, Q, Known); | ||||
2818 | } | ||||
2819 | case Instruction::LShr: | ||||
2820 | case Instruction::AShr: { | ||||
2821 | // shr exact can only shift out zero bits. | ||||
2822 | const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); | ||||
2823 | if (BO->isExact()) | ||||
2824 | return isKnownNonZero(I->getOperand(0), Depth, Q); | ||||
2825 | |||||
2826 | // shr X, Y != 0 if X is negative. Note that the value of the shift is not | ||||
2827 | // defined if the sign bit is shifted off the end. | ||||
2828 | KnownBits Known = | ||||
2829 | computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2830 | if (Known.isNegative()) | ||||
2831 | return true; | ||||
2832 | |||||
2833 | return isNonZeroShift(I, DemandedElts, Depth, Q, Known); | ||||
2834 | } | ||||
2835 | case Instruction::UDiv: | ||||
2836 | case Instruction::SDiv: | ||||
2837 | // X / Y | ||||
2838 | // div exact can only produce a zero if the dividend is zero. | ||||
2839 | if (cast<PossiblyExactOperator>(I)->isExact()) | ||||
2840 | return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2841 | if (I->getOpcode() == Instruction::UDiv) { | ||||
2842 | std::optional<bool> XUgeY; | ||||
2843 | KnownBits XKnown = | ||||
2844 | computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2845 | if (!XKnown.isUnknown()) { | ||||
2846 | KnownBits YKnown = | ||||
2847 | computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2848 | // If X u>= Y then the div is non-zero (0/0 is UB). | ||||
2849 | XUgeY = KnownBits::uge(XKnown, YKnown); | ||||
2850 | } | ||||
2851 | // If X is totally unknown or X u< Y, we won't be able to prove non-zero | ||||
2852 | // with computeKnownBits, so just return early. | ||||
2853 | return XUgeY && *XUgeY; | ||||
2854 | } | ||||
2855 | break; | ||||
2856 | case Instruction::Add: { | ||||
2857 | // X + Y. | ||||
2858 | |||||
2859 | // If the Add has the nuw flag, then if either X or Y is non-zero the | ||||
2860 | // result is non-zero. | ||||
2861 | auto *BO = cast<OverflowingBinaryOperator>(V); | ||||
2862 | if (Q.IIQ.hasNoUnsignedWrap(BO)) | ||||
2863 | return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) || | ||||
2864 | isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2865 | |||||
2866 | return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), | ||||
2867 | I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO)); | ||||
2868 | } | ||||
2869 | case Instruction::Mul: { | ||||
2870 | // If X and Y are non-zero then so is X * Y as long as the multiplication | ||||
2871 | // does not overflow. | ||||
2872 | const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); | ||||
2873 | if (Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) | ||||
2874 | return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q) && | ||||
2875 | isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2876 | |||||
2877 | // If either X or Y is odd, then if the other is non-zero the result can't | ||||
2878 | // be zero. | ||||
2879 | KnownBits XKnown = | ||||
2880 | computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2881 | if (XKnown.One[0]) | ||||
2882 | return isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2883 | |||||
2884 | KnownBits YKnown = | ||||
2885 | computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); | ||||
2886 | if (YKnown.One[0]) | ||||
2887 | return XKnown.isNonZero() || | ||||
2888 | isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q); | ||||
2889 | |||||
2890 | return KnownBits::mul(XKnown, YKnown).isNonZero(); | ||||
2891 | } | ||||
2892 | case Instruction::Select: | ||||
2893 | // (C ? X : Y) != 0 if X != 0 and Y != 0. | ||||
2894 | if (isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) && | ||||
2895 | isKnownNonZero(I->getOperand(2), DemandedElts, Depth, Q)) | ||||
2896 | return true; | ||||
2897 | break; | ||||
2898 | case Instruction::PHI: { | ||||
2899 | auto *PN = cast<PHINode>(I); | ||||
2900 | if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN)) | ||||
2901 | return true; | ||||
2902 | |||||
2903 | // Check if all incoming values are non-zero using recursion. | ||||
2904 | Query RecQ = Q; | ||||
2905 | unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); | ||||
2906 | return llvm::all_of(PN->operands(), [&](const Use &U) { | ||||
2907 | if (U.get() == PN) | ||||
2908 | return true; | ||||
2909 | RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); | ||||
2910 | return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ); | ||||
2911 | }); | ||||
2912 | } | ||||
2913 | case Instruction::ExtractElement: | ||||
2914 | if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) { | ||||
2915 | const Value *Vec = EEI->getVectorOperand(); | ||||
2916 | const Value *Idx = EEI->getIndexOperand(); | ||||
2917 | auto *CIdx = dyn_cast<ConstantInt>(Idx); | ||||
2918 | if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { | ||||
2919 | unsigned NumElts = VecTy->getNumElements(); | ||||
2920 | APInt DemandedVecElts = APInt::getAllOnes(NumElts); | ||||
2921 | if (CIdx && CIdx->getValue().ult(NumElts)) | ||||
2922 | DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); | ||||
2923 | return isKnownNonZero(Vec, DemandedVecElts, Depth, Q); | ||||
2924 | } | ||||
2925 | } | ||||
2926 | break; | ||||
2927 | case Instruction::Freeze: | ||||
2928 | return isKnownNonZero(I->getOperand(0), Depth, Q) && | ||||
2929 | isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, | ||||
2930 | Depth); | ||||
2931 | case Instruction::Call: | ||||
2932 | if (auto *II = dyn_cast<IntrinsicInst>(I)) { | ||||
2933 | switch (II->getIntrinsicID()) { | ||||
2934 | case Intrinsic::sshl_sat: | ||||
2935 | case Intrinsic::ushl_sat: | ||||
2936 | case Intrinsic::abs: | ||||
2937 | case Intrinsic::bitreverse: | ||||
2938 | case Intrinsic::bswap: | ||||
2939 | case Intrinsic::ctpop: | ||||
2940 | return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q); | ||||
2941 | case Intrinsic::ssub_sat: | ||||
2942 | return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, | ||||
2943 | II->getArgOperand(0), II->getArgOperand(1)); | ||||
2944 | case Intrinsic::sadd_sat: | ||||
2945 | return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, | ||||
2946 | II->getArgOperand(0), II->getArgOperand(1), | ||||
2947 | /*NSW*/ true); | ||||
2948 | case Intrinsic::umax: | ||||
2949 | case Intrinsic::uadd_sat: | ||||
2950 | return isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q) || | ||||
2951 | isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q); | ||||
2952 | case Intrinsic::smin: | ||||
2953 | case Intrinsic::smax: { | ||||
2954 | auto KnownOpImpliesNonZero = [&](const KnownBits &K) { | ||||
2955 | return II->getIntrinsicID() == Intrinsic::smin | ||||
2956 | ? K.isNegative() | ||||
2957 | : K.isStrictlyPositive(); | ||||
2958 | }; | ||||
2959 | KnownBits XKnown = | ||||
2960 | computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q); | ||||
2961 | if (KnownOpImpliesNonZero(XKnown)) | ||||
2962 | return true; | ||||
2963 | KnownBits YKnown = | ||||
2964 | computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q); | ||||
2965 | if (KnownOpImpliesNonZero(YKnown)) | ||||
2966 | return true; | ||||
2967 | |||||
2968 | if (XKnown.isNonZero() && YKnown.isNonZero()) | ||||
2969 | return true; | ||||
2970 | } | ||||
2971 | [[fallthrough]]; | ||||
2972 | case Intrinsic::umin: | ||||
2973 | return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q) && | ||||
2974 | isKnownNonZero(II->getArgOperand(1), DemandedElts, Depth, Q); | ||||
2975 | case Intrinsic::cttz: | ||||
2976 | return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) | ||||
2977 | .Zero[0]; | ||||
2978 | case Intrinsic::ctlz: | ||||
2979 | return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) | ||||
2980 | .isNonNegative(); | ||||
2981 | case Intrinsic::fshr: | ||||
2982 | case Intrinsic::fshl: | ||||
2983 | // If Op0 == Op1, this is a rotate. rotate(x, y) != 0 iff x != 0. | ||||
2984 | if (II->getArgOperand(0) == II->getArgOperand(1)) | ||||
2985 | return isKnownNonZero(II->getArgOperand(0), DemandedElts, Depth, Q); | ||||
2986 | break; | ||||
2987 | case Intrinsic::vscale: | ||||
2988 | return true; | ||||
2989 | default: | ||||
2990 | break; | ||||
2991 | } | ||||
2992 | } | ||||
2993 | break; | ||||
2994 | } | ||||
2995 | |||||
2996 | KnownBits Known(BitWidth); | ||||
2997 | computeKnownBits(V, DemandedElts, Known, Depth, Q); | ||||
2998 | return Known.One != 0; | ||||
2999 | } | ||||
3000 | |||||
3001 | bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) { | ||||
3002 | auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
3003 | APInt DemandedElts = | ||||
3004 | FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); | ||||
3005 | return isKnownNonZero(V, DemandedElts, Depth, Q); | ||||
3006 | } | ||||
3007 | |||||
3008 | /// If the pair of operators are the same invertible function, return | ||||
3009 | /// the operands of the function corresponding to each input. Otherwise, | ||||
3010 | /// return std::nullopt. An invertible function is one that is 1-to-1 and maps | ||||
3011 | /// every input value to exactly one output value. This is equivalent to | ||||
3012 | /// saying that Op1 and Op2 are equal exactly when the specified pair of | ||||
3013 | /// operands are equal (except that Op1 and Op2 may be poison more often). | ||||
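| /// For example (illustrative): given %a = add i32 %x, %c and | ||||
| /// %b = add i32 %y, %c, this returns the pair (%x, %y), since %a == %b | ||||
| /// holds exactly when %x == %y. | ||||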
3014 | static std::optional<std::pair<Value*, Value*>> | ||||
3015 | getInvertibleOperands(const Operator *Op1, | ||||
3016 | const Operator *Op2) { | ||||
3017 | if (Op1->getOpcode() != Op2->getOpcode()) | ||||
3018 | return std::nullopt; | ||||
3019 | |||||
3020 | auto getOperands = [&](unsigned OpNum) -> auto { | ||||
3021 | return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum)); | ||||
3022 | }; | ||||
3023 | |||||
3024 | switch (Op1->getOpcode()) { | ||||
3025 | default: | ||||
3026 | break; | ||||
3027 | case Instruction::Add: | ||||
3028 | case Instruction::Sub: | ||||
3029 | if (Op1->getOperand(0) == Op2->getOperand(0)) | ||||
3030 | return getOperands(1); | ||||
3031 | if (Op1->getOperand(1) == Op2->getOperand(1)) | ||||
3032 | return getOperands(0); | ||||
3033 | break; | ||||
3034 | case Instruction::Mul: { | ||||
3035 | // invertible if A * B == (A * B) mod 2^N where A and B are integers | ||||
3036 | // and N is the bitwidth. The nsw case is non-obvious, but proven by | ||||
3037 | // alive2: https://alive2.llvm.org/ce/z/Z6D5qK | ||||
3038 | auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); | ||||
3039 | auto *OBO2 = cast<OverflowingBinaryOperator>(Op2); | ||||
3040 | if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && | ||||
3041 | (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) | ||||
3042 | break; | ||||
3043 | |||||
3044 | // Assume operand order has been canonicalized | ||||
3045 | if (Op1->getOperand(1) == Op2->getOperand(1) && | ||||
3046 | isa<ConstantInt>(Op1->getOperand(1)) && | ||||
3047 | !cast<ConstantInt>(Op1->getOperand(1))->isZero()) | ||||
3048 | return getOperands(0); | ||||
3049 | break; | ||||
3050 | } | ||||
3051 | case Instruction::Shl: { | ||||
3052 | // Same as multiplies, with the difference that we don't need to check | ||||
3053 | // for a non-zero multiply. Shifts always multiply by non-zero. | ||||
3054 | auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); | ||||
3055 | auto *OBO2 = cast<OverflowingBinaryOperator>(Op2); | ||||
3056 | if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && | ||||
3057 | (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) | ||||
3058 | break; | ||||
3059 | |||||
3060 | if (Op1->getOperand(1) == Op2->getOperand(1)) | ||||
3061 | return getOperands(0); | ||||
3062 | break; | ||||
3063 | } | ||||
3064 | case Instruction::AShr: | ||||
3065 | case Instruction::LShr: { | ||||
3066 | auto *PEO1 = cast<PossiblyExactOperator>(Op1); | ||||
3067 | auto *PEO2 = cast<PossiblyExactOperator>(Op2); | ||||
3068 | if (!PEO1->isExact() || !PEO2->isExact()) | ||||
3069 | break; | ||||
3070 | |||||
3071 | if (Op1->getOperand(1) == Op2->getOperand(1)) | ||||
3072 | return getOperands(0); | ||||
3073 | break; | ||||
3074 | } | ||||
3075 | case Instruction::SExt: | ||||
3076 | case Instruction::ZExt: | ||||
3077 | if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType()) | ||||
3078 | return getOperands(0); | ||||
3079 | break; | ||||
3080 | case Instruction::PHI: { | ||||
3081 | const PHINode *PN1 = cast<PHINode>(Op1); | ||||
3082 | const PHINode *PN2 = cast<PHINode>(Op2); | ||||
3083 | |||||
3084 | // If PN1 and PN2 are both recurrences, can we prove the entire recurrences | ||||
3085 | // are a single invertible function of the start values? Note that repeated | ||||
3086 | // application of an invertible function is also invertible. | ||||
3087 | BinaryOperator *BO1 = nullptr; | ||||
3088 | Value *Start1 = nullptr, *Step1 = nullptr; | ||||
3089 | BinaryOperator *BO2 = nullptr; | ||||
3090 | Value *Start2 = nullptr, *Step2 = nullptr; | ||||
3091 | if (PN1->getParent() != PN2->getParent() || | ||||
3092 | !matchSimpleRecurrence(PN1, BO1, Start1, Step1) || | ||||
3093 | !matchSimpleRecurrence(PN2, BO2, Start2, Step2)) | ||||
3094 | break; | ||||
3095 | |||||
3096 | auto Values = getInvertibleOperands(cast<Operator>(BO1), | ||||
3097 | cast<Operator>(BO2)); | ||||
3098 | if (!Values) | ||||
3099 | break; | ||||
3100 | |||||
3101 | // We have to be careful of mutually defined recurrences here. Ex: | ||||
3102 | // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V | ||||
3103 | // * X_i = Y_i = X_(i-1) OP Y_(i-1) | ||||
3104 | // The invertibility of these is complicated, and not worth reasoning | ||||
3105 | // about (yet?). | ||||
3106 | if (Values->first != PN1 || Values->second != PN2) | ||||
3107 | break; | ||||
3108 | |||||
3109 | return std::make_pair(Start1, Start2); | ||||
3110 | } | ||||
3111 | } | ||||
3112 | return std::nullopt; | ||||
3113 | } | ||||
3114 | |||||
3115 | /// Return true if V2 == V1 + X, where X is known non-zero. | ||||
3116 | static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth, | ||||
3117 | const Query &Q) { | ||||
3118 | const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); | ||||
3119 | if (!BO || BO->getOpcode() != Instruction::Add) | ||||
3120 | return false; | ||||
3121 | Value *Op = nullptr; | ||||
3122 | if (V2 == BO->getOperand(0)) | ||||
3123 | Op = BO->getOperand(1); | ||||
3124 | else if (V2 == BO->getOperand(1)) | ||||
3125 | Op = BO->getOperand(0); | ||||
3126 | else | ||||
3127 | return false; | ||||
3128 | return isKnownNonZero(Op, Depth + 1, Q); | ||||
3129 | } | ||||
3130 | |||||
3131 | /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and | ||||
3132 | /// the multiplication is nuw or nsw. | ||||
3133 | static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth, | ||||
3134 | const Query &Q) { | ||||
3135 | if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) { | ||||
3136 | const APInt *C; | ||||
3137 | return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) && | ||||
3138 | (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && | ||||
3139 | !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q); | ||||
3140 | } | ||||
3141 | return false; | ||||
3142 | } | ||||
3143 | |||||
3144 | /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and | ||||
3145 | /// the shift is nuw or nsw. | ||||
3146 | static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth, | ||||
3147 | const Query &Q) { | ||||
3148 | if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) { | ||||
3149 | const APInt *C; | ||||
3150 | return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) && | ||||
3151 | (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && | ||||
3152 | !C->isZero() && isKnownNonZero(V1, Depth + 1, Q); | ||||
3153 | } | ||||
3154 | return false; | ||||
3155 | } | ||||
3156 | |||||
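| // Return true if PN1 and PN2, two PHIs in the same block, are known to | ||||
| // take different values in every predecessor. For example (hypothetical | ||||
| // IR), %p1 = phi i32 [ 1, %a ], [ %x, %b ] and | ||||
| // %p2 = phi i32 [ 2, %a ], [ %y, %b ] differ if %x != %y can be shown; | ||||
| // the constant pair 1 != 2 is handled without recursion, and only one | ||||
| // full recursive query is allowed. | ||||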
3157 | static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, | ||||
3158 | unsigned Depth, const Query &Q) { | ||||
3159 | // Check that the two PHIs are in the same block. | ||||
3160 | if (PN1->getParent() != PN2->getParent()) | ||||
3161 | return false; | ||||
3162 | |||||
3163 | SmallPtrSet<const BasicBlock *, 8> VisitedBBs; | ||||
3164 | bool UsedFullRecursion = false; | ||||
3165 | for (const BasicBlock *IncomBB : PN1->blocks()) { | ||||
3166 | if (!VisitedBBs.insert(IncomBB).second) | ||||
3167 | continue; // Don't reprocess blocks that we have dealt with already. | ||||
3168 | const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB); | ||||
3169 | const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB); | ||||
3170 | const APInt *C1, *C2; | ||||
3171 | if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2) | ||||
3172 | continue; | ||||
3173 | |||||
3174 | // Only one pair of phi operands is allowed for full recursion. | ||||
3175 | if (UsedFullRecursion) | ||||
3176 | return false; | ||||
3177 | |||||
3178 | Query RecQ = Q; | ||||
3179 | RecQ.CxtI = IncomBB->getTerminator(); | ||||
3180 | if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ)) | ||||
3181 | return false; | ||||
3182 | UsedFullRecursion = true; | ||||
3183 | } | ||||
3184 | return true; | ||||
3185 | } | ||||
3186 | |||||
3187 | /// Return true if it is known that V1 != V2. | ||||
3188 | static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth, | ||||
3189 | const Query &Q) { | ||||
3190 | if (V1 == V2) | ||||
3191 | return false; | ||||
3192 | if (V1->getType() != V2->getType()) | ||||
3193 | // We can't look through casts yet. | ||||
3194 | return false; | ||||
3195 | |||||
3196 | if (Depth >= MaxAnalysisRecursionDepth) | ||||
3197 | return false; | ||||
3198 | |||||
3199 | // See if we can recurse through (exactly one of) our operands. This | ||||
3200 | // requires our operation be 1-to-1 and map every input value to exactly | ||||
3201 | // one output value. Such an operation is invertible. | ||||
3202 | auto *O1 = dyn_cast<Operator>(V1); | ||||
3203 | auto *O2 = dyn_cast<Operator>(V2); | ||||
3204 | if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) { | ||||
3205 | if (auto Values = getInvertibleOperands(O1, O2)) | ||||
3206 | return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q); | ||||
3207 | |||||
3208 | if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) { | ||||
3209 | const PHINode *PN2 = cast<PHINode>(V2); | ||||
3210 | // FIXME: This is missing a generalization to handle the case where one is | ||||
3211 | // a PHI and the other one isn't. | ||||
3212 | if (isNonEqualPHIs(PN1, PN2, Depth, Q)) | ||||
3213 | return true; | ||||
3214 | } | ||||
3215 | } | ||||
3216 | |||||
3217 | if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q)) | ||||
3218 | return true; | ||||
3219 | |||||
3220 | if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q)) | ||||
3221 | return true; | ||||
3222 | |||||
3223 | if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q)) | ||||
3224 | return true; | ||||
3225 | |||||
3226 | if (V1->getType()->isIntOrIntVectorTy()) { | ||||
3227 | // Are any known bits in V1 contradictory to known bits in V2? If V1 | ||||
3228 | // has a known zero where V2 has a known one, they must not be equal. | ||||
3229 | KnownBits Known1 = computeKnownBits(V1, Depth, Q); | ||||
3230 | KnownBits Known2 = computeKnownBits(V2, Depth, Q); | ||||
3231 | |||||
3232 | if (Known1.Zero.intersects(Known2.One) || | ||||
3233 | Known2.Zero.intersects(Known1.One)) | ||||
3234 | return true; | ||||
3235 | } | ||||
3236 | return false; | ||||
3237 | } | ||||
3238 | |||||
3239 | /// Return true if 'V & Mask' is known to be zero. We use this predicate to | ||||
3240 | /// simplify operations downstream. Mask is known to be zero for bits that V | ||||
3241 | /// cannot have. | ||||
3242 | /// | ||||
3243 | /// This function is defined on values with integer type, values with pointer | ||||
3244 | /// type, and vectors of integers. In the case | ||||
3245 | /// where V is a vector, the mask, known zero, and known one values are the | ||||
3246 | /// same width as the vector element, and the bit is set only if it is true | ||||
3247 | /// for all of the elements in the vector. | ||||
3248 | bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, | ||||
3249 | const Query &Q) { | ||||
3250 | KnownBits Known(Mask.getBitWidth()); | ||||
3251 | computeKnownBits(V, Known, Depth, Q); | ||||
3252 | return Mask.isSubsetOf(Known.Zero); | ||||
3253 | } | ||||
3254 | |||||
3255 | // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow). | ||||
3256 | // Returns the input and lower/upper bounds. | ||||
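| // For example (illustrative): smax(smin(%x, 255), 0) clamps %x to | ||||
| // [0, 255], giving In = %x, CLow = 0, CHigh = 255. | ||||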
3257 | static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, | ||||
3258 | const APInt *&CLow, const APInt *&CHigh) { | ||||
3259 | assert(isa<Operator>(Select) && | ||||
3260 | cast<Operator>(Select)->getOpcode() == Instruction::Select && | ||||
3261 | "Input should be a Select!"); | ||||
3262 | |||||
3263 | const Value *LHS = nullptr, *RHS = nullptr; | ||||
3264 | SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor; | ||||
3265 | if (SPF != SPF_SMAX && SPF != SPF_SMIN) | ||||
3266 | return false; | ||||
3267 | |||||
3268 | if (!match(RHS, m_APInt(CLow))) | ||||
3269 | return false; | ||||
3270 | |||||
3271 | const Value *LHS2 = nullptr, *RHS2 = nullptr; | ||||
3272 | SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor; | ||||
3273 | if (getInverseMinMaxFlavor(SPF) != SPF2) | ||||
3274 | return false; | ||||
3275 | |||||
3276 | if (!match(RHS2, m_APInt(CHigh))) | ||||
3277 | return false; | ||||
3278 | |||||
3279 | if (SPF == SPF_SMIN) | ||||
3280 | std::swap(CLow, CHigh); | ||||
3281 | |||||
3282 | In = LHS2; | ||||
3283 | return CLow->sle(*CHigh); | ||||
3284 | } | ||||
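// Illustrative example: the select chain for smax(smin(%x, 127), -128)
// matches with In = %x, CLow = -128, CHigh = 127, and CLow->sle(*CHigh)
// holds, so the clamp is recognized. When the outer flavor is SPF_SMIN
// (smin(smax(%x, CLow), CHigh)), the constants arrive in the opposite
// order, hence the std::swap above.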
3285 | |||||
3286 | static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II, | ||||
3287 | const APInt *&CLow, | ||||
3288 | const APInt *&CHigh) { | ||||
3289 | assert((II->getIntrinsicID() == Intrinsic::smin || | ||||
3290 |         II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax"); | ||||
3291 | |||||
3292 | Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID()); | ||||
3293 | auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0)); | ||||
3294 | if (!InnerII || InnerII->getIntrinsicID() != InverseID || | ||||
3295 | !match(II->getArgOperand(1), m_APInt(CLow)) || | ||||
3296 | !match(InnerII->getArgOperand(1), m_APInt(CHigh))) | ||||
3297 | return false; | ||||
3298 | |||||
3299 | if (II->getIntrinsicID() == Intrinsic::smin) | ||||
3300 | std::swap(CLow, CHigh); | ||||
3301 | return CLow->sle(*CHigh); | ||||
3302 | } | ||||
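// Illustrative example: for II = llvm.smax(llvm.smin(%x, 127), -128), the
// inner call is the inverse intrinsic, so CLow = -128, CHigh = 127 and the
// function returns true; the smin-outermost form is handled by the swap.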
3303 | |||||
3304 | /// For vector constants, loop over the elements and find the constant with the | ||||
3305 | /// minimum number of sign bits. Return 0 if the value is not a vector constant | ||||
3306 | /// or if any element was not analyzed; otherwise, return the count for the | ||||
3307 | /// element with the minimum number of sign bits. | ||||
3308 | static unsigned computeNumSignBitsVectorConstant(const Value *V, | ||||
3309 | const APInt &DemandedElts, | ||||
3310 | unsigned TyBits) { | ||||
3311 | const auto *CV = dyn_cast<Constant>(V); | ||||
3312 | if (!CV || !isa<FixedVectorType>(CV->getType())) | ||||
3313 | return 0; | ||||
3314 | |||||
3315 | unsigned MinSignBits = TyBits; | ||||
3316 | unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements(); | ||||
3317 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
3318 | if (!DemandedElts[i]) | ||||
3319 | continue; | ||||
3320 | // If we find a non-ConstantInt, bail out. | ||||
3321 | auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); | ||||
3322 | if (!Elt) | ||||
3323 | return 0; | ||||
3324 | |||||
3325 | MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); | ||||
3326 | } | ||||
3327 | |||||
3328 | return MinSignBits; | ||||
3329 | } | ||||
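// Worked example (illustrative): for <4 x i8> <i8 1, i8 -2, i8 3, i8 -4>
// the per-element sign-bit counts are 7, 7, 6, and 6, so demanding all four
// elements yields 6, while demanding only the first two yields 7.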
3330 | |||||
3331 | static unsigned ComputeNumSignBitsImpl(const Value *V, | ||||
3332 | const APInt &DemandedElts, | ||||
3333 | unsigned Depth, const Query &Q); | ||||
3334 | |||||
3335 | static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, | ||||
3336 | unsigned Depth, const Query &Q) { | ||||
3337 | unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q); | ||||
3338 | assert(Result > 0 && "At least one sign bit needs to be present!"); | ||||
3339 | return Result; | ||||
3340 | } | ||||
3341 | |||||
3342 | /// Return the number of times the sign bit of the register is replicated into | ||||
3343 | /// the other bits. We know that at least 1 bit is always equal to the sign bit | ||||
3344 | /// (itself), but other cases can give us information. For example, immediately | ||||
3345 | /// after an "ashr X, 2", we know that the top 3 bits are all equal to each | ||||
3346 | /// other, so we return 3. For vectors, return the number of sign bits for the | ||||
3347 | /// vector element with the minimum number of known sign bits of the demanded | ||||
3348 | /// elements in the vector specified by DemandedElts. | ||||
3349 | static unsigned ComputeNumSignBitsImpl(const Value *V, | ||||
3350 | const APInt &DemandedElts, | ||||
3351 | unsigned Depth, const Query &Q) { | ||||
3352 | Type *Ty = V->getType(); | ||||
3353 | #ifndef NDEBUG | ||||
3354 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); | ||||
3355 | |||||
3356 | if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { | ||||
3357 | assert( | ||||
3358 |     FVTy->getNumElements() == DemandedElts.getBitWidth() && | ||||
3359 |     "DemandedElt width should equal the fixed vector number of elements"); | ||||
3360 | } else { | ||||
3361 | assert(DemandedElts == APInt(1, 1) && | ||||
3362 |        "DemandedElt width should be 1 for scalars"); | ||||
3363 | } | ||||
3364 | #endif | ||||
3365 | |||||
3366 | // We return the minimum number of sign bits that are guaranteed to be present | ||||
3367 | // in V, so for undef we have to conservatively return 1. We don't have the | ||||
3368 | // same behavior for poison though -- that's a FIXME today. | ||||
3369 | |||||
3370 | Type *ScalarTy = Ty->getScalarType(); | ||||
3371 | unsigned TyBits = ScalarTy->isPointerTy() ? | ||||
3372 | Q.DL.getPointerTypeSizeInBits(ScalarTy) : | ||||
3373 | Q.DL.getTypeSizeInBits(ScalarTy); | ||||
3374 | |||||
3375 | unsigned Tmp, Tmp2; | ||||
3376 | unsigned FirstAnswer = 1; | ||||
3377 | |||||
3378 | // Note that ConstantInt is handled by the general computeKnownBits case | ||||
3379 | // below. | ||||
3380 | |||||
3381 | if (Depth == MaxAnalysisRecursionDepth) | ||||
3382 | return 1; | ||||
3383 | |||||
3384 | if (auto *U = dyn_cast<Operator>(V)) { | ||||
3385 | switch (Operator::getOpcode(V)) { | ||||
3386 | default: break; | ||||
3387 | case Instruction::SExt: | ||||
3388 | Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
3389 | return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp; | ||||
3390 | |||||
3391 | case Instruction::SDiv: { | ||||
3392 | const APInt *Denominator; | ||||
3393 | // sdiv X, C -> adds log(C) sign bits. | ||||
3394 | if (match(U->getOperand(1), m_APInt(Denominator))) { | ||||
3395 | |||||
3396 | // Ignore non-positive denominator. | ||||
3397 | if (!Denominator->isStrictlyPositive()) | ||||
3398 | break; | ||||
3399 | |||||
3400 | // Calculate the incoming numerator bits. | ||||
3401 | unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3402 | |||||
3403 | // Add floor(log(C)) bits to the numerator bits. | ||||
3404 | return std::min(TyBits, NumBits + Denominator->logBase2()); | ||||
3405 | } | ||||
3406 | break; | ||||
3407 | } | ||||
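// Worked example (illustrative): for (sdiv i32 %x, 16), logBase2() is 4; if
// %x has 8 known sign bits, the result has min(32, 8 + 4) = 12.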
3408 | |||||
3409 | case Instruction::SRem: { | ||||
3410 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3411 | |||||
3412 | const APInt *Denominator; | ||||
3413 | // srem X, C -> we know that the result is within [-C+1,C) when C is a | ||||
3414 | // positive constant. This lets us put a lower bound on the number of sign | ||||
3415 | // bits. | ||||
3416 | if (match(U->getOperand(1), m_APInt(Denominator))) { | ||||
3417 | |||||
3418 | // Ignore non-positive denominator. | ||||
3419 | if (Denominator->isStrictlyPositive()) { | ||||
3420 | // Calculate the leading sign bit constraints by examining the | ||||
3421 | // denominator. Given that the denominator is positive, there are two | ||||
3422 | // cases: | ||||
3423 | // | ||||
3424 | // 1. The numerator is positive. The result range is [0,C) and | ||||
3425 | // [0,C) u< (1 << ceilLogBase2(C)). | ||||
3426 | // | ||||
3427 | // 2. The numerator is negative. Then the result range is (-C,0] and | ||||
3428 | // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). | ||||
3429 | // | ||||
3430 | // Thus a lower bound on the number of sign bits is `TyBits - | ||||
3431 | // ceilLogBase2(C)`. | ||||
3432 | |||||
3433 | unsigned ResBits = TyBits - Denominator->ceilLogBase2(); | ||||
3434 | Tmp = std::max(Tmp, ResBits); | ||||
3435 | } | ||||
3436 | } | ||||
3437 | return Tmp; | ||||
3438 | } | ||||
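// Worked example (illustrative): for (srem i32 %x, 12), ceilLogBase2(12) is
// 4, so the result lies in (-12, 12) and has at least 32 - 4 = 28 sign bits
// regardless of what is known about %x.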
3439 | |||||
3440 | case Instruction::AShr: { | ||||
3441 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3442 | // ashr X, C -> adds C sign bits. Vectors too. | ||||
3443 | const APInt *ShAmt; | ||||
3444 | if (match(U->getOperand(1), m_APInt(ShAmt))) { | ||||
3445 | if (ShAmt->uge(TyBits)) | ||||
3446 | break; // Bad shift. | ||||
3447 | unsigned ShAmtLimited = ShAmt->getZExtValue(); | ||||
3448 | Tmp += ShAmtLimited; | ||||
3449 | if (Tmp > TyBits) Tmp = TyBits; | ||||
3450 | } | ||||
3451 | return Tmp; | ||||
3452 | } | ||||
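// Worked example (illustrative): for (ashr i32 %x, 6) with nothing known
// about %x (Tmp = 1), the result still has 1 + 6 = 7 sign bits, clamped to
// the type width of 32.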
3453 | case Instruction::Shl: { | ||||
3454 | const APInt *ShAmt; | ||||
3455 | if (match(U->getOperand(1), m_APInt(ShAmt))) { | ||||
3456 | // shl destroys sign bits. | ||||
3457 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3458 | if (ShAmt->uge(TyBits) || // Bad shift. | ||||
3459 | ShAmt->uge(Tmp)) break; // Shifted all sign bits out. | ||||
3460 | Tmp2 = ShAmt->getZExtValue(); | ||||
3461 | return Tmp - Tmp2; | ||||
3462 | } | ||||
3463 | break; | ||||
3464 | } | ||||
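// Worked example (illustrative): if %x has 10 known sign bits, then
// (shl i32 %x, 3) keeps 10 - 3 = 7 of them; a shift amount of 10 or more
// falls through to the generic known-bits handling at the end.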
3465 | case Instruction::And: | ||||
3466 | case Instruction::Or: | ||||
3467 | case Instruction::Xor: // NOT is handled here. | ||||
3468 | // Logical binary ops preserve the number of sign bits at the worst. | ||||
3469 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3470 | if (Tmp != 1) { | ||||
3471 | Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3472 | FirstAnswer = std::min(Tmp, Tmp2); | ||||
3473 | // We computed what we know about the sign bits as our first | ||||
3474 | // answer. Now proceed to the generic code that uses | ||||
3475 | // computeKnownBits, and pick whichever answer is better. | ||||
3476 | } | ||||
3477 | break; | ||||
3478 | |||||
3479 | case Instruction::Select: { | ||||
3480 | // If we have a clamp pattern, we know that the number of sign bits will | ||||
3481 | // be the minimum of the clamp min/max range. | ||||
3482 | const Value *X; | ||||
3483 | const APInt *CLow, *CHigh; | ||||
3484 | if (isSignedMinMaxClamp(U, X, CLow, CHigh)) | ||||
3485 | return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); | ||||
3486 | |||||
3487 | Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3488 | if (Tmp == 1) break; | ||||
3489 | Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); | ||||
3490 | return std::min(Tmp, Tmp2); | ||||
3491 | } | ||||
3492 | |||||
3493 | case Instruction::Add: | ||||
3494 | // Add can have at most one carry bit. Thus we know that the output | ||||
3495 | // is, at worst, one more bit than the inputs. | ||||
3496 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3497 | if (Tmp == 1) break; | ||||
3498 | |||||
3499 | // Special case decrementing a value (ADD X, -1): | ||||
3500 | if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) | ||||
3501 | if (CRHS->isAllOnesValue()) { | ||||
3502 | KnownBits Known(TyBits); | ||||
3503 | computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); | ||||
3504 | |||||
3505 | // If the input is known to be 0 or 1, the output is 0/-1, which is | ||||
3506 | // all sign bits set. | ||||
3507 | if ((Known.Zero | 1).isAllOnes()) | ||||
3508 | return TyBits; | ||||
3509 | |||||
3510 | // If we are subtracting one from a positive number, there is no carry | ||||
3511 | // out of the result. | ||||
3512 | if (Known.isNonNegative()) | ||||
3513 | return Tmp; | ||||
3514 | } | ||||
3515 | |||||
3516 | Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3517 | if (Tmp2 == 1) break; | ||||
3518 | return std::min(Tmp, Tmp2) - 1; | ||||
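// Worked example (illustrative): adding operands with 20 and 24 known sign
// bits leaves min(20, 24) - 1 = 19, since the carry can consume one bit.
// The (add %b, -1) special case above returns all 32 bits for an i32 %b
// known to be 0 or 1, because the result is then 0 or -1.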
3519 | |||||
3520 | case Instruction::Sub: | ||||
3521 | Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3522 | if (Tmp2 == 1) break; | ||||
3523 | |||||
3524 | // Handle NEG. | ||||
3525 | if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) | ||||
3526 | if (CLHS->isNullValue()) { | ||||
3527 | KnownBits Known(TyBits); | ||||
3528 | computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); | ||||
3529 | // If the input is known to be 0 or 1, the output is 0/-1, which is | ||||
3530 | // all sign bits set. | ||||
3531 | if ((Known.Zero | 1).isAllOnes()) | ||||
3532 | return TyBits; | ||||
3533 | |||||
3534 | // If the input is known to be positive (the sign bit is known clear), | ||||
3535 | // the output of the NEG has the same number of sign bits as the | ||||
3536 | // input. | ||||
3537 | if (Known.isNonNegative()) | ||||
3538 | return Tmp2; | ||||
3539 | |||||
3540 | // Otherwise, we treat this like a SUB. | ||||
3541 | } | ||||
3542 | |||||
3543 | // Sub can have at most one carry bit. Thus we know that the output | ||||
3544 | // is, at worst, one more bit than the inputs. | ||||
3545 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3546 | if (Tmp == 1) break; | ||||
3547 | return std::min(Tmp, Tmp2) - 1; | ||||
3548 | |||||
3549 | case Instruction::Mul: { | ||||
3550 | // The output of the Mul can be at most twice the valid bits in the | ||||
3551 | // inputs. | ||||
3552 | unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3553 | if (SignBitsOp0 == 1) break; | ||||
3554 | unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); | ||||
3555 | if (SignBitsOp1 == 1) break; | ||||
3556 | unsigned OutValidBits = | ||||
3557 | (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); | ||||
3558 | return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; | ||||
3559 | } | ||||
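// Worked example (illustrative): multiplying two i32 values that each have
// 20 sign bits gives OutValidBits = 13 + 13 = 26, so the product keeps
// 32 - 26 + 1 = 7 sign bits.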
3560 | |||||
3561 | case Instruction::PHI: { | ||||
3562 | const PHINode *PN = cast<PHINode>(U); | ||||
3563 | unsigned NumIncomingValues = PN->getNumIncomingValues(); | ||||
3564 | // Don't analyze large in-degree PHIs. | ||||
3565 | if (NumIncomingValues > 4) break; | ||||
3566 | // Unreachable blocks may have zero-operand PHI nodes. | ||||
3567 | if (NumIncomingValues == 0) break; | ||||
3568 | |||||
3569 | // Take the minimum of all incoming values. This can't infinitely loop | ||||
3570 | // because of our depth threshold. | ||||
3571 | Query RecQ = Q; | ||||
3572 | Tmp = TyBits; | ||||
3573 | for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) { | ||||
3574 | if (Tmp == 1) return Tmp; | ||||
3575 | RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator(); | ||||
3576 | Tmp = std::min( | ||||
3577 | Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ)); | ||||
3578 | } | ||||
3579 | return Tmp; | ||||
3580 | } | ||||
3581 | |||||
3582 | case Instruction::Trunc: { | ||||
3583 | // If the input contained enough sign bits that some remain after the | ||||
3584 | // truncation, then we can make use of that. Otherwise we don't know | ||||
3585 | // anything. | ||||
3586 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3587 | unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
3588 | if (Tmp > (OperandTyBits - TyBits)) | ||||
3589 | return Tmp - (OperandTyBits - TyBits); | ||||
3590 | |||||
3591 | return 1; | ||||
3592 | } | ||||
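// Worked example (illustrative): for (trunc i64 %x to i32) where %x has 40
// sign bits, 40 - (64 - 32) = 8 sign bits survive the truncation.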
3593 | |||||
3594 | case Instruction::ExtractElement: | ||||
3595 | // Look through extract element. At the moment we keep this simple and | ||||
3596 | // skip tracking the specific element. But at least we might find | ||||
3597 | // information valid for all elements of the vector (for example if the | ||||
3598 | // vector is sign extended, shifted, etc.). | ||||
3599 | return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3600 | |||||
3601 | case Instruction::ShuffleVector: { | ||||
3602 | // Collect the minimum number of sign bits that are shared by every vector | ||||
3603 | // element referenced by the shuffle. | ||||
3604 | auto *Shuf = dyn_cast<ShuffleVectorInst>(U); | ||||
3605 | if (!Shuf) { | ||||
3606 | // FIXME: Add support for shufflevector constant expressions. | ||||
3607 | return 1; | ||||
3608 | } | ||||
3609 | APInt DemandedLHS, DemandedRHS; | ||||
3610 | // For undef elements, we don't know anything about the common state of | ||||
3611 | // the shuffle result. | ||||
3612 | if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) | ||||
3613 | return 1; | ||||
3614 | Tmp = std::numeric_limits<unsigned>::max(); | ||||
3615 | if (!!DemandedLHS) { | ||||
3616 | const Value *LHS = Shuf->getOperand(0); | ||||
3617 | Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q); | ||||
3618 | } | ||||
3619 | // If we don't know anything, early out and try computeKnownBits | ||||
3620 | // fall-back. | ||||
3621 | if (Tmp == 1) | ||||
3622 | break; | ||||
3623 | if (!!DemandedRHS) { | ||||
3624 | const Value *RHS = Shuf->getOperand(1); | ||||
3625 | Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q); | ||||
3626 | Tmp = std::min(Tmp, Tmp2); | ||||
3627 | } | ||||
3628 | // If we don't know anything, early out and try computeKnownBits | ||||
3629 | // fall-back. | ||||
3630 | if (Tmp == 1) | ||||
3631 | break; | ||||
3632 | assert(Tmp <= TyBits && "Failed to determine minimum sign bits"); | ||||
3633 | return Tmp; | ||||
3634 | } | ||||
3635 | case Instruction::Call: { | ||||
3636 | if (const auto *II = dyn_cast<IntrinsicInst>(U)) { | ||||
3637 | switch (II->getIntrinsicID()) { | ||||
3638 | default: break; | ||||
3639 | case Intrinsic::abs: | ||||
3640 | Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); | ||||
3641 | if (Tmp == 1) break; | ||||
3642 | |||||
3643 | // Absolute value reduces number of sign bits by at most 1. | ||||
3644 | return Tmp - 1; | ||||
3645 | case Intrinsic::smin: | ||||
3646 | case Intrinsic::smax: { | ||||
3647 | const APInt *CLow, *CHigh; | ||||
3648 | if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh)) | ||||
3649 | return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); | ||||
3650 | } | ||||
3651 | } | ||||
3652 | } | ||||
3653 | } | ||||
3654 | } | ||||
3655 | } | ||||
3656 | |||||
3657 | // Finally, if we can prove that the top bits of the result are 0's or 1's, | ||||
3658 | // use this information. | ||||
3659 | |||||
3660 | // If we can examine all elements of a vector constant successfully, we're | ||||
3661 | // done (we can't do any better than that). If not, keep trying. | ||||
3662 | if (unsigned VecSignBits = | ||||
3663 | computeNumSignBitsVectorConstant(V, DemandedElts, TyBits)) | ||||
3664 | return VecSignBits; | ||||
3665 | |||||
3666 | KnownBits Known(TyBits); | ||||
3667 | computeKnownBits(V, DemandedElts, Known, Depth, Q); | ||||
3668 | |||||
3669 | // If we know that the sign bit is either zero or one, determine the number of | ||||
3670 | // identical bits in the top of the input value. | ||||
3671 | return std::max(FirstAnswer, Known.countMinSignBits()); | ||||
3672 | } | ||||
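// Worked example for the known-bits fallback above (illustrative): for
// %v = (and i32 %x, 255), computeKnownBits proves bits 8-31 are zero, so
// Known.countMinSignBits() reports 24 identical leading bits.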
3673 | |||||
3674 | Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB, | ||||
3675 | const TargetLibraryInfo *TLI) { | ||||
3676 | const Function *F = CB.getCalledFunction(); | ||||
3677 | if (!F) | ||||
3678 | return Intrinsic::not_intrinsic; | ||||
3679 | |||||
3680 | if (F->isIntrinsic()) | ||||
3681 | return F->getIntrinsicID(); | ||||
3682 | |||||
3683 | // We are going to infer semantics of a library function based on mapping it | ||||
3684 | // to an LLVM intrinsic. Check that the library function is available from | ||||
3685 | // this call site and in this environment. | ||||
3686 | LibFunc Func; | ||||
3687 | if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) || | ||||
3688 | !CB.onlyReadsMemory()) | ||||
3689 | return Intrinsic::not_intrinsic; | ||||
3690 | |||||
3691 | switch (Func) { | ||||
3692 | default: | ||||
3693 | break; | ||||
3694 | case LibFunc_sin: | ||||
3695 | case LibFunc_sinf: | ||||
3696 | case LibFunc_sinl: | ||||
3697 | return Intrinsic::sin; | ||||
3698 | case LibFunc_cos: | ||||
3699 | case LibFunc_cosf: | ||||
3700 | case LibFunc_cosl: | ||||
3701 | return Intrinsic::cos; | ||||
3702 | case LibFunc_exp: | ||||
3703 | case LibFunc_expf: | ||||
3704 | case LibFunc_expl: | ||||
3705 | return Intrinsic::exp; | ||||
3706 | case LibFunc_exp2: | ||||
3707 | case LibFunc_exp2f: | ||||
3708 | case LibFunc_exp2l: | ||||
3709 | return Intrinsic::exp2; | ||||
3710 | case LibFunc_log: | ||||
3711 | case LibFunc_logf: | ||||
3712 | case LibFunc_logl: | ||||
3713 | return Intrinsic::log; | ||||
3714 | case LibFunc_log10: | ||||
3715 | case LibFunc_log10f: | ||||
3716 | case LibFunc_log10l: | ||||
3717 | return Intrinsic::log10; | ||||
3718 | case LibFunc_log2: | ||||
3719 | case LibFunc_log2f: | ||||
3720 | case LibFunc_log2l: | ||||
3721 | return Intrinsic::log2; | ||||
3722 | case LibFunc_fabs: | ||||
3723 | case LibFunc_fabsf: | ||||
3724 | case LibFunc_fabsl: | ||||
3725 | return Intrinsic::fabs; | ||||
3726 | case LibFunc_fmin: | ||||
3727 | case LibFunc_fminf: | ||||
3728 | case LibFunc_fminl: | ||||
3729 | return Intrinsic::minnum; | ||||
3730 | case LibFunc_fmax: | ||||
3731 | case LibFunc_fmaxf: | ||||
3732 | case LibFunc_fmaxl: | ||||
3733 | return Intrinsic::maxnum; | ||||
3734 | case LibFunc_copysign: | ||||
3735 | case LibFunc_copysignf: | ||||
3736 | case LibFunc_copysignl: | ||||
3737 | return Intrinsic::copysign; | ||||
3738 | case LibFunc_floor: | ||||
3739 | case LibFunc_floorf: | ||||
3740 | case LibFunc_floorl: | ||||
3741 | return Intrinsic::floor; | ||||
3742 | case LibFunc_ceil: | ||||
3743 | case LibFunc_ceilf: | ||||
3744 | case LibFunc_ceill: | ||||
3745 | return Intrinsic::ceil; | ||||
3746 | case LibFunc_trunc: | ||||
3747 | case LibFunc_truncf: | ||||
3748 | case LibFunc_truncl: | ||||
3749 | return Intrinsic::trunc; | ||||
3750 | case LibFunc_rint: | ||||
3751 | case LibFunc_rintf: | ||||
3752 | case LibFunc_rintl: | ||||
3753 | return Intrinsic::rint; | ||||
3754 | case LibFunc_nearbyint: | ||||
3755 | case LibFunc_nearbyintf: | ||||
3756 | case LibFunc_nearbyintl: | ||||
3757 | return Intrinsic::nearbyint; | ||||
3758 | case LibFunc_round: | ||||
3759 | case LibFunc_roundf: | ||||
3760 | case LibFunc_roundl: | ||||
3761 | return Intrinsic::round; | ||||
3762 | case LibFunc_roundeven: | ||||
3763 | case LibFunc_roundevenf: | ||||
3764 | case LibFunc_roundevenl: | ||||
3765 | return Intrinsic::roundeven; | ||||
3766 | case LibFunc_pow: | ||||
3767 | case LibFunc_powf: | ||||
3768 | case LibFunc_powl: | ||||
3769 | return Intrinsic::pow; | ||||
3770 | case LibFunc_sqrt: | ||||
3771 | case LibFunc_sqrtf: | ||||
3772 | case LibFunc_sqrtl: | ||||
3773 | return Intrinsic::sqrt; | ||||
3774 | } | ||||
3775 | |||||
3776 | return Intrinsic::not_intrinsic; | ||||
3777 | } | ||||
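// Usage sketch (illustrative, not a call site from this file): a caller can
// treat a recognized libcall like its intrinsic form, e.g.:
//   if (getIntrinsicForCallSite(*Call, TLI) == Intrinsic::sqrt)
//     ...; // reason about the sqrt() libcall with llvm.sqrt semantics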
3778 | |||||
3779 | /// Return true if we can prove that the specified FP value is never equal to | ||||
3780 | /// -0.0. | ||||
3781 | /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee | ||||
3782 | /// that a value is not -0.0. It only guarantees that -0.0 may be treated | ||||
3783 | /// the same as +0.0 in floating-point ops. | ||||
3784 | bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, | ||||
3785 | unsigned Depth) { | ||||
3786 | if (auto *CFP = dyn_cast<ConstantFP>(V)) | ||||
3787 | return !CFP->getValueAPF().isNegZero(); | ||||
3788 | |||||
3789 | if (Depth == MaxAnalysisRecursionDepth) | ||||
3790 | return false; | ||||
3791 | |||||
3792 | auto *Op = dyn_cast<Operator>(V); | ||||
3793 | if (!Op) | ||||
3794 | return false; | ||||
3795 | |||||
3796 | // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. | ||||
3797 | if (match(Op, m_FAdd(m_Value(), m_PosZeroFP()))) | ||||
3798 | return true; | ||||
3799 | |||||
3800 | // sitofp and uitofp turn into +0.0 for zero. | ||||
3801 | if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) | ||||
3802 | return true; | ||||
3803 | |||||
3804 | if (auto *Call = dyn_cast<CallInst>(Op)) { | ||||
3805 | Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI); | ||||
3806 | switch (IID) { | ||||
3807 | default: | ||||
3808 | break; | ||||
3809 | // sqrt(-0.0) = -0.0, no other negative results are possible. | ||||
3810 | case Intrinsic::sqrt: | ||||
3811 | case Intrinsic::canonicalize: | ||||
3812 | return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); | ||||
3813 | case Intrinsic::experimental_constrained_sqrt: { | ||||
3814 | // NOTE: This rounding mode restriction may be too strict. | ||||
3815 | const auto *CI = cast<ConstrainedFPIntrinsic>(Call); | ||||
3816 | if (CI->getRoundingMode() == RoundingMode::NearestTiesToEven) | ||||
3817 | return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); | ||||
3818 | else | ||||
3819 | return false; | ||||
3820 | } | ||||
3821 | // fabs(x) != -0.0 | ||||
3822 | case Intrinsic::fabs: | ||||
3823 | return true; | ||||
3824 | // sitofp and uitofp turn into +0.0 for zero. | ||||
3825 | case Intrinsic::experimental_constrained_sitofp: | ||||
3826 | case Intrinsic::experimental_constrained_uitofp: | ||||
3827 | return true; | ||||
3828 | } | ||||
3829 | } | ||||
3830 | |||||
3831 | return false; | ||||
3832 | } | ||||
3833 | |||||
3834 | /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a | ||||
3835 | /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign | ||||
3836 | /// bit despite comparing equal. | ||||
3837 | static bool cannotBeOrderedLessThanZeroImpl(const Value *V, | ||||
3838 | const TargetLibraryInfo *TLI, | ||||
3839 | bool SignBitOnly, | ||||
3840 | unsigned Depth) { | ||||
3841 | // TODO: This function does not do the right thing when SignBitOnly is true | ||||
3842 | // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform | ||||
3843 | // which flips the sign bits of NaNs. See | ||||
3844 | // https://llvm.org/bugs/show_bug.cgi?id=31702. | ||||
3845 | |||||
3846 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { | ||||
3847 | return !CFP->getValueAPF().isNegative() || | ||||
3848 | (!SignBitOnly && CFP->getValueAPF().isZero()); | ||||
3849 | } | ||||
3850 | |||||
3851 | // Handle vector of constants. | ||||
3852 | if (auto *CV = dyn_cast<Constant>(V)) { | ||||
3853 | if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) { | ||||
3854 | unsigned NumElts = CVFVTy->getNumElements(); | ||||
3855 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
3856 | auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i)); | ||||
3857 | if (!CFP) | ||||
3858 | return false; | ||||
3859 | if (CFP->getValueAPF().isNegative() && | ||||
3860 | (SignBitOnly || !CFP->getValueAPF().isZero())) | ||||
3861 | return false; | ||||
3862 | } | ||||
3863 | |||||
3864 | // All non-negative ConstantFPs. | ||||
3865 | return true; | ||||
3866 | } | ||||
3867 | } | ||||
3868 | |||||
3869 | if (Depth == MaxAnalysisRecursionDepth) | ||||
3870 | return false; | ||||
3871 | |||||
3872 | const Operator *I = dyn_cast<Operator>(V); | ||||
3873 | if (!I) | ||||
3874 | return false; | ||||
3875 | |||||
3876 | switch (I->getOpcode()) { | ||||
3877 | default: | ||||
3878 | break; | ||||
3879 | // Unsigned integers are always nonnegative. | ||||
3880 | case Instruction::UIToFP: | ||||
3881 | return true; | ||||
3882 | case Instruction::FDiv: | ||||
3883 | // X / X is always exactly 1.0 or a NaN. | ||||
3884 | if (I->getOperand(0) == I->getOperand(1) && | ||||
3885 | (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) | ||||
3886 | return true; | ||||
3887 | |||||
3888 | // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN). | ||||
3889 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3890 | Depth + 1) && | ||||
3891 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, | ||||
3892 | /*SignBitOnly*/ true, Depth + 1); | ||||
3893 | case Instruction::FMul: | ||||
3894 | // X * X is always non-negative or a NaN. | ||||
3895 | if (I->getOperand(0) == I->getOperand(1) && | ||||
3896 | (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) | ||||
3897 | return true; | ||||
3898 | |||||
3899 | [[fallthrough]]; | ||||
3900 | case Instruction::FAdd: | ||||
3901 | case Instruction::FRem: | ||||
3902 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3903 | Depth + 1) && | ||||
3904 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, | ||||
3905 | Depth + 1); | ||||
3906 | case Instruction::Select: | ||||
3907 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, | ||||
3908 | Depth + 1) && | ||||
3909 | cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, | ||||
3910 | Depth + 1); | ||||
3911 | case Instruction::FPExt: | ||||
3912 | case Instruction::FPTrunc: | ||||
3913 | // Widening/narrowing never change sign. | ||||
3914 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3915 | Depth + 1); | ||||
3916 | case Instruction::ExtractElement: | ||||
3917 | // Look through extract element. At the moment we keep this simple and skip | ||||
3918 | // tracking the specific element. But at least we might find information | ||||
3919 | // valid for all elements of the vector. | ||||
3920 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3921 | Depth + 1); | ||||
3922 | case Instruction::Call: | ||||
3923 | const auto *CI = cast<CallInst>(I); | ||||
3924 | Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI); | ||||
3925 | switch (IID) { | ||||
3926 | default: | ||||
3927 | break; | ||||
3928 | case Intrinsic::canonicalize: | ||||
3929 | case Intrinsic::arithmetic_fence: | ||||
3930 | case Intrinsic::floor: | ||||
3931 | case Intrinsic::ceil: | ||||
3932 | case Intrinsic::trunc: | ||||
3933 | case Intrinsic::rint: | ||||
3934 | case Intrinsic::nearbyint: | ||||
3935 | case Intrinsic::round: | ||||
3936 | case Intrinsic::roundeven: | ||||
3937 | case Intrinsic::fptrunc_round: | ||||
3938 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, Depth + 1); | ||||
3939 | case Intrinsic::maxnum: { | ||||
3940 | Value *V0 = I->getOperand(0), *V1 = I->getOperand(1); | ||||
3941 | auto isPositiveNum = [&](Value *V) { | ||||
3942 | if (SignBitOnly) { | ||||
3943 | // With SignBitOnly, this is tricky because the result of | ||||
3944 | // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is | ||||
3945 | // a constant strictly greater than 0.0. | ||||
3946 | const APFloat *C; | ||||
3947 | return match(V, m_APFloat(C)) && | ||||
3948 | *C > APFloat::getZero(C->getSemantics()); | ||||
3949 | } | ||||
3950 | |||||
3951 | // -0.0 compares equal to 0.0, so if this operand is at least -0.0, | ||||
3952 | // maxnum can't be ordered-less-than-zero. | ||||
3953 | return isKnownNeverNaN(V, TLI) && | ||||
3954 | cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1); | ||||
3955 | }; | ||||
3956 | |||||
3957 | // TODO: This could be improved. We could also check that neither operand | ||||
3958 | // has its sign bit set (and at least 1 is not-NAN?). | ||||
3959 | return isPositiveNum(V0) || isPositiveNum(V1); | ||||
3960 | } | ||||
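// Illustrative example: maxnum(%x, 2.0) satisfies isPositiveNum on the
// constant operand (2.0 > +0.0), so the result cannot be ordered less than
// zero even in SignBitOnly mode, regardless of %x.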
3961 | |||||
3962 | case Intrinsic::maximum: | ||||
3963 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3964 | Depth + 1) || | ||||
3965 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, | ||||
3966 | Depth + 1); | ||||
3967 | case Intrinsic::minnum: | ||||
3968 | case Intrinsic::minimum: | ||||
3969 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
3970 | Depth + 1) && | ||||
3971 | cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, | ||||
3972 | Depth + 1); | ||||
3973 | case Intrinsic::exp: | ||||
3974 | case Intrinsic::exp2: | ||||
3975 | case Intrinsic::fabs: | ||||
3976 | return true; | ||||
3977 | case Intrinsic::copysign: | ||||
3978 | // Only the sign operand matters. | ||||
3979 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, true, | ||||
3980 | Depth + 1); | ||||
3981 | case Intrinsic::sqrt: | ||||
3982 | // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. | ||||
3983 | if (!SignBitOnly) | ||||
3984 | return true; | ||||
3985 | return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || | ||||
3986 | CannotBeNegativeZero(CI->getOperand(0), TLI)); | ||||
3987 | |||||
3988 | case Intrinsic::powi: | ||||
3989 | if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { | ||||
3990 | // powi(x,n) is non-negative if n is even. | ||||
3991 | if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) | ||||
3992 | return true; | ||||
3993 | } | ||||
3994 | // TODO: This is not correct. Given that exp is an integer, here are the | ||||
3995 | // ways that pow can return a negative value: | ||||
3996 | // | ||||
3997 | // pow(x, exp) --> negative if exp is odd and x is negative. | ||||
3998 | // pow(-0, exp) --> -inf if exp is negative odd. | ||||
3999 | // pow(-0, exp) --> -0 if exp is positive odd. | ||||
4000 | // pow(-inf, exp) --> -0 if exp is negative odd. | ||||
4001 | // pow(-inf, exp) --> -inf if exp is positive odd. | ||||
4002 | // | ||||
4003 | // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, | ||||
4004 | // but we must return false if x == -0. Unfortunately we do not currently | ||||
4005 | // have a way of expressing this constraint. See details in | ||||
4006 | // https://llvm.org/bugs/show_bug.cgi?id=31702. | ||||
4007 | return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, | ||||
4008 | Depth + 1); | ||||
4009 | |||||
4010 | case Intrinsic::fma: | ||||
4011 | case Intrinsic::fmuladd: | ||||
4012 | // x*x+y is non-negative if y is non-negative. | ||||
4013 | return I->getOperand(0) == I->getOperand(1) && | ||||
4014 | (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && | ||||
4015 | cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, | ||||
4016 | Depth + 1); | ||||
4017 | } | ||||
4018 | break; | ||||
4019 | } | ||||
4020 | return false; | ||||
4021 | } | ||||
4022 | |||||
4023 | bool llvm::CannotBeOrderedLessThanZero(const Value *V, | ||||
4024 | const TargetLibraryInfo *TLI) { | ||||
4025 | return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); | ||||
4026 | } | ||||
4027 | |||||
4028 | bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { | ||||
4029 | return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); | ||||
4030 | } | ||||
4031 | |||||
4032 | bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI, | ||||
4033 | unsigned Depth) { | ||||
4034 | assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type"); | ||||
4035 | |||||
4036 | // If we're told that infinities won't happen, assume they won't. | ||||
4037 | if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) | ||||
4038 | if (FPMathOp->hasNoInfs()) | ||||
4039 | return true; | ||||
4040 | |||||
4041 | if (const auto *Arg = dyn_cast<Argument>(V)) { | ||||
4042 | if ((Arg->getNoFPClass() & fcInf) == fcInf) | ||||
4043 | return true; | ||||
4044 | } | ||||
4045 | |||||
4046 | // TODO: Use an fpclass-like API for isKnown queries and distinguish +inf from | ||||
4047 | // -inf. | ||||
4048 | if (const auto *CB = dyn_cast<CallBase>(V)) { | ||||
4049 | if ((CB->getRetNoFPClass() & fcInf) == fcInf) | ||||
4050 | return true; | ||||
4051 | } | ||||
4052 | |||||
4053 | // Handle scalar constants. | ||||
4054 | if (auto *CFP = dyn_cast<ConstantFP>(V)) | ||||
4055 | return !CFP->isInfinity(); | ||||
4056 | |||||
4057 | if (Depth == MaxAnalysisRecursionDepth) | ||||
4058 | return false; | ||||
4059 | |||||
4060 | if (auto *Inst = dyn_cast<Instruction>(V)) { | ||||
4061 | switch (Inst->getOpcode()) { | ||||
4062 | case Instruction::Select: { | ||||
4063 | return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) && | ||||
4064 | isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1); | ||||
4065 | } | ||||
4066 | case Instruction::SIToFP: | ||||
4067 | case Instruction::UIToFP: { | ||||
4068 | // Get width of largest magnitude integer (remove a bit if signed). | ||||
4069 | // This still works for a signed minimum value because the largest FP | ||||
4070 | // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx). | ||||
4071 | int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
4072 | if (Inst->getOpcode() == Instruction::SIToFP) | ||||
4073 | --IntSize; | ||||
4074 | |||||
4075 | // If the exponent of the largest finite FP value can hold the largest | ||||
4076 | // integer, the result of the cast must be finite. | ||||
4077 | Type *FPTy = Inst->getType()->getScalarType(); | ||||
4078 | return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize; | ||||
4079 | } | ||||
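// Worked example (illustrative): (sitofp i32 %x to float) is never infinity
// because IntSize = 31 and ilogb(FLT_MAX) = 127 >= 31, whereas
// (uitofp i128 %x to half) cannot be proven: IntSize = 128 exceeds the
// ilogb of the largest half, which is 15.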
4080 | case Instruction::FNeg: | ||||
4081 | case Instruction::FPExt: { | ||||
4082 | // Peek through to source op. If it is not infinity, this is not infinity. | ||||
4083 | return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1); | ||||
4084 | } | ||||
4085 | case Instruction::FPTrunc: { | ||||
4086 | // Need a range check. | ||||
4087 | return false; | ||||
4088 | } | ||||
4089 | default: | ||||
4090 | break; | ||||
4091 | } | ||||
4092 | |||||
4093 | if (const auto *II = dyn_cast<IntrinsicInst>(V)) { | ||||
4094 | switch (II->getIntrinsicID()) { | ||||
4095 | case Intrinsic::sin: | ||||
4096 | case Intrinsic::cos: | ||||
4097 | // sin/cos return NaN for infinite inputs, so they never produce infinity. | ||||
4098 | return true; | ||||
4099 | case Intrinsic::fabs: | ||||
4100 | case Intrinsic::sqrt: | ||||
4101 | case Intrinsic::canonicalize: | ||||
4102 | case Intrinsic::copysign: | ||||
4103 | case Intrinsic::arithmetic_fence: | ||||
4104 | case Intrinsic::trunc: | ||||
4105 | return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1); | ||||
4106 | case Intrinsic::floor: | ||||
4107 | case Intrinsic::ceil: | ||||
4108 | case Intrinsic::rint: | ||||
4109 | case Intrinsic::nearbyint: | ||||
4110 | case Intrinsic::round: | ||||
4111 | case Intrinsic::roundeven: | ||||
4112 | // PPC_FP128 is a special case. | ||||
4113 | if (V->getType()->isMultiUnitFPType()) | ||||
4114 | return false; | ||||
4115 | return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1); | ||||
4116 | case Intrinsic::fptrunc_round: | ||||
4117 | // Requires knowing the value range. | ||||
4118 | return false; | ||||
4119 | case Intrinsic::minnum: | ||||
4120 | case Intrinsic::maxnum: | ||||
4121 | case Intrinsic::minimum: | ||||
4122 | case Intrinsic::maximum: | ||||
4123 | return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) && | ||||
4124 | isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1); | ||||
4125 | case Intrinsic::log: | ||||
4126 | case Intrinsic::log10: | ||||
4127 | case Intrinsic::log2: | ||||
4128 | // log(+inf) -> +inf | ||||
4129 | // log([+-]0.0) -> -inf | ||||
4130 | // log(-inf) -> nan | ||||
4131 | // log(-x) -> nan | ||||
4132 | // TODO: We lack API to check the == 0 case. | ||||
4133 | return false; | ||||
4134 | case Intrinsic::exp: | ||||
4135 | case Intrinsic::exp2: | ||||
4136 | case Intrinsic::pow: | ||||
4137 | case Intrinsic::powi: | ||||
4138 | case Intrinsic::fma: | ||||
4139 | case Intrinsic::fmuladd: | ||||
4140 | // These can return infinities on overflow cases, so it's hard to prove | ||||
4141 | // anything about it. | ||||
4142 | return false; | ||||
4143 | default: | ||||
4144 | break; | ||||
4145 | } | ||||
4146 | } | ||||
4147 | } | ||||
4148 | |||||
4149 | // Try to handle fixed-width vector constants. | ||||
4150 | auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
4151 | if (VFVTy && isa<Constant>(V)) { | ||||
4152 | // For vectors, verify that each element is not infinity. | ||||
4153 | unsigned NumElts = VFVTy->getNumElements(); | ||||
4154 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
4155 | Constant *Elt = cast<Constant>(V)->getAggregateElement(i); | ||||
4156 | if (!Elt) | ||||
4157 | return false; | ||||
4158 | if (isa<UndefValue>(Elt)) | ||||
4159 | continue; | ||||
4160 | auto *CElt = dyn_cast<ConstantFP>(Elt); | ||||
4161 | if (!CElt || CElt->isInfinity()) | ||||
4162 | return false; | ||||
4163 | } | ||||
4164 | // All elements were confirmed non-infinity or undefined. | ||||
4165 | return true; | ||||
4166 | } | ||||
4167 | |||||
4168 | // Was not able to prove that V never contains infinity. | ||||
4169 | return false; | ||||
4170 | } | ||||
4171 | |||||
4172 | bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, | ||||
4173 | unsigned Depth) { | ||||
4174 | assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type"); | ||||
4175 | |||||
4176 | // If we're told that NaNs won't happen, assume they won't. | ||||
4177 | if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) | ||||
4178 | if (FPMathOp->hasNoNaNs()) | ||||
4179 | return true; | ||||
4180 | |||||
4181 | if (const auto *Arg = dyn_cast<Argument>(V)) { | ||||
4182 | if ((Arg->getNoFPClass() & fcNan) == fcNan) | ||||
4183 | return true; | ||||
4184 | } | ||||
4185 | |||||
4186 | // TODO: Use an fpclass-like API for isKnown queries and distinguish snan from | ||||
4187 | // qnan. | ||||
4188 | if (const auto *CB = dyn_cast<CallBase>(V)) { | ||||
4189 | FPClassTest Mask = CB->getRetNoFPClass(); | ||||
4190 | if ((Mask & fcNan) == fcNan) | ||||
4191 | return true; | ||||
4192 | } | ||||
4193 | |||||
4194 | // Handle scalar constants. | ||||
4195 | if (auto *CFP = dyn_cast<ConstantFP>(V)) | ||||
4196 | return !CFP->isNaN(); | ||||
4197 | |||||
4198 | if (Depth == MaxAnalysisRecursionDepth) | ||||
4199 | return false; | ||||
4200 | |||||
4201 | if (auto *Inst = dyn_cast<Instruction>(V)) { | ||||
4202 | switch (Inst->getOpcode()) { | ||||
4203 | case Instruction::FAdd: | ||||
4204 | case Instruction::FSub: | ||||
4205 | // Adding positive and negative infinity produces NaN. | ||||
4206 | return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && | ||||
4207 | isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && | ||||
4208 | (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) || | ||||
4209 | isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1)); | ||||
4210 | |||||
4211 | case Instruction::FMul: | ||||
4212 | // Zero multiplied by infinity produces NaN. | ||||
4213 | // FIXME: If neither side can be zero fmul never produces NaN. | ||||
4214 | return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && | ||||
4215 | isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) && | ||||
4216 | isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && | ||||
4217 | isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1); | ||||
4218 | |||||
4219 | case Instruction::FDiv: | ||||
4220 | case Instruction::FRem: | ||||
4221 | // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN. | ||||
4222 | return false; | ||||
4223 | |||||
4224 | case Instruction::Select: { | ||||
4225 | return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && | ||||
4226 | isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1); | ||||
4227 | } | ||||
4228 | case Instruction::SIToFP: | ||||
4229 | case Instruction::UIToFP: | ||||
4230 | return true; | ||||
4231 | case Instruction::FPTrunc: | ||||
4232 | case Instruction::FPExt: | ||||
4233 | case Instruction::FNeg: | ||||
4234 | return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1); | ||||
4235 | default: | ||||
4236 | break; | ||||
4237 | } | ||||
4238 | } | ||||
4239 | |||||
4240 | if (const auto *II = dyn_cast<IntrinsicInst>(V)) { | ||||
4241 | switch (II->getIntrinsicID()) { | ||||
4242 | case Intrinsic::canonicalize: | ||||
4243 | case Intrinsic::fabs: | ||||
4244 | case Intrinsic::copysign: | ||||
4245 | case Intrinsic::exp: | ||||
4246 | case Intrinsic::exp2: | ||||
4247 | case Intrinsic::floor: | ||||
4248 | case Intrinsic::ceil: | ||||
4249 | case Intrinsic::trunc: | ||||
4250 | case Intrinsic::rint: | ||||
4251 | case Intrinsic::nearbyint: | ||||
4252 | case Intrinsic::round: | ||||
4253 | case Intrinsic::roundeven: | ||||
4254 | case Intrinsic::arithmetic_fence: | ||||
4255 | return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1); | ||||
4256 | case Intrinsic::sqrt: | ||||
4257 | return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) && | ||||
4258 | CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI); | ||||
4259 | case Intrinsic::minnum: | ||||
4260 | case Intrinsic::maxnum: | ||||
4261 | // If either operand is not NaN, the result is not NaN. | ||||
4262 | return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) || | ||||
4263 | isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1); | ||||
4264 | default: | ||||
4265 | return false; | ||||
4266 | } | ||||
4267 | } | ||||
4268 | |||||
4269 | // Try to handle fixed width vector constants | ||||
4270 | auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
4271 | if (VFVTy && isa<Constant>(V)) { | ||||
4272 | // For vectors, verify that each element is not NaN. | ||||
4273 | unsigned NumElts = VFVTy->getNumElements(); | ||||
4274 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
4275 | Constant *Elt = cast<Constant>(V)->getAggregateElement(i); | ||||
4276 | if (!Elt) | ||||
4277 | return false; | ||||
4278 | if (isa<UndefValue>(Elt)) | ||||
4279 | continue; | ||||
4280 | auto *CElt = dyn_cast<ConstantFP>(Elt); | ||||
4281 | if (!CElt || CElt->isNaN()) | ||||
4282 | return false; | ||||
4283 | } | ||||
4284 | // All elements were confirmed not-NaN or undefined. | ||||
4285 | return true; | ||||
4286 | } | ||||
4287 | |||||
4288 | // Was not able to prove that V never contains NaN | ||||
4289 | return false; | ||||
4290 | } | ||||
4291 | |||||
4292 | /// Return true if it's possible to assume IEEE treatment of input denormals in | ||||
4293 | /// \p F for \p Val. | ||||
4294 | static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) { | ||||
4295 | Ty = Ty->getScalarType(); | ||||
4296 | return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE; | ||||
4297 | } | ||||
4298 | |||||
4299 | bool KnownFPClass::isKnownNeverLogicalZero(const Function &F, Type *Ty) const { | ||||
4300 | return isKnownNeverZero() && | ||||
4301 | (isKnownNeverSubnormal() || inputDenormalIsIEEE(F, Ty)); | ||||
4302 | } | ||||
4303 | |||||
4304 | /// Returns a pair of values, which if passed to llvm.is.fpclass, returns the | ||||
4305 | /// same result as an fcmp with the given operands. | ||||
4306 | std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred, | ||||
4307 | const Function &F, | ||||
4308 | Value *LHS, Value *RHS, | ||||
4309 | bool LookThroughSrc) { | ||||
4310 | const APFloat *ConstRHS; | ||||
4311 | if (!match(RHS, m_APFloat(ConstRHS))) | ||||
4312 | return {nullptr, fcNone}; | ||||
4313 | |||||
4314 | if (ConstRHS->isZero()) { | ||||
4315 | // Compares with fcNone are only exactly equal to fcZero if input denormals | ||||
4316 | // are not flushed. | ||||
4317 | // TODO: Handle DAZ by expanding masks to cover subnormal cases. | ||||
4318 | if (Pred != FCmpInst::FCMP_ORD && Pred != FCmpInst::FCMP_UNO && | ||||
4319 | !inputDenormalIsIEEE(F, LHS->getType())) | ||||
4320 | return {nullptr, fcNone}; | ||||
4321 | |||||
4322 | switch (Pred) { | ||||
4323 | case FCmpInst::FCMP_OEQ: // Match x == 0.0 | ||||
4324 | return {LHS, fcZero}; | ||||
4325 | case FCmpInst::FCMP_UEQ: // Match isnan(x) || (x == 0.0) | ||||
4326 | return {LHS, fcZero | fcNan}; | ||||
4327 | case FCmpInst::FCMP_UNE: // Match (x != 0.0) | ||||
4328 | return {LHS, ~fcZero}; | ||||
4329 | case FCmpInst::FCMP_ONE: // Match !isnan(x) && x != 0.0 | ||||
4330 | return {LHS, ~fcNan & ~fcZero}; | ||||
4331 | case FCmpInst::FCMP_ORD: | ||||
4332 | // Canonical form of ord/uno is with a zero. We could also handle | ||||
4333 | // non-canonical other non-NaN constants or LHS == RHS. | ||||
4334 | return {LHS, ~fcNan}; | ||||
4335 | case FCmpInst::FCMP_UNO: | ||||
4336 | return {LHS, fcNan}; | ||||
4337 | case FCmpInst::FCMP_OGT: // x > 0 | ||||
4338 | return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf}; | ||||
4339 | case FCmpInst::FCMP_UGT: // isnan(x) || x > 0 | ||||
4340 | return {LHS, fcPosSubnormal | fcPosNormal | fcPosInf | fcNan}; | ||||
4341 | case FCmpInst::FCMP_OGE: // x >= 0 | ||||
4342 | return {LHS, fcPositive | fcNegZero}; | ||||
4343 | case FCmpInst::FCMP_UGE: // isnan(x) || x >= 0 | ||||
4344 | return {LHS, fcPositive | fcNegZero | fcNan}; | ||||
4345 | case FCmpInst::FCMP_OLT: // x < 0 | ||||
4346 | return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf}; | ||||
4347 | case FCmpInst::FCMP_ULT: // isnan(x) || x < 0 | ||||
4348 | return {LHS, fcNegSubnormal | fcNegNormal | fcNegInf | fcNan}; | ||||
4349 | case FCmpInst::FCMP_OLE: // x <= 0 | ||||
4350 | return {LHS, fcNegative | fcPosZero}; | ||||
4351 | case FCmpInst::FCMP_ULE: // isnan(x) || x <= 0 | ||||
4352 | return {LHS, fcNegative | fcPosZero | fcNan}; | ||||
4353 | default: | ||||
4354 | break; | ||||
4355 | } | ||||
4356 | |||||
4357 | return {nullptr, fcNone}; | ||||
4358 | } | ||||
4359 | |||||
4360 | Value *Src = LHS; | ||||
4361 | const bool IsFabs = LookThroughSrc && match(LHS, m_FAbs(m_Value(Src))); | ||||
4362 | |||||
4363 | // Compute the test mask that would return true for the ordered comparisons. | ||||
4364 | FPClassTest Mask; | ||||
4365 | |||||
4366 | if (ConstRHS->isInfinity()) { | ||||
4367 | switch (Pred) { | ||||
4368 | case FCmpInst::FCMP_OEQ: | ||||
4369 | case FCmpInst::FCMP_UNE: { | ||||
4370 | // Match __builtin_isinf patterns | ||||
4371 | // | ||||
4372 | // fcmp oeq x, +inf -> is_fpclass x, fcPosInf | ||||
4373 | // fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf | ||||
4374 | // fcmp oeq x, -inf -> is_fpclass x, fcNegInf | ||||
4375 | // fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false | ||||
4376 | // | ||||
4377 | // fcmp une x, +inf -> is_fpclass x, ~fcPosInf | ||||
4378 | // fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf | ||||
4379 | // fcmp une x, -inf -> is_fpclass x, ~fcNegInf | ||||
4380 | // fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true | ||||
4381 | |||||
4382 | if (ConstRHS->isNegative()) { | ||||
4383 | Mask = fcNegInf; | ||||
4384 | if (IsFabs) | ||||
4385 | Mask = fcNone; | ||||
4386 | } else { | ||||
4387 | Mask = fcPosInf; | ||||
4388 | if (IsFabs) | ||||
4389 | Mask |= fcNegInf; | ||||
4390 | } | ||||
4391 | |||||
4392 | break; | ||||
4393 | } | ||||
4394 | case FCmpInst::FCMP_ONE: | ||||
4395 | case FCmpInst::FCMP_UEQ: { | ||||
4396 | // Match __builtin_isinf patterns | ||||
4397 | // fcmp one x, -inf -> is_fpclass x, ~fcNegInf & ~fcNan | ||||
4398 | // fcmp one fabs(x), -inf -> is_fpclass x, ~fcNan | ||||
4399 | // fcmp one x, +inf -> is_fpclass x, ~fcPosInf & ~fcNan | ||||
4400 | // fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & ~fcNan | ||||
4401 | // | ||||
4402 | // fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan | ||||
4403 | // fcmp ueq (fabs x), +inf -> is_fpclass x, fcInf|fcNan | ||||
4404 | // fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan | ||||
4405 | // fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan | ||||
4406 | if (ConstRHS->isNegative()) { | ||||
4407 | Mask = ~fcNegInf & ~fcNan; | ||||
4408 | if (IsFabs) | ||||
4409 | Mask = ~fcNan; | ||||
4410 | } else { | ||||
4411 | Mask = ~fcPosInf & ~fcNan; | ||||
4412 | if (IsFabs) | ||||
4413 | Mask &= ~fcNegInf; | ||||
4414 | } | ||||
4415 | |||||
4416 | break; | ||||
4417 | } | ||||
4418 | case FCmpInst::FCMP_OLT: | ||||
4419 | case FCmpInst::FCMP_UGE: { | ||||
4420 | if (ConstRHS->isNegative()) // TODO | ||||
4421 | return {nullptr, fcNone}; | ||||
4422 | |||||
4423 | // fcmp olt fabs(x), +inf -> fcFinite | ||||
4424 | // fcmp uge fabs(x), +inf -> ~fcFinite | ||||
4425 | // fcmp olt x, +inf -> fcFinite|fcNegInf | ||||
4426 | // fcmp uge x, +inf -> ~(fcFinite|fcNegInf) | ||||
4427 | Mask = fcFinite; | ||||
4428 | if (!IsFabs) | ||||
4429 | Mask |= fcNegInf; | ||||
4430 | break; | ||||
4431 | } | ||||
4432 | case FCmpInst::FCMP_OGE: | ||||
4433 | case FCmpInst::FCMP_ULT: { | ||||
4434 | if (ConstRHS->isNegative()) // TODO | ||||
4435 | return {nullptr, fcNone}; | ||||
4436 | |||||
4437 | // fcmp oge fabs(x), +inf -> fcInf | ||||
4438 | // fcmp oge x, +inf -> fcPosInf | ||||
4439 | // fcmp ult fabs(x), +inf -> ~fcInf | ||||
4440 | // fcmp ult x, +inf -> ~fcPosInf | ||||
4441 | Mask = fcPosInf; | ||||
4442 | if (IsFabs) | ||||
4443 | Mask |= fcNegInf; | ||||
4444 | break; | ||||
4445 | } | ||||
4446 | default: | ||||
4447 | return {nullptr, fcNone}; | ||||
4448 | } | ||||
4449 | } else if (ConstRHS->isSmallestNormalized() && !ConstRHS->isNegative()) { | ||||
4450 | // Match pattern that's used in __builtin_isnormal. | ||||
4451 | switch (Pred) { | ||||
4452 | case FCmpInst::FCMP_OLT: | ||||
4453 | case FCmpInst::FCMP_UGE: { | ||||
4454 | // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero | ||||
4455 | // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero | ||||
4456 | // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf | ||||
4457 | // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero) | ||||
4458 | Mask = fcZero | fcSubnormal; | ||||
4459 | if (!IsFabs) | ||||
4460 | Mask |= fcNegNormal | fcNegInf; | ||||
4461 | |||||
4462 | break; | ||||
4463 | } | ||||
4464 | case FCmpInst::FCMP_OGE: | ||||
4465 | case FCmpInst::FCMP_ULT: { | ||||
4466 | // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf | ||||
4467 | // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal | ||||
4468 | // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf) | ||||
4469 | // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal) | ||||
4470 | Mask = fcPosInf | fcPosNormal; | ||||
4471 | if (IsFabs) | ||||
4472 | Mask |= fcNegInf | fcNegNormal; | ||||
4473 | break; | ||||
4474 | } | ||||
4475 | default: | ||||
4476 | return {nullptr, fcNone}; | ||||
4477 | } | ||||
4478 | } else | ||||
4479 | return {nullptr, fcNone}; | ||||
4480 | |||||
4481 | // Invert the comparison for the unordered cases. | ||||
4482 | if (FCmpInst::isUnordered(Pred)) | ||||
4483 | Mask = ~Mask; | ||||
4484 | |||||
4485 | return {Src, Mask}; | ||||
4486 | } | ||||
4487 | |||||
4488 | static FPClassTest computeKnownFPClassFromAssumes(const Value *V, | ||||
4489 | const Query &Q) { | ||||
4490 | FPClassTest KnownFromAssume = fcAllFlags; | ||||
4491 | |||||
4492 | // Try to restrict the floating-point classes based on information from | ||||
4493 | // assumptions. | ||||
4494 | for (auto &AssumeVH : Q.AC->assumptionsFor(V)) { | ||||
4495 | if (!AssumeVH) | ||||
4496 | continue; | ||||
4497 | CallInst *I = cast<CallInst>(AssumeVH); | ||||
4498 | const Function *F = I->getFunction(); | ||||
4499 | |||||
4500 | assert(F == Q.CxtI->getParent()->getParent() && | ||||
4501 | "Got assumption for the wrong function!"); | ||||
4502 | assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && | ||||
4503 | "must be an assume intrinsic"); | ||||
4504 | |||||
4505 | if (!isValidAssumeForContext(I, Q.CxtI, Q.DT)) | ||||
4506 | continue; | ||||
4507 | |||||
4508 | CmpInst::Predicate Pred; | ||||
4509 | Value *LHS, *RHS; | ||||
4510 | uint64_t ClassVal = 0; | ||||
4511 | if (match(I->getArgOperand(0), m_FCmp(Pred, m_Value(LHS), m_Value(RHS)))) { | ||||
4512 | auto [TestedValue, TestedMask] = | ||||
4513 | fcmpToClassTest(Pred, *F, LHS, RHS, true); | ||||
4514 | // First see if we can fold fabs/fneg into the test. | ||||
4515 | if (TestedValue == V) | ||||
4516 | KnownFromAssume &= TestedMask; | ||||
4517 | else { | ||||
4518 | // Try again without the lookthrough if we found a different source | ||||
4519 | // value. | ||||
4520 | auto [TestedValue, TestedMask] = | ||||
4521 | fcmpToClassTest(Pred, *F, LHS, RHS, false); | ||||
4522 | if (TestedValue == V) | ||||
4523 | KnownFromAssume &= TestedMask; | ||||
4524 | } | ||||
4525 | } else if (match(I->getArgOperand(0), | ||||
4526 | m_Intrinsic<Intrinsic::is_fpclass>( | ||||
4527 | m_Value(LHS), m_ConstantInt(ClassVal)))) { | ||||
4528 | KnownFromAssume &= static_cast<FPClassTest>(ClassVal); | ||||
4529 | } | ||||
4530 | } | ||||
4531 | |||||
4532 | return KnownFromAssume; | ||||
4533 | } | ||||
4534 | |||||
4535 | void computeKnownFPClass(const Value *V, const APInt &DemandedElts, | ||||
4536 | FPClassTest InterestedClasses, KnownFPClass &Known, | ||||
4537 | unsigned Depth, const Query &Q, | ||||
4538 | const TargetLibraryInfo *TLI); | ||||
4539 | |||||
4540 | static void computeKnownFPClass(const Value *V, KnownFPClass &Known, | ||||
4541 | FPClassTest InterestedClasses, unsigned Depth, | ||||
4542 | const Query &Q, const TargetLibraryInfo *TLI) { | ||||
4543 | auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
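| // Note: scalars and scalable vectors are treated as a single demanded lane. | ||||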
4544 | APInt DemandedElts = | ||||
4545 | FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); | ||||
4546 | computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q, TLI); | ||||
4547 | } | ||||
4548 | |||||
4549 | static void computeKnownFPClassForFPTrunc(const Operator *Op, | ||||
4550 | const APInt &DemandedElts, | ||||
4551 | FPClassTest InterestedClasses, | ||||
4552 | KnownFPClass &Known, unsigned Depth, | ||||
4553 | const Query &Q, | ||||
4554 | const TargetLibraryInfo *TLI) { | ||||
4555 | if ((InterestedClasses & fcNan) == fcNone) | ||||
4556 | return; | ||||
4557 | |||||
4558 | KnownFPClass KnownSrc; | ||||
4559 | computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, | ||||
4560 | KnownSrc, Depth + 1, Q, TLI); | ||||
4561 | if (KnownSrc.isKnownNeverNaN()) | ||||
4562 | Known.knownNot(fcNan); | ||||
4563 | |||||
4564 | // Infinity needs a range check. | ||||
4565 | // TODO: Sign bit should be preserved | ||||
4566 | } | ||||
4567 | |||||
4568 | // TODO: Merge implementations of isKnownNeverNaN, isKnownNeverInfinity, | ||||
4569 | // CannotBeNegativeZero, cannotBeOrderedLessThanZero into here. | ||||
4570 | |||||
4571 | void computeKnownFPClass(const Value *V, const APInt &DemandedElts, | ||||
4572 | FPClassTest InterestedClasses, KnownFPClass &Known, | ||||
4573 | unsigned Depth, const Query &Q, | ||||
4574 | const TargetLibraryInfo *TLI) { | ||||
4575 | assert(Known.isUnknown() && "should not be called with known information"); | ||||
4576 | |||||
4577 | if (!DemandedElts) { | ||||
4578 | // No demanded elts, better to assume we don't know anything. | ||||
4579 | Known.resetAll(); | ||||
4580 | return; | ||||
4581 | } | ||||
4582 | |||||
4583 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); | ||||
4584 | |||||
4585 | if (auto *CFP = dyn_cast_or_null<ConstantFP>(V)) { | ||||
4586 | Known.KnownFPClasses = CFP->getValueAPF().classify(); | ||||
4587 | Known.SignBit = CFP->isNegative(); | ||||
4588 | return; | ||||
4589 | } | ||||
4590 | |||||
4591 | // Try to handle fixed width vector constants | ||||
4592 | auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); | ||||
4593 | const Constant *CV = dyn_cast<Constant>(V); | ||||
4594 | if (VFVTy && CV) { | ||||
4595 | Known.KnownFPClasses = fcNone; | ||||
4596 | |||||
4597 | // For vectors, accumulate the known classes of each element. | ||||
4598 | unsigned NumElts = VFVTy->getNumElements(); | ||||
4599 | for (unsigned i = 0; i != NumElts; ++i) { | ||||
4600 | Constant *Elt = CV->getAggregateElement(i); | ||||
4601 | if (!Elt) { | ||||
4602 | Known = KnownFPClass(); | ||||
4603 | return; | ||||
4604 | } | ||||
4605 | if (isa<UndefValue>(Elt)) | ||||
4606 | continue; | ||||
4607 | auto *CElt = dyn_cast<ConstantFP>(Elt); | ||||
4608 | if (!CElt) { | ||||
4609 | Known = KnownFPClass(); | ||||
4610 | return; | ||||
4611 | } | ||||
4612 | |||||
4613 | KnownFPClass KnownElt{CElt->getValueAPF().classify(), CElt->isNegative()}; | ||||
4614 | Known |= KnownElt; | ||||
4615 | } | ||||
4616 | |||||
4617 | return; | ||||
4618 | } | ||||
4619 | |||||
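| // Gather classes that attributes, fast-math flags, and assumptions prove | ||||
| // cannot occur. | ||||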
4620 | FPClassTest KnownNotFromFlags = fcNone; | ||||
4621 | if (const auto *CB = dyn_cast<CallBase>(V)) | ||||
4622 | KnownNotFromFlags |= CB->getRetNoFPClass(); | ||||
4623 | else if (const auto *Arg = dyn_cast<Argument>(V)) | ||||
4624 | KnownNotFromFlags |= Arg->getNoFPClass(); | ||||
4625 | |||||
4626 | const Operator *Op = dyn_cast<Operator>(V); | ||||
4627 | if (const FPMathOperator *FPOp = dyn_cast_or_null<FPMathOperator>(Op)) { | ||||
4628 | if (FPOp->hasNoNaNs()) | ||||
4629 | KnownNotFromFlags |= fcNan; | ||||
4630 | if (FPOp->hasNoInfs()) | ||||
4631 | KnownNotFromFlags |= fcInf; | ||||
4632 | } | ||||
4633 | |||||
4634 | if (Q.AC) { | ||||
4635 | FPClassTest AssumedClasses = computeKnownFPClassFromAssumes(V, Q); | ||||
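| // Any class outside what the assumes allow is known to be absent. | ||||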
4636 | KnownNotFromFlags |= ~AssumedClasses; | ||||
4637 | } | ||||
4638 | |||||
4639 | // We don't need to compute these classes from the operands if the flags or | ||||
4640 | // attributes already rule them out. | ||||
4641 | InterestedClasses &= ~KnownNotFromFlags; | ||||
4642 | |||||
4643 | auto ClearClassesFromFlags = make_scope_exit([=, &Known] { | ||||
4644 | Known.knownNot(KnownNotFromFlags); | ||||
4645 | }); | ||||
4646 | |||||
4647 | if (!Op) | ||||
4648 | return; | ||||
4649 | |||||
4650 | // All recursive calls that increase depth must come after this. | ||||
4651 | if (Depth == MaxAnalysisRecursionDepth) | ||||
4652 | return; | ||||
4653 | |||||
4654 | const unsigned Opc = Op->getOpcode(); | ||||
4655 | switch (Opc) { | ||||
4656 | case Instruction::FNeg: { | ||||
4657 | computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, | ||||
4658 | Known, Depth + 1, Q, TLI); | ||||
4659 | Known.fneg(); | ||||
4660 | break; | ||||
4661 | } | ||||
4662 | case Instruction::Select: { | ||||
4663 | KnownFPClass Known2; | ||||
4664 | computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedClasses, | ||||
4665 | Known, Depth + 1, Q, TLI); | ||||
4666 | computeKnownFPClass(Op->getOperand(2), DemandedElts, InterestedClasses, | ||||
4667 | Known2, Depth + 1, Q, TLI); | ||||
4668 | Known |= Known2; | ||||
4669 | break; | ||||
4670 | } | ||||
4671 | case Instruction::Call: { | ||||
4672 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op)) { | ||||
4673 | const Intrinsic::ID IID = II->getIntrinsicID(); | ||||
4674 | switch (IID) { | ||||
4675 | case Intrinsic::fabs: { | ||||
4676 | if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) { | ||||
4677 | // If we only care about the sign bit we don't need to inspect the | ||||
4678 | // operand. | ||||
4679 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4680 | InterestedClasses, Known, Depth + 1, Q, TLI); | ||||
4681 | } | ||||
4682 | |||||
4683 | Known.fabs(); | ||||
4684 | break; | ||||
4685 | } | ||||
4686 | case Intrinsic::copysign: { | ||||
4687 | KnownFPClass KnownSign; | ||||
4688 | |||||
4689 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4690 | InterestedClasses, Known, Depth + 1, Q, TLI); | ||||
4691 | computeKnownFPClass(II->getArgOperand(1), DemandedElts, | ||||
4692 | InterestedClasses, KnownSign, Depth + 1, Q, TLI); | ||||
4693 | Known.copysign(KnownSign); | ||||
4694 | break; | ||||
4695 | } | ||||
4696 | case Intrinsic::fma: | ||||
4697 | case Intrinsic::fmuladd: { | ||||
4698 | if ((InterestedClasses & fcNegative) == fcNone) | ||||
4699 | break; | ||||
4700 | |||||
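| // Only the x*x + y form is handled here, so require matching multiply | ||||
| // operands. | ||||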
4701 | if (II->getArgOperand(0) != II->getArgOperand(1)) | ||||
4702 | break; | ||||
4703 | |||||
4704 | // The multiply cannot be -0 and therefore the add can't be -0 | ||||
4705 | Known.knownNot(fcNegZero); | ||||
4706 | |||||
4707 | // x * x + y is non-negative if y is non-negative. | ||||
4708 | KnownFPClass KnownAddend; | ||||
4709 | computeKnownFPClass(II->getArgOperand(2), DemandedElts, | ||||
4710 | InterestedClasses, KnownAddend, Depth + 1, Q, TLI); | ||||
4711 | |||||
4712 | // TODO: Known sign bit with no nans | ||||
4713 | if (KnownAddend.cannotBeOrderedLessThanZero()) | ||||
4714 | Known.knownNot(fcNegative); | ||||
4715 | break; | ||||
4716 | } | ||||
4717 | case Intrinsic::sin: | ||||
4718 | case Intrinsic::cos: { | ||||
4719 | // Return NaN on infinite inputs. | ||||
4720 | KnownFPClass KnownSrc; | ||||
4721 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4722 | InterestedClasses, KnownSrc, Depth + 1, Q, TLI); | ||||
4723 | Known.knownNot(fcInf); | ||||
4724 | if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity()) | ||||
4725 | Known.knownNot(fcNan); | ||||
4726 | break; | ||||
4727 | } | ||||
4728 | |||||
4729 | case Intrinsic::maxnum: | ||||
4730 | case Intrinsic::minnum: | ||||
4731 | case Intrinsic::minimum: | ||||
4732 | case Intrinsic::maximum: { | ||||
4733 | KnownFPClass KnownLHS, KnownRHS; | ||||
4734 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4735 | InterestedClasses, KnownLHS, Depth + 1, Q, TLI); | ||||
4736 | computeKnownFPClass(II->getArgOperand(1), DemandedElts, | ||||
4737 | InterestedClasses, KnownRHS, Depth + 1, Q, TLI); | ||||
4738 | |||||
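| // IEEE minnum/maxnum return the non-NaN operand when exactly one input is | ||||
| // NaN, so a single NaN-free operand suffices for those intrinsics. | ||||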
4739 | bool NeverNaN = | ||||
4740 | KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN(); | ||||
4741 | Known = KnownLHS | KnownRHS; | ||||
4742 | |||||
4743 | // If either operand is not NaN, the result is not NaN. | ||||
4744 | if (NeverNaN && (IID == Intrinsic::minnum || IID == Intrinsic::maxnum)) | ||||
4745 | Known.knownNot(fcNan); | ||||
4746 | |||||
4747 | if (IID == Intrinsic::maxnum) { | ||||
4748 | // If at least one operand is known to be positive, the result must be | ||||
4749 | // positive. | ||||
4750 | if ((KnownLHS.cannotBeOrderedLessThanZero() && | ||||
4751 | KnownLHS.isKnownNeverNaN()) || | ||||
4752 | (KnownRHS.cannotBeOrderedLessThanZero() && | ||||
4753 | KnownRHS.isKnownNeverNaN())) | ||||
4754 | Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); | ||||
4755 | } else if (IID == Intrinsic::maximum) { | ||||
4756 | // If at least one operand is known to be positive, the result must be | ||||
4757 | // positive. | ||||
4758 | if (KnownLHS.cannotBeOrderedLessThanZero() || | ||||
4759 | KnownRHS.cannotBeOrderedLessThanZero()) | ||||
4760 | Known.knownNot(KnownFPClass::OrderedLessThanZeroMask); | ||||
4761 | } else if (IID == Intrinsic::minnum) { | ||||
4762 | // If at least one operand is known to be negative, the result must be | ||||
4763 | // negative. | ||||
4764 | if ((KnownLHS.cannotBeOrderedGreaterThanZero() && | ||||
4765 | KnownLHS.isKnownNeverNaN()) || | ||||
4766 | (KnownRHS.cannotBeOrderedGreaterThanZero() && | ||||
4767 | KnownRHS.isKnownNeverNaN())) | ||||
4768 | Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask); | ||||
4769 | } else { | ||||
4770 | // If at least one operand is known to be negative, the result must be | ||||
4771 | // negative. | ||||
4772 | if (KnownLHS.cannotBeOrderedGreaterThanZero() || | ||||
4773 | KnownRHS.cannotBeOrderedGreaterThanZero()) | ||||
4774 | Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask); | ||||
4775 | } | ||||
4776 | |||||
4777 | // Fixup zero handling if denormals could be returned as a zero. | ||||
4778 | // | ||||
4779 | // As there's no spec for denormal flushing, be conservative with the | ||||
4780 | // treatment of denormals that could be flushed to zero. For older | ||||
4781 | // subtargets on AMDGPU the min/max instructions would not flush the | ||||
4782 | // output and return the original value. | ||||
4783 | // | ||||
4784 | // TODO: This could be refined based on the sign | ||||
4785 | if ((Known.KnownFPClasses & fcZero) != fcNone && | ||||
4786 | !Known.isKnownNeverSubnormal()) { | ||||
4787 | const Function *Parent = II->getFunction(); | ||||
4788 | DenormalMode Mode = Parent->getDenormalMode( | ||||
4789 | II->getType()->getScalarType()->getFltSemantics()); | ||||
4790 | if (Mode != DenormalMode::getIEEE()) | ||||
4791 | Known.KnownFPClasses |= fcZero; | ||||
4792 | } | ||||
4793 | |||||
4794 | break; | ||||
4795 | } | ||||
4796 | case Intrinsic::canonicalize: { | ||||
4797 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4798 | InterestedClasses, Known, Depth + 1, Q, TLI); | ||||
4799 | // Canonicalize is guaranteed to quiet signaling nans. | ||||
4800 | Known.knownNot(fcSNan); | ||||
4801 | |||||
4802 | // If the parent function flushes denormals, the canonical output cannot | ||||
4803 | // be a denormal. | ||||
4804 | const fltSemantics &FPType = II->getType()->getFltSemantics(); | ||||
4805 | DenormalMode DenormMode = II->getFunction()->getDenormalMode(FPType); | ||||
4806 | if (DenormMode.inputsAreZero() || DenormMode.outputsAreZero()) | ||||
4807 | Known.knownNot(fcSubnormal); | ||||
4808 | |||||
4809 | if (DenormMode.Input == DenormalMode::PositiveZero || | ||||
4810 | (DenormMode.Output == DenormalMode::PositiveZero && | ||||
4811 | DenormMode.Input == DenormalMode::IEEE)) | ||||
4812 | Known.knownNot(fcNegZero); | ||||
4813 | |||||
4814 | break; | ||||
4815 | } | ||||
4816 | case Intrinsic::trunc: { | ||||
4817 | KnownFPClass KnownSrc; | ||||
4818 | |||||
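| // trunc rounds toward zero, so normals and subnormals with magnitude below | ||||
| // one become zero; widen the classes queried on the source accordingly. | ||||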
4819 | FPClassTest InterestedSrcs = InterestedClasses; | ||||
4820 | if (InterestedClasses & fcZero) | ||||
4821 | InterestedSrcs |= fcNormal | fcSubnormal; | ||||
4822 | |||||
4823 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, | ||||
4824 | KnownSrc, Depth + 1, Q, TLI); | ||||
4825 | |||||
4826 | // Integer results cannot be subnormal. | ||||
4827 | Known.knownNot(fcSubnormal); | ||||
4828 | |||||
4829 | // trunc passes through infinities. | ||||
4830 | if (KnownSrc.isKnownNeverPosInfinity()) | ||||
4831 | Known.knownNot(fcPosInf); | ||||
4832 | if (KnownSrc.isKnownNeverNegInfinity()) | ||||
4833 | Known.knownNot(fcNegInf); | ||||
4834 | |||||
4835 | // Non-constrained intrinsics do not guarantee signaling nan quieting. | ||||
4836 | if (KnownSrc.isKnownNeverNaN()) | ||||
4837 | Known.knownNot(fcNan); | ||||
4838 | |||||
4839 | if (KnownSrc.isKnownNever(fcPosNormal)) | ||||
4840 | Known.knownNot(fcPosNormal); | ||||
4841 | |||||
4842 | if (KnownSrc.isKnownNever(fcNegNormal)) | ||||
4843 | Known.knownNot(fcNegNormal); | ||||
4844 | |||||
4845 | if (KnownSrc.isKnownNever(fcPosZero | fcPosSubnormal | fcPosNormal)) | ||||
4846 | Known.knownNot(fcPosZero); | ||||
4847 | |||||
4848 | if (KnownSrc.isKnownNever(fcNegZero | fcNegSubnormal | fcNegNormal)) | ||||
4849 | Known.knownNot(fcNegZero); | ||||
4850 | |||||
4851 | // Sign should be preserved | ||||
4852 | Known.SignBit = KnownSrc.SignBit; | ||||
4853 | break; | ||||
4854 | } | ||||
4855 | case Intrinsic::exp: | ||||
4856 | case Intrinsic::exp2: { | ||||
4857 | Known.knownNot(fcNegative); | ||||
4858 | if ((InterestedClasses & fcNan) == fcNone) | ||||
4859 | break; | ||||
4860 | |||||
4861 | KnownFPClass KnownSrc; | ||||
4862 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4863 | InterestedClasses, KnownSrc, Depth + 1, Q, TLI); | ||||
4864 | if (KnownSrc.isKnownNeverNaN()) { | ||||
4865 | Known.knownNot(fcNan); | ||||
4866 | Known.SignBit = false; | ||||
4867 | } | ||||
4868 | |||||
4869 | break; | ||||
4870 | } | ||||
4871 | case Intrinsic::fptrunc_round: { | ||||
4872 | computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, | ||||
4873 | Known, Depth, Q, TLI); | ||||
4874 | break; | ||||
4875 | } | ||||
4876 | case Intrinsic::log: | ||||
4877 | case Intrinsic::log10: | ||||
4878 | case Intrinsic::log2: | ||||
4879 | case Intrinsic::experimental_constrained_log: | ||||
4880 | case Intrinsic::experimental_constrained_log10: | ||||
4881 | case Intrinsic::experimental_constrained_log2: { | ||||
4882 | // log(+inf) -> +inf | ||||
4883 | // log([+-]0.0) -> -inf | ||||
4884 | // log(-inf) -> nan | ||||
4885 | // log(-x) -> nan | ||||
4886 | if ((InterestedClasses & (fcNan | fcInf)) == fcNone) | ||||
4887 | break; | ||||
4888 | |||||
4889 | FPClassTest InterestedSrcs = InterestedClasses; | ||||
4890 | if ((InterestedClasses & fcNegInf) != fcNone) | ||||
4891 | InterestedSrcs |= fcZero | fcSubnormal; | ||||
4892 | if ((InterestedClasses & fcNan) != fcNone) | ||||
4893 | InterestedSrcs |= fcNan | (fcNegative & ~fcNan); | ||||
4894 | |||||
4895 | KnownFPClass KnownSrc; | ||||
4896 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, | ||||
4897 | KnownSrc, Depth + 1, Q, TLI); | ||||
4898 | |||||
4899 | if (KnownSrc.isKnownNeverPosInfinity()) | ||||
4900 | Known.knownNot(fcPosInf); | ||||
4901 | |||||
4902 | if (KnownSrc.isKnownNeverNaN() && | ||||
4903 | KnownSrc.cannotBeOrderedLessThanZero()) | ||||
4904 | Known.knownNot(fcNan); | ||||
4905 | |||||
4906 | if (KnownSrc.isKnownNeverLogicalZero(*II->getFunction(), II->getType())) | ||||
4907 | Known.knownNot(fcNegInf); | ||||
4908 | |||||
4909 | break; | ||||
4910 | } | ||||
4911 | case Intrinsic::powi: { | ||||
4912 | if ((InterestedClasses & fcNegative) == fcNone) | ||||
4913 | break; | ||||
4914 | |||||
4915 | const Value *Exp = II->getArgOperand(1); | ||||
4916 | unsigned BitWidth = | ||||
4917 | Exp->getType()->getScalarType()->getIntegerBitWidth(); | ||||
4918 | KnownBits ExponentKnownBits(BitWidth); | ||||
4919 | computeKnownBits(Exp, DemandedElts, ExponentKnownBits, Depth + 1, Q); | ||||
4920 | |||||
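| // An even exponent makes powi(x, e) non-negative (or NaN) for any x. | ||||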
4921 | if (ExponentKnownBits.Zero[0]) { // Is even | ||||
4922 | Known.knownNot(fcNegative); | ||||
4923 | break; | ||||
4924 | } | ||||
4925 | |||||
4926 | // Given that exp is an integer, here are the | ||||
4927 | // ways that pow can return a negative value: | ||||
4928 | // | ||||
4929 | // pow(-x, exp) --> negative if exp is odd and x is negative. | ||||
4930 | // pow(-0, exp) --> -inf if exp is negative odd. | ||||
4931 | // pow(-0, exp) --> -0 if exp is positive odd. | ||||
4932 | // pow(-inf, exp) --> -0 if exp is negative odd. | ||||
4933 | // pow(-inf, exp) --> -inf if exp is positive odd. | ||||
4934 | KnownFPClass KnownSrc; | ||||
4935 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative, | ||||
4936 | KnownSrc, Depth + 1, Q, TLI); | ||||
4937 | if (KnownSrc.isKnownNever(fcNegative)) | ||||
4938 | Known.knownNot(fcNegative); | ||||
4939 | break; | ||||
4940 | } | ||||
4941 | case Intrinsic::arithmetic_fence: { | ||||
4942 | computeKnownFPClass(II->getArgOperand(0), DemandedElts, | ||||
4943 | InterestedClasses, Known, Depth + 1, Q, TLI); | ||||
4944 | break; | ||||
4945 | } | ||||
4946 | case Intrinsic::experimental_constrained_sitofp: | ||||
4947 | case Intrinsic::experimental_constrained_uitofp: | ||||
4948 | // Cannot produce nan | ||||
4949 | Known.knownNot(fcNan); | ||||
4950 | |||||
4951 | // sitofp and uitofp turn into +0.0 for zero. | ||||
4952 | Known.knownNot(fcNegZero); | ||||
4953 | |||||
4954 | // Integers cannot be subnormal | ||||
4955 | Known.knownNot(fcSubnormal); | ||||
4956 | |||||
4957 | if (IID == Intrinsic::experimental_constrained_uitofp) | ||||
4958 | Known.signBitMustBeZero(); | ||||
4959 | |||||
4960 | // TODO: Copy inf handling from instructions | ||||
4961 | break; | ||||
4962 | default: | ||||
4963 | break; | ||||
4964 | } | ||||
4965 | } | ||||
4966 | |||||
4967 | break; | ||||
4968 | } | ||||
4969 | case Instruction::FAdd: | ||||
4970 | case Instruction::FSub: { | ||||
4971 | KnownFPClass KnownLHS, KnownRHS; | ||||
4972 | computeKnownFPClass(Op->getOperand(1), DemandedElts, fcNan | fcInf, | ||||
4973 | KnownRHS, Depth + 1, Q, TLI); | ||||
4974 | if (KnownRHS.isKnownNeverNaN()) { | ||||
4975 | // RHS is canonically cheaper to compute. Skip inspecting the LHS if | ||||
4976 | // there's no point. | ||||
4977 | computeKnownFPClass(Op->getOperand(0), DemandedElts, fcNan | fcInf, | ||||
4978 | KnownLHS, Depth + 1, Q, TLI); | ||||
4979 | // Adding positive and negative infinity produces NaN. | ||||
4980 | // TODO: Check sign of infinities. | ||||
4981 | if (KnownLHS.isKnownNeverNaN() && | ||||
4982 | (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity())) | ||||
4983 | Known.knownNot(fcNan); | ||||
4984 | } | ||||
4985 | |||||
4986 | break; | ||||
4987 | } | ||||
4988 | case Instruction::FMul: { | ||||
4989 | // X * X is always non-negative or a NaN. | ||||
4990 | if (Op->getOperand(0) == Op->getOperand(1)) | ||||
4991 | Known.knownNot(fcNegative); | ||||
4992 | |||||
4993 | if ((InterestedClasses & fcNan) != fcNan) | ||||
4994 | break; | ||||
4995 | |||||
4996 | KnownFPClass KnownLHS, KnownRHS; | ||||
4997 | computeKnownFPClass(Op->getOperand(1), DemandedElts, | ||||
4998 | fcNan | fcInf | fcZero | fcSubnormal, KnownRHS, | ||||
4999 | Depth + 1, Q, TLI); | ||||
5000 | if (KnownRHS.isKnownNeverNaN() && | ||||
5001 | (KnownRHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverZero())) { | ||||
5002 | computeKnownFPClass(Op->getOperand(0), DemandedElts, | ||||
5003 | fcNan | fcInf | fcZero, KnownLHS, Depth + 1, Q, TLI); | ||||
5004 | if (!KnownLHS.isKnownNeverNaN()) | ||||
5005 | break; | ||||
5006 | |||||
5007 | const Function *F = cast<Instruction>(Op)->getFunction(); | ||||
5008 | |||||
5009 | // If neither side can be zero (or nan) fmul never produces NaN. | ||||
5010 | // TODO: Check operand combinations. | ||||
5011 | // e.g. fmul nofpclass(inf nan zero), nofpclass(nan) -> nofpclass(nan) | ||||
5012 | if ((KnownLHS.isKnownNeverInfinity() || | ||||
5013 | KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) && | ||||
5014 | (KnownRHS.isKnownNeverInfinity() || | ||||
5015 | KnownRHS.isKnownNeverLogicalZero(*F, Op->getType()))) | ||||
5016 | Known.knownNot(fcNan); | ||||
5017 | } | ||||
5018 | |||||
5019 | break; | ||||
5020 | } | ||||
5021 | case Instruction::FDiv: | ||||
5022 | case Instruction::FRem: { | ||||
5023 | if (Op->getOperand(0) == Op->getOperand(1)) { | ||||
5024 | // TODO: Could filter out snan if we inspect the operand | ||||
5025 | if (Op->getOpcode() == Instruction::FDiv) { | ||||
5026 | // X / X is always exactly 1.0 or a NaN. | ||||
5027 | Known.KnownFPClasses = fcNan | fcPosNormal; | ||||
5028 | } else { | ||||
5029 | // X % X is always exactly [+-]0.0 or a NaN. | ||||
5030 | Known.KnownFPClasses = fcNan | fcZero; | ||||
5031 | } | ||||
5032 | |||||
5033 | break; | ||||
5034 | } | ||||
5035 | |||||
5036 | const bool WantNan = (InterestedClasses & fcNan) != fcNone; | ||||
5037 | const bool WantNegative = (InterestedClasses & fcNegative) != fcNone; | ||||
5038 | const bool WantPositive = | ||||
5039 | Opc == Instruction::FRem && (InterestedClasses & fcPositive) != fcNone; | ||||
5040 | if (!WantNan && !WantNegative && !WantPositive) | ||||
5041 | break; | ||||
5042 | |||||
5043 | KnownFPClass KnownLHS, KnownRHS; | ||||
5044 | |||||
5045 | computeKnownFPClass(Op->getOperand(1), DemandedElts, | ||||
5046 | fcNan | fcInf | fcZero | fcNegative, KnownRHS, | ||||
5047 | Depth + 1, Q, TLI); | ||||
5048 | |||||
5049 | bool KnowSomethingUseful = | ||||
5050 | KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative); | ||||
5051 | |||||
5052 | if (KnowSomethingUseful || WantPositive) { | ||||
5053 | const FPClassTest InterestedLHS = | ||||
5054 | WantPositive ? fcAllFlags | ||||
5055 | : fcNan | fcInf | fcZero | fcSubnormal | fcNegative; | ||||
5056 | |||||
5057 | computeKnownFPClass(Op->getOperand(0), DemandedElts, | ||||
5058 | InterestedClasses & InterestedLHS, KnownLHS, | ||||
5059 | Depth + 1, Q, TLI); | ||||
5060 | } | ||||
5061 | |||||
5062 | const Function *F = cast<Instruction>(Op)->getFunction(); | ||||
5063 | |||||
5064 | if (Op->getOpcode() == Instruction::FDiv) { | ||||
5065 | // Only 0/0, Inf/Inf produce NaN. | ||||
5066 | if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() && | ||||
5067 | (KnownLHS.isKnownNeverInfinity() || | ||||
5068 | KnownRHS.isKnownNeverInfinity()) && | ||||
5069 | (KnownLHS.isKnownNeverLogicalZero(*F, Op->getType()) || | ||||
5070 | KnownRHS.isKnownNeverLogicalZero(*F, Op->getType()))) { | ||||
5071 | Known.knownNot(fcNan); | ||||
5072 | } | ||||
5073 | |||||
5074 | // X / -0.0 is -Inf (or NaN). | ||||
5075 | // +X / +X is +X | ||||
5076 | if (KnownLHS.isKnownNever(fcNegative) && KnownRHS.isKnownNever(fcNegative)) | ||||
5077 | Known.knownNot(fcNegative); | ||||
5078 | } else { | ||||
5079 | // Inf REM x and x REM 0 produce NaN. | ||||
5080 | if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() && | ||||
5081 | KnownLHS.isKnownNeverInfinity() && | ||||
5082 | KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())) { | ||||
5083 | Known.knownNot(fcNan); | ||||
5084 | } | ||||
5085 | |||||
5086 | // The sign for frem is the same as the first operand. | ||||
5087 | if (KnownLHS.isKnownNever(fcNegative)) | ||||
5088 | Known.knownNot(fcNegative); | ||||
5089 | if (KnownLHS.isKnownNever(fcPositive)) | ||||
5090 | Known.knownNot(fcPositive); | ||||
5091 | } | ||||
5092 | |||||
5093 | break; | ||||
5094 | } | ||||
5095 | case Instruction::FPExt: { | ||||
5096 | // Infinity, nan and zero propagate from source. | ||||
5097 | computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, | ||||
5098 | Known, Depth + 1, Q, TLI); | ||||
5099 | |||||
5100 | const fltSemantics &DstTy = | ||||
5101 | Op->getType()->getScalarType()->getFltSemantics(); | ||||
5102 | const fltSemantics &SrcTy = | ||||
5103 | Op->getOperand(0)->getType()->getScalarType()->getFltSemantics(); | ||||
5104 | |||||
5105 | // All subnormal inputs should be in the normal range in the result type. | ||||
5106 | if (APFloat::isRepresentableAsNormalIn(SrcTy, DstTy)) | ||||
5107 | Known.knownNot(fcSubnormal); | ||||
5108 | |||||
5109 | // Sign bit of a nan isn't guaranteed. | ||||
5110 | if (!Known.isKnownNeverNaN()) | ||||
5111 | Known.SignBit = std::nullopt; | ||||
5112 | break; | ||||
5113 | } | ||||
5114 | case Instruction::FPTrunc: { | ||||
5115 | computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known, | ||||
5116 | Depth, Q, TLI); | ||||
5117 | break; | ||||
5118 | } | ||||
5119 | case Instruction::SIToFP: | ||||
5120 | case Instruction::UIToFP: { | ||||
5121 | // Cannot produce nan | ||||
5122 | Known.knownNot(fcNan); | ||||
5123 | |||||
5124 | // Integers cannot be subnormal | ||||
5125 | Known.knownNot(fcSubnormal); | ||||
5126 | |||||
5127 | // sitofp and uitofp turn into +0.0 for zero. | ||||
5128 | Known.knownNot(fcNegZero); | ||||
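| // An unsigned source can never yield a negative result, so the sign bit is | ||||
| // known clear. | ||||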
5129 | if (Op->getOpcode() == Instruction::UIToFP) | ||||
5130 | Known.signBitMustBeZero(); | ||||
5131 | |||||
5132 | if (InterestedClasses & fcInf) { | ||||
5133 | // Get width of largest magnitude integer (remove a bit if signed). | ||||
5134 | // This still works for a signed minimum value because the largest FP | ||||
5135 | // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx). | ||||
5136 | int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits(); | ||||
5137 | if (Op->getOpcode() == Instruction::SIToFP) | ||||
5138 | --IntSize; | ||||
5139 | |||||
5140 | // If the exponent of the largest finite FP value can hold the largest | ||||
5141 | // integer, the result of the cast must be finite. | ||||
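| // e.g. ilogb(FLT_MAX) == 127, so any integer of at most 127 magnitude bits | ||||
| // (such as u32 or i64) always converts to a finite float. | ||||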
5142 | Type *FPTy = Op->getType()->getScalarType(); | ||||
5143 | if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize) | ||||
5144 | Known.knownNot(fcInf); | ||||
5145 | } | ||||
5146 | |||||
5147 | break; | ||||
5148 | } | ||||
5149 | case Instruction::ExtractElement: { | ||||
5150 | // Look through extract element. If the index is non-constant or | ||||
5151 | // out-of-range demand all elements, otherwise just the extracted element. | ||||
5152 | const Value *Vec = Op->getOperand(0); | ||||
5153 | const Value *Idx = Op->getOperand(1); | ||||
5154 | auto *CIdx = dyn_cast<ConstantInt>(Idx); | ||||
5155 | |||||
5156 | if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { | ||||
5157 | unsigned NumElts = VecTy->getNumElements(); | ||||
5158 | APInt DemandedVecElts = APInt::getAllOnes(NumElts); | ||||
5159 | if (CIdx && CIdx->getValue().ult(NumElts)) | ||||
5160 | DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); | ||||
5161 | return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known, | ||||
5162 | Depth + 1, Q, TLI); | ||||
5163 | } | ||||
5164 | |||||
5165 | break; | ||||
5166 | } | ||||
5167 | case Instruction::InsertElement: { | ||||
5168 | if (isa<ScalableVectorType>(Op->getType())) | ||||
5169 | return; | ||||
5170 | |||||
5171 | const Value *Vec = Op->getOperand(0); | ||||
5172 | const Value *Elt = Op->getOperand(1); | ||||
5173 | auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2)); | ||||
5174 | // Early out if the index is non-constant or out-of-range. | ||||
5175 | unsigned NumElts = DemandedElts.getBitWidth(); | ||||
5176 | if (!CIdx || CIdx->getValue().uge(NumElts)) | ||||
5177 | return; | ||||
5178 | |||||
5179 | unsigned EltIdx = CIdx->getZExtValue(); | ||||
5180 | // Do we demand the inserted element? | ||||
5181 | if (DemandedElts[EltIdx]) { | ||||
5182 | computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q, TLI); | ||||
5183 | // If we don't know any bits, early out. | ||||
5184 | if (Known.isUnknown()) | ||||
5185 | break; | ||||
5186 | } else { | ||||
5187 | Known.KnownFPClasses = fcNone; | ||||
5188 | } | ||||
5189 | |||||
5190 | // We don't need the base vector element that has been inserted. | ||||
5191 | APInt DemandedVecElts = DemandedElts; | ||||
5192 | DemandedVecElts.clearBit(EltIdx); | ||||
5193 | if (!!DemandedVecElts) { | ||||
5194 | KnownFPClass Known2; | ||||
5195 | computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2, | ||||
5196 | Depth + 1, Q, TLI); | ||||
5197 | Known |= Known2; | ||||
5198 | } | ||||
5199 | |||||
5200 | break; | ||||
5201 | } | ||||
5202 | case Instruction::ShuffleVector: { | ||||
5203 | // For undef elements, we don't know anything about the common state of | ||||
5204 | // the shuffle result. | ||||
5205 | APInt DemandedLHS, DemandedRHS; | ||||
5206 | auto *Shuf = dyn_cast<ShuffleVectorInst>(Op); | ||||
5207 | if (!Shuf || !getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) | ||||
5208 | return; | ||||
5209 | |||||
5210 | if (!!DemandedLHS) { | ||||
5211 | const Value *LHS = Shuf->getOperand(0); | ||||
5212 | computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known, | ||||
5213 | Depth + 1, Q, TLI); | ||||
5214 | |||||
5215 | // If we don't know any bits, early out. | ||||
5216 | if (Known.isUnknown()) | ||||
5217 | break; | ||||
5218 | } else { | ||||
5219 | Known.KnownFPClasses = fcNone; | ||||
5220 | } | ||||
5221 | |||||
5222 | if (!!DemandedRHS) { | ||||
5223 | KnownFPClass Known2; | ||||
5224 | const Value *RHS = Shuf->getOperand(1); | ||||
5225 | computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2, | ||||
5226 | Depth + 1, Q, TLI); | ||||
5227 | Known |= Known2; | ||||
5228 | } | ||||
5229 | |||||
5230 | break; | ||||
5231 | } | ||||
5232 | case Instruction::ExtractValue: { | ||||
5233 | computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, | ||||
5234 | Known, Depth + 1, Q, TLI); | ||||
5235 | break; | ||||
5236 | } | ||||
5237 | default: | ||||
5238 | break; | ||||
5239 | } | ||||
5240 | } | ||||
5241 | |||||
5242 | KnownFPClass llvm::computeKnownFPClass( | ||||
5243 | const Value *V, const APInt &DemandedElts, const DataLayout &DL, | ||||
5244 | FPClassTest InterestedClasses, unsigned Depth, const TargetLibraryInfo *TLI, | ||||
5245 | AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, | ||||
5246 | OptimizationRemarkEmitter *ORE, bool UseInstrInfo) { | ||||
5247 | KnownFPClass KnownClasses; | ||||
5248 | ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth, | ||||
5249 | Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE), | ||||
5250 | TLI); | ||||
5251 | return KnownClasses; | ||||
5252 | } | ||||
5253 | |||||
5254 | KnownFPClass | ||||
5255 | llvm::computeKnownFPClass(const Value *V, const DataLayout &DL, | ||||
5256 | FPClassTest InterestedClasses, unsigned Depth, | ||||
5257 | const TargetLibraryInfo *TLI, AssumptionCache *AC, | ||||
5258 | const Instruction *CxtI, const DominatorTree *DT, | ||||
5259 | OptimizationRemarkEmitter *ORE, bool UseInstrInfo) { | ||||
5260 | KnownFPClass Known; | ||||
5261 | ::computeKnownFPClass(V, Known, InterestedClasses, Depth, | ||||
5262 | Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE), | ||||
5263 | TLI); | ||||
5264 | return Known; | ||||
5265 | } | ||||
5266 | |||||
5267 | Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) { | ||||
5268 | |||||
5269 | // All byte-wide stores are splatable, even of arbitrary variables. | ||||
5270 | if (V->getType()->isIntegerTy(8)) | ||||
5271 | return V; | ||||
5272 | |||||
5273 | LLVMContext &Ctx = V->getContext(); | ||||
5274 | |||||
5275 | // Undef values don't care what byte they are. | ||||
5276 | auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx)); | ||||
5277 | if (isa<UndefValue>(V)) | ||||
5278 | return UndefInt8; | ||||
5279 | |||||
5280 | // Return Undef for zero-sized type. | ||||
5281 | if (!DL.getTypeStoreSize(V->getType()).isNonZero()) | ||||
5282 | return UndefInt8; | ||||
5283 | |||||
5284 | Constant *C = dyn_cast<Constant>(V); | ||||
5285 | if (!C) { | ||||
5286 | // Conceptually, we could handle things like: | ||||
5287 | // %a = zext i8 %X to i16 | ||||
5288 | // %b = shl i16 %a, 8 | ||||
5289 | // %c = or i16 %a, %b | ||||
5290 | // but until there is an example that actually needs this, it doesn't seem | ||||
5291 | // worth worrying about. | ||||
5292 | return nullptr; | ||||
5293 | } | ||||
5294 | |||||
5295 | // Handle 'null' ConstantAggregateZero etc. | ||||
5296 | if (C->isNullValue()) | ||||
5297 | return Constant::getNullValue(Type::getInt8Ty(Ctx)); | ||||
5298 | |||||
5299 | // Constant floating-point values can be handled as integer values if the | ||||
5300 | // corresponding integer value is "byteable". An important case is 0.0. | ||||
5301 | if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { | ||||
5302 | Type *Ty = nullptr; | ||||
5303 | if (CFP->getType()->isHalfTy()) | ||||
5304 | Ty = Type::getInt16Ty(Ctx); | ||||
5305 | else if (CFP->getType()->isFloatTy()) | ||||
5306 | Ty = Type::getInt32Ty(Ctx); | ||||
5307 | else if (CFP->getType()->isDoubleTy()) | ||||
5308 | Ty = Type::getInt64Ty(Ctx); | ||||
5309 | // Don't handle long double formats, which have strange constraints. | ||||
5310 | return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL) | ||||
5311 | : nullptr; | ||||
5312 | } | ||||
5313 | |||||
5314 | // We can handle constant integers whose width is a multiple of 8 bits. | ||||
5315 | if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { | ||||
5316 | if (CI->getBitWidth() % 8 == 0) { | ||||
5317 | assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); | ||||
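| // e.g. i32 0xAAAAAAAA splats to the byte 0xAA; i32 0x01020304 has no single | ||||
| // splat byte and is rejected below. | ||||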
5318 | if (!CI->getValue().isSplat(8)) | ||||
5319 | return nullptr; | ||||
5320 | return ConstantInt::get(Ctx, CI->getValue().trunc(8)); | ||||
5321 | } | ||||
5322 | } | ||||
5323 | |||||
5324 | if (auto *CE = dyn_cast<ConstantExpr>(C)) { | ||||
5325 | if (CE->getOpcode() == Instruction::IntToPtr) { | ||||
5326 | if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) { | ||||
5327 | unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace()); | ||||
5328 | return isBytewiseValue( | ||||
5329 | ConstantExpr::getIntegerCast(CE->getOperand(0), | ||||
5330 | Type::getIntNTy(Ctx, BitWidth), false), | ||||
5331 | DL); | ||||
5332 | } | ||||
5333 | } | ||||
5334 | } | ||||
5335 | |||||
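| // Merge two candidate byte values: undef matches anything, equal values | ||||
| // merge, and any other conflict yields nullptr. | ||||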
5336 | auto Merge = [&](Value *LHS, Value *RHS) -> Value * { | ||||
5337 | if (LHS == RHS) | ||||
5338 | return LHS; | ||||
5339 | if (!LHS || !RHS) | ||||
5340 | return nullptr; | ||||
5341 | if (LHS == UndefInt8) | ||||
5342 | return RHS; | ||||
5343 | if (RHS == UndefInt8) | ||||
5344 | return LHS; | ||||
5345 | return nullptr; | ||||
5346 | }; | ||||
5347 | |||||
5348 | if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) { | ||||
5349 | Value *Val = UndefInt8; | ||||
5350 | for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I) | ||||
5351 | if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL)))) | ||||
5352 | return nullptr; | ||||
5353 | return Val; | ||||
5354 | } | ||||
5355 | |||||
5356 | if (isa<ConstantAggregate>(C)) { | ||||
5357 | Value *Val = UndefInt8; | ||||
5358 | for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) | ||||
5359 | if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL)))) | ||||
5360 | return nullptr; | ||||
5361 | return Val; | ||||
5362 | } | ||||
5363 | |||||
5364 | // Don't try to handle the handful of other constants. | ||||
5365 | return nullptr; | ||||
5366 | } | ||||
5367 | |||||
5368 | // This is the recursive version of BuildSubAggregate. Idxs is the index | ||||
5369 | // within the nested struct From that we are looking at now (which is of | ||||
5370 | // type IndexedType). IdxSkip is the number of indices from Idxs that | ||||
5371 | // should be left out when inserting into the resulting struct. To is the | ||||
5372 | // result struct built so far; new insertvalue instructions build on | ||||
5373 | // that. | ||||
5374 | static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, | ||||
5375 | SmallVectorImpl<unsigned> &Idxs, | ||||
5376 | unsigned IdxSkip, | ||||
5377 | Instruction *InsertBefore) { | ||||
5378 | StructType *STy = dyn_cast<StructType>(IndexedType); | ||||
| |||||
5379 | if (STy
| ||||
5380 | // Save the original To argument so we can modify it | ||||
5381 | Value *OrigTo = To; | ||||
5382 | // General case, the type indexed by Idxs is a struct | ||||
5383 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { | ||||
5384 | // Process each struct element recursively | ||||
5385 | Idxs.push_back(i); | ||||
5386 | Value *PrevTo = To; | ||||
5387 | To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, | ||||
5388 | InsertBefore); | ||||
5389 | Idxs.pop_back(); | ||||
5390 | if (!To) { | ||||
5391 | // Couldn't find any inserted value for this index? Cleanup | ||||
5392 | while (PrevTo != OrigTo) { | ||||
5393 | InsertValueInst* Del = cast<InsertValueInst>(PrevTo); | ||||
5394 | PrevTo = Del->getAggregateOperand(); | ||||
5395 | Del->eraseFromParent(); | ||||
5396 | } | ||||
5397 | // Stop processing elements | ||||
5398 | break; | ||||
5399 | } | ||||
5400 | } | ||||
5401 | // If we successfully found a value for each of our subaggregates | ||||
5402 | if (To) | ||||
5403 | return To; | ||||
5404 | } | ||||
5405 | // Base case, the type indexed by Idxs is not a struct, or not all of | ||||
5406 | // the struct's elements had a value that was inserted directly. In the latter | ||||
5407 | // case, perhaps we can't determine each of the subelements individually, but | ||||
5408 | // we might be able to find the complete struct somewhere. | ||||
5409 | |||||
5410 | // Find the value that is at that particular spot | ||||
5411 | Value *V = FindInsertedValue(From, Idxs); | ||||
5412 | |||||
5413 | if (!V) | ||||
5414 | return nullptr; | ||||
5415 | |||||
5416 | // Insert the value in the new (sub) aggregate | ||||
5417 | return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp", | ||||
5418 | InsertBefore); | ||||
5419 | } | ||||
5420 | |||||
5421 | // This helper takes a nested struct and extracts a part of it (which is again a | ||||
5422 | // struct) into a new value. For example, given the struct: | ||||
5423 | // { a, { b, { c, d }, e } } | ||||
5424 | // and the indices "1, 1" this returns | ||||
5425 | // { c, d }. | ||||
5426 | // | ||||
5427 | // It does this by inserting an insertvalue for each element in the resulting | ||||
5428 | // struct, as opposed to just inserting a single struct. This will only work if | ||||
5429 | // each of the elements of the substruct is known (i.e., inserted into From by an | ||||
5430 | // insertvalue instruction somewhere). | ||||
5431 | // | ||||
5432 | // All inserted insertvalue instructions are inserted before InsertBefore | ||||
5433 | static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, | ||||
5434 | Instruction *InsertBefore) { | ||||
5435 | assert(InsertBefore && "Must have someplace to insert!"); | ||||
5436 | Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), | ||||
5437 | idx_range); | ||||
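| // Start from a poison aggregate; BuildSubAggregate fills in each element it | ||||
| // can recover with insertvalue instructions. | ||||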
5438 | Value *To = PoisonValue::get(IndexedType); | ||||
5439 | SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); | ||||
5440 | unsigned IdxSkip = Idxs.size(); | ||||
5441 | |||||
5442 | return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); | ||||
5443 | } | ||||
5444 | |||||
5445 | /// Given an aggregate and a sequence of indices, see if the scalar value | ||||
5446 | /// indexed is already around as a register, for example if it was inserted | ||||
5447 | /// directly into the aggregate. | ||||
5448 | /// | ||||
5449 | /// If InsertBefore is not null, this function will duplicate (modified) | ||||
5450 | /// insertvalues when a part of a nested struct is extracted. | ||||
5451 | Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, | ||||
5452 | Instruction *InsertBefore) { | ||||
5453 | // Nothing to index? Just return V then (this is useful at the end of our | ||||
5454 | // recursion). | ||||
5455 | if (idx_range.empty()) | ||||
5456 | return V; | ||||
5457 | // We have indices, so V should have an indexable type. | ||||
5458 | assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && | ||||
5459 | "Not looking at a struct or array?"); | ||||
5460 | assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && | ||||
5461 | "Invalid indices for type?"); | ||||
5462 | |||||
5463 | if (Constant *C = dyn_cast<Constant>(V)) { | ||||
5464 | C = C->getAggregateElement(idx_range[0]); | ||||
5465 | if (!C) return nullptr; | ||||
5466 | return FindInsertedValue(C, idx_range.slice(1), InsertBefore); | ||||
5467 | } | ||||
5468 | |||||
5469 | if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { | ||||
5470 | // Loop the indices for the insertvalue instruction in parallel with the | ||||
5471 | // requested indices | ||||
5472 | const unsigned *req_idx = idx_range.begin(); | ||||
5473 | for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); | ||||
5474 | i != e; ++i, ++req_idx) { | ||||
5475 | if (req_idx == idx_range.end()) { | ||||
5476 | // We can't handle this without inserting insertvalues | ||||
5477 | if (!InsertBefore) | ||||
5478 | return nullptr; | ||||
5479 | |||||
5480 | // The requested index identifies a part of a nested aggregate. Handle | ||||
5481 | // this specially. For example, | ||||
5482 | // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0 | ||||
5483 | // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1 | ||||
5484 | // %C = extractvalue {i32, { i32, i32 } } %B, 1 | ||||
5485 | // This can be changed into | ||||
5486 | // %A = insertvalue {i32, i32 } undef, i32 10, 0 | ||||
5487 | // %C = insertvalue {i32, i32 } %A, i32 11, 1 | ||||
5488 | // which allows the unused 0,0 element from the nested struct to be | ||||
5489 | // removed. | ||||
5490 | return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx), | ||||
5491 | InsertBefore); | ||||
5492 | } | ||||
5493 | |||||
5494 | // This insertvalue inserts something other than what we are looking for. | ||||
5495 | // In that case, see if the (aggregate) value it was inserted into has the | ||||
5496 | // value we are looking for. | ||||
5497 | if (*req_idx != *i) | ||||
5498 | return FindInsertedValue(I->getAggregateOperand(), idx_range, | ||||
5499 | InsertBefore); | ||||
5500 | } | ||||
5501 | // If we end up here, the indices of the insertvalue match with those | ||||
5502 | // requested (though possibly only partially). Now we recursively look at | ||||
5503 | // the inserted value, passing any remaining indices. | ||||
5504 | return FindInsertedValue(I->getInsertedValueOperand(), | ||||
5505 | ArrayRef(req_idx, idx_range.end()), InsertBefore); | ||||
5506 | } | ||||
5507 | |||||
5508 | if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { | ||||
5509 | // If we're extracting a value from an aggregate that was itself extracted | ||||
5510 | // from another aggregate, we can extract from the original directly instead, | ||||
5511 | // but we have to chain I's indices onto the requested indices. | ||||
5512 | |||||
5513 | // Calculate the number of indices required | ||||
5514 | unsigned size = I->getNumIndices() + idx_range.size(); | ||||
5515 | // Allocate some space to put the new indices in | ||||
5516 | SmallVector<unsigned, 5> Idxs; | ||||
5517 | Idxs.reserve(size); | ||||
5518 | // Add indices from the extract value instruction | ||||
5519 | Idxs.append(I->idx_begin(), I->idx_end()); | ||||
5520 | |||||
5521 | // Add requested indices | ||||
5522 | Idxs.append(idx_range.begin(), idx_range.end()); | ||||
5523 | |||||
5524 | assert(Idxs.size() == size && | ||||
5525 | "Number of indices added not correct?"); | ||||
5526 | |||||
5527 | return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); | ||||
5528 | } | ||||
5529 | // Otherwise, we don't know (e.g., extracting from a function return value | ||||
5530 | // or a load instruction). | ||||
5531 | return nullptr; | ||||
5532 | } | ||||
5533 | |||||
5534 | bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP, | ||||
5535 | unsigned CharSize) { | ||||
5536 | // Make sure the GEP has exactly three arguments. | ||||
5537 | if (GEP->getNumOperands() != 3) | ||||
5538 | return false; | ||||
5539 | |||||
5540 | // Make sure the index-ee is a pointer to an array of \p CharSize-bit | ||||
5541 | // integers. | ||||
5542 | ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType()); | ||||
5543 | if (!AT || !AT->getElementType()->isIntegerTy(CharSize)) | ||||
5544 | return false; | ||||
5545 | |||||
5546 | // Check to make sure that the first operand of the GEP is an integer and | ||||
5547 | // has value 0 so that we are sure we're indexing into the initializer. | ||||
5548 | const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); | ||||
5549 | if (!FirstIdx || !FirstIdx->isZero()) | ||||
5550 | return false; | ||||
5551 | |||||
5552 | return true; | ||||
5553 | } | ||||
5554 | |||||
5555 | // If V refers to an initialized global constant, set Slice either to | ||||
5556 | // its initializer if the size of its elements equals ElementSize, or, | ||||
5557 | // for ElementSize == 8, to its representation as an array of unsigned | ||||
5558 | // char. Return true on success. | ||||
5559 | // Offset is in the unit "nr of ElementSize sized elements". | ||||
5560 | bool llvm::getConstantDataArrayInfo(const Value *V, | ||||
5561 | ConstantDataArraySlice &Slice, | ||||
5562 | unsigned ElementSize, uint64_t Offset) { | ||||
5563 | assert(V && "V should not be null."); | ||||
5564 | assert((ElementSize % 8) == 0 && | ||||
5565 | "ElementSize expected to be a multiple of the size of a byte."); | ||||
5566 | unsigned ElementSizeInBytes = ElementSize / 8; | ||||
5567 | |||||
5568 | // Drill down into the pointer expression V, ignoring any intervening | ||||
5569 | // casts, and determine the identity of the object it references along | ||||
5570 | // with the cumulative byte offset into it. | ||||
5571 | const GlobalVariable *GV = | ||||
5572 | dyn_cast<GlobalVariable>(getUnderlyingObject(V)); | ||||
5573 | if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) | ||||
5574 | // Fail if V is not based on a constant global object. | ||||
5575 | return false; | ||||
5576 | |||||
5577 | const DataLayout &DL = GV->getParent()->getDataLayout(); | ||||
5578 | APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0); | ||||
5579 | |||||
5580 | if (GV != V->stripAndAccumulateConstantOffsets(DL, Off, | ||||
5581 | /*AllowNonInbounds*/ true)) | ||||
5582 | // Fail if a constant offset could not be determined. | ||||
5583 | return false; | ||||
5584 | |||||
5585 | uint64_t StartIdx = Off.getLimitedValue(); | ||||
5586 | if (StartIdx == UINT64_MAX) | ||||
5587 | // Fail if the constant offset is excessive. | ||||
5588 | return false; | ||||
5589 | |||||
5590 | // Off/StartIdx is in the unit of bytes. So we need to convert to number of | ||||
5591 | // elements. Simply bail out if that isn't possible. | ||||
5592 | if ((StartIdx % ElementSizeInBytes) != 0) | ||||
5593 | return false; | ||||
5594 | |||||
5595 | Offset += StartIdx / ElementSizeInBytes; | ||||
5596 | ConstantDataArray *Array = nullptr; | ||||
5597 | ArrayType *ArrayTy = nullptr; | ||||
5598 | |||||
5599 | if (GV->getInitializer()->isNullValue()) { | ||||
5600 | Type *GVTy = GV->getValueType(); | ||||
5601 | uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue(); | ||||
5602 | uint64_t Length = SizeInBytes / ElementSizeInBytes; | ||||
5603 | |||||
5604 | Slice.Array = nullptr; | ||||
5605 | Slice.Offset = 0; | ||||
5606 | // Return an empty Slice for undersized constants to let callers | ||||
5607 | // transform even undefined library calls into simpler, well-defined | ||||
5608 | // expressions. This is preferable to making the calls, although it | ||||
5609 | // prevents sanitizers from detecting such calls. | ||||
5610 | Slice.Length = Length < Offset ? 0 : Length - Offset; | ||||
5611 | return true; | ||||
5612 | } | ||||
5613 | |||||
5614 | auto *Init = const_cast<Constant *>(GV->getInitializer()); | ||||
5615 | if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) { | ||||
5616 | Type *InitElTy = ArrayInit->getElementType(); | ||||
5617 | if (InitElTy->isIntegerTy(ElementSize)) { | ||||
5618 | // If Init is an initializer for an array of the expected type | ||||
5619 | // and size, use it as is. | ||||
5620 | Array = ArrayInit; | ||||
5621 | ArrayTy = ArrayInit->getType(); | ||||
5622 | } | ||||
5623 | } | ||||
5624 | |||||
5625 | if (!Array) { | ||||
5626 | if (ElementSize != 8) | ||||
5627 | // TODO: Handle conversions to larger integral types. | ||||
5628 | return false; | ||||
5629 | |||||
5630 | // Otherwise extract the portion of the initializer starting | ||||
5631 | // at Offset as an array of bytes, and reset Offset. | ||||
5632 | Init = ReadByteArrayFromGlobal(GV, Offset); | ||||
5633 | if (!Init) | ||||
5634 | return false; | ||||
5635 | |||||
5636 | Offset = 0; | ||||
5637 | Array = dyn_cast<ConstantDataArray>(Init); | ||||
5638 | ArrayTy = dyn_cast<ArrayType>(Init->getType()); | ||||
5639 | } | ||||
5640 | |||||
5641 | uint64_t NumElts = ArrayTy->getArrayNumElements(); | ||||
5642 | if (Offset > NumElts) | ||||
5643 | return false; | ||||
5644 | |||||
5645 | Slice.Array = Array; | ||||
5646 | Slice.Offset = Offset; | ||||
5647 | Slice.Length = NumElts - Offset; | ||||
5648 | return true; | ||||
5649 | } | ||||
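
// Editorial sketch (not part of ValueTracking.cpp): a minimal, hedged
// illustration of how a caller might exercise getConstantDataArrayInfo. A
// constant GEP into a constant global string yields a slice whose
// Offset/Length reflect the byte offset. All names below are local to this
// sketch.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

static void sketchConstantDataArrayInfo() {
  llvm::LLVMContext Ctx;
  llvm::Module M("sketch", Ctx);
  // @s = private constant [6 x i8] c"hello\00"
  llvm::Constant *Init = llvm::ConstantDataArray::getString(Ctx, "hello");
  auto *GV = new llvm::GlobalVariable(M, Init->getType(), /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Init,
                                      "s");
  // &s[2] as a constant inbounds GEP.
  llvm::Type *I64 = llvm::Type::getInt64Ty(Ctx);
  llvm::Constant *Idx[] = {llvm::ConstantInt::get(I64, 0),
                           llvm::ConstantInt::get(I64, 2)};
  llvm::Constant *GEP = llvm::ConstantExpr::getInBoundsGetElementPtr(
      Init->getType(), GV, Idx);
  llvm::ConstantDataArraySlice Slice;
  if (llvm::getConstantDataArrayInfo(GEP, Slice, /*ElementSize=*/8)) {
    // Slice.Array is the c"hello\00" initializer, Slice.Offset == 2, and
    // Slice.Length == 4 (the bytes "llo" plus the trailing nul).
  }
}
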
5650 | |||||
5651 | /// Extract bytes from the initializer of the constant array V, which need | ||||
5652 | /// not be a nul-terminated string. On success, store the bytes in Str and | ||||
5653 | /// return true. When TrimAtNul is set, Str will contain only the bytes up | ||||
5654 | /// to but not including the first nul. Return false on failure. | ||||
5655 | bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, | ||||
5656 | bool TrimAtNul) { | ||||
5657 | ConstantDataArraySlice Slice; | ||||
5658 | if (!getConstantDataArrayInfo(V, Slice, 8)) | ||||
5659 | return false; | ||||
5660 | |||||
5661 | if (Slice.Array == nullptr) { | ||||
5662 | if (TrimAtNul) { | ||||
5663 | // Return a nul-terminated string even for an empty Slice. This is | ||||
5664 | // safe because all existing SimplifyLibcalls callers require string | ||||
5665 | // arguments and the behavior of the functions they fold is undefined | ||||
5666 | // otherwise. Folding the calls this way is preferable to making | ||||
5667 | // the undefined library calls, even though it prevents sanitizers | ||||
5668 | // from reporting such calls. | ||||
5669 | Str = StringRef(); | ||||
5670 | return true; | ||||
5671 | } | ||||
5672 | if (Slice.Length == 1) { | ||||
5673 | Str = StringRef("", 1); | ||||
5674 | return true; | ||||
5675 | } | ||||
5676 | // We cannot instantiate a StringRef as we do not have an appropriate string | ||||
5677 | // of 0s at hand. | ||||
5678 | return false; | ||||
5679 | } | ||||
5680 | |||||
5681 | // Start out with the entire array in the StringRef. | ||||
5682 | Str = Slice.Array->getAsString(); | ||||
5683 | // Skip over 'offset' bytes. | ||||
5684 | Str = Str.substr(Slice.Offset); | ||||
5685 | |||||
5686 | if (TrimAtNul) { | ||||
5687 | // Trim off the \0 and anything after it. If the array is not nul | ||||
5688 | // terminated, we just return the rest of the array. The client may know | ||||
5689 | // some other way that the string is length-bound. | ||||
5690 | Str = Str.substr(0, Str.find('\0')); | ||||
5691 | } | ||||
5692 | return true; | ||||
5693 | } | ||||
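
// Editorial sketch, continuing the one above: getConstantStringInfo views the
// same bytes as a string. GEPIntoS stands for the &s[2] constant built in the
// previous sketch (an assumption of this example).
#include "llvm/Analysis/ValueTracking.h"
#include <cassert>

static void sketchConstantStringInfo(const llvm::Value *GEPIntoS) {
  llvm::StringRef S;
  if (llvm::getConstantStringInfo(GEPIntoS, S)) // TrimAtNul defaults to true.
    assert(S == "llo"); // the nul and anything after it are trimmed
  if (llvm::getConstantStringInfo(GEPIntoS, S, /*TrimAtNul=*/false))
    assert(S.size() == 4); // the raw slice "llo\0" is kept
}
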
5694 | |||||
5695 | // These next two are very similar to the above, but also look through PHI | ||||
5696 | // nodes. | ||||
5697 | // TODO: See if we can integrate these two together. | ||||
5698 | |||||
5699 | /// If we can compute the length of the string pointed to by | ||||
5700 | /// the specified pointer, return 'len+1'. If we can't, return 0. | ||||
5701 | static uint64_t GetStringLengthH(const Value *V, | ||||
5702 | SmallPtrSetImpl<const PHINode*> &PHIs, | ||||
5703 | unsigned CharSize) { | ||||
5704 | // Look through noop bitcast instructions. | ||||
5705 | V = V->stripPointerCasts(); | ||||
5706 | |||||
5707 | // If this is a PHI node, there are two cases: either we have already seen it | ||||
5708 | // or we haven't. | ||||
5709 | if (const PHINode *PN = dyn_cast<PHINode>(V)) { | ||||
5710 | if (!PHIs.insert(PN).second) | ||||
5711 | return ~0ULL; // already in the set. | ||||
5712 | |||||
5713 | // If it was new, see if all the input strings are the same length. | ||||
5714 | uint64_t LenSoFar = ~0ULL; | ||||
5715 | for (Value *IncValue : PN->incoming_values()) { | ||||
5716 | uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize); | ||||
5717 | if (Len == 0) return 0; // Unknown length -> unknown. | ||||
5718 | |||||
5719 | if (Len == ~0ULL) continue; | ||||
5720 | |||||
5721 | if (Len != LenSoFar && LenSoFar != ~0ULL) | ||||
5722 | return 0; // Disagree -> unknown. | ||||
5723 | LenSoFar = Len; | ||||
5724 | } | ||||
5725 | |||||
5726 | // Success, all agree. | ||||
5727 | return LenSoFar; | ||||
5728 | } | ||||
5729 | |||||
5730 | // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) | ||||
5731 | if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { | ||||
5732 | uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize); | ||||
5733 | if (Len1 == 0) return 0; | ||||
5734 | uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize); | ||||
5735 | if (Len2 == 0) return 0; | ||||
5736 | if (Len1 == ~0ULL) return Len2; | ||||
5737 | if (Len2 == ~0ULL) return Len1; | ||||
5738 | if (Len1 != Len2) return 0; | ||||
5739 | return Len1; | ||||
5740 | } | ||||
5741 | |||||
5742 | // Otherwise, see if we can read the string. | ||||
5743 | ConstantDataArraySlice Slice; | ||||
5744 | if (!getConstantDataArrayInfo(V, Slice, CharSize)) | ||||
5745 | return 0; | ||||
5746 | |||||
5747 | if (Slice.Array == nullptr) | ||||
5748 | // Zeroinitializer (including an empty one). | ||||
5749 | return 1; | ||||
5750 | |||||
5751 | // Search for the first nul character. Return a conservative result even | ||||
5752 | // when there is no nul. This is safe since otherwise the string function | ||||
5753 | // being folded (such as strlen) is undefined, and folding can be | ||||
5754 | // preferable to making the undefined library call. | ||||
5755 | unsigned NullIndex = 0; | ||||
5756 | for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) { | ||||
5757 | if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0) | ||||
5758 | break; | ||||
5759 | } | ||||
5760 | |||||
5761 | return NullIndex + 1; | ||||
5762 | } | ||||
5763 | |||||
5764 | /// If we can compute the length of the string pointed to by | ||||
5765 | /// the specified pointer, return 'len+1'. If we can't, return 0. | ||||
5766 | uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) { | ||||
5767 | if (!V->getType()->isPointerTy()) | ||||
5768 | return 0; | ||||
5769 | |||||
5770 | SmallPtrSet<const PHINode*, 32> PHIs; | ||||
5771 | uint64_t Len = GetStringLengthH(V, PHIs, CharSize); | ||||
5772 | // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return | ||||
5773 | // an empty string as a length. | ||||
5774 | return Len == ~0ULL ? 1 : Len; | ||||
5775 | } | ||||
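
// Editorial sketch of the len+1 convention, reusing GEPIntoS from the sketch
// above: for the &s[2] pointer into c"hello\00", the only computable answer
// is strlen("llo") + 1; a pointer with unknown pointee bytes yields 0.
#include "llvm/Analysis/ValueTracking.h"
#include <cassert>
#include <cstdint>

static void sketchGetStringLength(const llvm::Value *GEPIntoS) {
  uint64_t Len = llvm::GetStringLength(GEPIntoS); // CharSize defaults to 8.
  assert(Len == 4); // "llo" plus the terminating nul
}
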
5776 | |||||
5777 | const Value * | ||||
5778 | llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call, | ||||
5779 | bool MustPreserveNullness) { | ||||
5780 | assert(Call && | ||||
5781 | "getArgumentAliasingToReturnedPointer only works on nonnull calls"); | ||||
5782 | if (const Value *RV = Call->getReturnedArgOperand()) | ||||
5783 | return RV; | ||||
5784 | // This can be used only as an aliasing property. | ||||
5785 | if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( | ||||
5786 | Call, MustPreserveNullness)) | ||||
5787 | return Call->getArgOperand(0); | ||||
5788 | return nullptr; | ||||
5789 | } | ||||
5790 | |||||
5791 | bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( | ||||
5792 | const CallBase *Call, bool MustPreserveNullness) { | ||||
5793 | switch (Call->getIntrinsicID()) { | ||||
5794 | case Intrinsic::launder_invariant_group: | ||||
5795 | case Intrinsic::strip_invariant_group: | ||||
5796 | case Intrinsic::aarch64_irg: | ||||
5797 | case Intrinsic::aarch64_tagp: | ||||
5798 | return true; | ||||
5799 | case Intrinsic::ptrmask: | ||||
5800 | return !MustPreserveNullness; | ||||
5801 | default: | ||||
5802 | return false; | ||||
5803 | } | ||||
5804 | } | ||||
5805 | |||||
5806 | /// \p PN defines a loop-variant pointer to an object. Check if the | ||||
5807 | /// previous iteration of the loop was referring to the same object as \p PN. | ||||
5808 | static bool isSameUnderlyingObjectInLoop(const PHINode *PN, | ||||
5809 | const LoopInfo *LI) { | ||||
5810 | // Find the loop-defined value. | ||||
5811 | Loop *L = LI->getLoopFor(PN->getParent()); | ||||
5812 | if (PN->getNumIncomingValues() != 2) | ||||
5813 | return true; | ||||
5814 | |||||
5815 | // Find the value from previous iteration. | ||||
5816 | auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); | ||||
5817 | if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) | ||||
5818 | PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); | ||||
5819 | if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) | ||||
5820 | return true; | ||||
5821 | |||||
5822 | // If a new pointer is loaded in the loop, the pointer references a different | ||||
5823 | // object in every iteration. E.g.: | ||||
5824 | // for (i) | ||||
5825 | // int *p = a[i]; | ||||
5826 | // ... | ||||
5827 | if (auto *Load = dyn_cast<LoadInst>(PrevValue)) | ||||
5828 | if (!L->isLoopInvariant(Load->getPointerOperand())) | ||||
5829 | return false; | ||||
5830 | return true; | ||||
5831 | } | ||||
5832 | |||||
5833 | const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) { | ||||
5834 | if (!V->getType()->isPointerTy()) | ||||
5835 | return V; | ||||
5836 | for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { | ||||
5837 | if (auto *GEP = dyn_cast<GEPOperator>(V)) { | ||||
5838 | V = GEP->getPointerOperand(); | ||||
5839 | } else if (Operator::getOpcode(V) == Instruction::BitCast || | ||||
5840 | Operator::getOpcode(V) == Instruction::AddrSpaceCast) { | ||||
5841 | V = cast<Operator>(V)->getOperand(0); | ||||
5842 | if (!V->getType()->isPointerTy()) | ||||
5843 | return V; | ||||
5844 | } else if (auto *GA = dyn_cast<GlobalAlias>(V)) { | ||||
5845 | if (GA->isInterposable()) | ||||
5846 | return V; | ||||
5847 | V = GA->getAliasee(); | ||||
5848 | } else { | ||||
5849 | if (auto *PHI = dyn_cast<PHINode>(V)) { | ||||
5850 | // Look through single-arg phi nodes created by LCSSA. | ||||
5851 | if (PHI->getNumIncomingValues() == 1) { | ||||
5852 | V = PHI->getIncomingValue(0); | ||||
5853 | continue; | ||||
5854 | } | ||||
5855 | } else if (auto *Call = dyn_cast<CallBase>(V)) { | ||||
5856 | // CaptureTracking knows about special capturing properties of some | ||||
5857 | // intrinsics like launder.invariant.group that can't be expressed with | ||||
5858 | // attributes, such as returning a pointer that aliases the argument. | ||||
5859 | // Because an analysis may assume that a nocapture pointer is never | ||||
5860 | // returned from a special intrinsic (the function would otherwise have | ||||
5861 | // to be marked with the returned attribute), it is crucial to use this | ||||
5862 | // helper so that we stay in sync with CaptureTracking. Not using it may | ||||
5863 | // cause miscompilations where two aliasing pointers are assumed to be | ||||
5864 | // noalias. | ||||
5865 | if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) { | ||||
5866 | V = RP; | ||||
5867 | continue; | ||||
5868 | } | ||||
5869 | } | ||||
5870 | |||||
5871 | return V; | ||||
5872 | } | ||||
5873 | assert(V->getType()->isPointerTy() && "Unexpected operand type!"); | ||||
5874 | } | ||||
5875 | return V; | ||||
5876 | } | ||||
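
// Editorial sketch: getUnderlyingObject strips GEPs and casts until it
// reaches an identifiable base object, here an alloca. Entry is assumed to be
// the entry block of some function under construction.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include <cassert>

static void sketchGetUnderlyingObject(llvm::BasicBlock *Entry) {
  llvm::IRBuilder<> B(Entry);
  auto *Arr = llvm::ArrayType::get(B.getInt8Ty(), 16);
  llvm::AllocaInst *A = B.CreateAlloca(Arr);
  llvm::Value *P = B.CreateConstInBoundsGEP2_64(Arr, A, 0, 4);
  assert(llvm::getUnderlyingObject(P) == A); // the GEP is looked through
}
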
5877 | |||||
5878 | void llvm::getUnderlyingObjects(const Value *V, | ||||
5879 | SmallVectorImpl<const Value *> &Objects, | ||||
5880 | LoopInfo *LI, unsigned MaxLookup) { | ||||
5881 | SmallPtrSet<const Value *, 4> Visited; | ||||
5882 | SmallVector<const Value *, 4> Worklist; | ||||
5883 | Worklist.push_back(V); | ||||
5884 | do { | ||||
5885 | const Value *P = Worklist.pop_back_val(); | ||||
5886 | P = getUnderlyingObject(P, MaxLookup); | ||||
5887 | |||||
5888 | if (!Visited.insert(P).second) | ||||
5889 | continue; | ||||
5890 | |||||
5891 | if (auto *SI = dyn_cast<SelectInst>(P)) { | ||||
5892 | Worklist.push_back(SI->getTrueValue()); | ||||
5893 | Worklist.push_back(SI->getFalseValue()); | ||||
5894 | continue; | ||||
5895 | } | ||||
5896 | |||||
5897 | if (auto *PN = dyn_cast<PHINode>(P)) { | ||||
5898 | // If this PHI changes the underlying object in every iteration of the | ||||
5899 | // loop, don't look through it. Consider: | ||||
5900 | // int **A; | ||||
5901 | // for (i) { | ||||
5902 | // Prev = Curr; // Prev = PHI (Prev_0, Curr) | ||||
5903 | // Curr = A[i]; | ||||
5904 | // *Prev, *Curr; | ||||
5905 | // | ||||
5906 | // Prev is tracking Curr one iteration behind so they refer to different | ||||
5907 | // underlying objects. | ||||
5908 | if (!LI || !LI->isLoopHeader(PN->getParent()) || | ||||
5909 | isSameUnderlyingObjectInLoop(PN, LI)) | ||||
5910 | append_range(Worklist, PN->incoming_values()); | ||||
5911 | continue; | ||||
5912 | } | ||||
5913 | |||||
5914 | Objects.push_back(P); | ||||
5915 | } while (!Worklist.empty()); | ||||
5916 | } | ||||
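
// Editorial sketch: unlike the single-result walk above, getUnderlyingObjects
// fans out through selects (and PHIs) and collects every possible base
// object. Cond is assumed to be a non-constant i1 value, so the select is not
// folded away.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include <cassert>

static void sketchGetUnderlyingObjects(llvm::BasicBlock *Entry,
                                       llvm::Value *Cond) {
  llvm::IRBuilder<> B(Entry);
  llvm::AllocaInst *A0 = B.CreateAlloca(B.getInt32Ty());
  llvm::AllocaInst *A1 = B.CreateAlloca(B.getInt32Ty());
  llvm::Value *Sel = B.CreateSelect(Cond, A0, A1);
  llvm::SmallVector<const llvm::Value *, 4> Objs;
  llvm::getUnderlyingObjects(Sel, Objs);
  assert(Objs.size() == 2); // both A0 and A1 are reported
}
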
5917 | |||||
5918 | /// This is the function that does the work of looking through basic | ||||
5919 | /// ptrtoint+arithmetic+inttoptr sequences. | ||||
5920 | static const Value *getUnderlyingObjectFromInt(const Value *V) { | ||||
5921 | do { | ||||
5922 | if (const Operator *U = dyn_cast<Operator>(V)) { | ||||
5923 | // If we find a ptrtoint, we can transfer control back to the | ||||
5924 | // regular pointer-based getUnderlyingObjects walk. | ||||
5925 | if (U->getOpcode() == Instruction::PtrToInt) | ||||
5926 | return U->getOperand(0); | ||||
5927 | // If we find an add of a constant, a multiplied value, or a phi, it's | ||||
5928 | // likely that the other operand will lead us to the base | ||||
5929 | // object. We don't have to worry about the case where the | ||||
5930 | // object address is somehow being computed by the multiply, | ||||
5931 | // because our callers only care when the result is an | ||||
5932 | // identifiable object. | ||||
5933 | if (U->getOpcode() != Instruction::Add || | ||||
5934 | (!isa<ConstantInt>(U->getOperand(1)) && | ||||
5935 | Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && | ||||
5936 | !isa<PHINode>(U->getOperand(1)))) | ||||
5937 | return V; | ||||
5938 | V = U->getOperand(0); | ||||
5939 | } else { | ||||
5940 | return V; | ||||
5941 | } | ||||
5942 | assert(V->getType()->isIntegerTy() && "Unexpected operand type!"); | ||||
5943 | } while (true); | ||||
5944 | } | ||||
5945 | |||||
5946 | /// This is a wrapper around getUnderlyingObjects and adds support for basic | ||||
5947 | /// ptrtoint+arithmetic+inttoptr sequences. | ||||
5948 | /// It returns false if an unidentified object is found by getUnderlyingObjects. | ||||
5949 | bool llvm::getUnderlyingObjectsForCodeGen(const Value *V, | ||||
5950 | SmallVectorImpl<Value *> &Objects) { | ||||
5951 | SmallPtrSet<const Value *, 16> Visited; | ||||
5952 | SmallVector<const Value *, 4> Working(1, V); | ||||
5953 | do { | ||||
5954 | V = Working.pop_back_val(); | ||||
5955 | |||||
5956 | SmallVector<const Value *, 4> Objs; | ||||
5957 | getUnderlyingObjects(V, Objs); | ||||
5958 | |||||
5959 | for (const Value *V : Objs) { | ||||
5960 | if (!Visited.insert(V).second) | ||||
5961 | continue; | ||||
5962 | if (Operator::getOpcode(V) == Instruction::IntToPtr) { | ||||
5963 | const Value *O = | ||||
5964 | getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0)); | ||||
5965 | if (O->getType()->isPointerTy()) { | ||||
5966 | Working.push_back(O); | ||||
5967 | continue; | ||||
5968 | } | ||||
5969 | } | ||||
5970 | // If getUnderlyingObjects fails to find an identifiable object, | ||||
5971 | // getUnderlyingObjectsForCodeGen also fails for safety. | ||||
5972 | if (!isIdentifiedObject(V)) { | ||||
5973 | Objects.clear(); | ||||
5974 | return false; | ||||
5975 | } | ||||
5976 | Objects.push_back(const_cast<Value *>(V)); | ||||
5977 | } | ||||
5978 | } while (!Working.empty()); | ||||
5979 | return true; | ||||
5980 | } | ||||
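
// Editorial worked IR shape: the ptrtoint+add+inttoptr walk above lets the
// CodeGen variant see through integer round-trips to the base object, e.g.
//   %i = ptrtoint ptr @g to i64
//   %j = add i64 %i, 8
//   %p = inttoptr i64 %j to ptr
// getUnderlyingObjectsForCodeGen(%p, Objs) yields {@g}; if any base is not an
// identifiable object, it clears Objs and returns false instead.
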
5981 | |||||
5982 | AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) { | ||||
5983 | AllocaInst *Result = nullptr; | ||||
5984 | SmallPtrSet<Value *, 4> Visited; | ||||
5985 | SmallVector<Value *, 4> Worklist; | ||||
5986 | |||||
5987 | auto AddWork = [&](Value *V) { | ||||
5988 | if (Visited.insert(V).second) | ||||
5989 | Worklist.push_back(V); | ||||
5990 | }; | ||||
5991 | |||||
5992 | AddWork(V); | ||||
5993 | do { | ||||
5994 | V = Worklist.pop_back_val(); | ||||
5995 | assert(Visited.count(V)); | ||||
5996 | |||||
5997 | if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { | ||||
5998 | if (Result && Result != AI) | ||||
5999 | return nullptr; | ||||
6000 | Result = AI; | ||||
6001 | } else if (CastInst *CI = dyn_cast<CastInst>(V)) { | ||||
6002 | AddWork(CI->getOperand(0)); | ||||
6003 | } else if (PHINode *PN = dyn_cast<PHINode>(V)) { | ||||
6004 | for (Value *IncValue : PN->incoming_values()) | ||||
6005 | AddWork(IncValue); | ||||
6006 | } else if (auto *SI = dyn_cast<SelectInst>(V)) { | ||||
6007 | AddWork(SI->getTrueValue()); | ||||
6008 | AddWork(SI->getFalseValue()); | ||||
6009 | } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) { | ||||
6010 | if (OffsetZero && !GEP->hasAllZeroIndices()) | ||||
6011 | return nullptr; | ||||
6012 | AddWork(GEP->getPointerOperand()); | ||||
6013 | } else if (CallBase *CB = dyn_cast<CallBase>(V)) { | ||||
6014 | Value *Returned = CB->getReturnedArgOperand(); | ||||
6015 | if (Returned) | ||||
6016 | AddWork(Returned); | ||||
6017 | else | ||||
6018 | return nullptr; | ||||
6019 | } else { | ||||
6020 | return nullptr; | ||||
6021 | } | ||||
6022 | } while (!Worklist.empty()); | ||||
6023 | |||||
6024 | return Result; | ||||
6025 | } | ||||
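
// Editorial sketch: findAllocaForValue walks casts, GEPs, selects, and PHIs
// back to a single alloca; with OffsetZero it additionally requires that any
// GEPs on the way have all-zero indices.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include <cassert>

static void sketchFindAllocaForValue(llvm::BasicBlock *Entry) {
  llvm::IRBuilder<> B(Entry);
  auto *Arr = llvm::ArrayType::get(B.getInt8Ty(), 8);
  llvm::AllocaInst *A = B.CreateAlloca(Arr);
  llvm::Value *P = B.CreateConstInBoundsGEP2_64(Arr, A, 0, 0);
  assert(llvm::findAllocaForValue(P) == A);
  assert(llvm::findAllocaForValue(P, /*OffsetZero=*/true) == A); // indices 0,0
}
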
6026 | |||||
6027 | static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper( | ||||
6028 | const Value *V, bool AllowLifetime, bool AllowDroppable) { | ||||
6029 | for (const User *U : V->users()) { | ||||
6030 | const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); | ||||
6031 | if (!II) | ||||
6032 | return false; | ||||
6033 | |||||
6034 | if (AllowLifetime && II->isLifetimeStartOrEnd()) | ||||
6035 | continue; | ||||
6036 | |||||
6037 | if (AllowDroppable && II->isDroppable()) | ||||
6038 | continue; | ||||
6039 | |||||
6040 | return false; | ||||
6041 | } | ||||
6042 | return true; | ||||
6043 | } | ||||
6044 | |||||
6045 | bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { | ||||
6046 | return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( | ||||
6047 | V, /* AllowLifetime */ true, /* AllowDroppable */ false); | ||||
6048 | } | ||||
6049 | bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) { | ||||
6050 | return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( | ||||
6051 | V, /* AllowLifetime */ true, /* AllowDroppable */ true); | ||||
6052 | } | ||||
6053 | |||||
6054 | bool llvm::mustSuppressSpeculation(const LoadInst &LI) { | ||||
6055 | if (!LI.isUnordered()) | ||||
6056 | return true; | ||||
6057 | const Function &F = *LI.getFunction(); | ||||
6058 | // Speculative load may create a race that did not exist in the source. | ||||
6059 | return F.hasFnAttribute(Attribute::SanitizeThread) || | ||||
6060 | // Speculative load may load data from dirty regions. | ||||
6061 | F.hasFnAttribute(Attribute::SanitizeAddress) || | ||||
6062 | F.hasFnAttribute(Attribute::SanitizeHWAddress); | ||||
6063 | } | ||||
6064 | |||||
6065 | bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst, | ||||
6066 | const Instruction *CtxI, | ||||
6067 | AssumptionCache *AC, | ||||
6068 | const DominatorTree *DT, | ||||
6069 | const TargetLibraryInfo *TLI) { | ||||
6070 | return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI, | ||||
6071 | AC, DT, TLI); | ||||
6072 | } | ||||
6073 | |||||
6074 | bool llvm::isSafeToSpeculativelyExecuteWithOpcode( | ||||
6075 | unsigned Opcode, const Instruction *Inst, const Instruction *CtxI, | ||||
6076 | AssumptionCache *AC, const DominatorTree *DT, | ||||
6077 | const TargetLibraryInfo *TLI) { | ||||
6078 | #ifndef NDEBUG | ||||
6079 | if (Inst->getOpcode() != Opcode) { | ||||
6080 | // Check that the operands are actually compatible with the Opcode override. | ||||
6081 | auto hasEqualReturnAndLeadingOperandTypes = | ||||
6082 | [](const Instruction *Inst, unsigned NumLeadingOperands) { | ||||
6083 | if (Inst->getNumOperands() < NumLeadingOperands) | ||||
6084 | return false; | ||||
6085 | const Type *ExpectedType = Inst->getType(); | ||||
6086 | for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp) | ||||
6087 | if (Inst->getOperand(ItOp)->getType() != ExpectedType) | ||||
6088 | return false; | ||||
6089 | return true; | ||||
6090 | }; | ||||
6091 | assert(!Instruction::isBinaryOp(Opcode) || | ||||
6092 | hasEqualReturnAndLeadingOperandTypes(Inst, 2)); | ||||
6093 | assert(!Instruction::isUnaryOp(Opcode) || | ||||
6094 | hasEqualReturnAndLeadingOperandTypes(Inst, 1)); | ||||
6095 | } | ||||
6096 | #endif | ||||
6097 | |||||
6098 | switch (Opcode) { | ||||
6099 | default: | ||||
6100 | return true; | ||||
6101 | case Instruction::UDiv: | ||||
6102 | case Instruction::URem: { | ||||
6103 | // x / y is undefined if y == 0. | ||||
6104 | const APInt *V; | ||||
6105 | if (match(Inst->getOperand(1), m_APInt(V))) | ||||
6106 | return *V != 0; | ||||
6107 | return false; | ||||
6108 | } | ||||
6109 | case Instruction::SDiv: | ||||
6110 | case Instruction::SRem: { | ||||
6111 | // x / y is undefined if y == 0 or x == INT_MIN and y == -1 | ||||
6112 | const APInt *Numerator, *Denominator; | ||||
6113 | if (!match(Inst->getOperand(1), m_APInt(Denominator))) | ||||
6114 | return false; | ||||
6115 | // We cannot hoist this division if the denominator is 0. | ||||
6116 | if (*Denominator == 0) | ||||
6117 | return false; | ||||
6118 | // It's safe to hoist if the denominator is not 0 or -1. | ||||
6119 | if (!Denominator->isAllOnes()) | ||||
6120 | return true; | ||||
6121 | // At this point we know that the denominator is -1. It is safe to hoist as | ||||
6122 | // long as we know that the numerator is not INT_MIN. | ||||
6123 | if (match(Inst->getOperand(0), m_APInt(Numerator))) | ||||
6124 | return !Numerator->isMinSignedValue(); | ||||
6125 | // The numerator *might* be MinSignedValue. | ||||
6126 | return false; | ||||
6127 | } | ||||
6128 | case Instruction::Load: { | ||||
6129 | const LoadInst *LI = dyn_cast<LoadInst>(Inst); | ||||
6130 | if (!LI) | ||||
6131 | return false; | ||||
6132 | if (mustSuppressSpeculation(*LI)) | ||||
6133 | return false; | ||||
6134 | const DataLayout &DL = LI->getModule()->getDataLayout(); | ||||
6135 | return isDereferenceableAndAlignedPointer(LI->getPointerOperand(), | ||||
6136 | LI->getType(), LI->getAlign(), DL, | ||||
6137 | CtxI, AC, DT, TLI); | ||||
6138 | } | ||||
6139 | case Instruction::Call: { | ||||
6140 | auto *CI = dyn_cast<const CallInst>(Inst); | ||||
6141 | if (!CI) | ||||
6142 | return false; | ||||
6143 | const Function *Callee = CI->getCalledFunction(); | ||||
6144 | |||||
6145 | // The called function could have undefined behavior or side-effects, even | ||||
6146 | // if marked readnone nounwind. | ||||
6147 | return Callee && Callee->isSpeculatable(); | ||||
6148 | } | ||||
6149 | case Instruction::VAArg: | ||||
6150 | case Instruction::Alloca: | ||||
6151 | case Instruction::Invoke: | ||||
6152 | case Instruction::CallBr: | ||||
6153 | case Instruction::PHI: | ||||
6154 | case Instruction::Store: | ||||
6155 | case Instruction::Ret: | ||||
6156 | case Instruction::Br: | ||||
6157 | case Instruction::IndirectBr: | ||||
6158 | case Instruction::Switch: | ||||
6159 | case Instruction::Unreachable: | ||||
6160 | case Instruction::Fence: | ||||
6161 | case Instruction::AtomicRMW: | ||||
6162 | case Instruction::AtomicCmpXchg: | ||||
6163 | case Instruction::LandingPad: | ||||
6164 | case Instruction::Resume: | ||||
6165 | case Instruction::CatchSwitch: | ||||
6166 | case Instruction::CatchPad: | ||||
6167 | case Instruction::CatchRet: | ||||
6168 | case Instruction::CleanupPad: | ||||
6169 | case Instruction::CleanupRet: | ||||
6170 | return false; // Misc instructions which have effects | ||||
6171 | } | ||||
6172 | } | ||||
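
// Editorial sketch of the sdiv rules above: dividing by the constant 7 is
// speculatable, while dividing by -1 is not unless the numerator is known not
// to be INT_MIN. X is assumed to be a non-constant i32 value.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include <cassert>

static void sketchSpeculatableDiv(llvm::BasicBlock *Entry, llvm::Value *X) {
  llvm::IRBuilder<> B(Entry);
  auto *Div7 = llvm::cast<llvm::Instruction>(B.CreateSDiv(X, B.getInt32(7)));
  assert(llvm::isSafeToSpeculativelyExecute(Div7));
  auto *DivM1 = llvm::cast<llvm::Instruction>(B.CreateSDiv(X, B.getInt32(-1)));
  assert(!llvm::isSafeToSpeculativelyExecute(DivM1)); // X might be INT_MIN
}
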
6173 | |||||
6174 | bool llvm::mayHaveNonDefUseDependency(const Instruction &I) { | ||||
6175 | if (I.mayReadOrWriteMemory()) | ||||
6176 | // Memory dependency possible | ||||
6177 | return true; | ||||
6178 | if (!isSafeToSpeculativelyExecute(&I)) | ||||
6179 | // Can't move above a maythrow call or infinite loop. Or if an | ||||
6180 | // inalloca alloca, above a stacksave call. | ||||
6181 | return true; | ||||
6182 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) | ||||
6183 | // 1) Can't reorder two inf-loop calls, even if readonly | ||||
6184 | // 2) Also can't reorder an inf-loop call below an instruction which isn't | ||||
6185 | // safe to speculatively execute. (Inverse of above) | ||||
6186 | return true; | ||||
6187 | return false; | ||||
6188 | } | ||||
6189 | |||||
6190 | /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult. | ||||
6191 | static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) { | ||||
6192 | switch (OR) { | ||||
6193 | case ConstantRange::OverflowResult::MayOverflow: | ||||
6194 | return OverflowResult::MayOverflow; | ||||
6195 | case ConstantRange::OverflowResult::AlwaysOverflowsLow: | ||||
6196 | return OverflowResult::AlwaysOverflowsLow; | ||||
6197 | case ConstantRange::OverflowResult::AlwaysOverflowsHigh: | ||||
6198 | return OverflowResult::AlwaysOverflowsHigh; | ||||
6199 | case ConstantRange::OverflowResult::NeverOverflows: | ||||
6200 | return OverflowResult::NeverOverflows; | ||||
6201 | } | ||||
6202 | llvm_unreachable("Unknown OverflowResult"); | ||||
6203 | } | ||||
6204 | |||||
6205 | /// Combine constant ranges from computeConstantRange() and computeKnownBits(). | ||||
6206 | static ConstantRange computeConstantRangeIncludingKnownBits( | ||||
6207 | const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth, | ||||
6208 | AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, | ||||
6209 | OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) { | ||||
6210 | KnownBits Known = computeKnownBits( | ||||
6211 | V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo); | ||||
6212 | ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned); | ||||
6213 | ConstantRange CR2 = computeConstantRange(V, ForSigned, UseInstrInfo); | ||||
6214 | ConstantRange::PreferredRangeType RangeType = | ||||
6215 | ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned; | ||||
6216 | return CR1.intersectWith(CR2, RangeType); | ||||
6217 | } | ||||
6218 | |||||
6219 | OverflowResult llvm::computeOverflowForUnsignedMul( | ||||
6220 | const Value *LHS, const Value *RHS, const DataLayout &DL, | ||||
6221 | AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, | ||||
6222 | bool UseInstrInfo) { | ||||
6223 | KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, | ||||
6224 | nullptr, UseInstrInfo); | ||||
6225 | KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, | ||||
6226 | nullptr, UseInstrInfo); | ||||
6227 | ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false); | ||||
6228 | ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false); | ||||
6229 | return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange)); | ||||
6230 | } | ||||
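
// Editorial worked example of the range logic above: if known bits bound both
// i8 operands below 16, the product is at most 15 * 15 = 225 < 256, and the
// mapped result is NeverOverflows.
#include "llvm/IR/ConstantRange.h"
#include <cassert>

static void sketchUnsignedMulRanges() {
  llvm::ConstantRange L(llvm::APInt(8, 0), llvm::APInt(8, 16)); // [0, 16)
  llvm::ConstantRange R = L;
  assert(L.unsignedMulMayOverflow(R) ==
         llvm::ConstantRange::OverflowResult::NeverOverflows);
}
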
6231 | |||||
6232 | OverflowResult | ||||
6233 | llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS, | ||||
6234 | const DataLayout &DL, AssumptionCache *AC, | ||||
6235 | const Instruction *CxtI, | ||||
6236 | const DominatorTree *DT, bool UseInstrInfo) { | ||||
6237 | // Multiplying n * m significant bits yields a result of n + m significant | ||||
6238 | // bits. If the total number of significant bits does not exceed the | ||||
6239 | // result bit width (minus 1), there is no overflow. | ||||
6240 | // This means if we have enough leading sign bits in the operands | ||||
6241 | // we can guarantee that the result does not overflow. | ||||
6242 | // Ref: "Hacker's Delight" by Henry Warren | ||||
6243 | unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); | ||||
6244 | |||||
6245 | // Note that underestimating the number of sign bits gives a more | ||||
6246 | // conservative answer. | ||||
6247 | unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) + | ||||
6248 | ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT); | ||||
6249 | |||||
6250 | // First handle the easy case: if we have enough sign bits there's | ||||
6251 | // definitely no overflow. | ||||
6252 | if (SignBits > BitWidth + 1) | ||||
6253 | return OverflowResult::NeverOverflows; | ||||
6254 | |||||
6255 | // There are two ambiguous cases where there can be no overflow: | ||||
6256 | // SignBits == BitWidth + 1 and | ||||
6257 | // SignBits == BitWidth | ||||
6258 | // The second case is difficult to check, therefore we only handle the | ||||
6259 | // first case. | ||||
6260 | if (SignBits == BitWidth + 1) { | ||||
6261 | // It overflows only when both arguments are negative and the true | ||||
6262 | // product is exactly the minimum negative number. | ||||
6263 | // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 | ||||
6264 | // For simplicity we just check if at least one side is not negative. | ||||
6265 | KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, | ||||
6266 | nullptr, UseInstrInfo); | ||||
6267 | KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, | ||||
6268 | nullptr, UseInstrInfo); | ||||
6269 | if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) | ||||
6270 | return OverflowResult::NeverOverflows; | ||||
6271 | } | ||||
6272 | return OverflowResult::MayOverflow; | ||||
6273 | } | ||||
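
// Editorial worked example of the sign-bit rule above, in plain C++: an i16
// value with 9 sign bits lies in [-128, 127] (8 value bits), and 9 + 9 = 18
// exceeds BitWidth + 1 = 17, so no product of two such values can overflow.
// Checking the corner values suffices, since a product over a box attains its
// extremes at the corners.
#include <cassert>
#include <cstdint>

static void sketchSignedMulSignBits() {
  for (int A : {-128, 127})
    for (int B : {-128, 127})
      assert(A * B >= INT16_MIN && A * B <= INT16_MAX);
}
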
6274 | |||||
6275 | OverflowResult llvm::computeOverflowForUnsignedAdd( | ||||
6276 | const Value *LHS, const Value *RHS, const DataLayout &DL, | ||||
6277 | AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, | ||||
6278 | bool UseInstrInfo) { | ||||
6279 | ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( | ||||
6280 | LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, | ||||
6281 | nullptr, UseInstrInfo); | ||||
6282 | ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( | ||||
6283 | RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, | ||||
6284 | nullptr, UseInstrInfo); | ||||
6285 | return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange)); | ||||
6286 | } | ||||
6287 | |||||
6288 | static OverflowResult computeOverflowForSignedAdd(const Value *LHS, | ||||
6289 | const Value *RHS, | ||||
6290 | const AddOperator *Add, | ||||
6291 | const DataLayout &DL, | ||||
6292 | AssumptionCache *AC, | ||||
6293 | const Instruction *CxtI, | ||||
6294 | const DominatorTree *DT) { | ||||
6295 | if (Add && Add->hasNoSignedWrap()) { | ||||
6296 | return OverflowResult::NeverOverflows; | ||||
6297 | } | ||||
6298 | |||||
6299 | // If LHS and RHS each have at least two sign bits, the addition will look | ||||
6300 | // like | ||||
6301 | // | ||||
6302 | // XX..... + | ||||
6303 | // YY..... | ||||
6304 | // | ||||
6305 | // If the carry into the most significant position is 0, X and Y can't both | ||||
6306 | // be 1 and therefore the carry out of the addition is also 0. | ||||
6307 | // | ||||
6308 | // If the carry into the most significant position is 1, X and Y can't both | ||||
6309 | // be 0 and therefore the carry out of the addition is also 1. | ||||
6310 | // | ||||
6311 | // Since the carry into the most significant position is always equal to | ||||
6312 | // the carry out of the addition, there is no signed overflow. | ||||
6313 | if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && | ||||
6314 | ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) | ||||
6315 | return OverflowResult::NeverOverflows; | ||||
6316 | |||||
6317 | ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( | ||||
6318 | LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); | ||||
6319 | ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( | ||||
6320 | RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); | ||||
6321 | OverflowResult OR = | ||||
6322 | mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange)); | ||||
6323 | if (OR != OverflowResult::MayOverflow) | ||||
6324 | return OR; | ||||
6325 | |||||
6326 | // The remaining code needs Add to be available. Return early if it is not. | ||||
6327 | if (!Add) | ||||
6328 | return OverflowResult::MayOverflow; | ||||
6329 | |||||
6330 | // If the sign of Add is the same as at least one of the operands, this add | ||||
6331 | // CANNOT overflow. If this can be determined from the known bits of the | ||||
6332 | // operands the above signedAddMayOverflow() check will have already done so. | ||||
6333 | // The only other way to improve on the known bits is from an assumption, so | ||||
6334 | // call computeKnownBitsFromAssume() directly. | ||||
6335 | bool LHSOrRHSKnownNonNegative = | ||||
6336 | (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative()); | ||||
6337 | bool LHSOrRHSKnownNegative = | ||||
6338 | (LHSRange.isAllNegative() || RHSRange.isAllNegative()); | ||||
6339 | if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { | ||||
6340 | KnownBits AddKnown(LHSRange.getBitWidth()); | ||||
6341 | computeKnownBitsFromAssume( | ||||
6342 | Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true)); | ||||
6343 | if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) || | ||||
6344 | (AddKnown.isNegative() && LHSOrRHSKnownNegative)) | ||||
6345 | return OverflowResult::NeverOverflows; | ||||
6346 | } | ||||
6347 | |||||
6348 | return OverflowResult::MayOverflow; | ||||
6349 | } | ||||
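
// Editorial worked example of the two-sign-bit argument above: with at least
// two sign bits, i8 operands lie in [-64, 63], so their sum stays within
// [-128, 126] and can never wrap.
#include <cassert>
#include <cstdint>

static void sketchSignedAddSignBits() {
  for (int A : {-64, 63})
    for (int B : {-64, 63})
      assert(A + B >= INT8_MIN && A + B <= INT8_MAX);
}
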
6350 | |||||
6351 | OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, | ||||
6352 | const Value *RHS, | ||||
6353 | const DataLayout &DL, | ||||
6354 | AssumptionCache *AC, | ||||
6355 | const Instruction *CxtI, | ||||
6356 | const DominatorTree *DT) { | ||||
6357 | // X - (X % ?) | ||||
6358 | // The remainder of a value can't have greater magnitude than itself, | ||||
6359 | // so the subtraction can't overflow. | ||||
6360 | |||||
6361 | // X - (X -nuw ?) | ||||
6362 | // In the minimal case, this would simplify to "?", so there's no subtract | ||||
6363 | // at all. But if this analysis is used to peek through casts, for example, | ||||
6364 | // then determining no-overflow may allow other transforms. | ||||
6365 | |||||
6366 | // TODO: There are other patterns like this. | ||||
6367 | // See simplifyICmpWithBinOpOnLHS() for candidates. | ||||
6368 | if (match(RHS, m_URem(m_Specific(LHS), m_Value())) || | ||||
6369 | match(RHS, m_NUWSub(m_Specific(LHS), m_Value()))) | ||||
6370 | if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT)) | ||||
6371 | return OverflowResult::NeverOverflows; | ||||
6372 | |||||
6373 | // Checking for conditions implied by dominating conditions may be expensive. | ||||
6374 | // Limit it to usub_with_overflow calls for now. | ||||
6375 | if (match(CxtI, | ||||
6376 | m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value()))) | ||||
6377 | if (auto C = | ||||
6378 | isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) { | ||||
6379 | if (*C) | ||||
6380 | return OverflowResult::NeverOverflows; | ||||
6381 | return OverflowResult::AlwaysOverflowsLow; | ||||
6382 | } | ||||
6383 | ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( | ||||
6384 | LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); | ||||
6385 | ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( | ||||
6386 | RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); | ||||
6387 | return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange)); | ||||
6388 | } | ||||
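
// Editorial worked example of the X - (X % ?) pattern above, in plain C++:
// the remainder never exceeds X, so the unsigned subtraction cannot wrap.
#include <cassert>

static void sketchUnsignedSubRem(unsigned X, unsigned M) {
  if (M == 0)
    return;
  unsigned R = X % M; // 0 <= R <= X by definition of urem
  assert(X - R <= X); // the subtraction cannot wrap below zero
}
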
6389 | |||||
6390 | OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, | ||||
6391 | const Value *RHS, | ||||
6392 | const DataLayout &DL, | ||||
6393 | AssumptionCache *AC, | ||||
6394 | const Instruction *CxtI, | ||||
6395 | const DominatorTree *DT) { | ||||
6396 | // X - (X % ?) | ||||
6397 | // The remainder of a value can't have greater magnitude than itself, | ||||
6398 | // so the subtraction can't overflow. | ||||
6399 | |||||
6400 | // X - (X -nsw ?) | ||||
6401 | // In the minimal case, this would simplify to "?", so there's no subtract | ||||
6402 | // at all. But if this analysis is used to peek through casts, for example, | ||||
6403 | // then determining no-overflow may allow other transforms. | ||||
6404 | if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) || | ||||
6405 | match(RHS, m_NSWSub(m_Specific(LHS), m_Value()))) | ||||
6406 | if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT)) | ||||
6407 | return OverflowResult::NeverOverflows; | ||||
6408 | |||||
6409 | // If LHS and RHS each have at least two sign bits, the subtraction | ||||
6410 | // cannot overflow. | ||||
6411 | if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && | ||||
6412 | ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) | ||||
6413 | return OverflowResult::NeverOverflows; | ||||
6414 | |||||
6415 | ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( | ||||
6416 | LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); | ||||
6417 | ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( | ||||
6418 | RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT); | ||||
6419 | return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange)); | ||||
6420 | } | ||||
6421 | |||||
6422 | bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, | ||||
6423 | const DominatorTree &DT) { | ||||
6424 | SmallVector<const BranchInst *, 2> GuardingBranches; | ||||
6425 | SmallVector<const ExtractValueInst *, 2> Results; | ||||
6426 | |||||
6427 | for (const User *U : WO->users()) { | ||||
6428 | if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) { | ||||
6429 | assert(EVI->getNumIndices() == 1 && "Obvious from CI's type"); | ||||
6430 | |||||
6431 | if (EVI->getIndices()[0] == 0) | ||||
6432 | Results.push_back(EVI); | ||||
6433 | else { | ||||
6434 | assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type"); | ||||
6435 | |||||
6436 | for (const auto *U : EVI->users()) | ||||
6437 | if (const auto *B = dyn_cast<BranchInst>(U)) { | ||||
6438 | assert(B->isConditional() && "How else is it using an i1?"); | ||||
6439 | GuardingBranches.push_back(B); | ||||
6440 | } | ||||
6441 | } | ||||
6442 | } else { | ||||
6443 | // We are using the aggregate directly in a way we don't want to analyze | ||||
6444 | // here (storing it to a global, say). | ||||
6445 | return false; | ||||
6446 | } | ||||
6447 | } | ||||
6448 | |||||
6449 | auto AllUsesGuardedByBranch = [&](const BranchInst *BI) { | ||||
6450 | BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1)); | ||||
6451 | if (!NoWrapEdge.isSingleEdge()) | ||||
6452 | return false; | ||||
6453 | |||||
6454 | // Check if all users of the add are provably no-wrap. | ||||
6455 | for (const auto *Result : Results) { | ||||
6456 | // If the extractvalue itself is not executed on overflow, then we don't | ||||
6457 | // need to check each use separately, since domination is transitive. | ||||
6458 | if (DT.dominates(NoWrapEdge, Result->getParent())) | ||||
6459 | continue; | ||||
6460 | |||||
6461 | for (const auto &RU : Result->uses()) | ||||
6462 | if (!DT.dominates(NoWrapEdge, RU)) | ||||
6463 | return false; | ||||
6464 | } | ||||
6465 | |||||
6466 | return true; | ||||
6467 | }; | ||||
6468 | |||||
6469 | return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch); | ||||
6470 | } | ||||
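
// Editorial worked IR shape for the guarded pattern recognized above:
//   %s   = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %val = extractvalue { i32, i1 } %s, 0
//   %ovf = extractvalue { i32, i1 } %s, 1
//   br i1 %ovf, label %trap, label %cont
// The edge to %cont is the no-wrap edge (successor 1 of the branch); if every
// use of %val is dominated by it, isOverflowIntrinsicNoWrap returns true.
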
6471 | |||||
6472 | /// Shifts return poison if shiftwidth is larger than the bitwidth. | ||||
6473 | static bool shiftAmountKnownInRange(const Value *ShiftAmount) { | ||||
6474 | auto *C = dyn_cast<Constant>(ShiftAmount); | ||||
6475 | if (!C) | ||||
6476 | return false; | ||||
6477 | |||||
6478 | // Shifts return poison if shiftwidth is larger than the bitwidth. | ||||
6479 | SmallVector<const Constant *, 4> ShiftAmounts; | ||||
6480 | if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) { | ||||
6481 | unsigned NumElts = FVTy->getNumElements(); | ||||
6482 | for (unsigned i = 0; i < NumElts; ++i) | ||||
6483 | ShiftAmounts.push_back(C->getAggregateElement(i)); | ||||
6484 | } else if (isa<ScalableVectorType>(C->getType())) | ||||
6485 | return false; // Can't tell, just return false to be safe | ||||
6486 | else | ||||
6487 | ShiftAmounts.push_back(C); | ||||
6488 | |||||
6489 | bool Safe = llvm::all_of(ShiftAmounts, [](const Constant *C) { | ||||
6490 | auto *CI = dyn_cast_or_null<ConstantInt>(C); | ||||
6491 | return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth()); | ||||
6492 | }); | ||||
6493 | |||||
6494 | return Safe; | ||||
6495 | } | ||||
6496 | |||||
6497 | static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly, | ||||
6498 | bool ConsiderFlagsAndMetadata) { | ||||
6499 | |||||
6500 | if (ConsiderFlagsAndMetadata && Op->hasPoisonGeneratingFlagsOrMetadata()) | ||||
6501 | return true; | ||||
6502 | |||||
6503 | unsigned Opcode = Op->getOpcode(); | ||||
6504 | |||||
6505 | // Check whether opcode is a poison/undef-generating operation | ||||
6506 | switch (Opcode) { | ||||
6507 | case Instruction::Shl: | ||||
6508 | case Instruction::AShr: | ||||
6509 | case Instruction::LShr: | ||||
6510 | return !shiftAmountKnownInRange(Op->getOperand(1)); | ||||
6511 | case Instruction::FPToSI: | ||||
6512 | case Instruction::FPToUI: | ||||
6513 | // fptosi/ui yields poison if the resulting value does not fit in the | ||||
6514 | // destination type. | ||||
6515 | return true; | ||||
6516 | case Instruction::Call: | ||||
6517 | if (auto *II = dyn_cast<IntrinsicInst>(Op)) { | ||||
6518 | switch (II->getIntrinsicID()) { | ||||
6519 | // TODO: Add more intrinsics. | ||||
6520 | case Intrinsic::ctlz: | ||||
6521 | case Intrinsic::cttz: | ||||
6522 | case Intrinsic::abs: | ||||
6523 | if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue()) | ||||
6524 | return false; | ||||
6525 | break; | ||||
6526 | case Intrinsic::ctpop: | ||||
6527 | case Intrinsic::bswap: | ||||
6528 | case Intrinsic::bitreverse: | ||||
6529 | case Intrinsic::fshl: | ||||
6530 | case Intrinsic::fshr: | ||||
6531 | case Intrinsic::smax: | ||||
6532 | case Intrinsic::smin: | ||||
6533 | case Intrinsic::umax: | ||||
6534 | case Intrinsic::umin: | ||||
6535 | case Intrinsic::ptrmask: | ||||
6536 | case Intrinsic::fptoui_sat: | ||||
6537 | case Intrinsic::fptosi_sat: | ||||
6538 | case Intrinsic::sadd_with_overflow: | ||||
6539 | case Intrinsic::ssub_with_overflow: | ||||
6540 | case Intrinsic::smul_with_overflow: | ||||
6541 | case Intrinsic::uadd_with_overflow: | ||||
6542 | case Intrinsic::usub_with_overflow: | ||||
6543 | case Intrinsic::umul_with_overflow: | ||||
6544 | case Intrinsic::sadd_sat: | ||||
6545 | case Intrinsic::uadd_sat: | ||||
6546 | case Intrinsic::ssub_sat: | ||||
6547 | case Intrinsic::usub_sat: | ||||
6548 | return false; | ||||
6549 | case Intrinsic::sshl_sat: | ||||
6550 | case Intrinsic::ushl_sat: | ||||
6551 | return !shiftAmountKnownInRange(II->getArgOperand(1)); | ||||
6552 | case Intrinsic::fma: | ||||
6553 | case Intrinsic::fmuladd: | ||||
6554 | case Intrinsic::sqrt: | ||||
6555 | case Intrinsic::powi: | ||||
6556 | case Intrinsic::sin: | ||||
6557 | case Intrinsic::cos: | ||||
6558 | case Intrinsic::pow: | ||||
6559 | case Intrinsic::log: | ||||
6560 | case Intrinsic::log10: | ||||
6561 | case Intrinsic::log2: | ||||
6562 | case Intrinsic::exp: | ||||
6563 | case Intrinsic::exp2: | ||||
6564 | case Intrinsic::fabs: | ||||
6565 | case Intrinsic::copysign: | ||||
6566 | case Intrinsic::floor: | ||||
6567 | case Intrinsic::ceil: | ||||
6568 | case Intrinsic::trunc: | ||||
6569 | case Intrinsic::rint: | ||||
6570 | case Intrinsic::nearbyint: | ||||
6571 | case Intrinsic::round: | ||||
6572 | case Intrinsic::roundeven: | ||||
6573 | case Intrinsic::fptrunc_round: | ||||
6574 | case Intrinsic::canonicalize: | ||||
6575 | case Intrinsic::arithmetic_fence: | ||||
6576 | case Intrinsic::minnum: | ||||
6577 | case Intrinsic::maxnum: | ||||
6578 | case Intrinsic::minimum: | ||||
6579 | case Intrinsic::maximum: | ||||
6580 | case Intrinsic::is_fpclass: | ||||
6581 | return false; | ||||
6582 | case Intrinsic::lround: | ||||
6583 | case Intrinsic::llround: | ||||
6584 | case Intrinsic::lrint: | ||||
6585 | case Intrinsic::llrint: | ||||
6586 | // If the value doesn't fit, an unspecified value is returned (but this | ||||
6587 | // is not poison). | ||||
6588 | return false; | ||||
6589 | } | ||||
6590 | } | ||||
6591 | [[fallthrough]]; | ||||
6592 | case Instruction::CallBr: | ||||
6593 | case Instruction::Invoke: { | ||||
6594 | const auto *CB = cast<CallBase>(Op); | ||||
6595 | return !CB->hasRetAttr(Attribute::NoUndef); | ||||
6596 | } | ||||
6597 | case Instruction::InsertElement: | ||||
6598 | case Instruction::ExtractElement: { | ||||
6599 | // If index exceeds the length of the vector, it returns poison | ||||
6600 | auto *VTy = cast<VectorType>(Op->getOperand(0)->getType()); | ||||
6601 | unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1; | ||||
6602 | auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp)); | ||||
6603 | if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue())) | ||||
6604 | return true; | ||||
6605 | return false; | ||||
6606 | } | ||||
6607 | case Instruction::ShuffleVector: { | ||||
6608 | // shufflevector may return undef. | ||||
6609 | if (PoisonOnly) | ||||
6610 | return false; | ||||
6611 | ArrayRef<int> Mask = isa<ConstantExpr>(Op) | ||||
6612 | ? cast<ConstantExpr>(Op)->getShuffleMask() | ||||
6613 | : cast<ShuffleVectorInst>(Op)->getShuffleMask(); | ||||
6614 | return is_contained(Mask, PoisonMaskElem); | ||||
6615 | } | ||||
6616 | case Instruction::FNeg: | ||||
6617 | case Instruction::PHI: | ||||
6618 | case Instruction::Select: | ||||
6619 | case Instruction::URem: | ||||
6620 | case Instruction::SRem: | ||||
6621 | case Instruction::ExtractValue: | ||||
6622 | case Instruction::InsertValue: | ||||
6623 | case Instruction::Freeze: | ||||
6624 | case Instruction::ICmp: | ||||
6625 | case Instruction::FCmp: | ||||
6626 | case Instruction::FAdd: | ||||
6627 | case Instruction::FSub: | ||||
6628 | case Instruction::FMul: | ||||
6629 | case Instruction::FDiv: | ||||
6630 | case Instruction::FRem: | ||||
6631 | return false; | ||||
6632 | case Instruction::GetElementPtr: | ||||
6633 | // inbounds is handled above | ||||
6634 | // TODO: what about inrange on constexpr? | ||||
6635 | return false; | ||||
6636 | default: { | ||||
6637 | const auto *CE = dyn_cast<ConstantExpr>(Op); | ||||
6638 | if (isa<CastInst>(Op) || (CE && CE->isCast())) | ||||
6639 | return false; | ||||
6640 | else if (Instruction::isBinaryOp(Opcode)) | ||||
6641 | return false; | ||||
6642 | // Be conservative and return true. | ||||
6643 | return true; | ||||
6644 | } | ||||
6645 | } | ||||
6646 | } | ||||
6647 | |||||
6648 | bool llvm::canCreateUndefOrPoison(const Operator *Op, | ||||
6649 | bool ConsiderFlagsAndMetadata) { | ||||
6650 | return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false, | ||||
6651 | ConsiderFlagsAndMetadata); | ||||
6652 | } | ||||
6653 | |||||
6654 | bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) { | ||||
6655 | return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true, | ||||
6656 | ConsiderFlagsAndMetadata); | ||||
6657 | } | ||||
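
// Editorial sketch: a plain add cannot create poison, but setting the nsw
// flag makes it poison-generating, which the ConsiderFlagsAndMetadata default
// observes. X and Y are assumed to be non-constant i32 values.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include <cassert>

static void sketchCanCreatePoison(llvm::BasicBlock *Entry, llvm::Value *X,
                                  llvm::Value *Y) {
  llvm::IRBuilder<> B(Entry);
  auto *Add = llvm::cast<llvm::BinaryOperator>(B.CreateAdd(X, Y));
  assert(!llvm::canCreatePoison(llvm::cast<llvm::Operator>(Add)));
  Add->setHasNoSignedWrap(true); // now an "add nsw"
  assert(llvm::canCreatePoison(llvm::cast<llvm::Operator>(Add)));
}
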
6658 | |||||
6659 | static bool directlyImpliesPoison(const Value *ValAssumedPoison, | ||||
6660 | const Value *V, unsigned Depth) { | ||||
6661 | if (ValAssumedPoison == V) | ||||
6662 | return true; | ||||
6663 | |||||
6664 | const unsigned MaxDepth = 2; | ||||
6665 | if (Depth >= MaxDepth) | ||||
6666 | return false; | ||||
6667 | |||||
6668 | if (const auto *I = dyn_cast<Instruction>(V)) { | ||||
6669 | if (any_of(I->operands(), [=](const Use &Op) { | ||||
6670 | return propagatesPoison(Op) && | ||||
6671 | directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1); | ||||
6672 | })) | ||||
6673 | return true; | ||||
6674 | |||||
6675 | // V = extractvalue V0, idx | ||||
6676 | // V2 = extractvalue V0, idx2 | ||||
6677 | // V0's elements are all poison or not. (e.g., add_with_overflow) | ||||
6678 | const WithOverflowInst *II; | ||||
6679 | if (match(I, m_ExtractValue(m_WithOverflowInst(II))) && | ||||
6680 | (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) || | ||||
6681 | llvm::is_contained(II->args(), ValAssumedPoison))) | ||||
6682 | return true; | ||||
6683 | } | ||||
6684 | return false; | ||||
6685 | } | ||||
6686 | |||||
6687 | static bool impliesPoison(const Value *ValAssumedPoison, const Value *V, | ||||
6688 | unsigned Depth) { | ||||
6689 | if (isGuaranteedNotToBePoison(ValAssumedPoison)) | ||||
6690 | return true; | ||||
6691 | |||||
6692 | if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0)) | ||||
6693 | return true; | ||||
6694 | |||||
6695 | const unsigned MaxDepth = 2; | ||||
6696 | if (Depth >= MaxDepth) | ||||
6697 | return false; | ||||
6698 | |||||
6699 | const auto *I = dyn_cast<Instruction>(ValAssumedPoison); | ||||
6700 | if (I && !canCreatePoison(cast<Operator>(I))) { | ||||
6701 | return all_of(I->operands(), [=](const Value *Op) { | ||||
6702 | return impliesPoison(Op, V, Depth + 1); | ||||
6703 | }); | ||||
6704 | } | ||||
6705 | return false; | ||||
6706 | } | ||||
6707 | |||||
6708 | bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) { | ||||
6709 | return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0); | ||||
6710 | } | ||||
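
// Editorial worked shape: given  %a = add i32 %x, 1,  impliesPoison(%x, %a)
// is true, because add propagates poison from its operands, so %x being
// poison forces %a to be poison as well.
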
6711 | |||||
6712 | static bool programUndefinedIfUndefOrPoison(const Value *V, | ||||
6713 | bool PoisonOnly); | ||||
6714 | |||||
6715 | static bool isGuaranteedNotToBeUndefOrPoison(const Value *V, | ||||
6716 | AssumptionCache *AC, | ||||
6717 | const Instruction *CtxI, | ||||
6718 | const DominatorTree *DT, | ||||
6719 | unsigned Depth, bool PoisonOnly) { | ||||
6720 | if (Depth >= MaxAnalysisRecursionDepth) | ||||
6721 | return false; | ||||
6722 | |||||
6723 | if (isa<MetadataAsValue>(V)) | ||||
6724 | return false; | ||||
6725 | |||||
6726 | if (const auto *A = dyn_cast<Argument>(V)) { | ||||
6727 | if (A->hasAttribute(Attribute::NoUndef) || | ||||
6728 | A->hasAttribute(Attribute::Dereferenceable) || | ||||
6729 | A->hasAttribute(Attribute::DereferenceableOrNull)) | ||||
6730 | return true; | ||||
6731 | } | ||||
6732 | |||||
6733 | if (auto *C = dyn_cast<Constant>(V)) { | ||||
6734 | if (isa<UndefValue>(C)) | ||||
6735 | return PoisonOnly && !isa<PoisonValue>(C); | ||||
6736 | |||||
6737 | if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) || | ||||
6738 | isa<ConstantPointerNull>(C) || isa<Function>(C)) | ||||
6739 | return true; | ||||
6740 | |||||
6741 | if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C)) | ||||
6742 | return (PoisonOnly ? !C->containsPoisonElement() | ||||
6743 | : !C->containsUndefOrPoisonElement()) && | ||||
6744 | !C->containsConstantExpression(); | ||||
6745 | } | ||||
6746 | |||||
6747 | // Strip cast operations from a pointer value. | ||||
6748 | // Note that stripPointerCastsSameRepresentation can strip off getelementptr | ||||
6749 | // inbounds with zero offset. To guarantee that the result isn't poison, the | ||||
6750 | // stripped pointer is checked: it has to point into an allocated object or | ||||
6751 | // be null, which ensures that `inbounds` getelementptrs with a zero offset | ||||
6752 | // could not have produced poison. | ||||
6753 | // It can also strip off addrspacecasts that do not change the bit | ||||
6754 | // representation; we consider such an addrspacecast equivalent to a no-op. | ||||
6755 | auto *StrippedV = V->stripPointerCastsSameRepresentation(); | ||||
6756 | if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) || | ||||
6757 | isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV)) | ||||
6758 | return true; | ||||
6759 | |||||
6760 | auto OpCheck = [&](const Value *V) { | ||||
6761 | return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1, | ||||
6762 | PoisonOnly); | ||||
6763 | }; | ||||
6764 | |||||
6765 | if (auto *Opr = dyn_cast<Operator>(V)) { | ||||
6766 | // If the value is a freeze instruction, then it can never | ||||
6767 | // be undef or poison. | ||||
6768 | if (isa<FreezeInst>(V)) | ||||
6769 | return true; | ||||
6770 | |||||
6771 | if (const auto *CB = dyn_cast<CallBase>(V)) { | ||||
6772 | if (CB->hasRetAttr(Attribute::NoUndef)) | ||||
6773 | return true; | ||||
6774 | } | ||||
6775 | |||||
6776 | if (const auto *PN = dyn_cast<PHINode>(V)) { | ||||
6777 | unsigned Num = PN->getNumIncomingValues(); | ||||
6778 | bool IsWellDefined = true; | ||||
6779 | for (unsigned i = 0; i < Num; ++i) { | ||||
6780 | auto *TI = PN->getIncomingBlock(i)->getTerminator(); | ||||
6781 | if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI, | ||||
6782 | DT, Depth + 1, PoisonOnly)) { | ||||
6783 | IsWellDefined = false; | ||||
6784 | break; | ||||
6785 | } | ||||
6786 | } | ||||
6787 | if (IsWellDefined) | ||||
6788 | return true; | ||||
6789 | } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck)) | ||||
6790 | return true; | ||||
6791 | } | ||||
6792 | |||||
6793 | if (auto *I = dyn_cast<LoadInst>(V)) | ||||
6794 | if (I->hasMetadata(LLVMContext::MD_noundef) || | ||||
6795 | I->hasMetadata(LLVMContext::MD_dereferenceable) || | ||||
6796 | I->hasMetadata(LLVMContext::MD_dereferenceable_or_null)) | ||||
6797 | return true; | ||||
6798 | |||||
6799 | if (programUndefinedIfUndefOrPoison(V, PoisonOnly)) | ||||
6800 | return true; | ||||
6801 | |||||
6802 | // CtxI may be null or a cloned instruction. | ||||
6803 | if (!CtxI || !CtxI->getParent() || !DT) | ||||
6804 | return false; | ||||
6805 | |||||
6806 | auto *DNode = DT->getNode(CtxI->getParent()); | ||||
6807 | if (!DNode) | ||||
6808 | // Unreachable block | ||||
6809 | return false; | ||||
6810 | |||||
6811 | // If V is used as a branch condition before reaching CtxI, V cannot be | ||||
6812 | // undef or poison. | ||||
6813 | // br V, BB1, BB2 | ||||
6814 | // BB1: | ||||
6815 | // CtxI ; V cannot be undef or poison here | ||||
6816 | auto *Dominator = DNode->getIDom(); | ||||
6817 | while (Dominator) { | ||||
6818 | auto *TI = Dominator->getBlock()->getTerminator(); | ||||
6819 | |||||
6820 | Value *Cond = nullptr; | ||||
6821 | if (auto BI = dyn_cast_or_null<BranchInst>(TI)) { | ||||
6822 | if (BI->isConditional()) | ||||
6823 | Cond = BI->getCondition(); | ||||
6824 | } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) { | ||||
6825 | Cond = SI->getCondition(); | ||||
6826 | } | ||||
6827 | |||||
6828 | if (Cond) { | ||||
6829 | if (Cond == V) | ||||
6830 | return true; | ||||
6831 | else if (PoisonOnly && isa<Operator>(Cond)) { | ||||
6832 | // For poison, we can analyze further | ||||
6833 | auto *Opr = cast<Operator>(Cond); | ||||
6834 | if (any_of(Opr->operands(), | ||||
6835 | [V](const Use &U) { return V == U && propagatesPoison(U); })) | ||||
6836 | return true; | ||||
6837 | } | ||||
6838 | } | ||||
6839 | |||||
6840 | Dominator = Dominator->getIDom(); | ||||
6841 | } | ||||
6842 | |||||
6843 | if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC)) | ||||
6844 | return true; | ||||
6845 | |||||
6846 | return false; | ||||
6847 | } | ||||
6848 | |||||
6849 | bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC, | ||||
6850 | const Instruction *CtxI, | ||||
6851 | const DominatorTree *DT, | ||||
6852 | unsigned Depth) { | ||||
6853 | return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false); | ||||
6854 | } | ||||
6855 | |||||
6856 | bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC, | ||||
6857 | const Instruction *CtxI, | ||||
6858 | const DominatorTree *DT, unsigned Depth) { | ||||
6859 | return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true); | ||||
6860 | } | ||||
6861 | |||||
6862 | /// Return true if undefined behavior would provably be executed on the path to | ||||
6863 | /// OnPathTo if Root produced a poison result. Note that this doesn't say | ||||
6864 | /// anything about whether OnPathTo is actually executed or whether Root is | ||||
6865 | /// actually poison. This can be used to assess whether a new use of Root can | ||||
6866 | /// be added at a location which is control equivalent with OnPathTo (such as | ||||
6867 | /// immediately before it) without introducing UB which didn't previously | ||||
6868 | /// exist. Note that a false result conveys no information. | ||||
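     | /// | ||||
     | /// Illustrative sketch (hypothetical IR, not from this file): if Root is %r | ||||
     | /// and a dominating instruction on the path is | ||||
     | ///   %q = udiv i32 1, %r | ||||
     | /// then a poison %r makes the udiv immediate UB (its divisor must not be | ||||
     | /// poison), so this can return true when %q dominates OnPathTo. | ||||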
6869 | bool llvm::mustExecuteUBIfPoisonOnPathTo(Instruction *Root, | ||||
6870 | Instruction *OnPathTo, | ||||
6871 | DominatorTree *DT) { | ||||
6872 | // Basic approach is to assume Root is poison, propagate poison forward | ||||
6873 | // through all users we can easily track, and then check whether any of those | ||||
6874 | // users provably trigger UB and must execute before our exiting block might | ||||
6875 | // exit. | ||||
6876 | |||||
6877 | // The set of all recursive users we've visited (which are assumed to all be | ||||
6878 | // poison because of said visit) | ||||
6879 | SmallSet<const Value *, 16> KnownPoison; | ||||
6880 | SmallVector<const Instruction*, 16> Worklist; | ||||
6881 | Worklist.push_back(Root); | ||||
6882 | while (!Worklist.empty()) { | ||||
6883 | const Instruction *I = Worklist.pop_back_val(); | ||||
6884 | |||||
6885 | // If we know this must trigger UB on a path leading to our target. | ||||
6886 | if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo)) | ||||
6887 | return true; | ||||
6888 | |||||
6889 | // If we can't analyze propagation through this instruction, just skip it | ||||
6890 | // and transitive users. Safe as false is a conservative result. | ||||
6891 | if (I != Root && !any_of(I->operands(), [&KnownPoison](const Use &U) { | ||||
6892 | return KnownPoison.contains(U) && propagatesPoison(U); | ||||
6893 | })) | ||||
6894 | continue; | ||||
6895 | |||||
6896 | if (KnownPoison.insert(I).second) | ||||
6897 | for (const User *User : I->users()) | ||||
6898 | Worklist.push_back(cast<Instruction>(User)); | ||||
6899 | } | ||||
6900 | |||||
6901 | // Might be non-UB, or might have a path we couldn't prove must execute on | ||||
6902 | // the way to the exiting bb. | ||||
6903 | return false; | ||||
6904 | } | ||||
6905 | |||||
6906 | OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add, | ||||
6907 | const DataLayout &DL, | ||||
6908 | AssumptionCache *AC, | ||||
6909 | const Instruction *CxtI, | ||||
6910 | const DominatorTree *DT) { | ||||
6911 | return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1), | ||||
6912 | Add, DL, AC, CxtI, DT); | ||||
6913 | } | ||||
6914 | |||||
6915 | OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS, | ||||
6916 | const Value *RHS, | ||||
6917 | const DataLayout &DL, | ||||
6918 | AssumptionCache *AC, | ||||
6919 | const Instruction *CxtI, | ||||
6920 | const DominatorTree *DT) { | ||||
6921 | return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT); | ||||
6922 | } | ||||
6923 | |||||
6924 | bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) { | ||||
6925 | // Note: An atomic operation isn't guaranteed to return in a reasonable amount | ||||
6926 | // of time because it's possible for another thread to interfere with it for an | ||||
6927 | // arbitrary length of time, but programs aren't allowed to rely on that. | ||||
6928 | |||||
6929 | // If there is no successor, then execution can't transfer to it. | ||||
6930 | if (isa<ReturnInst>(I)) | ||||
6931 | return false; | ||||
6932 | if (isa<UnreachableInst>(I)) | ||||
6933 | return false; | ||||
6934 | |||||
6935 | // Note: Do not add new checks here; instead, change Instruction::mayThrow or | ||||
6936 | // Instruction::willReturn. | ||||
6937 | // | ||||
6938 | // FIXME: Move this check into Instruction::willReturn. | ||||
6939 | if (isa<CatchPadInst>(I)) { | ||||
6940 | switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) { | ||||
6941 | default: | ||||
6942 | // A catchpad may invoke exception object constructors and such, which | ||||
6943 | // in some languages can be arbitrary code, so be conservative by default. | ||||
6944 | return false; | ||||
6945 | case EHPersonality::CoreCLR: | ||||
6946 | // For CoreCLR, it just involves a type test. | ||||
6947 | return true; | ||||
6948 | } | ||||
6949 | } | ||||
6950 | |||||
6951 | // An instruction that returns without throwing must transfer control flow | ||||
6952 | // to a successor. | ||||
6953 | return !I->mayThrow() && I->willReturn(); | ||||
6954 | } | ||||
6955 | |||||
6956 | bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) { | ||||
6957 | // TODO: This is slightly conservative for invoke instructions since exiting | ||||
6958 | // via an exception *is* normal control for them. | ||||
6959 | for (const Instruction &I : *BB) | ||||
6960 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) | ||||
6961 | return false; | ||||
6962 | return true; | ||||
6963 | } | ||||
6964 | |||||
6965 | bool llvm::isGuaranteedToTransferExecutionToSuccessor( | ||||
6966 | BasicBlock::const_iterator Begin, BasicBlock::const_iterator End, | ||||
6967 | unsigned ScanLimit) { | ||||
6968 | return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End), | ||||
6969 | ScanLimit); | ||||
6970 | } | ||||
6971 | |||||
6972 | bool llvm::isGuaranteedToTransferExecutionToSuccessor( | ||||
6973 | iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) { | ||||
6974 | assert(ScanLimit && "scan limit must be non-zero"); | ||||
6975 | for (const Instruction &I : Range) { | ||||
6976 | if (isa<DbgInfoIntrinsic>(I)) | ||||
6977 | continue; | ||||
6978 | if (--ScanLimit == 0) | ||||
6979 | return false; | ||||
6980 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) | ||||
6981 | return false; | ||||
6982 | } | ||||
6983 | return true; | ||||
6984 | } | ||||
6985 | |||||
6986 | bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I, | ||||
6987 | const Loop *L) { | ||||
6988 | // The loop header is guaranteed to be executed for every iteration. | ||||
6989 | // | ||||
6990 | // FIXME: Relax this constraint to cover all basic blocks that are | ||||
6991 | // guaranteed to be executed at every iteration. | ||||
6992 | if (I->getParent() != L->getHeader()) return false; | ||||
6993 | |||||
6994 | for (const Instruction &LI : *L->getHeader()) { | ||||
6995 | if (&LI == I) return true; | ||||
6996 | if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false; | ||||
6997 | } | ||||
6998 | llvm_unreachable("Instruction not contained in its own parent basic block."); | ||||
6999 | } | ||||
7000 | |||||
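     | // Illustrative sketch for the function below (hypothetical IR, not from | ||||
     | // the original source): given a poison %x, | ||||
     | //   %a = add i32 %x, 1               ; %a is poison: add propagates it | ||||
     | //   %s = select i1 %c, i32 %x, i32 0 ; %s need not be poison: for select, | ||||
     | //                                      only the condition (operand 0) | ||||
     | //                                      propagates poison | ||||
     | // so propagatesPoison is true for %x's use in %a and false for its use in %s. | ||||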
7001 | bool llvm::propagatesPoison(const Use &PoisonOp) { | ||||
7002 | const Operator *I = cast<Operator>(PoisonOp.getUser()); | ||||
7003 | switch (I->getOpcode()) { | ||||
7004 | case Instruction::Freeze: | ||||
7005 | case Instruction::PHI: | ||||
7006 | case Instruction::Invoke: | ||||
7007 | return false; | ||||
7008 | case Instruction::Select: | ||||
7009 | return PoisonOp.getOperandNo() == 0; | ||||
7010 | case Instruction::Call: | ||||
7011 | if (auto *II = dyn_cast<IntrinsicInst>(I)) { | ||||
7012 | switch (II->getIntrinsicID()) { | ||||
7013 | // TODO: Add more intrinsics. | ||||
7014 | case Intrinsic::sadd_with_overflow: | ||||
7015 | case Intrinsic::ssub_with_overflow: | ||||
7016 | case Intrinsic::smul_with_overflow: | ||||
7017 | case Intrinsic::uadd_with_overflow: | ||||
7018 | case Intrinsic::usub_with_overflow: | ||||
7019 | case Intrinsic::umul_with_overflow: | ||||
7020 | // If an input vector contains a poison element, the corresponding | ||||
7021 | // lanes of both output vectors (the calculated results and the | ||||
7022 | // overflow bits) are poison. | ||||
7023 | return true; | ||||
7024 | case Intrinsic::ctpop: | ||||
7025 | return true; | ||||
7026 | } | ||||
7027 | } | ||||
7028 | return false; | ||||
7029 | case Instruction::ICmp: | ||||
7030 | case Instruction::FCmp: | ||||
7031 | case Instruction::GetElementPtr: | ||||
7032 | return true; | ||||
7033 | default: | ||||
7034 | if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I)) | ||||
7035 | return true; | ||||
7036 | |||||
7037 | // Be conservative and return false. | ||||
7038 | return false; | ||||
7039 | } | ||||
7040 | } | ||||
7041 | |||||
7042 | void llvm::getGuaranteedWellDefinedOps( | ||||
7043 | const Instruction *I, SmallVectorImpl<const Value *> &Operands) { | ||||
7044 | switch (I->getOpcode()) { | ||||
7045 | case Instruction::Store: | ||||
7046 | Operands.push_back(cast<StoreInst>(I)->getPointerOperand()); | ||||
7047 | break; | ||||
7048 | |||||
7049 | case Instruction::Load: | ||||
7050 | Operands.push_back(cast<LoadInst>(I)->getPointerOperand()); | ||||
7051 | break; | ||||
7052 | |||||
7053 | // Since the dereferenceable attribute implies noundef, atomic operations | ||||
7054 | // also implicitly have noundef pointers. | ||||
7055 | case Instruction::AtomicCmpXchg: | ||||
7056 | Operands.push_back(cast<AtomicCmpXchgInst>(I)->getPointerOperand()); | ||||
7057 | break; | ||||
7058 | |||||
7059 | case Instruction::AtomicRMW: | ||||
7060 | Operands.push_back(cast<AtomicRMWInst>(I)->getPointerOperand()); | ||||
7061 | break; | ||||
7062 | |||||
7063 | case Instruction::Call: | ||||
7064 | case Instruction::Invoke: { | ||||
7065 | const CallBase *CB = cast<CallBase>(I); | ||||
7066 | if (CB->isIndirectCall()) | ||||
7067 | Operands.push_back(CB->getCalledOperand()); | ||||
7068 | for (unsigned i = 0; i < CB->arg_size(); ++i) { | ||||
7069 | if (CB->paramHasAttr(i, Attribute::NoUndef) || | ||||
7070 | CB->paramHasAttr(i, Attribute::Dereferenceable)) | ||||
7071 | Operands.push_back(CB->getArgOperand(i)); | ||||
7072 | } | ||||
7073 | break; | ||||
7074 | } | ||||
7075 | case Instruction::Ret: | ||||
7076 | if (I->getFunction()->hasRetAttribute(Attribute::NoUndef)) | ||||
7077 | Operands.push_back(I->getOperand(0)); | ||||
7078 | break; | ||||
7079 | case Instruction::Switch: | ||||
7080 | Operands.push_back(cast<SwitchInst>(I)->getCondition()); | ||||
7081 | break; | ||||
7082 | case Instruction::Br: { | ||||
7083 | auto *BR = cast<BranchInst>(I); | ||||
7084 | if (BR->isConditional()) | ||||
7085 | Operands.push_back(BR->getCondition()); | ||||
7086 | break; | ||||
7087 | } | ||||
7088 | default: | ||||
7089 | break; | ||||
7090 | } | ||||
7091 | } | ||||
7092 | |||||
7093 | void llvm::getGuaranteedNonPoisonOps(const Instruction *I, | ||||
7094 | SmallVectorImpl<const Value *> &Operands) { | ||||
7095 | getGuaranteedWellDefinedOps(I, Operands); | ||||
7096 | switch (I->getOpcode()) { | ||||
7097 | // Divisors of these operations are allowed to be partially undef. | ||||
7098 | case Instruction::UDiv: | ||||
7099 | case Instruction::SDiv: | ||||
7100 | case Instruction::URem: | ||||
7101 | case Instruction::SRem: | ||||
7102 | Operands.push_back(I->getOperand(1)); | ||||
7103 | break; | ||||
7104 | default: | ||||
7105 | break; | ||||
7106 | } | ||||
7107 | } | ||||
7108 | |||||
7109 | bool llvm::mustTriggerUB(const Instruction *I, | ||||
7110 | const SmallPtrSetImpl<const Value *> &KnownPoison) { | ||||
7111 | SmallVector<const Value *, 4> NonPoisonOps; | ||||
7112 | getGuaranteedNonPoisonOps(I, NonPoisonOps); | ||||
7113 | |||||
7114 | for (const auto *V : NonPoisonOps) | ||||
7115 | if (KnownPoison.count(V)) | ||||
7116 | return true; | ||||
7117 | |||||
7118 | return false; | ||||
7119 | } | ||||
7120 | |||||
7121 | static bool programUndefinedIfUndefOrPoison(const Value *V, | ||||
7122 | bool PoisonOnly) { | ||||
7123 | // We currently only look for uses of values within the same basic | ||||
7124 | // block, as that makes it easier to guarantee that the uses will be | ||||
7125 | // executed given that Inst is executed. | ||||
7126 | // | ||||
7127 | // FIXME: Expand this to consider uses beyond the same basic block. To do | ||||
7128 | // this, look out for the distinction between post-dominance and strong | ||||
7129 | // post-dominance. | ||||
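     | // | ||||
     | // Illustrative sketch (hypothetical IR, not from this file): if V is %p | ||||
     | // and the next instruction in the block is | ||||
     | //   store i32 0, ptr %p | ||||
     | // then an undef or poison %p makes the store UB, so this returns true. | ||||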
7130 | const BasicBlock *BB = nullptr; | ||||
7131 | BasicBlock::const_iterator Begin; | ||||
7132 | if (const auto *Inst = dyn_cast<Instruction>(V)) { | ||||
7133 | BB = Inst->getParent(); | ||||
7134 | Begin = Inst->getIterator(); | ||||
7135 | Begin++; | ||||
7136 | } else if (const auto *Arg = dyn_cast<Argument>(V)) { | ||||
7137 | BB = &Arg->getParent()->getEntryBlock(); | ||||
7138 | Begin = BB->begin(); | ||||
7139 | } else { | ||||
7140 | return false; | ||||
7141 | } | ||||
7142 | |||||
7143 | // Limit number of instructions we look at, to avoid scanning through large | ||||
7144 | // blocks. The current limit is chosen arbitrarily. | ||||
7145 | unsigned ScanLimit = 32; | ||||
7146 | BasicBlock::const_iterator End = BB->end(); | ||||
7147 | |||||
7148 | if (!PoisonOnly) { | ||||
7149 | // Since undef does not propagate eagerly, be conservative & just check | ||||
7150 | // whether a value is directly passed to an instruction that must take | ||||
7151 | // well-defined operands. | ||||
7152 | |||||
7153 | for (const auto &I : make_range(Begin, End)) { | ||||
7154 | if (isa<DbgInfoIntrinsic>(I)) | ||||
7155 | continue; | ||||
7156 | if (--ScanLimit == 0) | ||||
7157 | break; | ||||
7158 | |||||
7159 | SmallVector<const Value *, 4> WellDefinedOps; | ||||
7160 | getGuaranteedWellDefinedOps(&I, WellDefinedOps); | ||||
7161 | if (is_contained(WellDefinedOps, V)) | ||||
7162 | return true; | ||||
7163 | |||||
7164 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) | ||||
7165 | break; | ||||
7166 | } | ||||
7167 | return false; | ||||
7168 | } | ||||
7169 | |||||
7170 | // Set of instructions that we have proved will yield poison if Inst | ||||
7171 | // does. | ||||
7172 | SmallSet<const Value *, 16> YieldsPoison; | ||||
7173 | SmallSet<const BasicBlock *, 4> Visited; | ||||
7174 | |||||
7175 | YieldsPoison.insert(V); | ||||
7176 | Visited.insert(BB); | ||||
7177 | |||||
7178 | while (true) { | ||||
7179 | for (const auto &I : make_range(Begin, End)) { | ||||
7180 | if (isa<DbgInfoIntrinsic>(I)) | ||||
7181 | continue; | ||||
7182 | if (--ScanLimit == 0) | ||||
7183 | return false; | ||||
7184 | if (mustTriggerUB(&I, YieldsPoison)) | ||||
7185 | return true; | ||||
7186 | if (!isGuaranteedToTransferExecutionToSuccessor(&I)) | ||||
7187 | return false; | ||||
7188 | |||||
7189 | // If an operand is poison and propagates it, mark I as yielding poison. | ||||
7190 | for (const Use &Op : I.operands()) { | ||||
7191 | if (YieldsPoison.count(Op) && propagatesPoison(Op)) { | ||||
7192 | YieldsPoison.insert(&I); | ||||
7193 | break; | ||||
7194 | } | ||||
7195 | } | ||||
7196 | |||||
7197 | // Special handling for select, which returns poison if its operand 0 is | ||||
7198 | // poison (handled in the loop above) *or* if both its true/false operands | ||||
7199 | // are poison (handled here). | ||||
7200 | if (I.getOpcode() == Instruction::Select && | ||||
7201 | YieldsPoison.count(I.getOperand(1)) && | ||||
7202 | YieldsPoison.count(I.getOperand(2))) { | ||||
7203 | YieldsPoison.insert(&I); | ||||
7204 | } | ||||
7205 | } | ||||
7206 | |||||
7207 | BB = BB->getSingleSuccessor(); | ||||
7208 | if (!BB || !Visited.insert(BB).second) | ||||
7209 | break; | ||||
7210 | |||||
7211 | Begin = BB->getFirstNonPHI()->getIterator(); | ||||
7212 | End = BB->end(); | ||||
7213 | } | ||||
7214 | return false; | ||||
7215 | } | ||||
7216 | |||||
7217 | bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) { | ||||
7218 | return ::programUndefinedIfUndefOrPoison(Inst, false); | ||||
7219 | } | ||||
7220 | |||||
7221 | bool llvm::programUndefinedIfPoison(const Instruction *Inst) { | ||||
7222 | return ::programUndefinedIfUndefOrPoison(Inst, true); | ||||
7223 | } | ||||
7224 | |||||
7225 | static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) { | ||||
7226 | if (FMF.noNaNs()) | ||||
7227 | return true; | ||||
7228 | |||||
7229 | if (auto *C = dyn_cast<ConstantFP>(V)) | ||||
7230 | return !C->isNaN(); | ||||
7231 | |||||
7232 | if (auto *C = dyn_cast<ConstantDataVector>(V)) { | ||||
7233 | if (!C->getElementType()->isFloatingPointTy()) | ||||
7234 | return false; | ||||
7235 | for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) { | ||||
7236 | if (C->getElementAsAPFloat(I).isNaN()) | ||||
7237 | return false; | ||||
7238 | } | ||||
7239 | return true; | ||||
7240 | } | ||||
7241 | |||||
7242 | if (isa<ConstantAggregateZero>(V)) | ||||
7243 | return true; | ||||
7244 | |||||
7245 | return false; | ||||
7246 | } | ||||
7247 | |||||
7248 | static bool isKnownNonZero(const Value *V) { | ||||
7249 | if (auto *C = dyn_cast<ConstantFP>(V)) | ||||
7250 | return !C->isZero(); | ||||
7251 | |||||
7252 | if (auto *C = dyn_cast<ConstantDataVector>(V)) { | ||||
7253 | if (!C->getElementType()->isFloatingPointTy()) | ||||
7254 | return false; | ||||
7255 | for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) { | ||||
7256 | if (C->getElementAsAPFloat(I).isZero()) | ||||
7257 | return false; | ||||
7258 | } | ||||
7259 | return true; | ||||
7260 | } | ||||
7261 | |||||
7262 | return false; | ||||
7263 | } | ||||
7264 | |||||
7265 | /// Match clamp pattern for float types without caring about NaNs or signed | ||||
7266 | /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this | ||||
7267 | /// function recognizes if it can be substituted by a "canonical" min/max | ||||
7268 | /// pattern. | ||||
7269 | static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, | ||||
7270 | Value *CmpLHS, Value *CmpRHS, | ||||
7271 | Value *TrueVal, Value *FalseVal, | ||||
7272 | Value *&LHS, Value *&RHS) { | ||||
7273 | // Try to match | ||||
7274 | // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2)) | ||||
7275 | // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2)) | ||||
7276 | // and return description of the outer Max/Min. | ||||
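     | // For example (constants chosen for illustration): with C1 = 1.0 and | ||||
     | // C2 = 2.0, | ||||
     | //   x < 1.0 ? 1.0 : fmin(x, 2.0) | ||||
     | // is recognized as the outer max of max(1.0, min(x, 2.0)), i.e. x clamped | ||||
     | // to [1.0, 2.0]; the match requires C1 < C2, as checked below. | ||||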
7277 | |||||
7278 | // First, check if select has inverse order: | ||||
7279 | if (CmpRHS == FalseVal) { | ||||
7280 | std::swap(TrueVal, FalseVal); | ||||
7281 | Pred = CmpInst::getInversePredicate(Pred); | ||||
7282 | } | ||||
7283 | |||||
7284 | // Assume success now. If there's no match, callers should not use these anyway. | ||||
7285 | LHS = TrueVal; | ||||
7286 | RHS = FalseVal; | ||||
7287 | |||||
7288 | const APFloat *FC1; | ||||
7289 | if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite()) | ||||
7290 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7291 | |||||
7292 | const APFloat *FC2; | ||||
7293 | switch (Pred) { | ||||
7294 | case CmpInst::FCMP_OLT: | ||||
7295 | case CmpInst::FCMP_OLE: | ||||
7296 | case CmpInst::FCMP_ULT: | ||||
7297 | case CmpInst::FCMP_ULE: | ||||
7298 | if (match(FalseVal, | ||||
7299 | m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)), | ||||
7300 | m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) && | ||||
7301 | *FC1 < *FC2) | ||||
7302 | return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false}; | ||||
7303 | break; | ||||
7304 | case CmpInst::FCMP_OGT: | ||||
7305 | case CmpInst::FCMP_OGE: | ||||
7306 | case CmpInst::FCMP_UGT: | ||||
7307 | case CmpInst::FCMP_UGE: | ||||
7308 | if (match(FalseVal, | ||||
7309 | m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)), | ||||
7310 | m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) && | ||||
7311 | *FC1 > *FC2) | ||||
7312 | return {SPF_FMINNUM, SPNB_RETURNS_ANY, false}; | ||||
7313 | break; | ||||
7314 | default: | ||||
7315 | break; | ||||
7316 | } | ||||
7317 | |||||
7318 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7319 | } | ||||
7320 | |||||
7321 | /// Recognize variations of: | ||||
7322 | /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v))) | ||||
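     | /// | ||||
     | /// Illustrative IR sketch (hypothetical, not from this file), clamping %x | ||||
     | /// to [0, 255]: | ||||
     | ///   %min = select (icmp slt i32 %x, 255), i32 %x, i32 255 ; smin(%x, 255) | ||||
     | ///   %r   = select (icmp slt i32 %x, 0), i32 0, i32 %min   ; matched here | ||||
     | ///                                                         ; as SPF_SMAX | ||||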
7323 | static SelectPatternResult matchClamp(CmpInst::Predicate Pred, | ||||
7324 | Value *CmpLHS, Value *CmpRHS, | ||||
7325 | Value *TrueVal, Value *FalseVal) { | ||||
7326 | // Swap the select operands and predicate to match the patterns below. | ||||
7327 | if (CmpRHS != TrueVal) { | ||||
7328 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
7329 | std::swap(TrueVal, FalseVal); | ||||
7330 | } | ||||
7331 | const APInt *C1; | ||||
7332 | if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { | ||||
7333 | const APInt *C2; | ||||
7334 | // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) | ||||
7335 | if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && | ||||
7336 | C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) | ||||
7337 | return {SPF_SMAX, SPNB_NA, false}; | ||||
7338 | |||||
7339 | // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) | ||||
7340 | if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && | ||||
7341 | C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) | ||||
7342 | return {SPF_SMIN, SPNB_NA, false}; | ||||
7343 | |||||
7344 | // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) | ||||
7345 | if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && | ||||
7346 | C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) | ||||
7347 | return {SPF_UMAX, SPNB_NA, false}; | ||||
7348 | |||||
7349 | // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) | ||||
7350 | if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && | ||||
7351 | C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) | ||||
7352 | return {SPF_UMIN, SPNB_NA, false}; | ||||
7353 | } | ||||
7354 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7355 | } | ||||
7356 | |||||
7357 | /// Recognize variations of: | ||||
7358 | /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) | ||||
7359 | static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, | ||||
7360 | Value *CmpLHS, Value *CmpRHS, | ||||
7361 | Value *TVal, Value *FVal, | ||||
7362 | unsigned Depth) { | ||||
7363 | // TODO: Allow FP min/max with nnan/nsz. | ||||
7364 | assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); | ||||
7365 | |||||
7366 | Value *A = nullptr, *B = nullptr; | ||||
7367 | SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); | ||||
7368 | if (!SelectPatternResult::isMinOrMax(L.Flavor)) | ||||
7369 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7370 | |||||
7371 | Value *C = nullptr, *D = nullptr; | ||||
7372 | SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); | ||||
7373 | if (L.Flavor != R.Flavor) | ||||
7374 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7375 | |||||
7376 | // We have something like: x Pred y ? min(a, b) : min(c, d). | ||||
7377 | // Try to match the compare to the min/max operations of the select operands. | ||||
7378 | // First, make sure we have the right compare predicate. | ||||
7379 | switch (L.Flavor) { | ||||
7380 | case SPF_SMIN: | ||||
7381 | if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { | ||||
7382 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
7383 | std::swap(CmpLHS, CmpRHS); | ||||
7384 | } | ||||
7385 | if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) | ||||
7386 | break; | ||||
7387 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7388 | case SPF_SMAX: | ||||
7389 | if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { | ||||
7390 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
7391 | std::swap(CmpLHS, CmpRHS); | ||||
7392 | } | ||||
7393 | if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) | ||||
7394 | break; | ||||
7395 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7396 | case SPF_UMIN: | ||||
7397 | if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { | ||||
7398 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
7399 | std::swap(CmpLHS, CmpRHS); | ||||
7400 | } | ||||
7401 | if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) | ||||
7402 | break; | ||||
7403 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7404 | case SPF_UMAX: | ||||
7405 | if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { | ||||
7406 | Pred = ICmpInst::getSwappedPredicate(Pred); | ||||
7407 | std::swap(CmpLHS, CmpRHS); | ||||
7408 | } | ||||
7409 | if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) | ||||
7410 | break; | ||||
7411 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7412 | default: | ||||
7413 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7414 | } | ||||
7415 | |||||
7416 | // If there is a common operand in the already matched min/max and the other | ||||
7417 | // min/max operands match the compare operands (either directly or inverted), | ||||
7418 | // then this is min/max of the same flavor. | ||||
7419 | |||||
7420 | // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) | ||||
7421 | // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) | ||||
7422 | if (D == B) { | ||||
7423 | if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && | ||||
7424 | match(A, m_Not(m_Specific(CmpRHS))))) | ||||
7425 | return {L.Flavor, SPNB_NA, false}; | ||||
7426 | } | ||||
7427 | // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) | ||||
7428 | // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) | ||||
7429 | if (C == B) { | ||||
7430 | if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && | ||||
7431 | match(A, m_Not(m_Specific(CmpRHS))))) | ||||
7432 | return {L.Flavor, SPNB_NA, false}; | ||||
7433 | } | ||||
7434 | // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) | ||||
7435 | // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) | ||||
7436 | if (D == A) { | ||||
7437 | if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && | ||||
7438 | match(B, m_Not(m_Specific(CmpRHS))))) | ||||
7439 | return {L.Flavor, SPNB_NA, false}; | ||||
7440 | } | ||||
7441 | // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) | ||||
7442 | // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) | ||||
7443 | if (C == A) { | ||||
7444 | if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && | ||||
7445 | match(B, m_Not(m_Specific(CmpRHS))))) | ||||
7446 | return {L.Flavor, SPNB_NA, false}; | ||||
7447 | } | ||||
7448 | |||||
7449 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7450 | } | ||||
7451 | |||||
7452 | /// If the input value is the result of a 'not' op, constant integer, or vector | ||||
7453 | /// splat of a constant integer, return the bitwise-not source value. | ||||
7454 | /// TODO: This could be extended to handle non-splat vector integer constants. | ||||
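     | /// | ||||
     | /// For example (illustrative): getNotValue(xor i8 %x, -1) returns %x, and | ||||
     | /// getNotValue(i8 5) returns the constant i8 -6 (the bitwise-not of 5). | ||||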
7455 | static Value *getNotValue(Value *V) { | ||||
7456 | Value *NotV; | ||||
7457 | if (match(V, m_Not(m_Value(NotV)))) | ||||
7458 | return NotV; | ||||
7459 | |||||
7460 | const APInt *C; | ||||
7461 | if (match(V, m_APInt(C))) | ||||
7462 | return ConstantInt::get(V->getType(), ~(*C)); | ||||
7463 | |||||
7464 | return nullptr; | ||||
7465 | } | ||||
7466 | |||||
7467 | /// Match non-obvious integer minimum and maximum sequences. | ||||
7468 | static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, | ||||
7469 | Value *CmpLHS, Value *CmpRHS, | ||||
7470 | Value *TrueVal, Value *FalseVal, | ||||
7471 | Value *&LHS, Value *&RHS, | ||||
7472 | unsigned Depth) { | ||||
7473 | // Assume success. If there's no match, callers should not use these anyway. | ||||
7474 | LHS = TrueVal; | ||||
7475 | RHS = FalseVal; | ||||
7476 | |||||
7477 | SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal); | ||||
7478 | if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) | ||||
7479 | return SPR; | ||||
7480 | |||||
7481 | SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth); | ||||
7482 | if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) | ||||
7483 | return SPR; | ||||
7484 | |||||
7485 | // Look through 'not' ops to find disguised min/max. | ||||
7486 | // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y) | ||||
7487 | // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y) | ||||
7488 | if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) { | ||||
7489 | switch (Pred) { | ||||
7490 | case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false}; | ||||
7491 | case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false}; | ||||
7492 | case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false}; | ||||
7493 | case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false}; | ||||
7494 | default: break; | ||||
7495 | } | ||||
7496 | } | ||||
7497 | |||||
7498 | // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X) | ||||
7499 | // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X) | ||||
7500 | if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) { | ||||
7501 | switch (Pred) { | ||||
7502 | case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false}; | ||||
7503 | case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false}; | ||||
7504 | case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false}; | ||||
7505 | case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false}; | ||||
7506 | default: break; | ||||
7507 | } | ||||
7508 | } | ||||
7509 | |||||
7510 | if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) | ||||
7511 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7512 | |||||
7513 | const APInt *C1; | ||||
7514 | if (!match(CmpRHS, m_APInt(C1))) | ||||
7515 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7516 | |||||
7517 | // An unsigned min/max can be written with a signed compare. | ||||
7518 | const APInt *C2; | ||||
7519 | if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || | ||||
7520 | (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { | ||||
7521 | // Is the sign bit set? | ||||
7522 | // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX | ||||
7523 | // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN | ||||
7524 | if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue()) | ||||
7525 | return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; | ||||
7526 | |||||
7527 | // Is the sign bit clear? | ||||
7528 | // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX | ||||
7529 | // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN | ||||
7530 | if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue()) | ||||
7531 | return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; | ||||
7532 | } | ||||
7533 | |||||
7534 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7535 | } | ||||
7536 | |||||
7537 | bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) { | ||||
7538 | assert(X && Y && "Invalid operand"); | ||||
7539 | |||||
7540 | // X = sub (0, Y) || X = sub nsw (0, Y) | ||||
7541 | if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) || | ||||
7542 | (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y))))) | ||||
7543 | return true; | ||||
7544 | |||||
7545 | // Y = sub (0, X) || Y = sub nsw (0, X) | ||||
7546 | if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) || | ||||
7547 | (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X))))) | ||||
7548 | return true; | ||||
7549 | |||||
7550 | // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A) | ||||
7551 | Value *A, *B; | ||||
7552 | return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) && | ||||
7553 | match(Y, m_Sub(m_Specific(B), m_Specific(A))))) || | ||||
7554 | (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) && | ||||
7555 | match(Y, m_NSWSub(m_Specific(B), m_Specific(A))))); | ||||
7556 | } | ||||
7557 | |||||
7558 | static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, | ||||
7559 | FastMathFlags FMF, | ||||
7560 | Value *CmpLHS, Value *CmpRHS, | ||||
7561 | Value *TrueVal, Value *FalseVal, | ||||
7562 | Value *&LHS, Value *&RHS, | ||||
7563 | unsigned Depth) { | ||||
7564 | bool HasMismatchedZeros = false; | ||||
7565 | if (CmpInst::isFPPredicate(Pred)) { | ||||
7566 | // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one | ||||
7567 | // 0.0 operand, set the compare's 0.0 operands to that same value for the | ||||
7568 | // purpose of identifying min/max. Disregard vector constants with undefined | ||||
7569 | // elements because those cannot be back-propagated for analysis. | ||||
7570 | Value *OutputZeroVal = nullptr; | ||||
7571 | if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) && | ||||
7572 | !cast<Constant>(TrueVal)->containsUndefOrPoisonElement()) | ||||
7573 | OutputZeroVal = TrueVal; | ||||
7574 | else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) && | ||||
7575 | !cast<Constant>(FalseVal)->containsUndefOrPoisonElement()) | ||||
7576 | OutputZeroVal = FalseVal; | ||||
7577 | |||||
7578 | if (OutputZeroVal) { | ||||
7579 | if (match(CmpLHS, m_AnyZeroFP()) && CmpLHS != OutputZeroVal) { | ||||
7580 | HasMismatchedZeros = true; | ||||
7581 | CmpLHS = OutputZeroVal; | ||||
7582 | } | ||||
7583 | if (match(CmpRHS, m_AnyZeroFP()) && CmpRHS != OutputZeroVal) { | ||||
7584 | HasMismatchedZeros = true; | ||||
7585 | CmpRHS = OutputZeroVal; | ||||
7586 | } | ||||
7587 | } | ||||
7588 | } | ||||
7589 | |||||
7590 | LHS = CmpLHS; | ||||
7591 | RHS = CmpRHS; | ||||
7592 | |||||
7593 | // Signed zero may return inconsistent results between implementations. | ||||
7594 | // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0 | ||||
7595 | // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) | ||||
7596 | // Therefore, we behave conservatively and only proceed if at least one of the | ||||
7597 | // operands is known to not be zero or if we don't care about signed zero. | ||||
7598 | switch (Pred) { | ||||
7599 | default: break; | ||||
7600 | case CmpInst::FCMP_OGT: case CmpInst::FCMP_OLT: | ||||
7601 | case CmpInst::FCMP_UGT: case CmpInst::FCMP_ULT: | ||||
7602 | if (!HasMismatchedZeros) | ||||
7603 | break; | ||||
7604 | [[fallthrough]]; | ||||
7605 | case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE: | ||||
7606 | case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE: | ||||
7607 | if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && | ||||
7608 | !isKnownNonZero(CmpRHS)) | ||||
7609 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7610 | } | ||||
7611 | |||||
7612 | SelectPatternNaNBehavior NaNBehavior = SPNB_NA; | ||||
7613 | bool Ordered = false; | ||||
7614 | |||||
7615 | // When given one NaN and one non-NaN input: | ||||
7616 | // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. | ||||
7617 | // - A simple C99 (a < b ? a : b) construction will return 'b' (as the | ||||
7618 | // ordered comparison fails), which could be NaN or non-NaN. | ||||
7619 | // so here we discover exactly what NaN behavior is required/accepted. | ||||
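     | // For example (illustrative), when %a is NaN: | ||||
     | //   (%a olt %b) ? %a : %b   ; ordered cmp is false on NaN -> returns %b | ||||
     | //   (%a ult %b) ? %a : %b   ; unordered cmp is true on NaN -> returns %a | ||||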
7620 | if (CmpInst::isFPPredicate(Pred)) { | ||||
7621 | bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); | ||||
7622 | bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); | ||||
7623 | |||||
7624 | if (LHSSafe && RHSSafe) { | ||||
7625 | // Both operands are known non-NaN. | ||||
7626 | NaNBehavior = SPNB_RETURNS_ANY; | ||||
7627 | } else if (CmpInst::isOrdered(Pred)) { | ||||
7628 | // An ordered comparison will return false when given a NaN, so it | ||||
7629 | // returns the RHS. | ||||
7630 | Ordered = true; | ||||
7631 | if (LHSSafe) | ||||
7632 | // LHS is non-NaN, so if RHS is NaN then NaN will be returned. | ||||
7633 | NaNBehavior = SPNB_RETURNS_NAN; | ||||
7634 | else if (RHSSafe) | ||||
7635 | NaNBehavior = SPNB_RETURNS_OTHER; | ||||
7636 | else | ||||
7637 | // Completely unsafe. | ||||
7638 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7639 | } else { | ||||
7640 | Ordered = false; | ||||
7641 | // An unordered comparison will return true when given a NaN, so it | ||||
7642 | // returns the LHS. | ||||
7643 | if (LHSSafe) | ||||
7644 | // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. | ||||
7645 | NaNBehavior = SPNB_RETURNS_OTHER; | ||||
7646 | else if (RHSSafe) | ||||
7647 | NaNBehavior = SPNB_RETURNS_NAN; | ||||
7648 | else | ||||
7649 | // Completely unsafe. | ||||
7650 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7651 | } | ||||
7652 | } | ||||
7653 | |||||
7654 | if (TrueVal == CmpRHS && FalseVal == CmpLHS) { | ||||
7655 | std::swap(CmpLHS, CmpRHS); | ||||
7656 | Pred = CmpInst::getSwappedPredicate(Pred); | ||||
7657 | if (NaNBehavior == SPNB_RETURNS_NAN) | ||||
7658 | NaNBehavior = SPNB_RETURNS_OTHER; | ||||
7659 | else if (NaNBehavior == SPNB_RETURNS_OTHER) | ||||
7660 | NaNBehavior = SPNB_RETURNS_NAN; | ||||
7661 | Ordered = !Ordered; | ||||
7662 | } | ||||
7663 | |||||
7664 | // ([if]cmp X, Y) ? X : Y | ||||
7665 | if (TrueVal == CmpLHS && FalseVal == CmpRHS) { | ||||
7666 | switch (Pred) { | ||||
7667 | default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. | ||||
7668 | case ICmpInst::ICMP_UGT: | ||||
7669 | case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; | ||||
7670 | case ICmpInst::ICMP_SGT: | ||||
7671 | case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; | ||||
7672 | case ICmpInst::ICMP_ULT: | ||||
7673 | case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; | ||||
7674 | case ICmpInst::ICMP_SLT: | ||||
7675 | case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; | ||||
7676 | case FCmpInst::FCMP_UGT: | ||||
7677 | case FCmpInst::FCMP_UGE: | ||||
7678 | case FCmpInst::FCMP_OGT: | ||||
7679 | case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; | ||||
7680 | case FCmpInst::FCMP_ULT: | ||||
7681 | case FCmpInst::FCMP_ULE: | ||||
7682 | case FCmpInst::FCMP_OLT: | ||||
7683 | case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; | ||||
7684 | } | ||||
7685 | } | ||||
7686 | |||||
7687 | if (isKnownNegation(TrueVal, FalseVal)) { | ||||
7688 | // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can | ||||
7689 | // match against either LHS or sext(LHS). | ||||
7690 | auto MaybeSExtCmpLHS = | ||||
7691 | m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS))); | ||||
7692 | auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes()); | ||||
7693 | auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One()); | ||||
7694 | if (match(TrueVal, MaybeSExtCmpLHS)) { | ||||
7695 | // Set the return values. If the compare uses the negated value (-X >s 0), | ||||
7696 | // swap the return values because the negated value is always 'RHS'. | ||||
7697 | LHS = TrueVal; | ||||
7698 | RHS = FalseVal; | ||||
7699 | if (match(CmpLHS, m_Neg(m_Specific(FalseVal)))) | ||||
7700 | std::swap(LHS, RHS); | ||||
7701 | |||||
7702 | // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X) | ||||
7703 | // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X) | ||||
7704 | if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes)) | ||||
7705 | return {SPF_ABS, SPNB_NA, false}; | ||||
7706 | |||||
7707 | // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X) | ||||
7708 | if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne)) | ||||
7709 | return {SPF_ABS, SPNB_NA, false}; | ||||
7710 | |||||
7711 | // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X) | ||||
7712 | // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X) | ||||
7713 | if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne)) | ||||
7714 | return {SPF_NABS, SPNB_NA, false}; | ||||
7715 | } | ||||
7716 | else if (match(FalseVal, MaybeSExtCmpLHS)) { | ||||
7717 | // Set the return values. If the compare uses the negated value (-X >s 0), | ||||
7718 | // swap the return values because the negated value is always 'RHS'. | ||||
7719 | LHS = FalseVal; | ||||
7720 | RHS = TrueVal; | ||||
7721 | if (match(CmpLHS, m_Neg(m_Specific(TrueVal)))) | ||||
7722 | std::swap(LHS, RHS); | ||||
7723 | |||||
7724 | // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X) | ||||
7725 | // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X) | ||||
7726 | if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes)) | ||||
7727 | return {SPF_NABS, SPNB_NA, false}; | ||||
7728 | |||||
7729 | // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X) | ||||
7730 | // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X) | ||||
7731 | if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne)) | ||||
7732 | return {SPF_ABS, SPNB_NA, false}; | ||||
7733 | } | ||||
7734 | } | ||||
7735 | |||||
7736 | if (CmpInst::isIntPredicate(Pred)) | ||||
7737 | return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth); | ||||
7738 | |||||
7739 | // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar | ||||
7740 | // may return either -0.0 or 0.0, so the fcmp/select pair has stricter | ||||
7741 | // semantics than minNum. Be conservative in such cases. | ||||
7742 | if (NaNBehavior != SPNB_RETURNS_ANY || | ||||
7743 | (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && | ||||
7744 | !isKnownNonZero(CmpRHS))) | ||||
7745 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7746 | |||||
7747 | return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS); | ||||
7748 | } | ||||
7749 | |||||
7750 | /// Helps to match a select pattern in case of a type mismatch. | ||||
7751 | /// | ||||
7752 | /// The function handles the case when the types of the true and false values | ||||
7753 | /// of a select instruction differ from the type of the cmp instruction | ||||
7754 | /// operands because of a cast instruction. The function checks if it is legal | ||||
7755 | /// to move the cast operation after the "select". If yes, it returns the new | ||||
7756 | /// second value of the "select" (with the assumption that the cast is moved): | ||||
7757 | /// 1. As the operand of the cast instruction, when both values of the | ||||
7758 | ///    "select" are the same cast instruction. | ||||
7759 | /// 2. As the restored constant (by applying the reverse cast operation), when | ||||
7760 | ///    the first value of the "select" is a cast operation and the second | ||||
7761 | ///    value is a constant. | ||||
7762 | /// NOTE: We return only the new second value because the first value can be | ||||
7763 | /// accessed as the operand of the cast instruction. | ||||
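     | /// | ||||
     | /// Illustrative sketch of case 2 (hypothetical IR, not from this file): | ||||
     | ///   %cmp = icmp ult i8 %x, 10 | ||||
     | ///   %w   = zext i8 %x to i32 | ||||
     | ///   %s   = select i1 %cmp, i32 %w, i32 7 | ||||
     | /// Here V1 = %w and V2 = 7; the constant 7 survives the trunc-to-i8/zext | ||||
     | /// round trip, so i8 7 is returned and the select can be rebuilt on i8 with | ||||
     | /// the zext moved after it. | ||||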
7764 | static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, | ||||
7765 | Instruction::CastOps *CastOp) { | ||||
7766 | auto *Cast1 = dyn_cast<CastInst>(V1); | ||||
7767 | if (!Cast1) | ||||
7768 | return nullptr; | ||||
7769 | |||||
7770 | *CastOp = Cast1->getOpcode(); | ||||
7771 | Type *SrcTy = Cast1->getSrcTy(); | ||||
7772 | if (auto *Cast2 = dyn_cast<CastInst>(V2)) { | ||||
7773 | // If V1 and V2 are both the same cast from the same type, look through V1. | ||||
7774 | if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy()) | ||||
7775 | return Cast2->getOperand(0); | ||||
7776 | return nullptr; | ||||
7777 | } | ||||
7778 | |||||
7779 | auto *C = dyn_cast<Constant>(V2); | ||||
7780 | if (!C) | ||||
7781 | return nullptr; | ||||
7782 | |||||
7783 | Constant *CastedTo = nullptr; | ||||
7784 | switch (*CastOp) { | ||||
7785 | case Instruction::ZExt: | ||||
7786 | if (CmpI->isUnsigned()) | ||||
7787 | CastedTo = ConstantExpr::getTrunc(C, SrcTy); | ||||
7788 | break; | ||||
7789 | case Instruction::SExt: | ||||
7790 | if (CmpI->isSigned()) | ||||
7791 | CastedTo = ConstantExpr::getTrunc(C, SrcTy, true); | ||||
7792 | break; | ||||
7793 | case Instruction::Trunc: | ||||
7794 | Constant *CmpConst; | ||||
7795 | if (match(CmpI->getOperand(1), m_Constant(CmpConst)) && | ||||
7796 | CmpConst->getType() == SrcTy) { | ||||
7797 | // Here we have the following case: | ||||
7798 | // | ||||
7799 | // %cond = cmp iN %x, CmpConst | ||||
7800 | // %tr = trunc iN %x to iK | ||||
7801 | // %narrowsel = select i1 %cond, iK %tr, iK C | ||||
7802 | // | ||||
7803 | // We can always move trunc after select operation: | ||||
7804 | // | ||||
7805 | // %cond = cmp iN %x, CmpConst | ||||
7806 | // %widesel = select i1 %cond, iN %x, iN CmpConst | ||||
7807 | // %tr = trunc iN %widesel to iK | ||||
7808 | // | ||||
7809 | // Note that C could be extended in any way because we don't care about | ||||
7810 | // upper bits after truncation. It can't be an abs pattern, because it would | ||||
7811 | // look like: | ||||
7812 | // | ||||
7813 | // select i1 %cond, x, -x. | ||||
7814 | // | ||||
7815 | // So only a min/max pattern could be matched. Such a match requires the | ||||
7816 | // widened C == CmpConst. That is why we set the widened C = CmpConst; the | ||||
7817 | // condition trunc(CmpConst) == C is checked below. | ||||
7818 | CastedTo = CmpConst; | ||||
7819 | } else { | ||||
7820 | CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned()); | ||||
7821 | } | ||||
7822 | break; | ||||
7823 | case Instruction::FPTrunc: | ||||
7824 | CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true); | ||||
7825 | break; | ||||
7826 | case Instruction::FPExt: | ||||
7827 | CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true); | ||||
7828 | break; | ||||
7829 | case Instruction::FPToUI: | ||||
7830 | CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true); | ||||
7831 | break; | ||||
7832 | case Instruction::FPToSI: | ||||
7833 | CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true); | ||||
7834 | break; | ||||
7835 | case Instruction::UIToFP: | ||||
7836 | CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true); | ||||
7837 | break; | ||||
7838 | case Instruction::SIToFP: | ||||
7839 | CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true); | ||||
7840 | break; | ||||
7841 | default: | ||||
7842 | break; | ||||
7843 | } | ||||
7844 | |||||
7845 | if (!CastedTo) | ||||
7846 | return nullptr; | ||||
7847 | |||||
7848 | // Make sure the cast doesn't lose any information. | ||||
7849 | Constant *CastedBack = | ||||
7850 | ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true); | ||||
7851 | if (CastedBack != C) | ||||
7852 | return nullptr; | ||||
7853 | |||||
7854 | return CastedTo; | ||||
7855 | } | ||||
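     | // Minimal usage sketch for matchSelectPattern below (illustrative only, | ||||
     | // assuming a Value *V that may be a min/max-shaped select): | ||||
     | //   Value *LHS, *RHS; | ||||
     | //   SelectPatternResult R = matchSelectPattern(V, LHS, RHS); | ||||
     | //   if (R.Flavor == SPF_SMAX) { | ||||
     | //     // V computes the signed maximum of LHS and RHS. | ||||
     | //   } | ||||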
7856 | |||||
7857 | SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, | ||||
7858 | Instruction::CastOps *CastOp, | ||||
7859 | unsigned Depth) { | ||||
7860 | if (Depth >= MaxAnalysisRecursionDepth) | ||||
7861 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7862 | |||||
7863 | SelectInst *SI = dyn_cast<SelectInst>(V); | ||||
7864 | if (!SI) return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7865 | |||||
7866 | CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); | ||||
7867 | if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7868 | |||||
7869 | Value *TrueVal = SI->getTrueValue(); | ||||
7870 | Value *FalseVal = SI->getFalseValue(); | ||||
7871 | |||||
7872 | return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS, | ||||
7873 | CastOp, Depth); | ||||
7874 | } | ||||
7875 | |||||
7876 | SelectPatternResult llvm::matchDecomposedSelectPattern( | ||||
7877 | CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, | ||||
7878 | Instruction::CastOps *CastOp, unsigned Depth) { | ||||
7879 | CmpInst::Predicate Pred = CmpI->getPredicate(); | ||||
7880 | Value *CmpLHS = CmpI->getOperand(0); | ||||
7881 | Value *CmpRHS = CmpI->getOperand(1); | ||||
7882 | FastMathFlags FMF; | ||||
7883 | if (isa<FPMathOperator>(CmpI)) | ||||
7884 | FMF = CmpI->getFastMathFlags(); | ||||
7885 | |||||
7886 | // Bail out early. | ||||
7887 | if (CmpI->isEquality()) | ||||
7888 | return {SPF_UNKNOWN, SPNB_NA, false}; | ||||
7889 | |||||
7890 | // Deal with type mismatches. | ||||
7891 | if (CastOp && CmpLHS->getType() != TrueVal->getType()) { | ||||
7892 | if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) { | ||||
7893 | // If this is a potential fmin/fmax with a cast to integer, then ignore | ||||
7894 | // -0.0 because there is no corresponding integer value. | ||||
7895 | if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) | ||||
7896 | FMF.setNoSignedZeros(); | ||||
7897 | return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, | ||||
7898 | cast<CastInst>(TrueVal)->getOperand(0), C, | ||||
7899 | LHS, RHS, Depth); | ||||
7900 | } | ||||
7901 | if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) { | ||||
7902 | // If this is a potential fmin/fmax with a cast to integer, then ignore | ||||
7903 | // -0.0 because there is no corresponding integer value. | ||||
7904 | if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) | ||||
7905 | FMF.setNoSignedZeros(); | ||||
7906 | return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, | ||||
7907 | C, cast<CastInst>(FalseVal)->getOperand(0), | ||||
7908 | LHS, RHS, Depth); | ||||
7909 | } | ||||
7910 | } | ||||
7911 | return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, | ||||
7912 | LHS, RHS, Depth); | ||||
7913 | } | ||||
7914 | |||||
7915 | CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) { | ||||
7916 | if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT; | ||||
7917 | if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT; | ||||
7918 | if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT; | ||||
7919 | if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT; | ||||
7920 | if (SPF == SPF_FMINNUM) | ||||
7921 | return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; | ||||
7922 | if (SPF == SPF_FMAXNUM) | ||||
7923 | return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; | ||||
7924 | llvm_unreachable("unhandled!"); | ||||
7925 | } | ||||
7926 | |||||
7927 | SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) { | ||||
7928 | if (SPF == SPF_SMIN) return SPF_SMAX; | ||||
7929 | if (SPF == SPF_UMIN) return SPF_UMAX; | ||||
7930 | if (SPF == SPF_SMAX) return SPF_SMIN; | ||||
7931 | if (SPF == SPF_UMAX) return SPF_UMIN; | ||||
7932 | llvm_unreachable("unhandled!"); | ||||
7933 | } | ||||
7934 | |||||
7935 | Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) { | ||||
7936 | switch (MinMaxID) { | ||||
7937 | case Intrinsic::smax: return Intrinsic::smin; | ||||
7938 | case Intrinsic::smin: return Intrinsic::smax; | ||||
7939 | case Intrinsic::umax: return Intrinsic::umin; | ||||
7940 | case Intrinsic::umin: return Intrinsic::umax; | ||||
7941 | // Note that the next four intrinsics may produce the same result for the | ||||
7942 | // original and inverted case even if X != Y, because NaN is handled specially. | ||||
7943 | case Intrinsic::maximum: return Intrinsic::minimum; | ||||
7944 | case Intrinsic::minimum: return Intrinsic::maximum; | ||||
7945 | case Intrinsic::maxnum: return Intrinsic::minnum; | ||||
7946 | case Intrinsic::minnum: return Intrinsic::maxnum; | ||||
7947 | default: llvm_unreachable("Unexpected intrinsic"); | ||||
7948 | } | ||||
7949 | } | ||||
7950 | |||||
7951 | APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) { | ||||
7952 | switch (SPF) { | ||||
7953 | case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth); | ||||
7954 | case SPF_SMIN: return APInt::getSignedMinValue(BitWidth); | ||||
7955 | case SPF_UMAX: return APInt::getMaxValue(BitWidth); | ||||
7956 | case SPF_UMIN: return APInt::getMinValue(BitWidth); | ||||
7957 | default: llvm_unreachable("Unexpected flavor"); | ||||
7958 | } | ||||
7959 | } | ||||
7960 | |||||
7961 | std::pair<Intrinsic::ID, bool> | ||||
7962 | llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) { | ||||
7963 | // Check if VL contains select instructions that can be folded into a min/max | ||||
7964 | // vector intrinsic and return the intrinsic if it is possible. | ||||
7965 | // TODO: Support floating point min/max. | ||||
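     | // For example (illustrative): if every value in VL is a select matching | ||||
     | // smax(x, y), this returns {Intrinsic::smax, AllCmpSingleUse}; mixing smax | ||||
     | // and umax matches yields {Intrinsic::not_intrinsic, false}. | ||||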
7966 | bool AllCmpSingleUse = true; | ||||
7967 | SelectPatternResult SelectPattern; | ||||
7968 | SelectPattern.Flavor = SPF_UNKNOWN; | ||||
7969 | if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) { | ||||
7970 | Value *LHS, *RHS; | ||||
7971 | auto CurrentPattern = matchSelectPattern(I, LHS, RHS); | ||||
7972 | if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) || | ||||
7973 | CurrentPattern.Flavor == SPF_FMINNUM || | ||||
7974 | CurrentPattern.Flavor == SPF_FMAXNUM || | ||||
7975 | !I->getType()->isIntOrIntVectorTy()) | ||||
7976 | return false; | ||||
7977 | if (SelectPattern.Flavor != SPF_UNKNOWN && | ||||
7978 | SelectPattern.Flavor != CurrentPattern.Flavor) | ||||
7979 | return false; | ||||
7980 | SelectPattern = CurrentPattern; | ||||
7981 | AllCmpSingleUse &= | ||||
7982 | match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value())); | ||||
7983 | return true; | ||||
7984 | })) { | ||||
7985 | switch (SelectPattern.Flavor) { | ||||
7986 | case SPF_SMIN: | ||||
7987 | return {Intrinsic::smin, AllCmpSingleUse}; | ||||
7988 | case SPF_UMIN: | ||||
7989 | return {Intrinsic::umin, AllCmpSingleUse}; | ||||
7990 | case SPF_SMAX: | ||||
7991 | return {Intrinsic::smax, AllCmpSingleUse}; | ||||
7992 | case SPF_UMAX: | ||||
7993 | return {Intrinsic::umax, AllCmpSingleUse}; | ||||
7994 | default: | ||||
7995 | llvm_unreachable("unexpected select pattern flavor"); | ||||
7996 | } | ||||
7997 | } | ||||
7998 | return {Intrinsic::not_intrinsic, false}; | ||||
7999 | } | ||||
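
// --- Editor's note (illustrative IR, not from this file) ---
// The shape recognized above, written as IR:
//   %c = icmp slt i32 %x, %y
//   %s = select i1 %c, i32 %x, i32 %y
// matchSelectPattern classifies %s as SPF_SMIN, so a vector VL of such
// selects maps to Intrinsic::smin. AllCmpSingleUse reports whether every
// compare feeds only its select, so a caller can erase the compares after
// rewriting the selects to the intrinsic.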
8000 | |||||
8001 | bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, | ||||
8002 | Value *&Start, Value *&Step) { | ||||
8003 | // Handle the case of a simple two-predecessor recurrence PHI. | ||||
8004 | // There's a lot more that could theoretically be done here, but | ||||
8005 | // this is sufficient to catch some interesting cases. | ||||
8006 | if (P->getNumIncomingValues() != 2) | ||||
8007 | return false; | ||||
8008 | |||||
8009 | for (unsigned i = 0; i != 2; ++i) { | ||||
8010 | Value *L = P->getIncomingValue(i); | ||||
8011 | Value *R = P->getIncomingValue(!i); | ||||
8012 | Operator *LU = dyn_cast<Operator>(L); | ||||
8013 | if (!LU) | ||||
8014 | continue; | ||||
8015 | unsigned Opcode = LU->getOpcode(); | ||||
8016 | |||||
8017 | switch (Opcode) { | ||||
8018 | default: | ||||
8019 | continue; | ||||
8020 | // TODO: Expand list -- xor, div, gep, uaddo, etc. | ||||
8021 | case Instruction::LShr: | ||||
8022 | case Instruction::AShr: | ||||
8023 | case Instruction::Shl: | ||||
8024 | case Instruction::Add: | ||||
8025 | case Instruction::Sub: | ||||
8026 | case Instruction::And: | ||||
8027 | case Instruction::Or: | ||||
8028 | case Instruction::Mul: | ||||
8029 | case Instruction::FMul: { | ||||
8030 | Value *LL = LU->getOperand(0); | ||||
8031 | Value *LR = LU->getOperand(1); | ||||
8032 | // Find a recurrence. | ||||
8033 | if (LL == P) | ||||
8034 | L = LR; | ||||
8035 | else if (LR == P) | ||||
8036 | L = LL; | ||||
8037 | else | ||||
8038 | continue; // Check for recurrence with L and R flipped. | ||||
8039 | |||||
8040 | break; // Match! | ||||
8041 | } | ||||
8042 | } | ||||
8043 | |||||
8044 | // We have matched a recurrence of the form: | ||||
8045 | // %iv = [R, %entry], [%iv.next, %backedge] | ||||
8046 | // %iv.next = binop %iv, L | ||||
8047 | // OR | ||||
8048 | // %iv = [R, %entry], [%iv.next, %backedge] | ||||
8049 | // %iv.next = binop L, %iv | ||||
8050 | BO = cast<BinaryOperator>(LU); | ||||
8051 | Start = R; | ||||
8052 | Step = L; | ||||
8053 | return true; | ||||
8054 | } | ||||
8055 | return false; | ||||
8056 | } | ||||
8057 | |||||
8058 | bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, | ||||
8059 | Value *&Start, Value *&Step) { | ||||
8060 | BinaryOperator *BO = nullptr; | ||||
8061 | P = dyn_cast<PHINode>(I->getOperand(0)); | ||||
8062 | if (!P) | ||||
8063 | P = dyn_cast<PHINode>(I->getOperand(1)); | ||||
8064 | return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I; | ||||
8065 | } | ||||
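
// --- Editor's hedged usage sketch (fragment; `IV` is a hypothetical
// loop-header PHINode*, not a name from this file) ---
//   BinaryOperator *BO = nullptr;
//   Value *Start = nullptr, *Step = nullptr;
//   if (matchSimpleRecurrence(IV, BO, Start, Step)) {
//     // IV = phi [Start, entry], [BO, backedge], with BO = binop(IV, Step).
//   }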
8066 | |||||
8067 | /// Return true if "icmp Pred LHS RHS" is always true. | ||||
8068 | static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, | ||||
8069 | const Value *RHS, const DataLayout &DL, | ||||
8070 | unsigned Depth) { | ||||
8071 | if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS) | ||||
8072 | return true; | ||||
8073 | |||||
8074 | switch (Pred) { | ||||
8075 | default: | ||||
8076 | return false; | ||||
8077 | |||||
8078 | case CmpInst::ICMP_SLE: { | ||||
8079 | const APInt *C; | ||||
8080 | |||||
8081 | // LHS s<= LHS +_{nsw} C if C >= 0 | ||||
8082 | if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C)))) | ||||
8083 | return !C->isNegative(); | ||||
8084 | return false; | ||||
8085 | } | ||||
8086 | |||||
8087 | case CmpInst::ICMP_ULE: { | ||||
8088 | const APInt *C; | ||||
8089 | |||||
8090 | // LHS u<= LHS +_{nuw} C for any C | ||||
8091 | if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C)))) | ||||
8092 | return true; | ||||
8093 | |||||
8094 | // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB) | ||||
8095 | auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B, | ||||
8096 | const Value *&X, | ||||
8097 | const APInt *&CA, const APInt *&CB) { | ||||
8098 | if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) && | ||||
8099 | match(B, m_NUWAdd(m_Specific(X), m_APInt(CB)))) | ||||
8100 | return true; | ||||
8101 | |||||
8102 | // If X & C == 0 then (X | C) == X +_{nuw} C | ||||
8103 | if (match(A, m_Or(m_Value(X), m_APInt(CA))) && | ||||
8104 | match(B, m_Or(m_Specific(X), m_APInt(CB)))) { | ||||
8105 | KnownBits Known(CA->getBitWidth()); | ||||
8106 | computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr, | ||||
8107 | /*CxtI*/ nullptr, /*DT*/ nullptr); | ||||
8108 | if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero)) | ||||
8109 | return true; | ||||
8110 | } | ||||
8111 | |||||
8112 | return false; | ||||
8113 | }; | ||||
8114 | |||||
8115 | const Value *X; | ||||
8116 | const APInt *CLHS, *CRHS; | ||||
8117 | if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS)) | ||||
8118 | return CLHS->ule(*CRHS); | ||||
8119 | |||||
8120 | return false; | ||||
8121 | } | ||||
8122 | } | ||||
8123 | } | ||||
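
// --- Editor's illustrative sketch (standalone C++, not part of this file) ---
// Exhaustively verifies, on 8-bit values, the identity relied on above:
// if X & C == 0 then (X | C) equals X + C and the addition cannot wrap.
#include <cassert>

int main() {
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned C = 0; C < 256; ++C)
      if ((X & C) == 0) {
        unsigned Sum = X + C;   // operands have disjoint bits
        assert(Sum == (X | C)); // so addition equals bitwise OR
        assert(Sum <= 0xFF);    // and never leaves the 8-bit range
      }
  return 0;
}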
8124 | |||||
8125 | /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred | ||||
8126 | /// ALHS ARHS" is true. Otherwise, return std::nullopt. | ||||
8127 | static std::optional<bool> | ||||
8128 | isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, | ||||
8129 | const Value *ARHS, const Value *BLHS, const Value *BRHS, | ||||
8130 | const DataLayout &DL, unsigned Depth) { | ||||
8131 | switch (Pred) { | ||||
8132 | default: | ||||
8133 | return std::nullopt; | ||||
8134 | |||||
8135 | case CmpInst::ICMP_SLT: | ||||
8136 | case CmpInst::ICMP_SLE: | ||||
8137 | if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) && | ||||
8138 | isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth)) | ||||
8139 | return true; | ||||
8140 | return std::nullopt; | ||||
8141 | |||||
8142 | case CmpInst::ICMP_SGT: | ||||
8143 | case CmpInst::ICMP_SGE: | ||||
8144 | if (isTruePredicate(CmpInst::ICMP_SLE, ALHS, BLHS, DL, Depth) && | ||||
8145 | isTruePredicate(CmpInst::ICMP_SLE, BRHS, ARHS, DL, Depth)) | ||||
8146 | return true; | ||||
8147 | return std::nullopt; | ||||
8148 | |||||
8149 | case CmpInst::ICMP_ULT: | ||||
8150 | case CmpInst::ICMP_ULE: | ||||
8151 | if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) && | ||||
8152 | isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth)) | ||||
8153 | return true; | ||||
8154 | return std::nullopt; | ||||
8155 | |||||
8156 | case CmpInst::ICMP_UGT: | ||||
8157 | case CmpInst::ICMP_UGE: | ||||
8158 | if (isTruePredicate(CmpInst::ICMP_ULE, ALHS, BLHS, DL, Depth) && | ||||
8159 | isTruePredicate(CmpInst::ICMP_ULE, BRHS, ARHS, DL, Depth)) | ||||
8160 | return true; | ||||
8161 | return std::nullopt; | ||||
8162 | } | ||||
8163 | } | ||||
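
// --- Editor's illustrative sketch (standalone C++, not part of this file) ---
// Brute-force check of the monotonicity rule used for ICMP_SLT/ICMP_SLE:
// if BLHS <= ALHS and ARHS <= BRHS, then ALHS < ARHS implies BLHS < BRHS.
#include <cassert>

int main() {
  const int N = 8; // a small window is enough to exercise every ordering
  for (int A0 = -N; A0 <= N; ++A0)
    for (int A1 = -N; A1 <= N; ++A1)
      for (int B0 = -N; B0 <= N; ++B0)
        for (int B1 = -N; B1 <= N; ++B1)
          if (B0 <= A0 && A1 <= B1 && A0 < A1)
            assert(B0 < B1); // B0 <= A0 < A1 <= B1
  return 0;
}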
8164 | |||||
8165 | /// Return true if the operands of two compares (expanded as "L0 pred L1" and | ||||
8166 | /// "R0 pred R1") match. IsSwappedOps is true when the operands match, but are | ||||
8167 | /// swapped. | ||||
8168 | static bool areMatchingOperands(const Value *L0, const Value *L1, const Value *R0, | ||||
8169 | const Value *R1, bool &AreSwappedOps) { | ||||
8170 | bool AreMatchingOps = (L0 == R0 && L1 == R1); | ||||
8171 | AreSwappedOps = (L0 == R1 && L1 == R0); | ||||
8172 | return AreMatchingOps || AreSwappedOps; | ||||
8173 | } | ||||
8174 | |||||
8175 | /// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true. | ||||
8176 | /// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false. | ||||
8177 | /// Otherwise, return std::nullopt if we can't infer anything. | ||||
8178 | static std::optional<bool> | ||||
8179 | isImpliedCondMatchingOperands(CmpInst::Predicate LPred, | ||||
8180 | CmpInst::Predicate RPred, bool AreSwappedOps) { | ||||
8181 | // Canonicalize the predicate as if the operands were not commuted. | ||||
8182 | if (AreSwappedOps) | ||||
8183 | RPred = ICmpInst::getSwappedPredicate(RPred); | ||||
8184 | |||||
8185 | if (CmpInst::isImpliedTrueByMatchingCmp(LPred, RPred)) | ||||
8186 | return true; | ||||
8187 | if (CmpInst::isImpliedFalseByMatchingCmp(LPred, RPred)) | ||||
8188 | return false; | ||||
8189 | |||||
8190 | return std::nullopt; | ||||
8191 | } | ||||
8192 | |||||
8193 | /// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true. | ||||
8194 | /// Return false if "icmp LPred X, LC" implies "icmp RPred X, RC" is false. | ||||
8195 | /// Otherwise, return std::nullopt if we can't infer anything. | ||||
8196 | static std::optional<bool> isImpliedCondCommonOperandWithConstants( | ||||
8197 | CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred, | ||||
8198 | const APInt &RC) { | ||||
8199 | ConstantRange DomCR = ConstantRange::makeExactICmpRegion(LPred, LC); | ||||
8200 | ConstantRange CR = ConstantRange::makeExactICmpRegion(RPred, RC); | ||||
8201 | ConstantRange Intersection = DomCR.intersectWith(CR); | ||||
8202 | ConstantRange Difference = DomCR.difference(CR); | ||||
8203 | if (Intersection.isEmptySet()) | ||||
8204 | return false; | ||||
8205 | if (Difference.isEmptySet()) | ||||
8206 | return true; | ||||
8207 | return std::nullopt; | ||||
8208 | } | ||||
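
// --- Editor's illustrative sketch (standalone C++, not part of this file) ---
// 8-bit brute-force check of the region logic above for a concrete case:
// with LPred = RPred = u<, LC = 4, RC = 10, the difference [0,4) \ [0,10)
// is empty, so "x u< 4" implies "x u< 10" is true; the intersection is
// non-empty, so the implication to false is correctly rejected.
#include <cassert>

int main() {
  bool ImpliesTrue = true, ImpliesFalse = true;
  for (unsigned X = 0; X < 256; ++X) {
    bool L = X < 4, R = X < 10;
    if (L && !R) ImpliesTrue = false;  // counterexample to "L ==> R"
    if (L && R)  ImpliesFalse = false; // counterexample to "L ==> !R"
  }
  assert(ImpliesTrue && !ImpliesFalse);
  return 0;
}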
8209 | |||||
8210 | /// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") | ||||
8211 | /// is true. Return false if LHS implies RHS is false. Otherwise, return | ||||
8212 | /// std::nullopt if we can't infer anything. | ||||
8213 | static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS, | ||||
8214 | CmpInst::Predicate RPred, | ||||
8215 | const Value *R0, const Value *R1, | ||||
8216 | const DataLayout &DL, | ||||
8217 | bool LHSIsTrue, unsigned Depth) { | ||||
8218 | Value *L0 = LHS->getOperand(0); | ||||
8219 | Value *L1 = LHS->getOperand(1); | ||||
8220 | |||||
8221 | // The rest of the logic assumes the LHS condition is true. If that's not the | ||||
8222 | // case, invert the predicate to make it so. | ||||
8223 | CmpInst::Predicate LPred = | ||||
8224 | LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate(); | ||||
8225 | |||||
8226 | // Can we infer anything when the two compares have matching operands? | ||||
8227 | bool AreSwappedOps; | ||||
8228 | if (areMatchingOperands(L0, L1, R0, R1, AreSwappedOps)) | ||||
8229 | return isImpliedCondMatchingOperands(LPred, RPred, AreSwappedOps); | ||||
8230 | |||||
8231 | // Can we infer anything when the 0-operands match and the 1-operands are | ||||
8232 | // constants (not necessarily matching)? | ||||
8233 | const APInt *LC, *RC; | ||||
8234 | if (L0 == R0 && match(L1, m_APInt(LC)) && match(R1, m_APInt(RC))) | ||||
8235 | return isImpliedCondCommonOperandWithConstants(LPred, *LC, RPred, *RC); | ||||
8236 | |||||
8237 | if (LPred == RPred) | ||||
8238 | return isImpliedCondOperands(LPred, L0, L1, R0, R1, DL, Depth); | ||||
8239 | |||||
8240 | return std::nullopt; | ||||
8241 | } | ||||
8242 | |||||
8243 | /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is | ||||
8244 | /// false. Otherwise, return std::nullopt if we can't infer anything. We | ||||
8245 | /// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' | ||||
8246 | /// instruction. | ||||
8247 | static std::optional<bool> | ||||
8248 | isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred, | ||||
8249 | const Value *RHSOp0, const Value *RHSOp1, | ||||
8250 | const DataLayout &DL, bool LHSIsTrue, unsigned Depth) { | ||||
8251 | // The LHS must be an 'or', 'and', or a 'select' instruction. | ||||
8252 | assert((LHS->getOpcode() == Instruction::And || | ||||
8253 |         LHS->getOpcode() == Instruction::Or || | ||||
8254 |         LHS->getOpcode() == Instruction::Select) && | ||||
8255 |        "Expected LHS to be 'and', 'or', or 'select'."); | ||||
8256 | |||||
8257 | assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit"); | ||||
8258 | |||||
8259 | // If the result of an 'or' is false, then we know both legs of the 'or' are | ||||
8260 | // false. Similarly, if the result of an 'and' is true, then we know both | ||||
8261 | // legs of the 'and' are true. | ||||
8262 | const Value *ALHS, *ARHS; | ||||
8263 | if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) || | ||||
8264 | (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) { | ||||
8265 | // FIXME: Make this non-recursive. | ||||
8266 | if (std::optional<bool> Implication = isImpliedCondition( | ||||
8267 | ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1)) | ||||
8268 | return Implication; | ||||
8269 | if (std::optional<bool> Implication = isImpliedCondition( | ||||
8270 | ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1)) | ||||
8271 | return Implication; | ||||
8272 | return std::nullopt; | ||||
8273 | } | ||||
8274 | return std::nullopt; | ||||
8275 | } | ||||
8276 | |||||
8277 | std::optional<bool> | ||||
8278 | llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred, | ||||
8279 | const Value *RHSOp0, const Value *RHSOp1, | ||||
8280 | const DataLayout &DL, bool LHSIsTrue, unsigned Depth) { | ||||
8281 | // Bail out when we hit the limit. | ||||
8282 | if (Depth == MaxAnalysisRecursionDepth) | ||||
8283 | return std::nullopt; | ||||
8284 | |||||
8285 | // A mismatch occurs when we compare a scalar cmp to a vector cmp, for | ||||
8286 | // example. | ||||
8287 | if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy()) | ||||
8288 | return std::nullopt; | ||||
8289 | |||||
8290 | assert(LHS->getType()->isIntOrIntVectorTy(1) && | ||||
8291 |        "Expected integer type only!"); | ||||
8292 | |||||
8293 | // Both LHS and RHS are icmps. | ||||
8294 | const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS); | ||||
8295 | if (LHSCmp) | ||||
8296 | return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, | ||||
8297 | Depth); | ||||
8298 | |||||
8299 | /// The LHS should be an 'or', 'and', or a 'select' instruction. We expect | ||||
8300 | /// the RHS to be an icmp. | ||||
8301 | /// FIXME: Add support for and/or/select on the RHS. | ||||
8302 | if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) { | ||||
8303 | if ((LHSI->getOpcode() == Instruction::And || | ||||
8304 | LHSI->getOpcode() == Instruction::Or || | ||||
8305 | LHSI->getOpcode() == Instruction::Select)) | ||||
8306 | return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, | ||||
8307 | Depth); | ||||
8308 | } | ||||
8309 | return std::nullopt; | ||||
8310 | } | ||||
8311 | |||||
8312 | std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS, | ||||
8313 | const DataLayout &DL, | ||||
8314 | bool LHSIsTrue, unsigned Depth) { | ||||
8315 | // LHS ==> RHS by definition | ||||
8316 | if (LHS == RHS) | ||||
8317 | return LHSIsTrue; | ||||
8318 | |||||
8319 | if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS)) | ||||
8320 | return isImpliedCondition(LHS, RHSCmp->getPredicate(), | ||||
8321 | RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL, | ||||
8322 | LHSIsTrue, Depth); | ||||
8323 | |||||
8324 | if (Depth == MaxAnalysisRecursionDepth) | ||||
8325 | return std::nullopt; | ||||
8326 | |||||
8327 | // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2 | ||||
8328 | // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2 | ||||
8329 | const Value *RHS1, *RHS2; | ||||
8330 | if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) { | ||||
8331 | if (std::optional<bool> Imp = | ||||
8332 | isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1)) | ||||
8333 | if (*Imp == true) | ||||
8334 | return true; | ||||
8335 | if (std::optional<bool> Imp = | ||||
8336 | isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1)) | ||||
8337 | if (*Imp == true) | ||||
8338 | return true; | ||||
8339 | } | ||||
8340 | if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) { | ||||
8341 | if (std::optional<bool> Imp = | ||||
8342 | isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1)) | ||||
8343 | if (*Imp == false) | ||||
8344 | return false; | ||||
8345 | if (std::optional<bool> Imp = | ||||
8346 | isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1)) | ||||
8347 | if (*Imp == false) | ||||
8348 | return false; | ||||
8349 | } | ||||
8350 | |||||
8351 | return std::nullopt; | ||||
8352 | } | ||||
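
// --- Editor's illustrative sketch (standalone C++, not part of this file) ---
// Truth-table check of the two propagation rules used above:
//   from L ==> RHS1 conclude L ==> (RHS1 || RHS2), and
//   from L ==> !RHS1 conclude L ==> !(RHS1 && RHS2).
#include <cassert>

int main() {
  for (int L = 0; L < 2; ++L)
    for (int R1 = 0; R1 < 2; ++R1)
      for (int R2 = 0; R2 < 2; ++R2) {
        if (!L || R1)  assert(!L || (R1 || R2));  // first rule
        if (!L || !R1) assert(!L || !(R1 && R2)); // second rule
      }
  return 0;
}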
8353 | |||||
8354 | // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch | ||||
8355 | // condition dominating ContextI, or nullptr if no such condition is found. | ||||
8356 | static std::pair<Value *, bool> | ||||
8357 | getDomPredecessorCondition(const Instruction *ContextI) { | ||||
8358 | if (!ContextI || !ContextI->getParent()) | ||||
8359 | return {nullptr, false}; | ||||
8360 | |||||
8361 | // TODO: This is a poor/cheap way to determine dominance. Should we use a | ||||
8362 | // dominator tree (eg, from a SimplifyQuery) instead? | ||||
8363 | const BasicBlock *ContextBB = ContextI->getParent(); | ||||
8364 | const BasicBlock *PredBB = ContextBB->getSinglePredecessor(); | ||||
8365 | if (!PredBB) | ||||
8366 | return {nullptr, false}; | ||||
8367 | |||||
8368 | // We need a conditional branch in the predecessor. | ||||
8369 | Value *PredCond; | ||||
8370 | BasicBlock *TrueBB, *FalseBB; | ||||
8371 | if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB))) | ||||
8372 | return {nullptr, false}; | ||||
8373 | |||||
8374 | // The branch should get simplified. Don't bother simplifying this condition. | ||||
8375 | if (TrueBB == FalseBB) | ||||
8376 | return {nullptr, false}; | ||||
8377 | |||||
8378 | assert((TrueBB == ContextBB || FalseBB == ContextBB) && | ||||
8379 |        "Predecessor block does not point to successor?"); | ||||
8380 | |||||
8381 | // Is this condition implied by the predecessor condition? | ||||
8382 | return {PredCond, TrueBB == ContextBB}; | ||||
8383 | } | ||||
8384 | |||||
8385 | std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond, | ||||
8386 | const Instruction *ContextI, | ||||
8387 | const DataLayout &DL) { | ||||
8388 | assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool"); | ||||
8389 | auto PredCond = getDomPredecessorCondition(ContextI); | ||||
8390 | if (PredCond.first) | ||||
8391 | return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second); | ||||
8392 | return std::nullopt; | ||||
8393 | } | ||||
8394 | |||||
8395 | std::optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred, | ||||
8396 | const Value *LHS, | ||||
8397 | const Value *RHS, | ||||
8398 | const Instruction *ContextI, | ||||
8399 | const DataLayout &DL) { | ||||
8400 | auto PredCond = getDomPredecessorCondition(ContextI); | ||||
8401 | if (PredCond.first) | ||||
8402 | return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL, | ||||
8403 | PredCond.second); | ||||
8404 | return std::nullopt; | ||||
8405 | } | ||||
8406 | |||||
8407 | static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, | ||||
8408 | APInt &Upper, const InstrInfoQuery &IIQ, | ||||
8409 | bool PreferSignedRange) { | ||||
8410 | unsigned Width = Lower.getBitWidth(); | ||||
8411 | const APInt *C; | ||||
8412 | switch (BO.getOpcode()) { | ||||
8413 | case Instruction::Add: | ||||
8414 | if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) { | ||||
8415 | bool HasNSW = IIQ.hasNoSignedWrap(&BO); | ||||
8416 | bool HasNUW = IIQ.hasNoUnsignedWrap(&BO); | ||||
8417 | |||||
8418 | // If the caller expects a signed compare, then try to use a signed range. | ||||
8419 | // Otherwise if both no-wraps are set, use the unsigned range because it | ||||
8420 | // is never larger than the signed range. Example: | ||||
8421 | // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125]. | ||||
8422 | if (PreferSignedRange && HasNSW && HasNUW) | ||||
8423 | HasNUW = false; | ||||
8424 | |||||
8425 | if (HasNUW) { | ||||
8426 | // 'add nuw x, C' produces [C, UINT_MAX]. | ||||
8427 | Lower = *C; | ||||
8428 | } else if (HasNSW) { | ||||
8429 | if (C->isNegative()) { | ||||
8430 | // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C]. | ||||
8431 | Lower = APInt::getSignedMinValue(Width); | ||||
8432 | Upper = APInt::getSignedMaxValue(Width) + *C + 1; | ||||
8433 | } else { | ||||
8434 | // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX]. | ||||
8435 | Lower = APInt::getSignedMinValue(Width) + *C; | ||||
8436 | Upper = APInt::getSignedMaxValue(Width) + 1; | ||||
8437 | } | ||||
8438 | } | ||||
8439 | } | ||||
8440 | break; | ||||
8441 | |||||
8442 | case Instruction::And: | ||||
8443 | if (match(BO.getOperand(1), m_APInt(C))) | ||||
8444 | // 'and x, C' produces [0, C]. | ||||
8445 | Upper = *C + 1; | ||||
8446 | break; | ||||
8447 | |||||
8448 | case Instruction::Or: | ||||
8449 | if (match(BO.getOperand(1), m_APInt(C))) | ||||
8450 | // 'or x, C' produces [C, UINT_MAX]. | ||||
8451 | Lower = *C; | ||||
8452 | break; | ||||
8453 | |||||
8454 | case Instruction::AShr: | ||||
8455 | if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { | ||||
8456 | // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C]. | ||||
8457 | Lower = APInt::getSignedMinValue(Width).ashr(*C); | ||||
8458 | Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1; | ||||
8459 | } else if (match(BO.getOperand(0), m_APInt(C))) { | ||||
8460 | unsigned ShiftAmount = Width - 1; | ||||
8461 | if (!C->isZero() && IIQ.isExact(&BO)) | ||||
8462 | ShiftAmount = C->countr_zero(); | ||||
8463 | if (C->isNegative()) { | ||||
8464 | // 'ashr C, x' produces [C, C >> (Width-1)] | ||||
8465 | Lower = *C; | ||||
8466 | Upper = C->ashr(ShiftAmount) + 1; | ||||
8467 | } else { | ||||
8468 | // 'ashr C, x' produces [C >> (Width-1), C] | ||||
8469 | Lower = C->ashr(ShiftAmount); | ||||
8470 | Upper = *C + 1; | ||||
8471 | } | ||||
8472 | } | ||||
8473 | break; | ||||
8474 | |||||
8475 | case Instruction::LShr: | ||||
8476 | if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { | ||||
8477 | // 'lshr x, C' produces [0, UINT_MAX >> C]. | ||||
8478 | Upper = APInt::getAllOnes(Width).lshr(*C) + 1; | ||||
8479 | } else if (match(BO.getOperand(0), m_APInt(C))) { | ||||
8480 | // 'lshr C, x' produces [C >> (Width-1), C]. | ||||
8481 | unsigned ShiftAmount = Width - 1; | ||||
8482 | if (!C->isZero() && IIQ.isExact(&BO)) | ||||
8483 | ShiftAmount = C->countr_zero(); | ||||
8484 | Lower = C->lshr(ShiftAmount); | ||||
8485 | Upper = *C + 1; | ||||
8486 | } | ||||
8487 | break; | ||||
8488 | |||||
8489 | case Instruction::Shl: | ||||
8490 | if (match(BO.getOperand(0), m_APInt(C))) { | ||||
8491 | if (IIQ.hasNoUnsignedWrap(&BO)) { | ||||
8492 | // 'shl nuw C, x' produces [C, C << CLZ(C)] | ||||
8493 | Lower = *C; | ||||
8494 | Upper = Lower.shl(Lower.countl_zero()) + 1; | ||||
8495 | } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw? | ||||
8496 | if (C->isNegative()) { | ||||
8497 | // 'shl nsw C, x' produces [C << CLO(C)-1, C] | ||||
8498 | unsigned ShiftAmount = C->countl_one() - 1; | ||||
8499 | Lower = C->shl(ShiftAmount); | ||||
8500 | Upper = *C + 1; | ||||
8501 | } else { | ||||
8502 | // 'shl nsw C, x' produces [C, C << CLZ(C)-1] | ||||
8503 | unsigned ShiftAmount = C->countl_zero() - 1; | ||||
8504 | Lower = *C; | ||||
8505 | Upper = C->shl(ShiftAmount) + 1; | ||||
8506 | } | ||||
8507 | } | ||||
8508 | } | ||||
8509 | break; | ||||
8510 | |||||
8511 | case Instruction::SDiv: | ||||
8512 | if (match(BO.getOperand(1), m_APInt(C))) { | ||||
8513 | APInt IntMin = APInt::getSignedMinValue(Width); | ||||
8514 | APInt IntMax = APInt::getSignedMaxValue(Width); | ||||
8515 | if (C->isAllOnes()) { | ||||
8516 | // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX] | ||||
8517 | // (INT_MIN is excluded because INT_MIN sdiv -1 overflows). | ||||
8518 | Lower = IntMin + 1; | ||||
8519 | Upper = IntMax + 1; | ||||
8520 | } else if (C->countl_zero() < Width - 1) { | ||||
8521 | // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C] | ||||
8522 | // where C != -1 and C != 0 and C != 1 | ||||
8523 | Lower = IntMin.sdiv(*C); | ||||
8524 | Upper = IntMax.sdiv(*C); | ||||
8525 | if (Lower.sgt(Upper)) | ||||
8526 | std::swap(Lower, Upper); | ||||
8527 | Upper = Upper + 1; | ||||
8528 | assert(Upper != Lower && "Upper part of range has wrapped!"); | ||||
8529 | } | ||||
8530 | } else if (match(BO.getOperand(0), m_APInt(C))) { | ||||
8531 | if (C->isMinSignedValue()) { | ||||
8532 | // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2]. | ||||
8533 | Lower = *C; | ||||
8534 | Upper = Lower.lshr(1) + 1; | ||||
8535 | } else { | ||||
8536 | // 'sdiv C, x' produces [-|C|, |C|]. | ||||
8537 | Upper = C->abs() + 1; | ||||
8538 | Lower = (-Upper) + 1; | ||||
8539 | } | ||||
8540 | } | ||||
8541 | break; | ||||
8542 | |||||
8543 | case Instruction::UDiv: | ||||
8544 | if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) { | ||||
8545 | // 'udiv x, C' produces [0, UINT_MAX / C]. | ||||
8546 | Upper = APInt::getMaxValue(Width).udiv(*C) + 1; | ||||
8547 | } else if (match(BO.getOperand(0), m_APInt(C))) { | ||||
8548 | // 'udiv C, x' produces [0, C]. | ||||
8549 | Upper = *C + 1; | ||||
8550 | } | ||||
8551 | break; | ||||
8552 | |||||
8553 | case Instruction::SRem: | ||||
8554 | if (match(BO.getOperand(1), m_APInt(C))) { | ||||
8555 | // 'srem x, C' produces (-|C|, |C|). | ||||
8556 | Upper = C->abs(); | ||||
8557 | Lower = (-Upper) + 1; | ||||
8558 | } | ||||
8559 | break; | ||||
8560 | |||||
8561 | case Instruction::URem: | ||||
8562 | if (match(BO.getOperand(1), m_APInt(C))) | ||||
8563 | // 'urem x, C' produces [0, C). | ||||
8564 | Upper = *C; | ||||
8565 | break; | ||||
8566 | |||||
8567 | default: | ||||
8568 | break; | ||||
8569 | } | ||||
8570 | } | ||||
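
// --- Editor's illustrative sketch (standalone C++, not part of this file) ---
// Checks the comment's example "add nuw nsw i8 X, -2": the range derived
// from nuw alone, [254, 255], covers 2 values, while the range derived from
// nsw alone, [-128, 125], covers 254, so the unsigned range is tighter.
#include <cassert>

int main() {
  int FromNUW = 0, FromNSW = 0;
  for (int X = 0; X < 256; ++X) {
    if (X + 254 <= 255)             // nuw: unsigned add of -2 (i.e. 254)
      ++FromNUW;                    // result X + 254 lies in [254, 255]
    int SX = X < 128 ? X : X - 256; // signed view of the i8 bit pattern
    if (SX - 2 >= -128)             // nsw: signed add of -2 does not wrap
      ++FromNSW;                    // result SX - 2 lies in [-128, 125]
  }
  assert(FromNUW == 2 && FromNSW == 254);
  return 0;
}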
8571 | |||||
8572 | static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II) { | ||||
8573 | unsigned Width = II.getType()->getScalarSizeInBits(); | ||||
8574 | const APInt *C; | ||||
8575 | switch (II.getIntrinsicID()) { | ||||
8576 | case Intrinsic::ctpop: | ||||
8577 | case Intrinsic::ctlz: | ||||
8578 | case Intrinsic::cttz: | ||||
8579 | // Maximum of set/clear bits is the bit width. | ||||
8580 | return ConstantRange(APInt::getZero(Width), APInt(Width, Width + 1)); | ||||
8581 | case Intrinsic::uadd_sat: | ||||
8582 | // uadd.sat(x, C) produces [C, UINT_MAX]. | ||||
8583 | if (match(II.getOperand(0), m_APInt(C)) || | ||||
8584 | match(II.getOperand(1), m_APInt(C))) | ||||
8585 | return ConstantRange::getNonEmpty(*C, APInt::getZero(Width)); | ||||
8586 | break; | ||||
8587 | case Intrinsic::sadd_sat: | ||||
8588 | if (match(II.getOperand(0), m_APInt(C)) || | ||||
8589 | match(II.getOperand(1), m_APInt(C))) { | ||||
8590 | if (C->isNegative()) | ||||
8591 | // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)]. | ||||
8592 | return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width), | ||||
8593 | APInt::getSignedMaxValue(Width) + *C + | ||||
8594 | 1); | ||||
8595 | |||||
8596 | // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX]. | ||||
8597 | return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) + *C, | ||||
8598 | APInt::getSignedMaxValue(Width) + 1); | ||||
8599 | } | ||||
8600 | break; | ||||
8601 | case Intrinsic::usub_sat: | ||||
8602 | // usub.sat(C, x) produces [0, C]. | ||||
8603 | if (match(II.getOperand(0), m_APInt(C))) | ||||
8604 | return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1); | ||||
8605 | |||||
8606 | // usub.sat(x, C) produces [0, UINT_MAX - C]. | ||||
8607 | if (match(II.getOperand(1), m_APInt(C))) | ||||
8608 | return ConstantRange::getNonEmpty(APInt::getZero(Width), | ||||
8609 | APInt::getMaxValue(Width) - *C + 1); | ||||
8610 | break; | ||||
8611 | case Intrinsic::ssub_sat: | ||||
8612 | if (match(II.getOperand(0), m_APInt(C))) { | ||||
8613 | if (C->isNegative()) | ||||
8614 | // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)]. | ||||
8615 | return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width), | ||||
8616 | *C - APInt::getSignedMinValue(Width) + | ||||
8617 | 1); | ||||
8618 | |||||
8619 | // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX]. | ||||
8620 | return ConstantRange::getNonEmpty(*C - APInt::getSignedMaxValue(Width), | ||||
8621 | APInt::getSignedMaxValue(Width) + 1); | ||||
8622 | } else if (match(II.getOperand(1), m_APInt(C))) { | ||||
8623 | if (C->isNegative()) | ||||
8624 | // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]: | ||||
8625 | return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) - *C, | ||||
8626 | APInt::getSignedMaxValue(Width) + 1); | ||||
8627 | |||||
8628 | // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C]. | ||||
8629 | return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width), | ||||
8630 | APInt::getSignedMaxValue(Width) - *C + | ||||
8631 | 1); | ||||
8632 | } | ||||
8633 | break; | ||||
8634 | case Intrinsic::umin: | ||||
8635 | case Intrinsic::umax: | ||||
8636 | case Intrinsic::smin: | ||||
8637 | case Intrinsic::smax: | ||||
8638 | if (!match(II.getOperand(0), m_APInt(C)) && | ||||
8639 | !match(II.getOperand(1), m_APInt(C))) | ||||
8640 | break; | ||||
8641 | |||||
8642 | switch (II.getIntrinsicID()) { | ||||
8643 | case Intrinsic::umin: | ||||
8644 | return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1); | ||||
8645 | case Intrinsic::umax: | ||||
8646 | return ConstantRange::getNonEmpty(*C, APInt::getZero(Width)); | ||||
8647 | case Intrinsic::smin: | ||||
8648 | return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width), | ||||
8649 | *C + 1); | ||||
8650 | case Intrinsic::smax: | ||||
8651 | return ConstantRange::getNonEmpty(*C, | ||||
8652 | APInt::getSignedMaxValue(Width) + 1); | ||||
8653 | default: | ||||
8654 | llvm_unreachable("Must be min/max intrinsic")::llvm::llvm_unreachable_internal("Must be min/max intrinsic" , "llvm/lib/Analysis/ValueTracking.cpp", 8654); | ||||
8655 | } | ||||
8656 | break; | ||||
8657 | case Intrinsic::abs: | ||||
8658 | // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX], | ||||
8659 | // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN. | ||||
8660 | if (match(II.getOperand(1), m_One())) | ||||
8661 | return ConstantRange(APInt::getZero(Width), | ||||
8662 | APInt::getSignedMaxValue(Width) + 1); | ||||
8663 | |||||
8664 | return ConstantRange(APInt::getZero(Width), | ||||
8665 | APInt::getSignedMinValue(Width) + 1); | ||||
8666 | case Intrinsic::vscale: | ||||
8667 | if (!II.getParent() || !II.getFunction()) | ||||
8668 | break; | ||||
8669 | return getVScaleRange(II.getFunction(), Width); | ||||
8670 | default: | ||||
8671 | break; | ||||
8672 | } | ||||
8673 | |||||
8674 | return ConstantRange::getFull(Width); | ||||
8675 | } | ||||
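
// --- Editor's illustrative sketch (standalone C++, not part of this file) ---
// 8-bit brute-force check of the uadd.sat rule above: a saturating unsigned
// add uadd.sat(x, C) always lands in [C, UINT_MAX].
#include <cassert>

int main() {
  for (unsigned C = 0; C < 256; ++C)
    for (unsigned X = 0; X < 256; ++X) {
      unsigned R = (X + C > 255) ? 255 : X + C; // saturating 8-bit add
      assert(R >= C && R <= 255);
    }
  return 0;
}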
8676 | |||||
8677 | static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower, | ||||
8678 | APInt &Upper, const InstrInfoQuery &IIQ) { | ||||
8679 | const Value *LHS = nullptr, *RHS = nullptr; | ||||
8680 | SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS); | ||||
8681 | if (R.Flavor == SPF_UNKNOWN) | ||||
8682 | return; | ||||
8683 | |||||
8684 | unsigned BitWidth = SI.getType()->getScalarSizeInBits(); | ||||
8685 | |||||
8686 | if (R.Flavor == SelectPatternFlavor::SPF_ABS) { | ||||
8687 | // If the negation part of the abs (in RHS) has the NSW flag, | ||||
8688 | // then the result of abs(X) is [0..SIGNED_MAX], | ||||
8689 | // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN. | ||||
8690 | Lower = APInt::getZero(BitWidth); | ||||
8691 | if (match(RHS, m_Neg(m_Specific(LHS))) && | ||||
8692 | IIQ.hasNoSignedWrap(cast<Instruction>(RHS))) | ||||
8693 | Upper = APInt::getSignedMaxValue(BitWidth) + 1; | ||||
8694 | else | ||||
8695 | Upper = APInt::getSignedMinValue(BitWidth) + 1; | ||||
8696 | return; | ||||
8697 | } | ||||
8698 | |||||
8699 | if (R.Flavor == SelectPatternFlavor::SPF_NABS) { | ||||
8700 | // The result of -abs(X) is <= 0. | ||||
8701 | Lower = APInt::getSignedMinValue(BitWidth); | ||||
8702 | Upper = APInt(BitWidth, 1); | ||||
8703 | return; | ||||
8704 | } | ||||
8705 | |||||
8706 | const APInt *C; | ||||
8707 | if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C))) | ||||
8708 | return; | ||||
8709 | |||||
8710 | switch (R.Flavor) { | ||||
8711 | case SPF_UMIN: | ||||
8712 | Upper = *C + 1; | ||||
8713 | break; | ||||
8714 | case SPF_UMAX: | ||||
8715 | Lower = *C; | ||||
8716 | break; | ||||
8717 | case SPF_SMIN: | ||||
8718 | Lower = APInt::getSignedMinValue(BitWidth); | ||||
8719 | Upper = *C + 1; | ||||
8720 | break; | ||||
8721 | case SPF_SMAX: | ||||
8722 | Lower = *C; | ||||
8723 | Upper = APInt::getSignedMaxValue(BitWidth) + 1; | ||||
8724 | break; | ||||
8725 | default: | ||||
8726 | break; | ||||
8727 | } | ||||
8728 | } | ||||
8729 | |||||
8730 | static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) { | ||||
8731 | // The maximum representable value of a half is 65504. For floats, the maximum | ||||
8732 | // value is 3.4e38, which requires roughly 129 bits. | ||||
8733 | unsigned BitWidth = I->getType()->getScalarSizeInBits(); | ||||
8734 | if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy()) | ||||
8735 | return; | ||||
8736 | if (isa<FPToSIInst>(I) && BitWidth >= 17) { | ||||
8737 | Lower = APInt(BitWidth, -65504); | ||||
8738 | Upper = APInt(BitWidth, 65505); | ||||
8739 | } | ||||
8740 | |||||
8741 | if (isa<FPToUIInst>(I) && BitWidth >= 16) { | ||||
8742 | // For a fptoui the lower limit is left as 0. | ||||
8743 | Upper = APInt(BitWidth, 65505); | ||||
8744 | } | ||||
8745 | } | ||||
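
// --- Editor's illustrative sketch (standalone C++, not part of this file) ---
// Recomputes the limit used above: the largest finite IEEE half has
// exponent 15 and a full 10-bit mantissa, i.e. (2 - 2^-10) * 2^15 = 65504,
// so fptosi from half needs [-65504, 65504] and fptoui needs [0, 65504].
#include <cassert>
#include <cmath>

int main() {
  double HalfMax = (2.0 - std::ldexp(1.0, -10)) * std::ldexp(1.0, 15);
  assert(HalfMax == 65504.0);
  return 0;
}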
8746 | |||||
8747 | ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned, | ||||
8748 | bool UseInstrInfo, AssumptionCache *AC, | ||||
8749 | const Instruction *CtxI, | ||||
8750 | const DominatorTree *DT, | ||||
8751 | unsigned Depth) { | ||||
8752 | assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction"); | ||||
8753 | |||||
8754 | if (Depth == MaxAnalysisRecursionDepth) | ||||
8755 | return ConstantRange::getFull(V->getType()->getScalarSizeInBits()); | ||||
8756 | |||||
8757 | const APInt *C; | ||||
8758 | if (match(V, m_APInt(C))) | ||||
8759 | return ConstantRange(*C); | ||||
8760 | |||||
8761 | InstrInfoQuery IIQ(UseInstrInfo); | ||||
8762 | unsigned BitWidth = V->getType()->getScalarSizeInBits(); | ||||
8763 | ConstantRange CR = ConstantRange::getFull(BitWidth); | ||||
8764 | if (auto *BO = dyn_cast<BinaryOperator>(V)) { | ||||
8765 | APInt Lower = APInt(BitWidth, 0); | ||||
8766 | APInt Upper = APInt(BitWidth, 0); | ||||
8767 | // TODO: Return ConstantRange. | ||||
8768 | setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned); | ||||
8769 | CR = ConstantRange::getNonEmpty(Lower, Upper); | ||||
8770 | } else if (auto *II = dyn_cast<IntrinsicInst>(V)) | ||||
8771 | CR = getRangeForIntrinsic(*II); | ||||
8772 | else if (auto *SI = dyn_cast<SelectInst>(V)) { | ||||
8773 | APInt Lower = APInt(BitWidth, 0); | ||||
8774 | APInt Upper = APInt(BitWidth, 0); | ||||
8775 | // TODO: Return ConstantRange. | ||||
8776 | setLimitsForSelectPattern(*SI, Lower, Upper, IIQ); | ||||
8777 | CR = ConstantRange::getNonEmpty(Lower, Upper); | ||||
8778 | } else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) { | ||||
8779 | APInt Lower = APInt(BitWidth, 0); | ||||
8780 | APInt Upper = APInt(BitWidth, 0); | ||||
8781 | // TODO: Return ConstantRange. | ||||
8782 | setLimitForFPToI(cast<Instruction>(V), Lower, Upper); | ||||
8783 | CR = ConstantRange::getNonEmpty(Lower, Upper); | ||||
8784 | } | ||||
8785 | |||||
8786 | if (auto *I = dyn_cast<Instruction>(V)) | ||||
8787 | if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range)) | ||||
8788 | CR = CR.intersectWith(getConstantRangeFromMetadata(*Range)); | ||||
8789 | |||||
8790 | if (CtxI && AC) { | ||||
8791 | // Try to restrict the range based on information from assumptions. | ||||
8792 | for (auto &AssumeVH : AC->assumptionsFor(V)) { | ||||
8793 | if (!AssumeVH) | ||||
8794 | continue; | ||||
8795 | CallInst *I = cast<CallInst>(AssumeVH); | ||||
8796 | assert(I->getParent()->getParent() == CtxI->getParent()->getParent() && | ||||
8797 |        "Got assumption for the wrong function!"); | ||||
8798 | assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && | ||||
8799 |        "must be an assume intrinsic"); | ||||
8800 | |||||
8801 | if (!isValidAssumeForContext(I, CtxI, DT)) | ||||
8802 | continue; | ||||
8803 | Value *Arg = I->getArgOperand(0); | ||||
8804 | ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg); | ||||
8805 | // Currently we just use information from comparisons. | ||||
8806 | if (!Cmp || Cmp->getOperand(0) != V) | ||||
8807 | continue; | ||||
8808 | // TODO: Set "ForSigned" parameter via Cmp->isSigned()? | ||||
8809 | ConstantRange RHS = | ||||
8810 | computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false, | ||||
8811 | UseInstrInfo, AC, I, DT, Depth + 1); | ||||
8812 | CR = CR.intersectWith( | ||||
8813 | ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS)); | ||||
8814 | } | ||||
8815 | } | ||||
8816 | |||||
8817 | return CR; | ||||
8818 | } |
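
// --- Editor's hedged usage sketch (fragment, not a standalone program; `V`
// stands for any integer-typed llvm::Value* a client already holds, and
// ConstantRange::isFullSet()/getUnsignedMax() are assumed to come from
// llvm/IR/ConstantRange.h) ---
//   ConstantRange CR = computeConstantRange(V, /*ForSigned=*/false);
//   if (!CR.isFullSet()) {
//     // V is confined to CR; e.g. "icmp ugt V, CR.getUnsignedMax()" can be
//     // folded to false.
//   }

File: | build/source/llvm/include/llvm/IR/Instructions.h |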
1 | //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file exposes the class definitions of all of the subclasses of the | |||
10 | // Instruction class. This is meant to be an easy way to get access to all | |||
11 | // instruction subclasses. | |||
12 | // | |||
13 | //===----------------------------------------------------------------------===// | |||
14 | ||||
15 | #ifndef LLVM_IR_INSTRUCTIONS_H | |||
16 | #define LLVM_IR_INSTRUCTIONS_H | |||
17 | ||||
18 | #include "llvm/ADT/ArrayRef.h" | |||
19 | #include "llvm/ADT/Bitfields.h" | |||
20 | #include "llvm/ADT/MapVector.h" | |||
21 | #include "llvm/ADT/STLExtras.h" | |||
22 | #include "llvm/ADT/SmallVector.h" | |||
23 | #include "llvm/ADT/Twine.h" | |||
24 | #include "llvm/ADT/iterator.h" | |||
25 | #include "llvm/ADT/iterator_range.h" | |||
26 | #include "llvm/IR/CFG.h" | |||
27 | #include "llvm/IR/Constant.h" | |||
28 | #include "llvm/IR/DerivedTypes.h" | |||
29 | #include "llvm/IR/InstrTypes.h" | |||
30 | #include "llvm/IR/Instruction.h" | |||
31 | #include "llvm/IR/OperandTraits.h" | |||
32 | #include "llvm/IR/Use.h" | |||
33 | #include "llvm/IR/User.h" | |||
34 | #include "llvm/Support/AtomicOrdering.h" | |||
35 | #include "llvm/Support/ErrorHandling.h" | |||
36 | #include <cassert> | |||
37 | #include <cstddef> | |||
38 | #include <cstdint> | |||
39 | #include <iterator> | |||
40 | #include <optional> | |||
41 | ||||
42 | namespace llvm { | |||
43 | ||||
44 | class APFloat; | |||
45 | class APInt; | |||
46 | class BasicBlock; | |||
47 | class ConstantInt; | |||
48 | class DataLayout; | |||
49 | class StringRef; | |||
50 | class Type; | |||
51 | class Value; | |||
52 | ||||
53 | //===----------------------------------------------------------------------===// | |||
54 | // AllocaInst Class | |||
55 | //===----------------------------------------------------------------------===// | |||
56 | ||||
57 | /// An instruction to allocate memory on the stack. | |||
58 | class AllocaInst : public UnaryInstruction { | |||
59 | Type *AllocatedType; | |||
60 | ||||
61 | using AlignmentField = AlignmentBitfieldElementT<0>; | |||
62 | using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>; | |||
63 | using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>; | |||
64 | static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField, | |||
65 | SwiftErrorField>(), | |||
66 | "Bitfields must be contiguous"); | |||
67 | ||||
68 | protected: | |||
69 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
70 | friend class Instruction; | |||
71 | ||||
72 | AllocaInst *cloneImpl() const; | |||
73 | ||||
74 | public: | |||
75 | explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, | |||
76 | const Twine &Name, Instruction *InsertBefore); | |||
77 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, | |||
78 | const Twine &Name, BasicBlock *InsertAtEnd); | |||
79 | ||||
80 | AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, | |||
81 | Instruction *InsertBefore); | |||
82 | AllocaInst(Type *Ty, unsigned AddrSpace, | |||
83 | const Twine &Name, BasicBlock *InsertAtEnd); | |||
84 | ||||
85 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, | |||
86 | const Twine &Name = "", Instruction *InsertBefore = nullptr); | |||
87 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, | |||
88 | const Twine &Name, BasicBlock *InsertAtEnd); | |||
89 | ||||
90 | /// Return true if there is an allocation size parameter to the allocation | |||
91 | /// instruction that is not 1. | |||
92 | bool isArrayAllocation() const; | |||
93 | ||||
94 | /// Get the number of elements allocated. For a simple allocation of a single | |||
95 | /// element, this will return a constant 1 value. | |||
96 | const Value *getArraySize() const { return getOperand(0); } | |||
97 | Value *getArraySize() { return getOperand(0); } | |||
98 | ||||
99 | /// Overload to return most specific pointer type. | |||
100 | PointerType *getType() const { | |||
101 | return cast<PointerType>(Instruction::getType()); | |||
102 | } | |||
103 | ||||
104 | /// Return the address space for the allocation. | |||
105 | unsigned getAddressSpace() const { | |||
106 | return getType()->getAddressSpace(); | |||
107 | } | |||
108 | ||||
109 | /// Get allocation size in bytes. Returns std::nullopt if size can't be | |||
110 | /// determined, e.g. in case of a VLA. | |||
111 | std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const; | |||
112 | ||||
113 | /// Get allocation size in bits. Returns std::nullopt if size can't be | |||
114 | /// determined, e.g. in case of a VLA. | |||
115 | std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const; | |||
116 | ||||
117 | /// Return the type that is being allocated by the instruction. | |||
118 | Type *getAllocatedType() const { return AllocatedType; } | |||
119 | /// For use only in special circumstances that need to generically | |||
120 | /// transform a whole instruction (e.g., IR linking and vectorization). | |||
121 | void setAllocatedType(Type *Ty) { AllocatedType = Ty; } | |||
122 | ||||
123 | /// Return the alignment of the memory that is being allocated by the | |||
124 | /// instruction. | |||
125 | Align getAlign() const { | |||
126 | return Align(1ULL << getSubclassData<AlignmentField>()); | |||
127 | } | |||
128 | ||||
129 | void setAlignment(Align Align) { | |||
130 | setSubclassData<AlignmentField>(Log2(Align)); | |||
131 | } | |||
132 | ||||
133 | /// Return true if this alloca is in the entry block of the function and is a | |||
134 | /// constant size. If so, the code generator will fold it into the | |||
135 | /// prolog/epilog code, so it is basically free. | |||
136 | bool isStaticAlloca() const; | |||
137 | ||||
138 | /// Return true if this alloca is used as an inalloca argument to a call. Such | |||
139 | /// allocas are never considered static even if they are in the entry block. | |||
140 | bool isUsedWithInAlloca() const { | |||
141 | return getSubclassData<UsedWithInAllocaField>(); | |||
142 | } | |||
143 | ||||
144 | /// Specify whether this alloca is used to represent the arguments to a call. | |||
145 | void setUsedWithInAlloca(bool V) { | |||
146 | setSubclassData<UsedWithInAllocaField>(V); | |||
147 | } | |||
148 | ||||
149 | /// Return true if this alloca is used as a swifterror argument to a call. | |||
150 | bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); } | |||
151 | /// Specify whether this alloca is used to represent a swifterror. | |||
152 | void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); } | |||
153 | ||||
154 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
155 | static bool classof(const Instruction *I) { | |||
156 | return (I->getOpcode() == Instruction::Alloca); | |||
157 | } | |||
158 | static bool classof(const Value *V) { | |||
159 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
160 | } | |||
161 | ||||
162 | private: | |||
163 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
164 | // method so that subclasses cannot accidentally use it. | |||
165 | template <typename Bitfield> | |||
166 | void setSubclassData(typename Bitfield::Type Value) { | |||
167 | Instruction::setSubclassData<Bitfield>(Value); | |||
168 | } | |||
169 | }; | |||
170 | ||||
171 | //===----------------------------------------------------------------------===// | |||
172 | // LoadInst Class | |||
173 | //===----------------------------------------------------------------------===// | |||
174 | ||||
175 | /// An instruction for reading from memory. This uses the SubclassData field in | |||
176 | /// Value to store whether or not the load is volatile. | |||
177 | class LoadInst : public UnaryInstruction { | |||
178 | using VolatileField = BoolBitfieldElementT<0>; | |||
179 | using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; | |||
180 | using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; | |||
181 | static_assert( | |||
182 | Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), | |||
183 | "Bitfields must be contiguous"); | |||
184 | ||||
185 | void AssertOK(); | |||
186 | ||||
187 | protected: | |||
188 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
189 | friend class Instruction; | |||
190 | ||||
191 | LoadInst *cloneImpl() const; | |||
192 | ||||
193 | public: | |||
194 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, | |||
195 | Instruction *InsertBefore); | |||
196 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
197 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, | |||
198 | Instruction *InsertBefore); | |||
199 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, | |||
200 | BasicBlock *InsertAtEnd); | |||
201 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, | |||
202 | Align Align, Instruction *InsertBefore = nullptr); | |||
203 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, | |||
204 | Align Align, BasicBlock *InsertAtEnd); | |||
205 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, | |||
206 | Align Align, AtomicOrdering Order, | |||
207 | SyncScope::ID SSID = SyncScope::System, | |||
208 | Instruction *InsertBefore = nullptr); | |||
209 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, | |||
210 | Align Align, AtomicOrdering Order, SyncScope::ID SSID, | |||
211 | BasicBlock *InsertAtEnd); | |||
212 | ||||
213 | /// Return true if this is a load from a volatile memory location. | |||
214 | bool isVolatile() const { return getSubclassData<VolatileField>(); } | |||
215 | ||||
216 | /// Specify whether this is a volatile load or not. | |||
217 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } | |||
218 | ||||
219 | /// Return the alignment of the access that is being performed. | |||
220 | Align getAlign() const { | |||
221 | return Align(1ULL << (getSubclassData<AlignmentField>())); | |||
222 | } | |||
223 | ||||
224 | void setAlignment(Align Align) { | |||
225 | setSubclassData<AlignmentField>(Log2(Align)); | |||
226 | } | |||
227 | ||||
228 | /// Returns the ordering constraint of this load instruction. | |||
229 | AtomicOrdering getOrdering() const { | |||
230 | return getSubclassData<OrderingField>(); | |||
231 | } | |||
232 | /// Sets the ordering constraint of this load instruction. May not be Release | |||
233 | /// or AcquireRelease. | |||
234 | void setOrdering(AtomicOrdering Ordering) { | |||
235 | setSubclassData<OrderingField>(Ordering); | |||
236 | } | |||
237 | ||||
238 | /// Returns the synchronization scope ID of this load instruction. | |||
239 | SyncScope::ID getSyncScopeID() const { | |||
240 | return SSID; | |||
241 | } | |||
242 | ||||
243 | /// Sets the synchronization scope ID of this load instruction. | |||
244 | void setSyncScopeID(SyncScope::ID SSID) { | |||
245 | this->SSID = SSID; | |||
246 | } | |||
247 | ||||
248 | /// Sets the ordering constraint and the synchronization scope ID of this load | |||
249 | /// instruction. | |||
250 | void setAtomic(AtomicOrdering Ordering, | |||
251 | SyncScope::ID SSID = SyncScope::System) { | |||
252 | setOrdering(Ordering); | |||
253 | setSyncScopeID(SSID); | |||
254 | } | |||
255 | ||||
256 | bool isSimple() const { return !isAtomic() && !isVolatile(); } | |||
257 | ||||
258 | bool isUnordered() const { | |||
259 | return (getOrdering() == AtomicOrdering::NotAtomic || | |||
260 | getOrdering() == AtomicOrdering::Unordered) && | |||
261 | !isVolatile(); | |||
262 | } | |||
263 | ||||
264 | Value *getPointerOperand() { return getOperand(0); } | |||
265 | const Value *getPointerOperand() const { return getOperand(0); } | |||
266 | static unsigned getPointerOperandIndex() { return 0U; } | |||
267 | Type *getPointerOperandType() const { return getPointerOperand()->getType(); } | |||
268 | ||||
269 | /// Returns the address space of the pointer operand. | |||
270 | unsigned getPointerAddressSpace() const { | |||
271 | return getPointerOperandType()->getPointerAddressSpace(); | |||
272 | } | |||
273 | ||||
274 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
275 | static bool classof(const Instruction *I) { | |||
276 | return I->getOpcode() == Instruction::Load; | |||
277 | } | |||
278 | static bool classof(const Value *V) { | |||
279 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
280 | } | |||
281 | ||||
282 | private: | |||
283 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
284 | // method so that subclasses cannot accidentally use it. | |||
285 | template <typename Bitfield> | |||
286 | void setSubclassData(typename Bitfield::Type Value) { | |||
287 | Instruction::setSubclassData<Bitfield>(Value); | |||
288 | } | |||
289 | ||||
290 | /// The synchronization scope ID of this load instruction. Not quite enough | |||
291 | /// room in SubClassData for everything, so synchronization scope ID gets its | |||
292 | /// own field. | |||
293 | SyncScope::ID SSID; | |||
294 | }; | |||
295 | ||||
296 | //===----------------------------------------------------------------------===// | |||
297 | // StoreInst Class | |||
298 | //===----------------------------------------------------------------------===// | |||
299 | ||||
300 | /// An instruction for storing to memory. | |||
301 | class StoreInst : public Instruction { | |||
302 | using VolatileField = BoolBitfieldElementT<0>; | |||
303 | using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; | |||
304 | using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; | |||
305 | static_assert( | |||
306 | Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), | |||
307 | "Bitfields must be contiguous"); | |||
308 | ||||
309 | void AssertOK(); | |||
310 | ||||
311 | protected: | |||
312 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
313 | friend class Instruction; | |||
314 | ||||
315 | StoreInst *cloneImpl() const; | |||
316 | ||||
317 | public: | |||
318 | StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); | |||
319 | StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); | |||
320 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); | |||
321 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); | |||
322 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, | |||
323 | Instruction *InsertBefore = nullptr); | |||
324 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, | |||
325 | BasicBlock *InsertAtEnd); | |||
326 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, | |||
327 | AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, | |||
328 | Instruction *InsertBefore = nullptr); | |||
329 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, | |||
330 | AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); | |||
331 | ||||
332 | // allocate space for exactly two operands | |||
333 | void *operator new(size_t S) { return User::operator new(S, 2); } | |||
334 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
335 | ||||
336 | /// Return true if this is a store to a volatile memory location. | |||
337 | bool isVolatile() const { return getSubclassData<VolatileField>(); } | |||
338 | ||||
339 | /// Specify whether this is a volatile store or not. | |||
340 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } | |||
341 | ||||
342 | /// Transparently provide more efficient getOperand methods. | |||
343 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); | |||
344 | ||||
345 | Align getAlign() const { | |||
346 | return Align(1ULL << (getSubclassData<AlignmentField>())); | |||
347 | } | |||
348 | ||||
349 | void setAlignment(Align Align) { | |||
350 | setSubclassData<AlignmentField>(Log2(Align)); | |||
351 | } | |||
352 | ||||
353 | /// Returns the ordering constraint of this store instruction. | |||
354 | AtomicOrdering getOrdering() const { | |||
355 | return getSubclassData<OrderingField>(); | |||
356 | } | |||
357 | ||||
358 | /// Sets the ordering constraint of this store instruction. May not be | |||
359 | /// Acquire or AcquireRelease. | |||
360 | void setOrdering(AtomicOrdering Ordering) { | |||
361 | setSubclassData<OrderingField>(Ordering); | |||
362 | } | |||
363 | ||||
364 | /// Returns the synchronization scope ID of this store instruction. | |||
365 | SyncScope::ID getSyncScopeID() const { | |||
366 | return SSID; | |||
367 | } | |||
368 | ||||
369 | /// Sets the synchronization scope ID of this store instruction. | |||
370 | void setSyncScopeID(SyncScope::ID SSID) { | |||
371 | this->SSID = SSID; | |||
372 | } | |||
373 | ||||
374 | /// Sets the ordering constraint and the synchronization scope ID of this | |||
375 | /// store instruction. | |||
376 | void setAtomic(AtomicOrdering Ordering, | |||
377 | SyncScope::ID SSID = SyncScope::System) { | |||
378 | setOrdering(Ordering); | |||
379 | setSyncScopeID(SSID); | |||
380 | } | |||
381 | ||||
382 | bool isSimple() const { return !isAtomic() && !isVolatile(); } | |||
383 | ||||
384 | bool isUnordered() const { | |||
385 | return (getOrdering() == AtomicOrdering::NotAtomic || | |||
386 | getOrdering() == AtomicOrdering::Unordered) && | |||
387 | !isVolatile(); | |||
388 | } | |||
389 | ||||
390 | Value *getValueOperand() { return getOperand(0); } | |||
391 | const Value *getValueOperand() const { return getOperand(0); } | |||
392 | ||||
393 | Value *getPointerOperand() { return getOperand(1); } | |||
394 | const Value *getPointerOperand() const { return getOperand(1); } | |||
395 | static unsigned getPointerOperandIndex() { return 1U; } | |||
396 | Type *getPointerOperandType() const { return getPointerOperand()->getType(); } | |||
397 | ||||
398 | /// Returns the address space of the pointer operand. | |||
399 | unsigned getPointerAddressSpace() const { | |||
400 | return getPointerOperandType()->getPointerAddressSpace(); | |||
401 | } | |||
402 | ||||
403 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
404 | static bool classof(const Instruction *I) { | |||
405 | return I->getOpcode() == Instruction::Store; | |||
406 | } | |||
407 | static bool classof(const Value *V) { | |||
408 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
409 | } | |||
410 | ||||
411 | private: | |||
412 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
413 | // method so that subclasses cannot accidentally use it. | |||
414 | template <typename Bitfield> | |||
415 | void setSubclassData(typename Bitfield::Type Value) { | |||
416 | Instruction::setSubclassData<Bitfield>(Value); | |||
417 | } | |||
418 | ||||
419 | /// The synchronization scope ID of this store instruction. Not quite enough | |||
420 | /// room in SubClassData for everything, so synchronization scope ID gets its | |||
421 | /// own field. | |||
422 | SyncScope::ID SSID; | |||
423 | }; | |||
424 | ||||
425 | template <> | |||
426 | struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { | |||
427 | }; | |||
428 | ||||
429 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) | |||
430 | ||||
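// Illustrative sketch, not part of this header: unlike LoadInst, a StoreInst
// keeps the stored value in operand 0 and the pointer in operand 1.
#include <cassert>
#include "llvm/IR/IRBuilder.h"

void emitReleaseStore(llvm::BasicBlock *BB, llvm::Value *V, llvm::Value *Ptr) {
  llvm::IRBuilder<> Builder(BB);
  llvm::StoreInst *SI = Builder.CreateAlignedStore(V, Ptr, llvm::Align(4));
  // Stores may be Release, but never Acquire or AcquireRelease (see
  // setOrdering above).
  SI->setAtomic(llvm::AtomicOrdering::Release);
  assert(SI->getValueOperand() == SI->getOperand(0) &&
         SI->getPointerOperand() == SI->getOperand(1));
}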
431 | //===----------------------------------------------------------------------===// | |||
432 | // FenceInst Class | |||
433 | //===----------------------------------------------------------------------===// | |||
434 | ||||
435 | /// An instruction for ordering other memory operations. | |||
436 | class FenceInst : public Instruction { | |||
437 | using OrderingField = AtomicOrderingBitfieldElementT<0>; | |||
438 | ||||
439 | void Init(AtomicOrdering Ordering, SyncScope::ID SSID); | |||
440 | ||||
441 | protected: | |||
442 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
443 | friend class Instruction; | |||
444 | ||||
445 | FenceInst *cloneImpl() const; | |||
446 | ||||
447 | public: | |||
448 | // Ordering may only be Acquire, Release, AcquireRelease, or | |||
449 | // SequentiallyConsistent. | |||
450 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, | |||
451 | SyncScope::ID SSID = SyncScope::System, | |||
452 | Instruction *InsertBefore = nullptr); | |||
453 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, | |||
454 | BasicBlock *InsertAtEnd); | |||
455 | ||||
456 | // allocate space for exactly zero operands | |||
457 | void *operator new(size_t S) { return User::operator new(S, 0); } | |||
458 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
459 | ||||
460 | /// Returns the ordering constraint of this fence instruction. | |||
461 | AtomicOrdering getOrdering() const { | |||
462 | return getSubclassData<OrderingField>(); | |||
463 | } | |||
464 | ||||
465 | /// Sets the ordering constraint of this fence instruction. May only be | |||
466 | /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. | |||
467 | void setOrdering(AtomicOrdering Ordering) { | |||
468 | setSubclassData<OrderingField>(Ordering); | |||
469 | } | |||
470 | ||||
471 | /// Returns the synchronization scope ID of this fence instruction. | |||
472 | SyncScope::ID getSyncScopeID() const { | |||
473 | return SSID; | |||
474 | } | |||
475 | ||||
476 | /// Sets the synchronization scope ID of this fence instruction. | |||
477 | void setSyncScopeID(SyncScope::ID SSID) { | |||
478 | this->SSID = SSID; | |||
479 | } | |||
480 | ||||
481 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
482 | static bool classof(const Instruction *I) { | |||
483 | return I->getOpcode() == Instruction::Fence; | |||
484 | } | |||
485 | static bool classof(const Value *V) { | |||
486 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
487 | } | |||
488 | ||||
489 | private: | |||
490 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
491 | // method so that subclasses cannot accidentally use it. | |||
492 | template <typename Bitfield> | |||
493 | void setSubclassData(typename Bitfield::Type Value) { | |||
494 | Instruction::setSubclassData<Bitfield>(Value); | |||
495 | } | |||
496 | ||||
497 | /// The synchronization scope ID of this fence instruction. Not quite enough | |||
498 | /// room in SubClassData for everything, so synchronization scope ID gets its | |||
499 | /// own field. | |||
500 | SyncScope::ID SSID; | |||
501 | }; | |||
502 | ||||
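// Illustrative sketch, not part of this header: a fence has zero operands, so
// its whole state is the ordering bitfield plus the SSID member.
#include <cassert>
#include "llvm/IR/IRBuilder.h"

void emitSeqCstFence(llvm::BasicBlock *BB) {
  llvm::IRBuilder<> Builder(BB);
  // Only Acquire, Release, AcquireRelease, or SequentiallyConsistent are
  // valid fence orderings (see the constructor comment above).
  llvm::FenceInst *FI =
      Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
  assert(FI->getNumOperands() == 0 &&
         FI->getSyncScopeID() == llvm::SyncScope::System);
}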
503 | //===----------------------------------------------------------------------===// | |||
504 | // AtomicCmpXchgInst Class | |||
505 | //===----------------------------------------------------------------------===// | |||
506 | ||||
507 | /// An instruction that atomically checks whether a | |||
508 | /// specified value is in a memory location, and, if it is, stores a new value | |||
509 | /// there. The value returned by this instruction is a pair containing the | |||
510 | /// original value as first element, and an i1 indicating success (true) or | |||
511 | /// failure (false) as second element. | |||
512 | /// | |||
513 | class AtomicCmpXchgInst : public Instruction { | |||
514 | void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align, | |||
515 | AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, | |||
516 | SyncScope::ID SSID); | |||
517 | ||||
518 | template <unsigned Offset> | |||
519 | using AtomicOrderingBitfieldElement = | |||
520 | typename Bitfield::Element<AtomicOrdering, Offset, 3, | |||
521 | AtomicOrdering::LAST>; | |||
522 | ||||
523 | protected: | |||
524 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
525 | friend class Instruction; | |||
526 | ||||
527 | AtomicCmpXchgInst *cloneImpl() const; | |||
528 | ||||
529 | public: | |||
530 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, | |||
531 | AtomicOrdering SuccessOrdering, | |||
532 | AtomicOrdering FailureOrdering, SyncScope::ID SSID, | |||
533 | Instruction *InsertBefore = nullptr); | |||
534 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, | |||
535 | AtomicOrdering SuccessOrdering, | |||
536 | AtomicOrdering FailureOrdering, SyncScope::ID SSID, | |||
537 | BasicBlock *InsertAtEnd); | |||
538 | ||||
539 | // allocate space for exactly three operands | |||
540 | void *operator new(size_t S) { return User::operator new(S, 3); } | |||
541 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
542 | ||||
543 | using VolatileField = BoolBitfieldElementT<0>; | |||
544 | using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; | |||
545 | using SuccessOrderingField = | |||
546 | AtomicOrderingBitfieldElementT<WeakField::NextBit>; | |||
547 | using FailureOrderingField = | |||
548 | AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; | |||
549 | using AlignmentField = | |||
550 | AlignmentBitfieldElementT<FailureOrderingField::NextBit>; | |||
551 | static_assert( | |||
552 | Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, | |||
553 | FailureOrderingField, AlignmentField>(), | |||
554 | "Bitfields must be contiguous"); | |||
555 | ||||
556 | /// Return the alignment of the memory location accessed by this | |||
557 | /// cmpxchg instruction. | |||
558 | Align getAlign() const { | |||
559 | return Align(1ULL << getSubclassData<AlignmentField>()); | |||
560 | } | |||
561 | ||||
562 | void setAlignment(Align Align) { | |||
563 | setSubclassData<AlignmentField>(Log2(Align)); | |||
564 | } | |||
565 | ||||
566 | /// Return true if this is a cmpxchg from a volatile memory | |||
567 | /// location. | |||
568 | /// | |||
569 | bool isVolatile() const { return getSubclassData<VolatileField>(); } | |||
570 | ||||
571 | /// Specify whether this is a volatile cmpxchg. | |||
572 | /// | |||
573 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } | |||
574 | ||||
575 | /// Return true if this cmpxchg may spuriously fail. | |||
576 | bool isWeak() const { return getSubclassData<WeakField>(); } | |||
577 | ||||
578 | void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } | |||
579 | ||||
580 | /// Transparently provide more efficient getOperand methods. | |||
581 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); | |||
582 | ||||
583 | static bool isValidSuccessOrdering(AtomicOrdering Ordering) { | |||
584 | return Ordering != AtomicOrdering::NotAtomic && | |||
585 | Ordering != AtomicOrdering::Unordered; | |||
586 | } | |||
587 | ||||
588 | static bool isValidFailureOrdering(AtomicOrdering Ordering) { | |||
589 | return Ordering != AtomicOrdering::NotAtomic && | |||
590 | Ordering != AtomicOrdering::Unordered && | |||
591 | Ordering != AtomicOrdering::AcquireRelease && | |||
592 | Ordering != AtomicOrdering::Release; | |||
593 | } | |||
594 | ||||
595 | /// Returns the success ordering constraint of this cmpxchg instruction. | |||
596 | AtomicOrdering getSuccessOrdering() const { | |||
597 | return getSubclassData<SuccessOrderingField>(); | |||
598 | } | |||
599 | ||||
600 | /// Sets the success ordering constraint of this cmpxchg instruction. | |||
601 | void setSuccessOrdering(AtomicOrdering Ordering) { | |||
602 | assert(isValidSuccessOrdering(Ordering) && | |||
603 | "invalid CmpXchg success ordering"); | |||
604 | setSubclassData<SuccessOrderingField>(Ordering); | |||
605 | } | |||
606 | ||||
607 | /// Returns the failure ordering constraint of this cmpxchg instruction. | |||
608 | AtomicOrdering getFailureOrdering() const { | |||
609 | return getSubclassData<FailureOrderingField>(); | |||
610 | } | |||
611 | ||||
612 | /// Sets the failure ordering constraint of this cmpxchg instruction. | |||
613 | void setFailureOrdering(AtomicOrdering Ordering) { | |||
614 | assert(isValidFailureOrdering(Ordering) && | |||
615 | "invalid CmpXchg failure ordering"); | |||
616 | setSubclassData<FailureOrderingField>(Ordering); | |||
617 | } | |||
618 | ||||
619 | /// Returns a single ordering which is at least as strong as both the | |||
620 | /// success and failure orderings for this cmpxchg. | |||
621 | AtomicOrdering getMergedOrdering() const { | |||
622 | if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent) | |||
623 | return AtomicOrdering::SequentiallyConsistent; | |||
624 | if (getFailureOrdering() == AtomicOrdering::Acquire) { | |||
625 | if (getSuccessOrdering() == AtomicOrdering::Monotonic) | |||
626 | return AtomicOrdering::Acquire; | |||
627 | if (getSuccessOrdering() == AtomicOrdering::Release) | |||
628 | return AtomicOrdering::AcquireRelease; | |||
629 | } | |||
630 | return getSuccessOrdering(); | |||
631 | } | |||
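// For example, {success: Release, failure: Acquire} merges to
// AcquireRelease, and {success: Monotonic, failure: Acquire} merges to
// Acquire; in every other valid combination the success ordering is already
// at least as strong as the failure ordering.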
632 | ||||
633 | /// Returns the synchronization scope ID of this cmpxchg instruction. | |||
634 | SyncScope::ID getSyncScopeID() const { | |||
635 | return SSID; | |||
636 | } | |||
637 | ||||
638 | /// Sets the synchronization scope ID of this cmpxchg instruction. | |||
639 | void setSyncScopeID(SyncScope::ID SSID) { | |||
640 | this->SSID = SSID; | |||
641 | } | |||
642 | ||||
643 | Value *getPointerOperand() { return getOperand(0); } | |||
644 | const Value *getPointerOperand() const { return getOperand(0); } | |||
645 | static unsigned getPointerOperandIndex() { return 0U; } | |||
646 | ||||
647 | Value *getCompareOperand() { return getOperand(1); } | |||
648 | const Value *getCompareOperand() const { return getOperand(1); } | |||
649 | ||||
650 | Value *getNewValOperand() { return getOperand(2); } | |||
651 | const Value *getNewValOperand() const { return getOperand(2); } | |||
652 | ||||
653 | /// Returns the address space of the pointer operand. | |||
654 | unsigned getPointerAddressSpace() const { | |||
655 | return getPointerOperand()->getType()->getPointerAddressSpace(); | |||
656 | } | |||
657 | ||||
658 | /// Returns the strongest permitted ordering on failure, given the | |||
659 | /// desired ordering on success. | |||
660 | /// | |||
661 | /// If the comparison in a cmpxchg operation fails, there is no atomic store, | |||
662 | /// so release semantics cannot be provided; this function therefore drops | |||
663 | /// explicit Release requests from the AtomicOrdering. A SequentiallyConsistent | |||
664 | /// operation would remain SequentiallyConsistent. | |||
665 | static AtomicOrdering | |||
666 | getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { | |||
667 | switch (SuccessOrdering) { | |||
668 | default: | |||
669 | llvm_unreachable("invalid cmpxchg success ordering"); | |||
670 | case AtomicOrdering::Release: | |||
671 | case AtomicOrdering::Monotonic: | |||
672 | return AtomicOrdering::Monotonic; | |||
673 | case AtomicOrdering::AcquireRelease: | |||
674 | case AtomicOrdering::Acquire: | |||
675 | return AtomicOrdering::Acquire; | |||
676 | case AtomicOrdering::SequentiallyConsistent: | |||
677 | return AtomicOrdering::SequentiallyConsistent; | |||
678 | } | |||
679 | } | |||
680 | ||||
681 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
682 | static bool classof(const Instruction *I) { | |||
683 | return I->getOpcode() == Instruction::AtomicCmpXchg; | |||
684 | } | |||
685 | static bool classof(const Value *V) { | |||
686 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
687 | } | |||
688 | ||||
689 | private: | |||
690 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
691 | // method so that subclasses cannot accidentally use it. | |||
692 | template <typename Bitfield> | |||
693 | void setSubclassData(typename Bitfield::Type Value) { | |||
694 | Instruction::setSubclassData<Bitfield>(Value); | |||
695 | } | |||
696 | ||||
697 | /// The synchronization scope ID of this cmpxchg instruction. Not quite | |||
698 | /// enough room in SubClassData for everything, so synchronization scope ID | |||
699 | /// gets its own field. | |||
700 | SyncScope::ID SSID; | |||
701 | }; | |||
702 | ||||
703 | template <> | |||
704 | struct OperandTraits<AtomicCmpXchgInst> : | |||
705 | public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { | |||
706 | }; | |||
707 | ||||
708 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) | |||
709 | ||||
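// Illustrative sketch, not part of this header: building a cmpxchg and
// deriving a legal failure ordering from the success ordering.
#include "llvm/IR/IRBuilder.h"

void emitCmpXchg(llvm::BasicBlock *BB, llvm::Value *Ptr, llvm::Value *Cmp,
                 llvm::Value *New) {
  llvm::IRBuilder<> Builder(BB);
  auto Success = llvm::AtomicOrdering::AcquireRelease;
  // On failure no store happens, so AcquireRelease degrades to Acquire.
  auto Failure =
      llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
  llvm::AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
      Ptr, Cmp, New, llvm::MaybeAlign(), Success, Failure);
  CXI->setWeak(true); // permit spurious failure, e.g. on LL/SC targets
}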
710 | //===----------------------------------------------------------------------===// | |||
711 | // AtomicRMWInst Class | |||
712 | //===----------------------------------------------------------------------===// | |||
713 | ||||
714 | /// An instruction that atomically reads a memory location, | |||
715 | /// combines it with another value, and then stores the result back. Returns | |||
716 | /// the old value. | |||
717 | /// | |||
718 | class AtomicRMWInst : public Instruction { | |||
719 | protected: | |||
720 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
721 | friend class Instruction; | |||
722 | ||||
723 | AtomicRMWInst *cloneImpl() const; | |||
724 | ||||
725 | public: | |||
726 | /// This enumeration lists the possible modifications atomicrmw can make. In | |||
727 | /// the descriptions, 'p' is the pointer to the instruction's memory location, | |||
728 | /// 'old' is the initial value of *p, and 'v' is the other value passed to the | |||
729 | /// instruction. These instructions always return 'old'. | |||
730 | enum BinOp : unsigned { | |||
731 | /// *p = v | |||
732 | Xchg, | |||
733 | /// *p = old + v | |||
734 | Add, | |||
735 | /// *p = old - v | |||
736 | Sub, | |||
737 | /// *p = old & v | |||
738 | And, | |||
739 | /// *p = ~(old & v) | |||
740 | Nand, | |||
741 | /// *p = old | v | |||
742 | Or, | |||
743 | /// *p = old ^ v | |||
744 | Xor, | |||
745 | /// *p = old >signed v ? old : v | |||
746 | Max, | |||
747 | /// *p = old <signed v ? old : v | |||
748 | Min, | |||
749 | /// *p = old >unsigned v ? old : v | |||
750 | UMax, | |||
751 | /// *p = old <unsigned v ? old : v | |||
752 | UMin, | |||
753 | ||||
754 | /// *p = old + v | |||
755 | FAdd, | |||
756 | ||||
757 | /// *p = old - v | |||
758 | FSub, | |||
759 | ||||
760 | /// *p = maxnum(old, v) | |||
761 | /// \p maxnum matches the behavior of \p llvm.maxnum.*. | |||
762 | FMax, | |||
763 | ||||
764 | /// *p = minnum(old, v) | |||
765 | /// \p minnum matches the behavior of \p llvm.minnum.*. | |||
766 | FMin, | |||
767 | ||||
768 | /// Increment one up to a maximum value. | |||
769 | /// *p = (old u>= v) ? 0 : (old + 1) | |||
770 | UIncWrap, | |||
771 | ||||
772 | /// Decrement one until a minimum value or zero. | |||
773 | /// *p = ((old == 0) || (old u> v)) ? v : (old - 1) | |||
774 | UDecWrap, | |||
775 | ||||
776 | FIRST_BINOP = Xchg, | |||
777 | LAST_BINOP = UDecWrap, | |||
778 | BAD_BINOP | |||
779 | }; | |||
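// Worked example of the two wrapping operations: with v == 7, UIncWrap steps
// *p through 0, 1, ..., 7, 0, 1, ... (old u>= v resets to 0), while UDecWrap
// steps *p through 7, 6, ..., 0, 7, 6, ... (old == 0 or old u> v resets to v).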
780 | ||||
781 | private: | |||
782 | template <unsigned Offset> | |||
783 | using AtomicOrderingBitfieldElement = | |||
784 | typename Bitfield::Element<AtomicOrdering, Offset, 3, | |||
785 | AtomicOrdering::LAST>; | |||
786 | ||||
787 | template <unsigned Offset> | |||
788 | using BinOpBitfieldElement = | |||
789 | typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>; | |||
790 | ||||
791 | public: | |||
792 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, | |||
793 | AtomicOrdering Ordering, SyncScope::ID SSID, | |||
794 | Instruction *InsertBefore = nullptr); | |||
795 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, | |||
796 | AtomicOrdering Ordering, SyncScope::ID SSID, | |||
797 | BasicBlock *InsertAtEnd); | |||
798 | ||||
799 | // allocate space for exactly two operands | |||
800 | void *operator new(size_t S) { return User::operator new(S, 2); } | |||
801 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
802 | ||||
803 | using VolatileField = BoolBitfieldElementT<0>; | |||
804 | using AtomicOrderingField = | |||
805 | AtomicOrderingBitfieldElementT<VolatileField::NextBit>; | |||
806 | using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>; | |||
807 | using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>; | |||
808 | static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField, | |||
809 | OperationField, AlignmentField>(), | |||
810 | "Bitfields must be contiguous"); | |||
811 | ||||
812 | BinOp getOperation() const { return getSubclassData<OperationField>(); } | |||
813 | ||||
814 | static StringRef getOperationName(BinOp Op); | |||
815 | ||||
816 | static bool isFPOperation(BinOp Op) { | |||
817 | switch (Op) { | |||
818 | case AtomicRMWInst::FAdd: | |||
819 | case AtomicRMWInst::FSub: | |||
820 | case AtomicRMWInst::FMax: | |||
821 | case AtomicRMWInst::FMin: | |||
822 | return true; | |||
823 | default: | |||
824 | return false; | |||
825 | } | |||
826 | } | |||
827 | ||||
828 | void setOperation(BinOp Operation) { | |||
829 | setSubclassData<OperationField>(Operation); | |||
830 | } | |||
831 | ||||
832 | /// Return the alignment of the memory location accessed by this | |||
833 | /// rmw instruction. | |||
834 | Align getAlign() const { | |||
835 | return Align(1ULL << getSubclassData<AlignmentField>()); | |||
836 | } | |||
837 | ||||
838 | void setAlignment(Align Align) { | |||
839 | setSubclassData<AlignmentField>(Log2(Align)); | |||
840 | } | |||
841 | ||||
842 | /// Return true if this is a RMW on a volatile memory location. | |||
843 | /// | |||
844 | bool isVolatile() const { return getSubclassData<VolatileField>(); } | |||
845 | ||||
846 | /// Specify whether this is a volatile RMW or not. | |||
847 | /// | |||
848 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } | |||
849 | ||||
850 | /// Transparently provide more efficient getOperand methods. | |||
851 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); | |||
852 | ||||
853 | /// Returns the ordering constraint of this rmw instruction. | |||
854 | AtomicOrdering getOrdering() const { | |||
855 | return getSubclassData<AtomicOrderingField>(); | |||
856 | } | |||
857 | ||||
858 | /// Sets the ordering constraint of this rmw instruction. | |||
859 | void setOrdering(AtomicOrdering Ordering) { | |||
860 | assert(Ordering != AtomicOrdering::NotAtomic && | |||
861 | "atomicrmw instructions can only be atomic."); | |||
862 | assert(Ordering != AtomicOrdering::Unordered && | |||
863 | "atomicrmw instructions cannot be unordered."); | |||
864 | setSubclassData<AtomicOrderingField>(Ordering); | |||
865 | } | |||
866 | ||||
867 | /// Returns the synchronization scope ID of this rmw instruction. | |||
868 | SyncScope::ID getSyncScopeID() const { | |||
869 | return SSID; | |||
870 | } | |||
871 | ||||
872 | /// Sets the synchronization scope ID of this rmw instruction. | |||
873 | void setSyncScopeID(SyncScope::ID SSID) { | |||
874 | this->SSID = SSID; | |||
875 | } | |||
876 | ||||
877 | Value *getPointerOperand() { return getOperand(0); } | |||
878 | const Value *getPointerOperand() const { return getOperand(0); } | |||
879 | static unsigned getPointerOperandIndex() { return 0U; } | |||
880 | ||||
881 | Value *getValOperand() { return getOperand(1); } | |||
882 | const Value *getValOperand() const { return getOperand(1); } | |||
883 | ||||
884 | /// Returns the address space of the pointer operand. | |||
885 | unsigned getPointerAddressSpace() const { | |||
886 | return getPointerOperand()->getType()->getPointerAddressSpace(); | |||
887 | } | |||
888 | ||||
889 | bool isFloatingPointOperation() const { | |||
890 | return isFPOperation(getOperation()); | |||
891 | } | |||
892 | ||||
893 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
894 | static bool classof(const Instruction *I) { | |||
895 | return I->getOpcode() == Instruction::AtomicRMW; | |||
896 | } | |||
897 | static bool classof(const Value *V) { | |||
898 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
899 | } | |||
900 | ||||
901 | private: | |||
902 | void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align, | |||
903 | AtomicOrdering Ordering, SyncScope::ID SSID); | |||
904 | ||||
905 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
906 | // method so that subclasses cannot accidentally use it. | |||
907 | template <typename Bitfield> | |||
908 | void setSubclassData(typename Bitfield::Type Value) { | |||
909 | Instruction::setSubclassData<Bitfield>(Value); | |||
910 | } | |||
911 | ||||
912 | /// The synchronization scope ID of this rmw instruction. Not quite enough | |||
913 | /// room in SubClassData for everything, so synchronization scope ID gets its | |||
914 | /// own field. | |||
915 | SyncScope::ID SSID; | |||
916 | }; | |||
917 | ||||
918 | template <> | |||
919 | struct OperandTraits<AtomicRMWInst> | |||
920 | : public FixedNumOperandTraits<AtomicRMWInst,2> { | |||
921 | }; | |||
922 | ||||
923 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value) | |||
924 | ||||
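// Illustrative sketch, not part of this header: an atomicrmw add. The result
// of the instruction is the value the location held *before* the update.
#include <cassert>
#include "llvm/IR/IRBuilder.h"

void emitAtomicAdd(llvm::BasicBlock *BB, llvm::Value *Ptr, llvm::Value *Val) {
  llvm::IRBuilder<> Builder(BB);
  llvm::AtomicRMWInst *RMW = Builder.CreateAtomicRMW(
      llvm::AtomicRMWInst::Add, Ptr, Val, llvm::MaybeAlign(),
      llvm::AtomicOrdering::Monotonic);
  // Add is one of the integer operations; FAdd/FSub/FMax/FMin are the
  // floating-point ones.
  assert(!RMW->isFloatingPointOperation());
}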
925 | //===----------------------------------------------------------------------===// | |||
926 | // GetElementPtrInst Class | |||
927 | //===----------------------------------------------------------------------===// | |||
928 | ||||
929 | // checkGEPType - Simple wrapper function to give a better assertion failure | |||
930 | // message on bad indexes for a gep instruction. | |||
931 | // | |||
932 | inline Type *checkGEPType(Type *Ty) { | |||
933 | assert(Ty && "Invalid GetElementPtrInst indices for type!"); | |||
934 | return Ty; | |||
935 | } | |||
936 | ||||
937 | /// An instruction for type-safe pointer arithmetic to | |||
938 | /// access elements of arrays and structs. | |||
939 | /// | |||
940 | class GetElementPtrInst : public Instruction { | |||
941 | Type *SourceElementType; | |||
942 | Type *ResultElementType; | |||
943 | ||||
944 | GetElementPtrInst(const GetElementPtrInst &GEPI); | |||
945 | ||||
946 | /// Constructors - Create a getelementptr instruction with a base pointer and | |||
947 | /// a list of indices. The first ctor can optionally insert before an existing | |||
948 | /// instruction, the second appends the new instruction to the specified | |||
949 | /// BasicBlock. | |||
950 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, | |||
951 | ArrayRef<Value *> IdxList, unsigned Values, | |||
952 | const Twine &NameStr, Instruction *InsertBefore); | |||
953 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, | |||
954 | ArrayRef<Value *> IdxList, unsigned Values, | |||
955 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
956 | ||||
957 | void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); | |||
958 | ||||
959 | protected: | |||
960 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
961 | friend class Instruction; | |||
962 | ||||
963 | GetElementPtrInst *cloneImpl() const; | |||
964 | ||||
965 | public: | |||
966 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, | |||
967 | ArrayRef<Value *> IdxList, | |||
968 | const Twine &NameStr = "", | |||
969 | Instruction *InsertBefore = nullptr) { | |||
970 | unsigned Values = 1 + unsigned(IdxList.size()); | |||
971 | assert(PointeeType && "Must specify element type"); | |||
972 | assert(cast<PointerType>(Ptr->getType()->getScalarType()) | |||
973 | ->isOpaqueOrPointeeTypeMatches(PointeeType)); | |||
974 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, | |||
975 | NameStr, InsertBefore); | |||
976 | } | |||
977 | ||||
978 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, | |||
979 | ArrayRef<Value *> IdxList, | |||
980 | const Twine &NameStr, | |||
981 | BasicBlock *InsertAtEnd) { | |||
982 | unsigned Values = 1 + unsigned(IdxList.size()); | |||
983 | assert(PointeeType && "Must specify element type"); | |||
984 | assert(cast<PointerType>(Ptr->getType()->getScalarType()) | |||
985 | ->isOpaqueOrPointeeTypeMatches(PointeeType)); | |||
986 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, | |||
987 | NameStr, InsertAtEnd); | |||
988 | } | |||
989 | ||||
990 | /// Create an "inbounds" getelementptr. See the documentation for the | |||
991 | /// "inbounds" flag in LangRef.html for details. | |||
992 | static GetElementPtrInst * | |||
993 | CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, | |||
994 | const Twine &NameStr = "", | |||
995 | Instruction *InsertBefore = nullptr) { | |||
996 | GetElementPtrInst *GEP = | |||
997 | Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); | |||
998 | GEP->setIsInBounds(true); | |||
999 | return GEP; | |||
1000 | } | |||
1001 | ||||
1002 | static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, | |||
1003 | ArrayRef<Value *> IdxList, | |||
1004 | const Twine &NameStr, | |||
1005 | BasicBlock *InsertAtEnd) { | |||
1006 | GetElementPtrInst *GEP = | |||
1007 | Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); | |||
1008 | GEP->setIsInBounds(true); | |||
1009 | return GEP; | |||
1010 | } | |||
1011 | ||||
1012 | /// Transparently provide more efficient getOperand methods. | |||
1013 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); | |||
1014 | ||||
1015 | Type *getSourceElementType() const { return SourceElementType; } | |||
1016 | ||||
1017 | void setSourceElementType(Type *Ty) { SourceElementType = Ty; } | |||
1018 | void setResultElementType(Type *Ty) { ResultElementType = Ty; } | |||
1019 | ||||
1020 | Type *getResultElementType() const { | |||
1021 | assert(cast<PointerType>(getType()->getScalarType()) | |||
1022 | ->isOpaqueOrPointeeTypeMatches(ResultElementType)); | |||
1023 | return ResultElementType; | |||
1024 | } | |||
1025 | ||||
1026 | /// Returns the address space of this instruction's pointer type. | |||
1027 | unsigned getAddressSpace() const { | |||
1028 | // Note that this is always the same as the pointer operand's address space | |||
1029 | // and that is cheaper to compute, so cheat here. | |||
1030 | return getPointerAddressSpace(); | |||
1031 | } | |||
1032 | ||||
1033 | /// Returns the result type of a getelementptr with the given source | |||
1034 | /// element type and indexes. | |||
1035 | /// | |||
1036 | /// Null is returned if the indices are invalid for the specified | |||
1037 | /// source element type. | |||
1038 | static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); | |||
1039 | static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); | |||
1040 | static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); | |||
1041 | ||||
1042 | /// Return the type of the element at the given index of an indexable | |||
1043 | /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})". | |||
1044 | /// | |||
1045 | /// Returns null if the type can't be indexed, or the given index is not | |||
1046 | /// legal for the given type. | |||
1047 | static Type *getTypeAtIndex(Type *Ty, Value *Idx); | |||
1048 | static Type *getTypeAtIndex(Type *Ty, uint64_t Idx); | |||
1049 | ||||
1050 | inline op_iterator idx_begin() { return op_begin()+1; } | |||
1051 | inline const_op_iterator idx_begin() const { return op_begin()+1; } | |||
1052 | inline op_iterator idx_end() { return op_end(); } | |||
1053 | inline const_op_iterator idx_end() const { return op_end(); } | |||
1054 | ||||
1055 | inline iterator_range<op_iterator> indices() { | |||
1056 | return make_range(idx_begin(), idx_end()); | |||
1057 | } | |||
1058 | ||||
1059 | inline iterator_range<const_op_iterator> indices() const { | |||
1060 | return make_range(idx_begin(), idx_end()); | |||
1061 | } | |||
1062 | ||||
1063 | Value *getPointerOperand() { | |||
1064 | return getOperand(0); | |||
1065 | } | |||
1066 | const Value *getPointerOperand() const { | |||
1067 | return getOperand(0); | |||
1068 | } | |||
1069 | static unsigned getPointerOperandIndex() { | |||
1070 | return 0U; // get index for modifying correct operand. | |||
1071 | } | |||
1072 | ||||
1073 | /// Method to return the pointer operand as a | |||
1074 | /// PointerType. | |||
1075 | Type *getPointerOperandType() const { | |||
1076 | return getPointerOperand()->getType(); | |||
1077 | } | |||
1078 | ||||
1079 | /// Returns the address space of the pointer operand. | |||
1080 | unsigned getPointerAddressSpace() const { | |||
1081 | return getPointerOperandType()->getPointerAddressSpace(); | |||
1082 | } | |||
1083 | ||||
1084 | /// Returns the pointer type returned by the GEP | |||
1085 | /// instruction, which may be a vector of pointers. | |||
1086 | static Type *getGEPReturnType(Type *ElTy, Value *Ptr, | |||
1087 | ArrayRef<Value *> IdxList) { | |||
1088 | PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType()); | |||
1089 | unsigned AddrSpace = OrigPtrTy->getAddressSpace(); | |||
1090 | Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList)); | |||
1091 | Type *PtrTy = OrigPtrTy->isOpaque() | |||
1092 | ? PointerType::get(OrigPtrTy->getContext(), AddrSpace) | |||
1093 | : PointerType::get(ResultElemTy, AddrSpace); | |||
1094 | // Vector GEP | |||
1095 | if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) { | |||
1096 | ElementCount EltCount = PtrVTy->getElementCount(); | |||
1097 | return VectorType::get(PtrTy, EltCount); | |||
1098 | } | |||
1099 | for (Value *Index : IdxList) | |||
1100 | if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { | |||
1101 | ElementCount EltCount = IndexVTy->getElementCount(); | |||
1102 | return VectorType::get(PtrTy, EltCount); | |||
1103 | } | |||
1104 | // Scalar GEP | |||
1105 | return PtrTy; | |||
1106 | } | |||
1107 | ||||
1108 | unsigned getNumIndices() const { // Note: always non-negative | |||
1109 | return getNumOperands() - 1; | |||
1110 | } | |||
1111 | ||||
1112 | bool hasIndices() const { | |||
1113 | return getNumOperands() > 1; | |||
1114 | } | |||
1115 | ||||
1116 | /// Return true if all of the indices of this GEP are | |||
1117 | /// zeros. If so, the result pointer and the first operand have the same | |||
1118 | /// value, just potentially different types. | |||
1119 | bool hasAllZeroIndices() const; | |||
1120 | ||||
1121 | /// Return true if all of the indices of this GEP are | |||
1122 | /// constant integers. If so, the result pointer and the first operand have | |||
1123 | /// a constant offset between them. | |||
1124 | bool hasAllConstantIndices() const; | |||
1125 | ||||
1126 | /// Set or clear the inbounds flag on this GEP instruction. | |||
1127 | /// See LangRef.html for the meaning of inbounds on a getelementptr. | |||
1128 | void setIsInBounds(bool b = true); | |||
1129 | ||||
1130 | /// Determine whether the GEP has the inbounds flag. | |||
1131 | bool isInBounds() const; | |||
1132 | ||||
1133 | /// Accumulate the constant address offset of this GEP if possible. | |||
1134 | /// | |||
1135 | /// This routine accepts an APInt into which it will accumulate the constant | |||
1136 | /// offset of this GEP if the GEP is in fact constant. If the GEP is not | |||
1137 | /// all-constant, it returns false and the value of the offset APInt is | |||
1138 | /// undefined (it is *not* preserved!). The APInt passed into this routine | |||
1139 | /// must be at least as wide as the IntPtr type for the address space of | |||
1140 | /// the base GEP pointer. | |||
1141 | bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; | |||
1142 | bool collectOffset(const DataLayout &DL, unsigned BitWidth, | |||
1143 | MapVector<Value *, APInt> &VariableOffsets, | |||
1144 | APInt &ConstantOffset) const; | |||
1145 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1146 | static bool classof(const Instruction *I) { | |||
1147 | return (I->getOpcode() == Instruction::GetElementPtr); | |||
1148 | } | |||
1149 | static bool classof(const Value *V) { | |||
1150 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1151 | } | |||
1152 | }; | |||
1153 | ||||
1154 | template <> | |||
1155 | struct OperandTraits<GetElementPtrInst> : | |||
1156 | public VariadicOperandTraits<GetElementPtrInst, 1> { | |||
1157 | }; | |||
1158 | ||||
1159 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, | |||
1160 | ArrayRef<Value *> IdxList, unsigned Values, | |||
1161 | const Twine &NameStr, | |||
1162 | Instruction *InsertBefore) | |||
1163 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, | |||
1164 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, | |||
1165 | Values, InsertBefore), | |||
1166 | SourceElementType(PointeeType), | |||
1167 | ResultElementType(getIndexedType(PointeeType, IdxList)) { | |||
1168 | assert(cast<PointerType>(getType()->getScalarType()) | |||
1169 | ->isOpaqueOrPointeeTypeMatches(ResultElementType)); | |||
1170 | init(Ptr, IdxList, NameStr); | |||
1171 | } | |||
1172 | ||||
1173 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, | |||
1174 | ArrayRef<Value *> IdxList, unsigned Values, | |||
1175 | const Twine &NameStr, | |||
1176 | BasicBlock *InsertAtEnd) | |||
1177 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, | |||
1178 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, | |||
1179 | Values, InsertAtEnd), | |||
1180 | SourceElementType(PointeeType), | |||
1181 | ResultElementType(getIndexedType(PointeeType, IdxList)) { | |||
1182 | assert(cast<PointerType>(getType()->getScalarType()) | |||
1183 | ->isOpaqueOrPointeeTypeMatches(ResultElementType)); | |||
1184 | init(Ptr, IdxList, NameStr); | |||
1185 | } | |||
1186 | ||||
1187 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) | |||
1188 | ||||
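// Illustrative sketch, not part of this header: creating an inbounds GEP and
// folding its constant indices into a byte offset. The i32 array type and
// names are assumptions for the example.
#include <cassert>
#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

void emitConstGEP(llvm::Module &M, llvm::BasicBlock *BB, llvm::Value *Base) {
  llvm::IRBuilder<> Builder(BB);
  llvm::Type *ArrTy = llvm::ArrayType::get(Builder.getInt32Ty(), 8);
  llvm::Value *GEP = Builder.CreateInBoundsGEP(
      ArrTy, Base, {Builder.getInt64(0), Builder.getInt64(3)});
  const llvm::DataLayout &DL = M.getDataLayout();
  llvm::APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
  // CreateInBoundsGEP may constant-fold, so guard the cast.
  if (auto *GEPI = llvm::dyn_cast<llvm::GetElementPtrInst>(GEP)) {
    bool Folded = GEPI->accumulateConstantOffset(DL, Offset);
    assert(Folded && Offset == 12); // element 3 of an i32 array: 3 * 4 bytes
  }
}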
1189 | //===----------------------------------------------------------------------===// | |||
1190 | // ICmpInst Class | |||
1191 | //===----------------------------------------------------------------------===// | |||
1192 | ||||
1193 | /// This instruction compares its operands according to the predicate given | |||
1194 | /// to the constructor. It only operates on integers or pointers. The operands | |||
1195 | /// must be of identical types. | |||
1196 | /// Represents an integer comparison operator. | |||
1197 | class ICmpInst: public CmpInst { | |||
1198 | void AssertOK() { | |||
1199 | assert(isIntPredicate() && | |||
1200 | "Invalid ICmp predicate value"); | |||
1201 | assert(getOperand(0)->getType() == getOperand(1)->getType() && | |||
1202 | "Both operands to ICmp instruction are not of the same type!"); | |||
1203 | // Check that the operands are the right type | |||
1204 |     assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1205 |             getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1206 |            "Invalid operand types for ICmp instruction");
1207 | } | |||
1208 | ||||
1209 | protected: | |||
1210 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1211 | friend class Instruction; | |||
1212 | ||||
1213 | /// Clone an identical ICmpInst | |||
1214 | ICmpInst *cloneImpl() const; | |||
1215 | ||||
1216 | public: | |||
1217 | /// Constructor with insert-before-instruction semantics. | |||
1218 | ICmpInst( | |||
1219 | Instruction *InsertBefore, ///< Where to insert | |||
1220 | Predicate pred, ///< The predicate to use for the comparison | |||
1221 | Value *LHS, ///< The left-hand-side of the expression | |||
1222 | Value *RHS, ///< The right-hand-side of the expression | |||
1223 | const Twine &NameStr = "" ///< Name of the instruction | |||
1224 | ) : CmpInst(makeCmpResultType(LHS->getType()), | |||
1225 | Instruction::ICmp, pred, LHS, RHS, NameStr, | |||
1226 | InsertBefore) { | |||
1227 | #ifndef NDEBUG | |||
1228 | AssertOK(); | |||
1229 | #endif | |||
1230 | } | |||
1231 | ||||
1232 | /// Constructor with insert-at-end semantics. | |||
1233 | ICmpInst( | |||
1234 | BasicBlock &InsertAtEnd, ///< Block to insert into. | |||
1235 | Predicate pred, ///< The predicate to use for the comparison | |||
1236 | Value *LHS, ///< The left-hand-side of the expression | |||
1237 | Value *RHS, ///< The right-hand-side of the expression | |||
1238 | const Twine &NameStr = "" ///< Name of the instruction | |||
1239 | ) : CmpInst(makeCmpResultType(LHS->getType()), | |||
1240 | Instruction::ICmp, pred, LHS, RHS, NameStr, | |||
1241 | &InsertAtEnd) { | |||
1242 | #ifndef NDEBUG | |||
1243 | AssertOK(); | |||
1244 | #endif | |||
1245 | } | |||
1246 | ||||
1247 | /// Constructor with no-insertion semantics | |||
1248 | ICmpInst( | |||
1249 | Predicate pred, ///< The predicate to use for the comparison | |||
1250 | Value *LHS, ///< The left-hand-side of the expression | |||
1251 | Value *RHS, ///< The right-hand-side of the expression | |||
1252 | const Twine &NameStr = "" ///< Name of the instruction | |||
1253 | ) : CmpInst(makeCmpResultType(LHS->getType()), | |||
1254 | Instruction::ICmp, pred, LHS, RHS, NameStr) { | |||
1255 | #ifndef NDEBUG | |||
1256 | AssertOK(); | |||
1257 | #endif | |||
1258 | } | |||
1259 | ||||
1260 | /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. | |||
1261 | /// @returns the predicate that would be the result if the operand were | |||
1262 | /// regarded as signed. | |||
1263 | /// Return the signed version of the predicate | |||
1264 | Predicate getSignedPredicate() const { | |||
1265 | return getSignedPredicate(getPredicate()); | |||
1266 | } | |||
1267 | ||||
1268 | /// This is a static version that you can use without an instruction. | |||
1269 | /// Return the signed version of the predicate. | |||
1270 | static Predicate getSignedPredicate(Predicate pred); | |||
1271 | ||||
1272 | /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. | |||
1273 | /// @returns the predicate that would be the result if the operand were | |||
1274 | /// regarded as unsigned. | |||
1275 | /// Return the unsigned version of the predicate | |||
1276 | Predicate getUnsignedPredicate() const { | |||
1277 | return getUnsignedPredicate(getPredicate()); | |||
1278 | } | |||
1279 | ||||
1280 | /// This is a static version that you can use without an instruction. | |||
1281 | /// Return the unsigned version of the predicate. | |||
1282 | static Predicate getUnsignedPredicate(Predicate pred); | |||
1283 | ||||
1284 | /// Return true if this predicate is either EQ or NE. This also | |||
1285 | /// tests for commutativity. | |||
1286 | static bool isEquality(Predicate P) { | |||
1287 | return P == ICMP_EQ || P == ICMP_NE; | |||
1288 | } | |||
1289 | ||||
1290 | /// Return true if this predicate is either EQ or NE. This also | |||
1291 | /// tests for commutativity. | |||
1292 | bool isEquality() const { | |||
1293 | return isEquality(getPredicate()); | |||
1294 | } | |||
1295 | ||||
1296 | /// @returns true if the predicate of this ICmpInst is commutative | |||
1297 | /// Determine if this relation is commutative. | |||
1298 | bool isCommutative() const { return isEquality(); } | |||
1299 | ||||
1300 | /// Return true if the predicate is relational (not EQ or NE). | |||
1301 | /// | |||
1302 | bool isRelational() const { | |||
1303 | return !isEquality(); | |||
1304 | } | |||
1305 | ||||
1306 | /// Return true if the predicate is relational (not EQ or NE). | |||
1307 | /// | |||
1308 | static bool isRelational(Predicate P) { | |||
1309 | return !isEquality(P); | |||
1310 | } | |||
1311 | ||||
1312 | /// Return true if the predicate is SGT or UGT. | |||
1313 | /// | |||
1314 | static bool isGT(Predicate P) { | |||
1315 | return P == ICMP_SGT || P == ICMP_UGT; | |||
1316 | } | |||
1317 | ||||
1318 | /// Return true if the predicate is SLT or ULT. | |||
1319 | /// | |||
1320 | static bool isLT(Predicate P) { | |||
1321 | return P == ICMP_SLT || P == ICMP_ULT; | |||
1322 | } | |||
1323 | ||||
1324 | /// Return true if the predicate is SGE or UGE. | |||
1325 | /// | |||
1326 | static bool isGE(Predicate P) { | |||
1327 | return P == ICMP_SGE || P == ICMP_UGE; | |||
1328 | } | |||
1329 | ||||
1330 | /// Return true if the predicate is SLE or ULE. | |||
1331 | /// | |||
1332 | static bool isLE(Predicate P) { | |||
1333 | return P == ICMP_SLE || P == ICMP_ULE; | |||
1334 | } | |||
1335 | ||||
1336 | /// Returns the sequence of all ICmp predicates. | |||
1337 | /// | |||
1338 | static auto predicates() { return ICmpPredicates(); } | |||
1339 | ||||
1340 | /// Exchange the two operands to this instruction in such a way that it does | |||
1341 | /// not modify the semantics of the instruction. The predicate value may be | |||
1342 | /// changed to retain the same result if the predicate is order dependent | |||
1343 | /// (e.g. ult). | |||
1344 | /// Swap operands and adjust predicate. | |||
1345 | void swapOperands() { | |||
1346 | setPredicate(getSwappedPredicate()); | |||
1347 | Op<0>().swap(Op<1>()); | |||
1348 | } | |||
1349 | ||||
1350 | /// Return result of `LHS Pred RHS` comparison. | |||
1351 | static bool compare(const APInt &LHS, const APInt &RHS, | |||
1352 | ICmpInst::Predicate Pred); | |||
1353 | ||||
1354 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1355 | static bool classof(const Instruction *I) { | |||
1356 | return I->getOpcode() == Instruction::ICmp; | |||
1357 | } | |||
1358 | static bool classof(const Value *V) { | |||
1359 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1360 | } | |||
1361 | }; | |||
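// Illustrative sketch (editor's addition, not part of Instructions.h):
// exercising the ICmpInst API above. A and B are assumed to be integer
// values of the same type; the function itself is hypothetical.
static llvm::ICmpInst *exampleICmp(llvm::Value *A, llvm::Value *B) {
  using namespace llvm;
  // Detached (no-insertion) comparison: "icmp sgt A, B".
  ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_SGT, A, B, "cmp");
  // Canonicalize "greater than" to "less than": swapOperands() flips the
  // predicate to SLT and exchanges A/B, so the result value is unchanged.
  if (ICmpInst::isGT(Cmp->getPredicate()))
    Cmp->swapOperands();
  // The static compare() helper folds a predicate on constants: 2 s> 1.
  bool Folded = ICmpInst::compare(APInt(32, 2), APInt(32, 1),
                                  ICmpInst::ICMP_SGT);
  (void)Folded;
  return Cmp; // the caller inserts it into a block (or deletes it)
}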
1362 | ||||
1363 | //===----------------------------------------------------------------------===// | |||
1364 | // FCmpInst Class | |||
1365 | //===----------------------------------------------------------------------===// | |||
1366 | ||||
1367 | /// This instruction compares its operands according to the predicate given | |||
1368 | /// to the constructor. It only operates on floating point values or packed | |||
1369 | /// vectors of floating point values. The operands must be identical types. | |||
1370 | /// Represents a floating point comparison operator. | |||
1371 | class FCmpInst: public CmpInst { | |||
1372 | void AssertOK() { | |||
1373 |     assert(isFPPredicate() && "Invalid FCmp predicate value");
1374 |     assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1375 |            "Both operands to FCmp instruction are not of the same type!");
1376 | // Check that the operands are the right type | |||
1377 |     assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1378 |            "Invalid operand types for FCmp instruction");
1379 | } | |||
1380 | ||||
1381 | protected: | |||
1382 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1383 | friend class Instruction; | |||
1384 | ||||
1385 | /// Clone an identical FCmpInst | |||
1386 | FCmpInst *cloneImpl() const; | |||
1387 | ||||
1388 | public: | |||
1389 | /// Constructor with insert-before-instruction semantics. | |||
1390 | FCmpInst( | |||
1391 | Instruction *InsertBefore, ///< Where to insert | |||
1392 | Predicate pred, ///< The predicate to use for the comparison | |||
1393 | Value *LHS, ///< The left-hand-side of the expression | |||
1394 | Value *RHS, ///< The right-hand-side of the expression | |||
1395 | const Twine &NameStr = "" ///< Name of the instruction | |||
1396 | ) : CmpInst(makeCmpResultType(LHS->getType()), | |||
1397 | Instruction::FCmp, pred, LHS, RHS, NameStr, | |||
1398 | InsertBefore) { | |||
1399 | AssertOK(); | |||
1400 | } | |||
1401 | ||||
1402 | /// Constructor with insert-at-end semantics. | |||
1403 | FCmpInst( | |||
1404 | BasicBlock &InsertAtEnd, ///< Block to insert into. | |||
1405 | Predicate pred, ///< The predicate to use for the comparison | |||
1406 | Value *LHS, ///< The left-hand-side of the expression | |||
1407 | Value *RHS, ///< The right-hand-side of the expression | |||
1408 | const Twine &NameStr = "" ///< Name of the instruction | |||
1409 | ) : CmpInst(makeCmpResultType(LHS->getType()), | |||
1410 | Instruction::FCmp, pred, LHS, RHS, NameStr, | |||
1411 | &InsertAtEnd) { | |||
1412 | AssertOK(); | |||
1413 | } | |||
1414 | ||||
1415 | /// Constructor with no-insertion semantics | |||
1416 | FCmpInst( | |||
1417 | Predicate Pred, ///< The predicate to use for the comparison | |||
1418 | Value *LHS, ///< The left-hand-side of the expression | |||
1419 | Value *RHS, ///< The right-hand-side of the expression | |||
1420 | const Twine &NameStr = "", ///< Name of the instruction | |||
1421 | Instruction *FlagsSource = nullptr | |||
1422 | ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, | |||
1423 | RHS, NameStr, nullptr, FlagsSource) { | |||
1424 | AssertOK(); | |||
1425 | } | |||
1426 | ||||
1427 |   /// @returns true if \p Pred is one of the four FCmp equality
1428 |   /// predicates: FCMP_OEQ, FCMP_ONE, FCMP_UEQ or FCMP_UNE.
1429 | static bool isEquality(Predicate Pred) { | |||
1430 | return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || | |||
1431 | Pred == FCMP_UNE; | |||
1432 | } | |||
1433 | ||||
1434 |   /// @returns true if the predicate of this instruction is an equality one.
1435 | /// Determine if this is an equality predicate. | |||
1436 | bool isEquality() const { return isEquality(getPredicate()); } | |||
1437 | ||||
1438 | /// @returns true if the predicate of this instruction is commutative. | |||
1439 | /// Determine if this is a commutative predicate. | |||
1440 | bool isCommutative() const { | |||
1441 | return isEquality() || | |||
1442 | getPredicate() == FCMP_FALSE || | |||
1443 | getPredicate() == FCMP_TRUE || | |||
1444 | getPredicate() == FCMP_ORD || | |||
1445 | getPredicate() == FCMP_UNO; | |||
1446 | } | |||
1447 | ||||
1448 | /// @returns true if the predicate is relational (not EQ or NE). | |||
1449 |   /// Determine if this is a relational predicate.
1450 | bool isRelational() const { return !isEquality(); } | |||
1451 | ||||
1452 | /// Exchange the two operands to this instruction in such a way that it does | |||
1453 | /// not modify the semantics of the instruction. The predicate value may be | |||
1454 | /// changed to retain the same result if the predicate is order dependent | |||
1455 |   /// (e.g. olt).
1456 | /// Swap operands and adjust predicate. | |||
1457 | void swapOperands() { | |||
1458 | setPredicate(getSwappedPredicate()); | |||
1459 | Op<0>().swap(Op<1>()); | |||
1460 | } | |||
1461 | ||||
1462 | /// Returns the sequence of all FCmp predicates. | |||
1463 | /// | |||
1464 | static auto predicates() { return FCmpPredicates(); } | |||
1465 | ||||
1466 | /// Return result of `LHS Pred RHS` comparison. | |||
1467 | static bool compare(const APFloat &LHS, const APFloat &RHS, | |||
1468 | FCmpInst::Predicate Pred); | |||
1469 | ||||
1470 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1471 | static bool classof(const Instruction *I) { | |||
1472 | return I->getOpcode() == Instruction::FCmp; | |||
1473 | } | |||
1474 | static bool classof(const Value *V) { | |||
1475 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1476 | } | |||
1477 | }; | |||
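// Illustrative sketch (editor's addition, not part of Instructions.h): the
// static FCmpInst::compare() helper evaluates a predicate on constants
// without creating IR. Note the ordered/unordered split: with a NaN operand,
// OEQ is false while UEQ is true.
static void exampleFCmp() {
  using namespace llvm;
  APFloat One(1.0f);
  APFloat NaN = APFloat::getNaN(APFloat::IEEEsingle());
  bool Ordered = FCmpInst::compare(One, NaN, FCmpInst::FCMP_OEQ);   // false
  bool Unordered = FCmpInst::compare(One, NaN, FCmpInst::FCMP_UEQ); // true
  (void)Ordered;
  (void)Unordered;
}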
1478 | ||||
1479 | //===----------------------------------------------------------------------===// | |||
1480 | /// This class represents a function call, abstracting a target | |||
1481 | /// machine's calling convention. This class uses the low bits of the
1482 | /// SubClassData field to encode the tail call kind. The rest of the bits
1483 | /// hold the calling convention of the call. | |||
1484 | /// | |||
1485 | class CallInst : public CallBase { | |||
1486 | CallInst(const CallInst &CI); | |||
1487 | ||||
1488 |   /// Construct a CallInst given a range of arguments.
1490 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1491 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, | |||
1492 | Instruction *InsertBefore); | |||
1493 | ||||
1494 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1495 | const Twine &NameStr, Instruction *InsertBefore) | |||
1496 | : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {} | |||
1497 | ||||
1498 |   /// Construct a CallInst given a range of arguments.
1500 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1501 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, | |||
1502 | BasicBlock *InsertAtEnd); | |||
1503 | ||||
1504 | explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, | |||
1505 | Instruction *InsertBefore); | |||
1506 | ||||
1507 |   CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1508 | BasicBlock *InsertAtEnd); | |||
1509 | ||||
1510 | void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, | |||
1511 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); | |||
1512 | void init(FunctionType *FTy, Value *Func, const Twine &NameStr); | |||
1513 | ||||
1514 | /// Compute the number of operands to allocate. | |||
1515 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { | |||
1516 | // We need one operand for the called function, plus the input operand | |||
1517 | // counts provided. | |||
1518 | return 1 + NumArgs + NumBundleInputs; | |||
1519 | } | |||
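  // Editor's note (worked example, not in the original header): a call with
  // two arguments plus one operand bundle carrying a single value occupies
  //   ComputeNumOperands(2, /*NumBundleInputs=*/1) == 1 + 2 + 1 == 4
  // operands: the callee, the two arguments, and the one bundle input.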
1520 | ||||
1521 | protected: | |||
1522 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1523 | friend class Instruction; | |||
1524 | ||||
1525 | CallInst *cloneImpl() const; | |||
1526 | ||||
1527 | public: | |||
1528 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", | |||
1529 | Instruction *InsertBefore = nullptr) { | |||
1530 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); | |||
1531 | } | |||
1532 | ||||
1533 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1534 | const Twine &NameStr, | |||
1535 | Instruction *InsertBefore = nullptr) { | |||
1536 | return new (ComputeNumOperands(Args.size())) | |||
1537 | CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore); | |||
1538 | } | |||
1539 | ||||
1540 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1541 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
1542 | const Twine &NameStr = "", | |||
1543 | Instruction *InsertBefore = nullptr) { | |||
1544 | const int NumOperands = | |||
1545 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); | |||
1546 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
1547 | ||||
1548 | return new (NumOperands, DescriptorBytes) | |||
1549 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); | |||
1550 | } | |||
1551 | ||||
1552 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, | |||
1553 | BasicBlock *InsertAtEnd) { | |||
1554 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); | |||
1555 | } | |||
1556 | ||||
1557 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1558 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
1559 | return new (ComputeNumOperands(Args.size())) | |||
1560 | CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd); | |||
1561 | } | |||
1562 | ||||
1563 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1564 | ArrayRef<OperandBundleDef> Bundles, | |||
1565 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
1566 | const int NumOperands = | |||
1567 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); | |||
1568 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
1569 | ||||
1570 | return new (NumOperands, DescriptorBytes) | |||
1571 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); | |||
1572 | } | |||
1573 | ||||
1574 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", | |||
1575 | Instruction *InsertBefore = nullptr) { | |||
1576 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, | |||
1577 | InsertBefore); | |||
1578 | } | |||
1579 | ||||
1580 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, | |||
1581 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
1582 | const Twine &NameStr = "", | |||
1583 | Instruction *InsertBefore = nullptr) { | |||
1584 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, | |||
1585 | NameStr, InsertBefore); | |||
1586 | } | |||
1587 | ||||
1588 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, | |||
1589 | const Twine &NameStr, | |||
1590 | Instruction *InsertBefore = nullptr) { | |||
1591 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, | |||
1592 | InsertBefore); | |||
1593 | } | |||
1594 | ||||
1595 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr, | |||
1596 | BasicBlock *InsertAtEnd) { | |||
1597 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, | |||
1598 | InsertAtEnd); | |||
1599 | } | |||
1600 | ||||
1601 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, | |||
1602 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
1603 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, | |||
1604 | InsertAtEnd); | |||
1605 | } | |||
1606 | ||||
1607 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, | |||
1608 | ArrayRef<OperandBundleDef> Bundles, | |||
1609 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
1610 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, | |||
1611 | NameStr, InsertAtEnd); | |||
1612 | } | |||
1613 | ||||
1614 | /// Create a clone of \p CI with a different set of operand bundles and | |||
1615 | /// insert it before \p InsertPt. | |||
1616 | /// | |||
1617 |   /// The returned call instruction is identical to \p CI in every way except
1618 | /// the operand bundles for the new instruction are set to the operand bundles | |||
1619 | /// in \p Bundles. | |||
1620 | static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, | |||
1621 | Instruction *InsertPt = nullptr); | |||
1622 | ||||
1623 | /// Generate the IR for a call to malloc: | |||
1624 | /// 1. Compute the malloc call's argument as the specified type's size, | |||
1625 | /// possibly multiplied by the array size if the array size is not | |||
1626 | /// constant 1. | |||
1627 | /// 2. Call malloc with that argument. | |||
1628 | /// 3. Bitcast the result of the malloc call to the specified type. | |||
1629 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, | |||
1630 | Type *AllocTy, Value *AllocSize, | |||
1631 | Value *ArraySize = nullptr, | |||
1632 | Function *MallocF = nullptr, | |||
1633 | const Twine &Name = ""); | |||
1634 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, | |||
1635 | Type *AllocTy, Value *AllocSize, | |||
1636 | Value *ArraySize = nullptr, | |||
1637 | Function *MallocF = nullptr, | |||
1638 | const Twine &Name = ""); | |||
1639 | static Instruction * | |||
1640 | CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, Type *AllocTy, | |||
1641 | Value *AllocSize, Value *ArraySize = nullptr, | |||
1642 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
1643 | Function *MallocF = nullptr, const Twine &Name = ""); | |||
1644 | static Instruction * | |||
1645 | CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, Type *AllocTy, | |||
1646 | Value *AllocSize, Value *ArraySize = nullptr, | |||
1647 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
1648 | Function *MallocF = nullptr, const Twine &Name = ""); | |||
1649 | /// Generate the IR for a call to the builtin free function. | |||
1650 | static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); | |||
1651 | static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); | |||
1652 | static Instruction *CreateFree(Value *Source, | |||
1653 | ArrayRef<OperandBundleDef> Bundles, | |||
1654 | Instruction *InsertBefore); | |||
1655 | static Instruction *CreateFree(Value *Source, | |||
1656 | ArrayRef<OperandBundleDef> Bundles, | |||
1657 | BasicBlock *InsertAtEnd); | |||
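  // Editor's note (illustrative, not in the original header): a typical use,
  // assuming IntPtrTy matches the target's size_t type:
  //   Value *AllocSize = ConstantExpr::getSizeOf(T);
  //   Instruction *P = CallInst::CreateMalloc(BB, IntPtrTy, T, AllocSize,
  //                                           /*ArraySize=*/N,
  //                                           /*MallocF=*/nullptr, "p");
  // emits the size multiply, the call to malloc, and the bitcast to T* at
  // the end of BB; CallInst::CreateFree(P, BB) emits the matching free.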
1658 | ||||
1659 | // Note that 'musttail' implies 'tail'. | |||
1660 | enum TailCallKind : unsigned { | |||
1661 | TCK_None = 0, | |||
1662 | TCK_Tail = 1, | |||
1663 | TCK_MustTail = 2, | |||
1664 | TCK_NoTail = 3, | |||
1665 | TCK_LAST = TCK_NoTail | |||
1666 | }; | |||
1667 | ||||
1668 | using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; | |||
1669 | static_assert( | |||
1670 | Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), | |||
1671 | "Bitfields must be contiguous"); | |||
1672 | ||||
1673 | TailCallKind getTailCallKind() const { | |||
1674 | return getSubclassData<TailCallKindField>(); | |||
1675 | } | |||
1676 | ||||
1677 | bool isTailCall() const { | |||
1678 | TailCallKind Kind = getTailCallKind(); | |||
1679 | return Kind == TCK_Tail || Kind == TCK_MustTail; | |||
1680 | } | |||
1681 | ||||
1682 | bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } | |||
1683 | ||||
1684 | bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } | |||
1685 | ||||
1686 | void setTailCallKind(TailCallKind TCK) { | |||
1687 | setSubclassData<TailCallKindField>(TCK); | |||
1688 | } | |||
1689 | ||||
1690 | void setTailCall(bool IsTc = true) { | |||
1691 | setTailCallKind(IsTc ? TCK_Tail : TCK_None); | |||
1692 | } | |||
1693 | ||||
1694 | /// Return true if the call can return twice | |||
1695 | bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } | |||
1696 | void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); } | |||
1697 | ||||
1698 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1699 | static bool classof(const Instruction *I) { | |||
1700 | return I->getOpcode() == Instruction::Call; | |||
1701 | } | |||
1702 | static bool classof(const Value *V) { | |||
1703 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1704 | } | |||
1705 | ||||
1706 | /// Updates profile metadata by scaling it by \p S / \p T. | |||
1707 | void updateProfWeight(uint64_t S, uint64_t T); | |||
1708 | ||||
1709 | private: | |||
1710 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
1711 | // method so that subclasses cannot accidentally use it. | |||
1712 | template <typename Bitfield> | |||
1713 | void setSubclassData(typename Bitfield::Type Value) { | |||
1714 | Instruction::setSubclassData<Bitfield>(Value); | |||
1715 | } | |||
1716 | }; | |||
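// Illustrative sketch (editor's addition, not part of Instructions.h):
// building a call through the FunctionCallee overloads above and tagging it
// with a tail-call kind. Assumes "llvm/IR/IRBuilder.h" and "llvm/IR/Module.h"
// are available; the callee name "my_callback" is hypothetical.
static llvm::CallInst *emitExampleCall(llvm::Module &M,
                                       llvm::IRBuilder<> &Builder,
                                       llvm::Value *Arg) {
  using namespace llvm;
  // getOrInsertFunction yields a FunctionCallee ({FunctionType*, callee}),
  // which is exactly what the Create overloads above consume.
  FunctionCallee Callee = M.getOrInsertFunction(
      "my_callback", Type::getVoidTy(M.getContext()), Arg->getType());
  CallInst *CI = CallInst::Create(Callee, {Arg});
  Builder.Insert(CI);
  // TCK_Tail is only a hint; TCK_MustTail makes tail-call lowering mandatory
  // and TCK_NoTail forbids it.
  CI->setTailCallKind(CallInst::TCK_Tail);
  return CI;
}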
1717 | ||||
1718 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1719 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, | |||
1720 | BasicBlock *InsertAtEnd) | |||
1721 | : CallBase(Ty->getReturnType(), Instruction::Call, | |||
1722 | OperandTraits<CallBase>::op_end(this) - | |||
1723 | (Args.size() + CountBundleInputs(Bundles) + 1), | |||
1724 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), | |||
1725 | InsertAtEnd) { | |||
1726 | init(Ty, Func, Args, Bundles, NameStr); | |||
1727 | } | |||
1728 | ||||
1729 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1730 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, | |||
1731 | Instruction *InsertBefore) | |||
1732 | : CallBase(Ty->getReturnType(), Instruction::Call, | |||
1733 | OperandTraits<CallBase>::op_end(this) - | |||
1734 | (Args.size() + CountBundleInputs(Bundles) + 1), | |||
1735 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), | |||
1736 | InsertBefore) { | |||
1737 | init(Ty, Func, Args, Bundles, NameStr); | |||
1738 | } | |||
1739 | ||||
1740 | //===----------------------------------------------------------------------===// | |||
1741 | // SelectInst Class | |||
1742 | //===----------------------------------------------------------------------===// | |||
1743 | ||||
1744 | /// This class represents the LLVM 'select' instruction. | |||
1745 | /// | |||
1746 | class SelectInst : public Instruction { | |||
1747 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, | |||
1748 | Instruction *InsertBefore) | |||
1749 | : Instruction(S1->getType(), Instruction::Select, | |||
1750 | &Op<0>(), 3, InsertBefore) { | |||
1751 | init(C, S1, S2); | |||
1752 | setName(NameStr); | |||
1753 | } | |||
1754 | ||||
1755 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, | |||
1756 | BasicBlock *InsertAtEnd) | |||
1757 | : Instruction(S1->getType(), Instruction::Select, | |||
1758 | &Op<0>(), 3, InsertAtEnd) { | |||
1759 | init(C, S1, S2); | |||
1760 | setName(NameStr); | |||
1761 | } | |||
1762 | ||||
1763 | void init(Value *C, Value *S1, Value *S2) { | |||
1764 |     assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1765 | Op<0>() = C; | |||
1766 | Op<1>() = S1; | |||
1767 | Op<2>() = S2; | |||
1768 | } | |||
1769 | ||||
1770 | protected: | |||
1771 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1772 | friend class Instruction; | |||
1773 | ||||
1774 | SelectInst *cloneImpl() const; | |||
1775 | ||||
1776 | public: | |||
1777 | static SelectInst *Create(Value *C, Value *S1, Value *S2, | |||
1778 | const Twine &NameStr = "", | |||
1779 | Instruction *InsertBefore = nullptr, | |||
1780 | Instruction *MDFrom = nullptr) { | |||
1781 | SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); | |||
1782 | if (MDFrom) | |||
1783 | Sel->copyMetadata(*MDFrom); | |||
1784 | return Sel; | |||
1785 | } | |||
1786 | ||||
1787 | static SelectInst *Create(Value *C, Value *S1, Value *S2, | |||
1788 | const Twine &NameStr, | |||
1789 | BasicBlock *InsertAtEnd) { | |||
1790 | return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); | |||
1791 | } | |||
1792 | ||||
1793 | const Value *getCondition() const { return Op<0>(); } | |||
1794 | const Value *getTrueValue() const { return Op<1>(); } | |||
1795 | const Value *getFalseValue() const { return Op<2>(); } | |||
1796 | Value *getCondition() { return Op<0>(); } | |||
1797 | Value *getTrueValue() { return Op<1>(); } | |||
1798 | Value *getFalseValue() { return Op<2>(); } | |||
1799 | ||||
1800 | void setCondition(Value *V) { Op<0>() = V; } | |||
1801 | void setTrueValue(Value *V) { Op<1>() = V; } | |||
1802 | void setFalseValue(Value *V) { Op<2>() = V; } | |||
1803 | ||||
1804 | /// Swap the true and false values of the select instruction. | |||
1805 | /// This doesn't swap prof metadata. | |||
1806 | void swapValues() { Op<1>().swap(Op<2>()); } | |||
1807 | ||||
1808 | /// Return a string if the specified operands are invalid | |||
1809 | /// for a select operation, otherwise return null. | |||
1810 | static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); | |||
1811 | ||||
1812 | /// Transparently provide more efficient getOperand methods. | |||
1813 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
1814 | ||||
1815 | OtherOps getOpcode() const { | |||
1816 | return static_cast<OtherOps>(Instruction::getOpcode()); | |||
1817 | } | |||
1818 | ||||
1819 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1820 | static bool classof(const Instruction *I) { | |||
1821 | return I->getOpcode() == Instruction::Select; | |||
1822 | } | |||
1823 | static bool classof(const Value *V) { | |||
1824 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1825 | } | |||
1826 | }; | |||
1827 | ||||
1828 | template <> | |||
1829 | struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { | |||
1830 | }; | |||
1831 | ||||
1832 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
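// Illustrative sketch (editor's addition, not part of Instructions.h):
// creating a select and reading it back through the accessors above. Cond is
// assumed to be i1 (or a vector of i1) and A/B to have matching types, per
// areInvalidOperands().
static llvm::SelectInst *exampleSelect(llvm::Value *Cond, llvm::Value *A,
                                       llvm::Value *B,
                                       llvm::Instruction *InsertBefore) {
  using namespace llvm;
  assert(!SelectInst::areInvalidOperands(Cond, A, B) &&
         "operand types must line up");
  SelectInst *Sel = SelectInst::Create(Cond, A, B, "sel", InsertBefore);
  // Operand order is <condition, true-value, false-value>.
  assert(Sel->getTrueValue() == A && Sel->getFalseValue() == B);
  // swapValues() exchanges only the two arms; a caller preserving semantics
  // would also have to invert the condition.
  return Sel;
}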
1833 | ||||
1834 | //===----------------------------------------------------------------------===// | |||
1835 | // VAArgInst Class | |||
1836 | //===----------------------------------------------------------------------===// | |||
1837 | ||||
1838 | /// This class represents the va_arg llvm instruction, which returns | |||
1839 | /// an argument of the specified type given a va_list and increments that list | |||
1840 | /// | |||
1841 | class VAArgInst : public UnaryInstruction { | |||
1842 | protected: | |||
1843 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1844 | friend class Instruction; | |||
1845 | ||||
1846 | VAArgInst *cloneImpl() const; | |||
1847 | ||||
1848 | public: | |||
1849 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", | |||
1850 | Instruction *InsertBefore = nullptr) | |||
1851 | : UnaryInstruction(Ty, VAArg, List, InsertBefore) { | |||
1852 | setName(NameStr); | |||
1853 | } | |||
1854 | ||||
1855 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr, | |||
1856 | BasicBlock *InsertAtEnd) | |||
1857 | : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { | |||
1858 | setName(NameStr); | |||
1859 | } | |||
1860 | ||||
1861 | Value *getPointerOperand() { return getOperand(0); } | |||
1862 | const Value *getPointerOperand() const { return getOperand(0); } | |||
1863 | static unsigned getPointerOperandIndex() { return 0U; } | |||
1864 | ||||
1865 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1866 | static bool classof(const Instruction *I) { | |||
1867 | return I->getOpcode() == VAArg; | |||
1868 | } | |||
1869 | static bool classof(const Value *V) { | |||
1870 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1871 | } | |||
1872 | }; | |||
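// Illustrative sketch (editor's addition, not part of Instructions.h): one
// va_arg lowering step. VAList is assumed to be the pointer set up by the
// @llvm.va_start bookkeeping.
static llvm::Value *exampleVAArg(llvm::Value *VAList,
                                 llvm::Instruction *InsertBefore) {
  using namespace llvm;
  // Reads an i32 from the va_list and advances the list in place.
  Type *Int32Ty = Type::getInt32Ty(VAList->getContext());
  return new VAArgInst(VAList, Int32Ty, "arg", InsertBefore);
}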
1873 | ||||
1874 | //===----------------------------------------------------------------------===// | |||
1875 | // ExtractElementInst Class | |||
1876 | //===----------------------------------------------------------------------===// | |||
1877 | ||||
1878 | /// This instruction extracts a single (scalar) | |||
1879 | /// element from a VectorType value | |||
1880 | /// | |||
1881 | class ExtractElementInst : public Instruction { | |||
1882 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", | |||
1883 | Instruction *InsertBefore = nullptr); | |||
1884 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, | |||
1885 | BasicBlock *InsertAtEnd); | |||
1886 | ||||
1887 | protected: | |||
1888 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1889 | friend class Instruction; | |||
1890 | ||||
1891 | ExtractElementInst *cloneImpl() const; | |||
1892 | ||||
1893 | public: | |||
1894 | static ExtractElementInst *Create(Value *Vec, Value *Idx, | |||
1895 | const Twine &NameStr = "", | |||
1896 | Instruction *InsertBefore = nullptr) { | |||
1897 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); | |||
1898 | } | |||
1899 | ||||
1900 | static ExtractElementInst *Create(Value *Vec, Value *Idx, | |||
1901 | const Twine &NameStr, | |||
1902 | BasicBlock *InsertAtEnd) { | |||
1903 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); | |||
1904 | } | |||
1905 | ||||
1906 | /// Return true if an extractelement instruction can be | |||
1907 | /// formed with the specified operands. | |||
1908 | static bool isValidOperands(const Value *Vec, const Value *Idx); | |||
1909 | ||||
1910 | Value *getVectorOperand() { return Op<0>(); } | |||
1911 | Value *getIndexOperand() { return Op<1>(); } | |||
1912 | const Value *getVectorOperand() const { return Op<0>(); } | |||
1913 | const Value *getIndexOperand() const { return Op<1>(); } | |||
1914 | ||||
1915 | VectorType *getVectorOperandType() const { | |||
1916 | return cast<VectorType>(getVectorOperand()->getType()); | |||
1917 | } | |||
1918 | ||||
1919 | /// Transparently provide more efficient getOperand methods. | |||
1920 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
1921 | ||||
1922 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1923 | static bool classof(const Instruction *I) { | |||
1924 | return I->getOpcode() == Instruction::ExtractElement; | |||
1925 | } | |||
1926 | static bool classof(const Value *V) { | |||
1927 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1928 | } | |||
1929 | }; | |||
1930 | ||||
1931 | template <> | |||
1932 | struct OperandTraits<ExtractElementInst> : | |||
1933 | public FixedNumOperandTraits<ExtractElementInst, 2> { | |||
1934 | }; | |||
1935 | ||||
1936 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
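// Illustrative sketch (editor's addition, not part of Instructions.h):
// pulling lane 0 out of a vector. Vec is assumed to have vector type;
// isValidOperands() mirrors the constructor's requirements without asserting.
static llvm::Value *exampleExtract(llvm::Value *Vec,
                                   llvm::Instruction *InsertBefore) {
  using namespace llvm;
  Value *Idx = ConstantInt::get(Type::getInt64Ty(Vec->getContext()), 0);
  if (!ExtractElementInst::isValidOperands(Vec, Idx))
    return nullptr;
  return ExtractElementInst::Create(Vec, Idx, "lane0", InsertBefore);
}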
1937 | ||||
1938 | //===----------------------------------------------------------------------===// | |||
1939 | // InsertElementInst Class | |||
1940 | //===----------------------------------------------------------------------===// | |||
1941 | ||||
1942 | /// This instruction inserts a single (scalar) | |||
1943 | /// element into a VectorType value | |||
1944 | /// | |||
1945 | class InsertElementInst : public Instruction { | |||
1946 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, | |||
1947 | const Twine &NameStr = "", | |||
1948 | Instruction *InsertBefore = nullptr); | |||
1949 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, | |||
1950 | BasicBlock *InsertAtEnd); | |||
1951 | ||||
1952 | protected: | |||
1953 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1954 | friend class Instruction; | |||
1955 | ||||
1956 | InsertElementInst *cloneImpl() const; | |||
1957 | ||||
1958 | public: | |||
1959 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, | |||
1960 | const Twine &NameStr = "", | |||
1961 | Instruction *InsertBefore = nullptr) { | |||
1962 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); | |||
1963 | } | |||
1964 | ||||
1965 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, | |||
1966 | const Twine &NameStr, | |||
1967 | BasicBlock *InsertAtEnd) { | |||
1968 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); | |||
1969 | } | |||
1970 | ||||
1971 | /// Return true if an insertelement instruction can be | |||
1972 | /// formed with the specified operands. | |||
1973 | static bool isValidOperands(const Value *Vec, const Value *NewElt, | |||
1974 | const Value *Idx); | |||
1975 | ||||
1976 | /// Overload to return most specific vector type. | |||
1977 | /// | |||
1978 | VectorType *getType() const { | |||
1979 | return cast<VectorType>(Instruction::getType()); | |||
1980 | } | |||
1981 | ||||
1982 | /// Transparently provide more efficient getOperand methods. | |||
1983 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
1984 | ||||
1985 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1986 | static bool classof(const Instruction *I) { | |||
1987 | return I->getOpcode() == Instruction::InsertElement; | |||
1988 | } | |||
1989 | static bool classof(const Value *V) { | |||
1990 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1991 | } | |||
1992 | }; | |||
1993 | ||||
1994 | template <> | |||
1995 | struct OperandTraits<InsertElementInst> : | |||
1996 | public FixedNumOperandTraits<InsertElementInst, 3> { | |||
1997 | }; | |||
1998 | ||||
1999 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
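// Illustrative sketch (editor's addition, not part of Instructions.h): the
// insertelement counterpart of the extract example above -- writes Elt into
// lane 0 of Vec.
static llvm::Value *exampleInsert(llvm::Value *Vec, llvm::Value *Elt,
                                  llvm::Instruction *InsertBefore) {
  using namespace llvm;
  Value *Idx = ConstantInt::get(Type::getInt64Ty(Vec->getContext()), 0);
  if (!InsertElementInst::isValidOperands(Vec, Elt, Idx))
    return nullptr;
  return InsertElementInst::Create(Vec, Elt, Idx, "withlane0", InsertBefore);
}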
2000 | ||||
2001 | //===----------------------------------------------------------------------===// | |||
2002 | // ShuffleVectorInst Class | |||
2003 | //===----------------------------------------------------------------------===// | |||
2004 | ||||
2005 | constexpr int PoisonMaskElem = -1; | |||
2006 | ||||
2007 | /// This instruction constructs a fixed permutation of two | |||
2008 | /// input vectors. | |||
2009 | /// | |||
2010 | /// For each element of the result vector, the shuffle mask selects an element | |||
2011 | /// from one of the input vectors to copy to the result. Non-negative elements | |||
2012 | /// in the mask represent an index into the concatenated pair of input vectors. | |||
2013 | /// PoisonMaskElem (-1) specifies that the result element is poison. | |||
2014 | /// | |||
2015 | /// For scalable vectors, all the elements of the mask must be 0 or -1. This | |||
2016 | /// requirement may be relaxed in the future. | |||
2017 | class ShuffleVectorInst : public Instruction { | |||
2018 | SmallVector<int, 4> ShuffleMask; | |||
2019 | Constant *ShuffleMaskForBitcode; | |||
2020 | ||||
2021 | protected: | |||
2022 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2023 | friend class Instruction; | |||
2024 | ||||
2025 | ShuffleVectorInst *cloneImpl() const; | |||
2026 | ||||
2027 | public: | |||
2028 | ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "", | |||
2029 | Instruction *InsertBefore = nullptr); | |||
2030 | ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, | |||
2031 | BasicBlock *InsertAtEnd); | |||
2032 | ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "", | |||
2033 | Instruction *InsertBefore = nullptr); | |||
2034 | ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, | |||
2035 | BasicBlock *InsertAtEnd); | |||
2036 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, | |||
2037 | const Twine &NameStr = "", | |||
2038 |                     Instruction *InsertBefore = nullptr);
2039 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, | |||
2040 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2041 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, | |||
2042 | const Twine &NameStr = "", | |||
2043 |                     Instruction *InsertBefore = nullptr);
2044 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, | |||
2045 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2046 | ||||
2047 | void *operator new(size_t S) { return User::operator new(S, 2); } | |||
2048 | void operator delete(void *Ptr) { return User::operator delete(Ptr); } | |||
2049 | ||||
2050 | /// Swap the operands and adjust the mask to preserve the semantics | |||
2051 | /// of the instruction. | |||
2052 | void commute(); | |||
2053 | ||||
2054 | /// Return true if a shufflevector instruction can be | |||
2055 | /// formed with the specified operands. | |||
2056 | static bool isValidOperands(const Value *V1, const Value *V2, | |||
2057 | const Value *Mask); | |||
2058 | static bool isValidOperands(const Value *V1, const Value *V2, | |||
2059 | ArrayRef<int> Mask); | |||
2060 | ||||
2061 | /// Overload to return most specific vector type. | |||
2062 | /// | |||
2063 | VectorType *getType() const { | |||
2064 | return cast<VectorType>(Instruction::getType()); | |||
2065 | } | |||
2066 | ||||
2067 | /// Transparently provide more efficient getOperand methods. | |||
2068 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2069 | ||||
2070 | /// Return the shuffle mask value of this instruction for the given element | |||
2071 | /// index. Return PoisonMaskElem if the element is undef. | |||
2072 | int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } | |||
2073 | ||||
2074 | /// Convert the input shuffle mask operand to a vector of integers. Undefined | |||
2075 | /// elements of the mask are returned as PoisonMaskElem. | |||
2076 | static void getShuffleMask(const Constant *Mask, | |||
2077 | SmallVectorImpl<int> &Result); | |||
2078 | ||||
2079 | /// Return the mask for this instruction as a vector of integers. Undefined | |||
2080 | /// elements of the mask are returned as PoisonMaskElem. | |||
2081 | void getShuffleMask(SmallVectorImpl<int> &Result) const { | |||
2082 | Result.assign(ShuffleMask.begin(), ShuffleMask.end()); | |||
2083 | } | |||
2084 | ||||
2085 | /// Return the mask for this instruction, for use in bitcode. | |||
2086 | /// | |||
2087 | /// TODO: This is temporary until we decide a new bitcode encoding for | |||
2088 | /// shufflevector. | |||
2089 | Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } | |||
2090 | ||||
2091 | static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, | |||
2092 | Type *ResultTy); | |||
2093 | ||||
2094 | void setShuffleMask(ArrayRef<int> Mask); | |||
2095 | ||||
2096 | ArrayRef<int> getShuffleMask() const { return ShuffleMask; } | |||
2097 | ||||
2098 | /// Return true if this shuffle returns a vector with a different number of | |||
2099 | /// elements than its source vectors. | |||
2100 | /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> | |||
2101 | /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> | |||
2102 | bool changesLength() const { | |||
2103 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) | |||
2104 | ->getElementCount() | |||
2105 | .getKnownMinValue(); | |||
2106 | unsigned NumMaskElts = ShuffleMask.size(); | |||
2107 | return NumSourceElts != NumMaskElts; | |||
2108 | } | |||
2109 | ||||
2110 | /// Return true if this shuffle returns a vector with a greater number of | |||
2111 | /// elements than its source vectors. | |||
2112 | /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> | |||
2113 | bool increasesLength() const { | |||
2114 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) | |||
2115 | ->getElementCount() | |||
2116 | .getKnownMinValue(); | |||
2117 | unsigned NumMaskElts = ShuffleMask.size(); | |||
2118 | return NumSourceElts < NumMaskElts; | |||
2119 | } | |||
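  // Editor's note (worked example, not in the original header): the result
  // lane count comes from the mask, not the sources, so with two <4 x i32>
  // sources:
  //   mask <0,1,2,3,4,5,6,7> -> 8 lanes: changesLength() and
  //                             increasesLength() are both true (a concat)
  //   mask <0,5>             -> 2 lanes: changesLength() only
  //   mask <3,2,1,0>         -> 4 lanes: neither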
2120 | ||||
2121 | /// Return true if this shuffle mask chooses elements from exactly one source | |||
2122 | /// vector. | |||
2123 | /// Example: <7,5,undef,7> | |||
2124 | /// This assumes that vector operands are the same length as the mask. | |||
2125 | static bool isSingleSourceMask(ArrayRef<int> Mask); | |||
2126 | static bool isSingleSourceMask(const Constant *Mask) { | |||
2127 |     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2128 | SmallVector<int, 16> MaskAsInts; | |||
2129 | getShuffleMask(Mask, MaskAsInts); | |||
2130 | return isSingleSourceMask(MaskAsInts); | |||
2131 | } | |||
2132 | ||||
2133 | /// Return true if this shuffle chooses elements from exactly one source | |||
2134 | /// vector without changing the length of that vector. | |||
2135 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> | |||
2136 | /// TODO: Optionally allow length-changing shuffles. | |||
2137 | bool isSingleSource() const { | |||
2138 | return !changesLength() && isSingleSourceMask(ShuffleMask); | |||
2139 | } | |||
2140 | ||||
2141 | /// Return true if this shuffle mask chooses elements from exactly one source | |||
2142 | /// vector without lane crossings. A shuffle using this mask is not | |||
2143 | /// necessarily a no-op because it may change the number of elements from its | |||
2144 | /// input vectors or it may provide demanded bits knowledge via undef lanes. | |||
2145 | /// Example: <undef,undef,2,3> | |||
2146 | static bool isIdentityMask(ArrayRef<int> Mask); | |||
2147 | static bool isIdentityMask(const Constant *Mask) { | |||
2148 |     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2149 | ||||
2150 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2151 | // case. | |||
2152 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2153 | return false; | |||
2154 | ||||
2155 | SmallVector<int, 16> MaskAsInts; | |||
2156 | getShuffleMask(Mask, MaskAsInts); | |||
2157 | return isIdentityMask(MaskAsInts); | |||
2158 | } | |||
2159 | ||||
2160 | /// Return true if this shuffle chooses elements from exactly one source | |||
2161 | /// vector without lane crossings and does not change the number of elements | |||
2162 | /// from its input vectors. | |||
2163 | /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> | |||
2164 | bool isIdentity() const { | |||
2165 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2166 | // case. | |||
2167 | if (isa<ScalableVectorType>(getType())) | |||
2168 | return false; | |||
2169 | ||||
2170 | return !changesLength() && isIdentityMask(ShuffleMask); | |||
2171 | } | |||
2172 | ||||
2173 | /// Return true if this shuffle lengthens exactly one source vector with | |||
2174 | /// undefs in the high elements. | |||
2175 | bool isIdentityWithPadding() const; | |||
2176 | ||||
2177 | /// Return true if this shuffle extracts the first N elements of exactly one | |||
2178 | /// source vector. | |||
2179 | bool isIdentityWithExtract() const; | |||
2180 | ||||
2181 | /// Return true if this shuffle concatenates its 2 source vectors. This | |||
2182 |   /// returns false if either input is undefined. In that case, the shuffle
2183 |   /// is better classified as an identity with padding operation.
2184 | bool isConcat() const; | |||
2185 | ||||
2186 | /// Return true if this shuffle mask chooses elements from its source vectors | |||
2187 | /// without lane crossings. A shuffle using this mask would be | |||
2188 | /// equivalent to a vector select with a constant condition operand. | |||
2189 | /// Example: <4,1,6,undef> | |||
2190 | /// This returns false if the mask does not choose from both input vectors. | |||
2191 | /// In that case, the shuffle is better classified as an identity shuffle. | |||
2192 | /// This assumes that vector operands are the same length as the mask | |||
2193 | /// (a length-changing shuffle can never be equivalent to a vector select). | |||
2194 | static bool isSelectMask(ArrayRef<int> Mask); | |||
2195 | static bool isSelectMask(const Constant *Mask) { | |||
2196 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); | |||
2197 | SmallVector<int, 16> MaskAsInts; | |||
2198 | getShuffleMask(Mask, MaskAsInts); | |||
2199 | return isSelectMask(MaskAsInts); | |||
2200 | } | |||
2201 | ||||
2202 | /// Return true if this shuffle chooses elements from its source vectors | |||
2203 | /// without lane crossings and all operands have the same number of elements. | |||
2204 | /// In other words, this shuffle is equivalent to a vector select with a | |||
2205 | /// constant condition operand. | |||
2206 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> | |||
2207 | /// This returns false if the mask does not choose from both input vectors. | |||
2208 | /// In that case, the shuffle is better classified as an identity shuffle. | |||
2209 | /// TODO: Optionally allow length-changing shuffles. | |||
2210 | bool isSelect() const { | |||
2211 | return !changesLength() && isSelectMask(ShuffleMask); | |||
2212 | } | |||
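// Illustrative IR (a sketch, not part of the header; %A and %B are assumed
// <4 x i32> operands): a select-equivalent shuffle and the vector select it
// matches:
//
//   %s = shufflevector <4 x i32> %A, <4 x i32> %B,
//                      <4 x i32> <i32 0, i32 5, i32 2, i32 7>
//   ; behaves like:
//   %s = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>,
//               <4 x i32> %A, <4 x i32> %B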
2213 | ||||
2214 | /// Return true if this shuffle mask swaps the order of elements from exactly | |||
2215 | /// one source vector. | |||
2216 | /// Example: <7,6,undef,4> | |||
2217 | /// This assumes that vector operands are the same length as the mask. | |||
2218 | static bool isReverseMask(ArrayRef<int> Mask); | |||
2219 | static bool isReverseMask(const Constant *Mask) { | |||
2220 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); | |||
2221 | SmallVector<int, 16> MaskAsInts; | |||
2222 | getShuffleMask(Mask, MaskAsInts); | |||
2223 | return isReverseMask(MaskAsInts); | |||
2224 | } | |||
2225 | ||||
2226 | /// Return true if this shuffle swaps the order of elements from exactly | |||
2227 | /// one source vector. | |||
2228 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> | |||
2229 | /// TODO: Optionally allow length-changing shuffles. | |||
2230 | bool isReverse() const { | |||
2231 | return !changesLength() && isReverseMask(ShuffleMask); | |||
2232 | } | |||
2233 | ||||
2234 | /// Return true if this shuffle mask chooses all elements with the same value | |||
2235 | /// as the first element of exactly one source vector. | |||
2236 | /// Example: <4,undef,undef,4> | |||
2237 | /// This assumes that vector operands are the same length as the mask. | |||
2238 | static bool isZeroEltSplatMask(ArrayRef<int> Mask); | |||
2239 | static bool isZeroEltSplatMask(const Constant *Mask) { | |||
2240 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); | |||
2241 | SmallVector<int, 16> MaskAsInts; | |||
2242 | getShuffleMask(Mask, MaskAsInts); | |||
2243 | return isZeroEltSplatMask(MaskAsInts); | |||
2244 | } | |||
2245 | ||||
2246 | /// Return true if all elements of this shuffle are the same value as the | |||
2247 | /// first element of exactly one source vector without changing the length | |||
2248 | /// of that vector. | |||
2249 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> | |||
2250 | /// TODO: Optionally allow length-changing shuffles. | |||
2251 | /// TODO: Optionally allow splats from other elements. | |||
2252 | bool isZeroEltSplat() const { | |||
2253 | return !changesLength() && isZeroEltSplatMask(ShuffleMask); | |||
2254 | } | |||
2255 | ||||
2256 | /// Return true if this shuffle mask is a transpose mask. | |||
2257 | /// Transpose vector masks transpose a 2xn matrix. They read corresponding | |||
2258 | /// even- or odd-numbered vector elements from two n-dimensional source | |||
2259 | /// vectors and write each result into consecutive elements of an | |||
2260 | /// n-dimensional destination vector. Two shuffles are necessary to complete | |||
2261 | /// the transpose, one for the even elements and another for the odd elements. | |||
2262 | /// This description closely follows how the TRN1 and TRN2 AArch64 | |||
2263 | /// instructions operate. | |||
2264 | /// | |||
2265 | /// For example, a simple 2x2 matrix can be transposed with: | |||
2266 | /// | |||
2267 | /// ; Original matrix | |||
2268 | /// m0 = < a, b > | |||
2269 | /// m1 = < c, d > | |||
2270 | /// | |||
2271 | /// ; Transposed matrix | |||
2272 | /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > | |||
2273 | /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > | |||
2274 | /// | |||
2275 | /// For matrices having more than two columns, the resulting nx2 transposed | |||
2276 | /// matrix is stored in two result vectors such that one vector contains | |||
2277 | /// interleaved elements from all the even-numbered rows and the other vector | |||
2278 | /// contains interleaved elements from all the odd-numbered rows. For example, | |||
2279 | /// a 2x4 matrix can be transposed with: | |||
2280 | /// | |||
2281 | /// ; Original matrix | |||
2282 | /// m0 = < a, b, c, d > | |||
2283 | /// m1 = < e, f, g, h > | |||
2284 | /// | |||
2285 | /// ; Transposed matrix | |||
2286 | /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > | |||
2287 | /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > | |||
2288 | static bool isTransposeMask(ArrayRef<int> Mask); | |||
2289 | static bool isTransposeMask(const Constant *Mask) { | |||
2290 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); | |||
2291 | SmallVector<int, 16> MaskAsInts; | |||
2292 | getShuffleMask(Mask, MaskAsInts); | |||
2293 | return isTransposeMask(MaskAsInts); | |||
2294 | } | |||
2295 | ||||
2296 | /// Return true if this shuffle transposes the elements of its inputs without | |||
2297 | /// changing the length of the vectors. This operation may also be known as a | |||
2298 | /// merge or interleave. See the description for isTransposeMask() for the | |||
2299 | /// exact specification. | |||
2300 | /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> | |||
2301 | bool isTranspose() const { | |||
2302 | return !changesLength() && isTransposeMask(ShuffleMask); | |||
2303 | } | |||
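// Illustrative IR for the 2x2 example above (a sketch; %m0 and %m1 are
// assumed <2 x i32> values):
//
//   %t0 = shufflevector <2 x i32> %m0, <2 x i32> %m1, <2 x i32> <i32 0, i32 2>
//   %t1 = shufflevector <2 x i32> %m0, <2 x i32> %m1, <2 x i32> <i32 1, i32 3>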
2304 | ||||
2305 | /// Return true if this shuffle mask is a splice mask, which concatenates the | |||
2306 | /// two inputs together and then extracts an original-width vector starting | |||
2307 | /// from the splice index. | |||
2308 | /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4> | |||
2309 | static bool isSpliceMask(ArrayRef<int> Mask, int &Index); | |||
2310 | static bool isSpliceMask(const Constant *Mask, int &Index) { | |||
2311 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); | |||
2312 | SmallVector<int, 16> MaskAsInts; | |||
2313 | getShuffleMask(Mask, MaskAsInts); | |||
2314 | return isSpliceMask(MaskAsInts, Index); | |||
2315 | } | |||
2316 | ||||
2317 | /// Return true if this shuffle splices two inputs without changing the length | |||
2318 | /// of the vectors. This operation concatenates the two inputs together and | |||
2319 | /// then extracts an original width vector starting from the splice index. | |||
2320 | /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4> | |||
2321 | bool isSplice(int &Index) const { | |||
2322 | return !changesLength() && isSpliceMask(ShuffleMask, Index); | |||
2323 | } | |||
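// Worked example (a sketch): with A = <a0,a1,a2,a3> and B = <b0,b1,b2,b3>,
// the mask <1,2,3,4> selects four consecutive elements of the concatenation
// <a0,a1,a2,a3,b0,b1,b2,b3> starting at element 1, producing <a1,a2,a3,b0>;
// Index is set to 1.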
2324 | ||||
2325 | /// Return true if this shuffle mask is an extract subvector mask. | |||
2326 | /// A valid extract subvector mask returns a smaller vector from a single | |||
2327 | /// source operand. The base extraction index is returned as well. | |||
2328 | static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, | |||
2329 | int &Index); | |||
2330 | static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, | |||
2331 | int &Index) { | |||
2332 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); | |||
2333 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2334 | // case. | |||
2335 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2336 | return false; | |||
2337 | SmallVector<int, 16> MaskAsInts; | |||
2338 | getShuffleMask(Mask, MaskAsInts); | |||
2339 | return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); | |||
2340 | } | |||
2341 | ||||
2342 | /// Return true if this shuffle mask is an extract subvector mask. | |||
2343 | bool isExtractSubvectorMask(int &Index) const { | |||
2344 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2345 | // case. | |||
2346 | if (isa<ScalableVectorType>(getType())) | |||
2347 | return false; | |||
2348 | ||||
2349 | int NumSrcElts = | |||
2350 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); | |||
2351 | return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index); | |||
2352 | } | |||
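// Worked example (a sketch): for a <4 x i32> source, the mask <2,3> extracts
// a two-element subvector starting at element 2, so Index is set to 2.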
2353 | ||||
2354 | /// Return true if this shuffle mask is an insert subvector mask. | |||
2355 | /// A valid insert subvector mask inserts the lowest elements of a second | |||
2356 | /// source operand into an in-place first source operand. | |||
2357 | /// Both the subvector width and the insertion index are returned. | |||
2358 | static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, | |||
2359 | int &NumSubElts, int &Index); | |||
2360 | static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, | |||
2361 | int &NumSubElts, int &Index) { | |||
2362 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); | |||
2363 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2364 | // case. | |||
2365 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2366 | return false; | |||
2367 | SmallVector<int, 16> MaskAsInts; | |||
2368 | getShuffleMask(Mask, MaskAsInts); | |||
2369 | return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index); | |||
2370 | } | |||
2371 | ||||
2372 | /// Return true if this shuffle mask is an insert subvector mask. | |||
2373 | bool isInsertSubvectorMask(int &NumSubElts, int &Index) const { | |||
2374 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2375 | // case. | |||
2376 | if (isa<ScalableVectorType>(getType())) | |||
2377 | return false; | |||
2378 | ||||
2379 | int NumSrcElts = | |||
2380 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); | |||
2381 | return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index); | |||
2382 | } | |||
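// Worked example (a sketch): with NumSrcElts = 4, the mask <0,4,5,3> keeps
// elements 0 and 3 of the first source and inserts the two lowest elements
// of the second source at position 1, so NumSubElts = 2 and Index = 1.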
2383 | ||||
2384 | /// Return true if this shuffle mask replicates each of the \p VF elements | |||
2385 | /// in a vector \p ReplicationFactor times. | |||
2386 | /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is: | |||
2387 | /// <0,0,0,1,1,1,2,2,2,3,3,3> | |||
2388 | static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor, | |||
2389 | int &VF); | |||
2390 | static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, | |||
2391 | int &VF) { | |||
2392 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); | |||
2393 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2394 | // case. | |||
2395 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2396 | return false; | |||
2397 | SmallVector<int, 16> MaskAsInts; | |||
2398 | getShuffleMask(Mask, MaskAsInts); | |||
2399 | return isReplicationMask(MaskAsInts, ReplicationFactor, VF); | |||
2400 | } | |||
2401 | ||||
2402 | /// Return true if this shuffle mask is a replication mask. | |||
2403 | bool isReplicationMask(int &ReplicationFactor, int &VF) const; | |||
2404 | ||||
2405 | /// Return true if this shuffle mask represents a "clustered" mask of size VF, | |||
2406 | /// i.e. each index between [0..VF) is used exactly once in each submask of | |||
2407 | /// size VF. | |||
2408 | /// For example, the mask for \p VF=4 is: | |||
2409 | /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4 | |||
2410 | /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time. | |||
2411 | /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because | |||
2412 | /// element 3 is used twice in the second submask | |||
2413 | /// (3,3,1,0) and index 2 is not used at all. | |||
2414 | static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF); | |||
2415 | ||||
2416 | /// Return true if this shuffle mask is a one-use-single-source("clustered") | |||
2417 | /// mask. | |||
2418 | bool isOneUseSingleSourceMask(int VF) const; | |||
2419 | ||||
2420 | /// Change values in a shuffle permute mask assuming the two vector operands | |||
2421 | /// of length InVecNumElts have swapped position. | |||
2422 | static void commuteShuffleMask(MutableArrayRef<int> Mask, | |||
2423 | unsigned InVecNumElts) { | |||
2424 | for (int &Idx : Mask) { | |||
2425 | if (Idx == -1) | |||
2426 | continue; | |||
2427 | Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; | |||
2428 | assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 && | |||
2429 |        "shufflevector mask index out of range"); | |||
2430 | } | |||
2431 | } | |||
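// Worked example (a sketch): commuting the mask <0,5,2,7> with
// InVecNumElts = 4 yields <4,1,6,3>; each index now refers to the same lane
// of the other (swapped) operand.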
2432 | ||||
2433 | /// Return true if this shuffle interleaves its two input vectors together. | |||
2434 | bool isInterleave(unsigned Factor); | |||
2435 | ||||
2436 | /// Return true if the mask interleaves one or more input vectors together. | |||
2437 | /// | |||
2438 | /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...> | |||
2439 | /// E.g. For a Factor of 2 (LaneLen=4): | |||
2440 | /// <0, 4, 1, 5, 2, 6, 3, 7> | |||
2441 | /// E.g. For a Factor of 3 (LaneLen=4): | |||
2442 | /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12> | |||
2443 | /// E.g. For a Factor of 4 (LaneLen=2): | |||
2444 | /// <0, 2, 6, 4, 1, 3, 7, 5> | |||
2445 | /// | |||
2446 | /// NumInputElts is the total number of elements in the input vectors. | |||
2447 | /// | |||
2448 | /// StartIndexes are the first indexes of each vector being interleaved, | |||
2449 | /// substituting any indexes that were undef | |||
2450 | /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2> | |||
2451 | /// | |||
2452 | /// Note that this does not check if the input vectors are consecutive: | |||
2453 | /// It will return true for masks such as | |||
2454 | /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2) | |||
2455 | static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor, | |||
2456 | unsigned NumInputElts, | |||
2457 | SmallVectorImpl<unsigned> &StartIndexes); | |||
2458 | static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor, | |||
2459 | unsigned NumInputElts) { | |||
2460 | SmallVector<unsigned, 8> StartIndexes; | |||
2461 | return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes); | |||
2462 | } | |||
2463 | ||||
2464 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2465 | static bool classof(const Instruction *I) { | |||
2466 | return I->getOpcode() == Instruction::ShuffleVector; | |||
2467 | } | |||
2468 | static bool classof(const Value *V) { | |||
2469 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2470 | } | |||
2471 | }; | |||
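// Illustrative usage (a minimal sketch; `V` is a hypothetical Value*):
//
//   if (auto *SVI = dyn_cast<ShuffleVectorInst>(V)) {
//     int Index;
//     if (SVI->isIdentity())
//       ; // no-op permutation of one operand
//     else if (SVI->isSelect())
//       ; // rewritable as a vector select with a constant condition
//     else if (SVI->isSplice(Index))
//       ; // concat-and-extract starting at Index
//   }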
2472 | ||||
2473 | template <> | |||
2474 | struct OperandTraits<ShuffleVectorInst> | |||
2475 | : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; | |||
2476 | ||||
2477 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) | |||
2478 | ||||
2479 | //===----------------------------------------------------------------------===// | |||
2480 | // ExtractValueInst Class | |||
2481 | //===----------------------------------------------------------------------===// | |||
2482 | ||||
2483 | /// This instruction extracts a struct member or array | |||
2484 | /// element value from an aggregate value. | |||
2485 | /// | |||
2486 | class ExtractValueInst : public UnaryInstruction { | |||
2487 | SmallVector<unsigned, 4> Indices; | |||
2488 | ||||
2489 | ExtractValueInst(const ExtractValueInst &EVI); | |||
2490 | ||||
2491 | /// Constructors - Create an extractvalue instruction with a base aggregate | |||
2492 | /// value and a list of indices. The first ctor can optionally insert before | |||
2493 | /// an existing instruction, the second appends the new instruction to the | |||
2494 | /// specified BasicBlock. | |||
2495 | inline ExtractValueInst(Value *Agg, | |||
2496 | ArrayRef<unsigned> Idxs, | |||
2497 | const Twine &NameStr, | |||
2498 | Instruction *InsertBefore); | |||
2499 | inline ExtractValueInst(Value *Agg, | |||
2500 | ArrayRef<unsigned> Idxs, | |||
2501 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2502 | ||||
2503 | void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); | |||
2504 | ||||
2505 | protected: | |||
2506 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2507 | friend class Instruction; | |||
2508 | ||||
2509 | ExtractValueInst *cloneImpl() const; | |||
2510 | ||||
2511 | public: | |||
2512 | static ExtractValueInst *Create(Value *Agg, | |||
2513 | ArrayRef<unsigned> Idxs, | |||
2514 | const Twine &NameStr = "", | |||
2515 | Instruction *InsertBefore = nullptr) { | |||
2516 | return new | |||
2517 | ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); | |||
2518 | } | |||
2519 | ||||
2520 | static ExtractValueInst *Create(Value *Agg, | |||
2521 | ArrayRef<unsigned> Idxs, | |||
2522 | const Twine &NameStr, | |||
2523 | BasicBlock *InsertAtEnd) { | |||
2524 | return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); | |||
2525 | } | |||
2526 | ||||
2527 | /// Returns the type of the element that would be extracted | |||
2528 | /// with an extractvalue instruction with the specified parameters. | |||
2529 | /// | |||
2530 | /// Null is returned if the indices are invalid for the specified type. | |||
2531 | static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); | |||
2532 | ||||
2533 | using idx_iterator = const unsigned*; | |||
2534 | ||||
2535 | inline idx_iterator idx_begin() const { return Indices.begin(); } | |||
2536 | inline idx_iterator idx_end() const { return Indices.end(); } | |||
2537 | inline iterator_range<idx_iterator> indices() const { | |||
2538 | return make_range(idx_begin(), idx_end()); | |||
2539 | } | |||
2540 | ||||
2541 | Value *getAggregateOperand() { | |||
2542 | return getOperand(0); | |||
2543 | } | |||
2544 | const Value *getAggregateOperand() const { | |||
2545 | return getOperand(0); | |||
2546 | } | |||
2547 | static unsigned getAggregateOperandIndex() { | |||
2548 | return 0U; // get index for modifying correct operand | |||
2549 | } | |||
2550 | ||||
2551 | ArrayRef<unsigned> getIndices() const { | |||
2552 | return Indices; | |||
2553 | } | |||
2554 | ||||
2555 | unsigned getNumIndices() const { | |||
2556 | return (unsigned)Indices.size(); | |||
2557 | } | |||
2558 | ||||
2559 | bool hasIndices() const { | |||
2560 | return true; | |||
2561 | } | |||
2562 | ||||
2563 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2564 | static bool classof(const Instruction *I) { | |||
2565 | return I->getOpcode() == Instruction::ExtractValue; | |||
2566 | } | |||
2567 | static bool classof(const Value *V) { | |||
2568 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2569 | } | |||
2570 | }; | |||
2571 | ||||
2572 | ExtractValueInst::ExtractValueInst(Value *Agg, | |||
2573 | ArrayRef<unsigned> Idxs, | |||
2574 | const Twine &NameStr, | |||
2575 | Instruction *InsertBefore) | |||
2576 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), | |||
2577 | ExtractValue, Agg, InsertBefore) { | |||
2578 | init(Idxs, NameStr); | |||
2579 | } | |||
2580 | ||||
2581 | ExtractValueInst::ExtractValueInst(Value *Agg, | |||
2582 | ArrayRef<unsigned> Idxs, | |||
2583 | const Twine &NameStr, | |||
2584 | BasicBlock *InsertAtEnd) | |||
2585 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), | |||
2586 | ExtractValue, Agg, InsertAtEnd) { | |||
2587 | init(Idxs, NameStr); | |||
2588 | } | |||
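// Illustrative usage (a minimal sketch; %agg names a hypothetical value of
// type {i32, {float, i8}}):
//
//   %x = extractvalue {i32, {float, i8}} %agg, 1, 0   ; yields the float
//
// or, constructed in C++ given Value *Agg and an insertion point InsertPt:
//
//   ExtractValueInst *EV = ExtractValueInst::Create(Agg, {1, 0}, "x", InsertPt);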
2589 | ||||
2590 | //===----------------------------------------------------------------------===// | |||
2591 | // InsertValueInst Class | |||
2592 | //===----------------------------------------------------------------------===// | |||
2593 | ||||
2594 | /// This instruction inserts a struct field or array element | |||
2595 | /// value into an aggregate value. | |||
2596 | /// | |||
2597 | class InsertValueInst : public Instruction { | |||
2598 | SmallVector<unsigned, 4> Indices; | |||
2599 | ||||
2600 | InsertValueInst(const InsertValueInst &IVI); | |||
2601 | ||||
2602 | /// Constructors - Create an insertvalue instruction with a base aggregate | |||
2603 | /// value, a value to insert, and a list of indices. The first ctor can | |||
2604 | /// optionally insert before an existing instruction, the second appends | |||
2605 | /// the new instruction to the specified BasicBlock. | |||
2606 | inline InsertValueInst(Value *Agg, Value *Val, | |||
2607 | ArrayRef<unsigned> Idxs, | |||
2608 | const Twine &NameStr, | |||
2609 | Instruction *InsertBefore); | |||
2610 | inline InsertValueInst(Value *Agg, Value *Val, | |||
2611 | ArrayRef<unsigned> Idxs, | |||
2612 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2613 | ||||
2614 | /// Constructors - These two constructors are convenience methods because | |||
2615 | /// one- and two-index insertvalue instructions are so common. | |||
2616 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, | |||
2617 | const Twine &NameStr = "", | |||
2618 | Instruction *InsertBefore = nullptr); | |||
2619 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, | |||
2620 | BasicBlock *InsertAtEnd); | |||
2621 | ||||
2622 | void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, | |||
2623 | const Twine &NameStr); | |||
2624 | ||||
2625 | protected: | |||
2626 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2627 | friend class Instruction; | |||
2628 | ||||
2629 | InsertValueInst *cloneImpl() const; | |||
2630 | ||||
2631 | public: | |||
2632 | // allocate space for exactly two operands | |||
2633 | void *operator new(size_t S) { return User::operator new(S, 2); } | |||
2634 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
2635 | ||||
2636 | static InsertValueInst *Create(Value *Agg, Value *Val, | |||
2637 | ArrayRef<unsigned> Idxs, | |||
2638 | const Twine &NameStr = "", | |||
2639 | Instruction *InsertBefore = nullptr) { | |||
2640 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); | |||
2641 | } | |||
2642 | ||||
2643 | static InsertValueInst *Create(Value *Agg, Value *Val, | |||
2644 | ArrayRef<unsigned> Idxs, | |||
2645 | const Twine &NameStr, | |||
2646 | BasicBlock *InsertAtEnd) { | |||
2647 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); | |||
2648 | } | |||
2649 | ||||
2650 | /// Transparently provide more efficient getOperand methods. | |||
2651 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) | |||
2652 | ||||
2653 | using idx_iterator = const unsigned*; | |||
2654 | ||||
2655 | inline idx_iterator idx_begin() const { return Indices.begin(); } | |||
2656 | inline idx_iterator idx_end() const { return Indices.end(); } | |||
2657 | inline iterator_range<idx_iterator> indices() const { | |||
2658 | return make_range(idx_begin(), idx_end()); | |||
2659 | } | |||
2660 | ||||
2661 | Value *getAggregateOperand() { | |||
2662 | return getOperand(0); | |||
2663 | } | |||
2664 | const Value *getAggregateOperand() const { | |||
2665 | return getOperand(0); | |||
2666 | } | |||
2667 | static unsigned getAggregateOperandIndex() { | |||
2668 | return 0U; // get index for modifying correct operand | |||
2669 | } | |||
2670 | ||||
2671 | Value *getInsertedValueOperand() { | |||
2672 | return getOperand(1); | |||
2673 | } | |||
2674 | const Value *getInsertedValueOperand() const { | |||
2675 | return getOperand(1); | |||
2676 | } | |||
2677 | static unsigned getInsertedValueOperandIndex() { | |||
2678 | return 1U; // get index for modifying correct operand | |||
2679 | } | |||
2680 | ||||
2681 | ArrayRef<unsigned> getIndices() const { | |||
2682 | return Indices; | |||
2683 | } | |||
2684 | ||||
2685 | unsigned getNumIndices() const { | |||
2686 | return (unsigned)Indices.size(); | |||
2687 | } | |||
2688 | ||||
2689 | bool hasIndices() const { | |||
2690 | return true; | |||
2691 | } | |||
2692 | ||||
2693 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2694 | static bool classof(const Instruction *I) { | |||
2695 | return I->getOpcode() == Instruction::InsertValue; | |||
2696 | } | |||
2697 | static bool classof(const Value *V) { | |||
2698 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2699 | } | |||
2700 | }; | |||
2701 | ||||
2702 | template <> | |||
2703 | struct OperandTraits<InsertValueInst> : | |||
2704 | public FixedNumOperandTraits<InsertValueInst, 2> { | |||
2705 | }; | |||
2706 | ||||
2707 | InsertValueInst::InsertValueInst(Value *Agg, | |||
2708 | Value *Val, | |||
2709 | ArrayRef<unsigned> Idxs, | |||
2710 | const Twine &NameStr, | |||
2711 | Instruction *InsertBefore) | |||
2712 | : Instruction(Agg->getType(), InsertValue, | |||
2713 | OperandTraits<InsertValueInst>::op_begin(this), | |||
2714 | 2, InsertBefore) { | |||
2715 | init(Agg, Val, Idxs, NameStr); | |||
2716 | } | |||
2717 | ||||
2718 | InsertValueInst::InsertValueInst(Value *Agg, | |||
2719 | Value *Val, | |||
2720 | ArrayRef<unsigned> Idxs, | |||
2721 | const Twine &NameStr, | |||
2722 | BasicBlock *InsertAtEnd) | |||
2723 | : Instruction(Agg->getType(), InsertValue, | |||
2724 | OperandTraits<InsertValueInst>::op_begin(this), | |||
2725 | 2, InsertAtEnd) { | |||
2726 | init(Agg, Val, Idxs, NameStr); | |||
2727 | } | |||
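// Illustrative usage (a minimal sketch; %agg and %v are hypothetical values):
//
//   %r = insertvalue {i32, float} %agg, float %v, 1
//
// or, constructed in C++ given Value *Agg, Value *Val and an insertion point:
//
//   InsertValueInst *IV = InsertValueInst::Create(Agg, Val, {1}, "r", InsertPt);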
2728 | ||||
2729 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) | |||
2730 | ||||
2731 | //===----------------------------------------------------------------------===// | |||
2732 | // PHINode Class | |||
2733 | //===----------------------------------------------------------------------===// | |||
2734 | ||||
2735 | // PHINode - The PHINode class is used to represent the magical mystical PHI | |||
2736 | // node, that cannot exist in nature, but can be synthesized in a computer | |||
2737 | // scientist's overactive imagination. | |||
2738 | // | |||
2739 | class PHINode : public Instruction { | |||
2740 | /// The number of operands actually allocated. NumOperands is | |||
2741 | /// the number actually in use. | |||
2742 | unsigned ReservedSpace; | |||
2743 | ||||
2744 | PHINode(const PHINode &PN); | |||
2745 | ||||
2746 | explicit PHINode(Type *Ty, unsigned NumReservedValues, | |||
2747 | const Twine &NameStr = "", | |||
2748 | Instruction *InsertBefore = nullptr) | |||
2749 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), | |||
2750 | ReservedSpace(NumReservedValues) { | |||
2751 | assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); | |||
2752 | setName(NameStr); | |||
2753 | allocHungoffUses(ReservedSpace); | |||
2754 | } | |||
2755 | ||||
2756 | PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, | |||
2757 | BasicBlock *InsertAtEnd) | |||
2758 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), | |||
2759 | ReservedSpace(NumReservedValues) { | |||
2760 | assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); | |||
2761 | setName(NameStr); | |||
2762 | allocHungoffUses(ReservedSpace); | |||
2763 | } | |||
2764 | ||||
2765 | protected: | |||
2766 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2767 | friend class Instruction; | |||
2768 | ||||
2769 | PHINode *cloneImpl() const; | |||
2770 | ||||
2771 | // allocHungoffUses - this is more complicated than the generic | |||
2772 | // User::allocHungoffUses, because we have to allocate Uses for the incoming | |||
2773 | // values and pointers to the incoming blocks, all in one allocation. | |||
2774 | void allocHungoffUses(unsigned N) { | |||
2775 | User::allocHungoffUses(N, /* IsPhi */ true); | |||
2776 | } | |||
2777 | ||||
2778 | public: | |||
2779 | /// Constructors - NumReservedValues is a hint for the number of incoming | |||
2780 | /// edges that this phi node will have (use 0 if you really have no idea). | |||
2781 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, | |||
2782 | const Twine &NameStr = "", | |||
2783 | Instruction *InsertBefore = nullptr) { | |||
2784 | return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); | |||
2785 | } | |||
2786 | ||||
2787 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, | |||
2788 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
2789 | return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); | |||
2790 | } | |||
2791 | ||||
2792 | /// Provide fast operand accessors | |||
2793 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) | |||
2794 | ||||
2795 | // Block iterator interface. This provides access to the list of incoming | |||
2796 | // basic blocks, which parallels the list of incoming values. | |||
2797 | // Please note that we are not providing non-const iterators for blocks to | |||
2798 | // force all updates to go through an interface function. | |||
2799 | ||||
2800 | using block_iterator = BasicBlock **; | |||
2801 | using const_block_iterator = BasicBlock * const *; | |||
2802 | ||||
2803 | const_block_iterator block_begin() const { | |||
2804 | return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); | |||
2805 | } | |||
2806 | ||||
2807 | const_block_iterator block_end() const { | |||
2808 | return block_begin() + getNumOperands(); | |||
2809 | } | |||
2810 | ||||
2811 | iterator_range<const_block_iterator> blocks() const { | |||
2812 | return make_range(block_begin(), block_end()); | |||
2813 | } | |||
2814 | ||||
2815 | op_range incoming_values() { return operands(); } | |||
2816 | ||||
2817 | const_op_range incoming_values() const { return operands(); } | |||
2818 | ||||
2819 | /// Return the number of incoming edges | |||
2820 | /// | |||
2821 | unsigned getNumIncomingValues() const { return getNumOperands(); } | |||
2822 | ||||
2823 | /// Return incoming value number x | |||
2824 | /// | |||
2825 | Value *getIncomingValue(unsigned i) const { | |||
2826 | return getOperand(i); | |||
2827 | } | |||
2828 | void setIncomingValue(unsigned i, Value *V) { | |||
2829 | assert(V && "PHI node got a null value!"); | |||
2830 | assert(getType() == V->getType() && | |||
2831 |        "All operands to PHI node must be the same type as the PHI node!"); | |||
2832 | setOperand(i, V); | |||
2833 | } | |||
2834 | ||||
2835 | static unsigned getOperandNumForIncomingValue(unsigned i) { | |||
2836 | return i; | |||
2837 | } | |||
2838 | ||||
2839 | static unsigned getIncomingValueNumForOperand(unsigned i) { | |||
2840 | return i; | |||
2841 | } | |||
2842 | ||||
2843 | /// Return incoming basic block number @p i. | |||
2844 | /// | |||
2845 | BasicBlock *getIncomingBlock(unsigned i) const { | |||
2846 | return block_begin()[i]; | |||
2847 | } | |||
2848 | ||||
2849 | /// Return incoming basic block corresponding | |||
2850 | /// to an operand of the PHI. | |||
2851 | /// | |||
2852 | BasicBlock *getIncomingBlock(const Use &U) const { | |||
2853 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); | |||
2854 | return getIncomingBlock(unsigned(&U - op_begin())); | |||
2855 | } | |||
2856 | ||||
2857 | /// Return incoming basic block corresponding | |||
2858 | /// to value use iterator. | |||
2859 | /// | |||
2860 | BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { | |||
2861 | return getIncomingBlock(I.getUse()); | |||
2862 | } | |||
2863 | ||||
2864 | void setIncomingBlock(unsigned i, BasicBlock *BB) { | |||
2865 | const_cast<block_iterator>(block_begin())[i] = BB; | |||
2866 | } | |||
2867 | ||||
2868 | /// Copies the basic blocks from \p BBRange to the incoming basic block list | |||
2869 | /// of this PHINode, starting at \p ToIdx. | |||
2870 | void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange, | |||
2871 | uint32_t ToIdx = 0) { | |||
2872 | copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx); | |||
2873 | } | |||
2874 | ||||
2875 | /// Replace every occurrence of incoming basic block \p Old with \p New. | |||
2876 | void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { | |||
2877 | assert(New && Old && "PHI node got a null basic block!"); | |||
2878 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) | |||
2879 | if (getIncomingBlock(Op) == Old) | |||
2880 | setIncomingBlock(Op, New); | |||
2881 | } | |||
2882 | ||||
2883 | /// Add an incoming value to the end of the PHI list | |||
2884 | /// | |||
2885 | void addIncoming(Value *V, BasicBlock *BB) { | |||
2886 | if (getNumOperands() == ReservedSpace) | |||
2887 | growOperands(); // Get more space! | |||
2888 | // Initialize some new operands. | |||
2889 | setNumHungOffUseOperands(getNumOperands() + 1); | |||
2890 | setIncomingValue(getNumOperands() - 1, V); | |||
2891 | setIncomingBlock(getNumOperands() - 1, BB); | |||
2892 | } | |||
2893 | ||||
2894 | /// Remove an incoming value. This is useful if a | |||
2895 | /// predecessor basic block is deleted. The value removed is returned. | |||
2896 | /// | |||
2897 | /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty | |||
2898 | /// is true), the PHI node is destroyed and any uses of it are replaced with | |||
2899 | /// dummy values. The only time there should be zero incoming values to a PHI | |||
2900 | /// node is when the block is dead, so this strategy is sound. | |||
2901 | /// | |||
2902 | Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); | |||
2903 | ||||
2904 | Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { | |||
2905 | int Idx = getBasicBlockIndex(BB); | |||
2906 | assert(Idx >= 0 && "Invalid basic block argument to remove!"); | |||
2907 | return removeIncomingValue(Idx, DeletePHIIfEmpty); | |||
2908 | } | |||
2909 | ||||
2910 | /// Return the first index of the specified basic | |||
2911 | /// block in the value list for this PHI. Returns -1 if no instance. | |||
2912 | /// | |||
2913 | int getBasicBlockIndex(const BasicBlock *BB) const { | |||
2914 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) | |||
2915 | if (block_begin()[i] == BB) | |||
2916 | return i; | |||
2917 | return -1; | |||
2918 | } | |||
2919 | ||||
2920 | Value *getIncomingValueForBlock(const BasicBlock *BB) const { | |||
2921 | int Idx = getBasicBlockIndex(BB); | |||
2922 | assert(Idx >= 0 && "Invalid basic block argument!"); | |||
2923 | return getIncomingValue(Idx); | |||
2924 | } | |||
2925 | ||||
2926 | /// Set every incoming value for block \p BB to \p V. | |||
2927 | void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { | |||
2928 | assert(BB && "PHI node got a null basic block!"); | |||
2929 | bool Found = false; | |||
2930 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) | |||
2931 | if (getIncomingBlock(Op) == BB) { | |||
2932 | Found = true; | |||
2933 | setIncomingValue(Op, V); | |||
2934 | } | |||
2935 | (void)Found; | |||
2936 | assert(Found && "Invalid basic block argument to set!"); | |||
2937 | } | |||
2938 | ||||
2939 | /// If the specified PHI node always merges together the | |||
2940 | /// same value, return the value, otherwise return null. | |||
2941 | Value *hasConstantValue() const; | |||
2942 | ||||
2943 | /// Whether the specified PHI node always merges | |||
2944 | /// together the same value, assuming undefs are equal to a unique | |||
2945 | /// non-undef value. | |||
2946 | bool hasConstantOrUndefValue() const; | |||
2947 | ||||
2948 | /// Return true if the PHI node is complete, i.e. all of its parent's | |||
2949 | /// predecessors have an incoming value in this PHI; otherwise return false. | |||
2950 | bool isComplete() const { | |||
2951 | return llvm::all_of(predecessors(getParent()), | |||
2952 | [this](const BasicBlock *Pred) { | |||
2953 | return getBasicBlockIndex(Pred) >= 0; | |||
2954 | }); | |||
2955 | } | |||
2956 | ||||
2957 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2958 | static bool classof(const Instruction *I) { | |||
2959 | return I->getOpcode() == Instruction::PHI; | |||
2960 | } | |||
2961 | static bool classof(const Value *V) { | |||
2962 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2963 | } | |||
2964 | ||||
2965 | private: | |||
2966 | void growOperands(); | |||
2967 | }; | |||
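// Illustrative usage (a minimal sketch; Ty, V0, V1, BB0, BB1 and InsertPt
// are hypothetical values supplied by the caller):
//
//   PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
//                                 InsertPt);
//   PN->addIncoming(V0, BB0);
//   PN->addIncoming(V1, BB1);
//   Value *X = PN->getIncomingValueForBlock(BB0); // returns V0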
2968 | ||||
2969 | template <> | |||
2970 | struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { | |||
2971 | }; | |||
2972 | ||||
2973 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) | |||
2974 | ||||
2975 | //===----------------------------------------------------------------------===// | |||
2976 | // LandingPadInst Class | |||
2977 | //===----------------------------------------------------------------------===// | |||
2978 | ||||
2979 | //===--------------------------------------------------------------------------- | |||
2980 | /// The landingpad instruction holds all of the information | |||
2981 | /// necessary to generate correct exception handling. The landingpad instruction | |||
2982 | /// cannot be moved from the top of a landing pad block, which itself is | |||
2983 | /// accessible only from the 'unwind' edge of an invoke. This uses the | |||
2984 | /// SubclassData field in Value to store whether or not the landingpad is a | |||
2985 | /// cleanup. | |||
2986 | /// | |||
2987 | class LandingPadInst : public Instruction { | |||
2988 | using CleanupField = BoolBitfieldElementT<0>; | |||
2989 | ||||
2990 | /// The number of operands actually allocated. NumOperands is | |||
2991 | /// the number actually in use. | |||
2992 | unsigned ReservedSpace; | |||
2993 | ||||
2994 | LandingPadInst(const LandingPadInst &LP); | |||
2995 | ||||
2996 | public: | |||
2997 | enum ClauseType { Catch, Filter }; | |||
2998 | ||||
2999 | private: | |||
3000 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, | |||
3001 | const Twine &NameStr, Instruction *InsertBefore); | |||
3002 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, | |||
3003 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
3004 | ||||
3005 | // Allocate space for exactly zero operands. | |||
3006 | void *operator new(size_t S) { return User::operator new(S); } | |||
3007 | ||||
3008 | void growOperands(unsigned Size); | |||
3009 | void init(unsigned NumReservedValues, const Twine &NameStr); | |||
3010 | ||||
3011 | protected: | |||
3012 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
3013 | friend class Instruction; | |||
3014 | ||||
3015 | LandingPadInst *cloneImpl() const; | |||
3016 | ||||
3017 | public: | |||
3018 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
3019 | ||||
3020 | /// Constructors - NumReservedClauses is a hint for the number of incoming | |||
3021 | /// clauses that this landingpad will have (use 0 if you really have no idea). | |||
3022 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, | |||
3023 | const Twine &NameStr = "", | |||
3024 | Instruction *InsertBefore = nullptr); | |||
3025 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, | |||
3026 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
3027 | ||||
3028 | /// Provide fast operand accessors | |||
3029 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) | |||
3030 | ||||
3031 | /// Return 'true' if this landingpad instruction is a | |||
3032 | /// cleanup. I.e., it should be run when unwinding even if its landing pad | |||
3033 | /// doesn't catch the exception. | |||
3034 | bool isCleanup() const { return getSubclassData<CleanupField>(); } | |||
3035 | ||||
3036 | /// Indicate that this landingpad instruction is a cleanup. | |||
3037 | void setCleanup(bool V) { setSubclassData<CleanupField>(V); } | |||
3038 | ||||
3039 | /// Add a catch or filter clause to the landing pad. | |||
3040 | void addClause(Constant *ClauseVal); | |||
3041 | ||||
3042 | /// Get the value of the clause at index Idx. Use isCatch/isFilter to | |||
3043 | /// determine what type of clause this is. | |||
3044 | Constant *getClause(unsigned Idx) const { | |||
3045 | return cast<Constant>(getOperandList()[Idx]); | |||
3046 | } | |||
3047 | ||||
3048 | /// Return 'true' if the clause at index Idx is a catch clause. | |||
3049 | bool isCatch(unsigned Idx) const { | |||
3050 | return !isa<ArrayType>(getOperandList()[Idx]->getType()); | |||
3051 | } | |||
3052 | ||||
3053 | /// Return 'true' if the clause at index Idx is a filter clause. | |||
3054 | bool isFilter(unsigned Idx) const { | |||
3055 | return isa<ArrayType>(getOperandList()[Idx]->getType()); | |||
3056 | } | |||
3057 | ||||
3058 | /// Get the number of clauses for this landing pad. | |||
3059 | unsigned getNumClauses() const { return getNumOperands(); } | |||
3060 | ||||
3061 | /// Grow the size of the operand list to accommodate the new | |||
3062 | /// number of clauses. | |||
3063 | void reserveClauses(unsigned Size) { growOperands(Size); } | |||
3064 | ||||
3065 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
3066 | static bool classof(const Instruction *I) { | |||
3067 | return I->getOpcode() == Instruction::LandingPad; | |||
3068 | } | |||
3069 | static bool classof(const Value *V) { | |||
3070 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
3071 | } | |||
3072 | }; | |||
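// Illustrative IR (a sketch; @_ZTIi is a hypothetical typeinfo global): a
// landingpad that is both a cleanup and catches one type, so isCleanup()
// is true and getClause(0)/isCatch(0) describe the catch clause:
//
//   %lp = landingpad { ptr, i32 }
//           cleanup
//           catch ptr @_ZTIi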
3073 | ||||
3074 | template <> | |||
3075 | struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { | |||
3076 | }; | |||
3077 | ||||
3078 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value) | |||
3079 | ||||
3080 | //===----------------------------------------------------------------------===// | |||
3081 | // ReturnInst Class | |||
3082 | //===----------------------------------------------------------------------===// | |||
3083 | ||||
3084 | //===--------------------------------------------------------------------------- | |||
3085 | /// Return a value (possibly void) from a function. Execution | |||
3086 | /// does not continue in this function any longer. | |||
3087 | /// | |||
3088 | class ReturnInst : public Instruction { | |||
3089 | ReturnInst(const ReturnInst &RI); | |||
3090 | ||||
3091 | private: | |||
3092 | // ReturnInst constructors: | |||
3093 | // ReturnInst() - 'ret void' instruction | |||
3094 | // ReturnInst( null) - 'ret void' instruction | |||
3095 | // ReturnInst(Value* X) - 'ret X' instruction | |||
3096 | // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I | |||
3097 | // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I | |||
3098 | // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B | |||
3099 | // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B | |||
3100 | // | |||
3101 | // NOTE: If the Value* passed is of type void then the constructor behaves as | |||
3102 | // if it was passed NULL. | |||
3103 | explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, | |||
3104 | Instruction *InsertBefore = nullptr); | |||
3105 | ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); | |||
3106 | explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); | |||
3107 | ||||
3108 | protected: | |||
3109 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
3110 | friend class Instruction; | |||
3111 | ||||
3112 | ReturnInst *cloneImpl() const; | |||
3113 | ||||
3114 | public: | |||
3115 | static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, | |||
3116 | Instruction *InsertBefore = nullptr) { | |||
3117 | return new(!!retVal) ReturnInst(C, retVal, InsertBefore); | |||
3118 | } | |||
3119 | ||||
3120 | static ReturnInst* Create(LLVMContext &C, Value *retVal, | |||
3121 | BasicBlock *InsertAtEnd) { | |||
3122 | return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); | |||
3123 | } | |||
3124 | ||||
3125 | static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { | |||
3126 | return new(0) ReturnInst(C, InsertAtEnd); | |||
3127 | } | |||
3128 | ||||
3129 | /// Provide fast operand accessors | |||
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor. Returns null if there is no return value.
  Value *getReturnValue() const {
    return getNumOperands() != 0 ? getOperand(0) : nullptr;
  }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Ret);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ReturnInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("ReturnInst has no successors!");
  }
};

template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
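// A minimal usage sketch (assumed, not from the original header): `Ctx` is an
// LLVMContext, `BB` a BasicBlock, and `RV` a Value, all hypothetical.
//
//   ReturnInst::Create(Ctx, BB);                      // 'ret void' at end of BB
//   ReturnInst *RI = ReturnInst::Create(Ctx, RV, BB); // 'ret RV' at end of BB
//   Value *V = RI->getReturnValue();                  // RV; nullptr for 'ret void'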

//===----------------------------------------------------------------------===//
//                               BranchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Conditional or Unconditional Branch instruction.
///
class BranchInst : public Instruction {
  /// Ops list - Branches are strange. The operands are ordered:
  /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
  /// they don't have to check for cond/uncond branchness. These are mostly
  /// accessed relative from op_end().
  BranchInst(const BranchInst &BI);
  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
  // BranchInst(BB *B)                           - 'br B'
  // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
  // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
  // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
  // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             BasicBlock *InsertAtEnd);

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  BranchInst *cloneImpl() const;

public:
  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for branch instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  static BranchInst *Create(BasicBlock *IfTrue,
                            Instruction *InsertBefore = nullptr) {
    return new(1) BranchInst(IfTrue, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, Instruction *InsertBefore = nullptr) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
    return new(1) BranchInst(IfTrue, InsertAtEnd);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, BasicBlock *InsertAtEnd) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional()   const { return getNumOperands() == 3; }

  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!");
    return Op<-3>();
  }

  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!");
    Op<-3>() = V;
  }

  unsigned getNumSuccessors() const { return 1+isConditional(); }

  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
    *(&Op<-1>() - idx) = NewSucc;
  }

  /// Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
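// A minimal usage sketch (assumed, not from the original header): `Cond` is an
// i1 Value, `Then`/`Else` are destination blocks, and `BB` is the block being
// terminated, all hypothetical.
//
//   BranchInst::Create(Then, BB);                              // 'br Then'
//   BranchInst *BI = BranchInst::Create(Then, Else, Cond, BB); // 'br Cond, ...'
//   assert(BI->isConditional() && BI->getNumSuccessors() == 2);
//   BI->swapSuccessors(); // swaps Then/Else and any branch-weight metadata
//   for (BasicBlock *Succ : BI->successors())
//     (void)Succ;         // visits only the successors, not the condition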

//===----------------------------------------------------------------------===//
//                               SwitchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Multiway switch
///
class SwitchInst : public Instruction {
  unsigned ReservedSpace;

  // Operand[0]    = Value to switch on
  // Operand[1]    = Default basic block destination
  // Operand[2n  ] = Value to match
  // Operand[2n+1] = BasicBlock to go to on match
  SwitchInst(const SwitchInst &SI);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor can also
  /// auto-insert before another instruction.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor also
  /// auto-inserts at the end of the specified BasicBlock.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // -2
  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);

  template <typename CaseHandleT> class CaseIteratorImpl;

  /// A handle to a particular switch case. It exposes a convenient interface
  /// to both the case value and the successor block.
  ///
  /// We define this as a template and instantiate it to form both a const and
  /// non-const handle.
  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
  class CaseHandleImpl {
    // Directly befriend both const and non-const iterators.
    friend class SwitchInst::CaseIteratorImpl<
        CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;

  protected:
    // Expose the switch type we're parameterized with to the iterator.
    using SwitchInstType = SwitchInstT;

    SwitchInstT *SI;
    ptrdiff_t Index;

    CaseHandleImpl() = default;
    CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}

  public:
    /// Resolves case value for current case.
    ConstantIntT *getCaseValue() const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out of the number of cases.");
      return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
    }

    /// Resolves successor for current case.
    BasicBlockT *getCaseSuccessor() const {
      assert(((unsigned)Index < SI->getNumCases() ||
              (unsigned)Index == DefaultPseudoIndex) &&
             "Index out of the number of cases.");
      return SI->getSuccessor(getSuccessorIndex());
    }

    /// Returns the number of the current case.
    unsigned getCaseIndex() const { return Index; }

    /// Returns successor index for current case successor.
    unsigned getSuccessorIndex() const {
      assert(((unsigned)Index == DefaultPseudoIndex ||
              (unsigned)Index < SI->getNumCases()) &&
             "Index out of the number of cases.");
      return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
    }

    bool operator==(const CaseHandleImpl &RHS) const {
      assert(SI == RHS.SI && "Incompatible operators.");
      return Index == RHS.Index;
    }
  };

  using ConstCaseHandle =
      CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;

  class CaseHandle
      : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
    friend class SwitchInst::CaseIteratorImpl<CaseHandle>;

  public:
    CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}

    /// Sets the new value for current case.
    void setValue(ConstantInt *V) const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out of the number of cases.");
      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
    }

    /// Sets the new successor for current case.
    void setSuccessor(BasicBlock *S) const {
      SI->setSuccessor(getSuccessorIndex(), S);
    }
  };

  template <typename CaseHandleT>
  class CaseIteratorImpl
      : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
                                    std::random_access_iterator_tag,
                                    const CaseHandleT> {
    using SwitchInstT = typename CaseHandleT::SwitchInstType;

    CaseHandleT Case;

  public:
    /// Default constructed iterator is in an invalid state until assigned to
    /// a case for a particular switch.
    CaseIteratorImpl() = default;

    /// Initializes case iterator for given SwitchInst and for given
    /// case number.
    CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}

    /// Initializes case iterator for given SwitchInst and for given
    /// successor index.
    static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
                                               unsigned SuccessorIndex) {
      assert(SuccessorIndex < SI->getNumSuccessors() &&
             "Successor index # out of range!");
      return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
                                 : CaseIteratorImpl(SI, DefaultPseudoIndex);
    }

    /// Support converting to the const variant. This will be a no-op for the
    /// const variant.
    operator CaseIteratorImpl<ConstCaseHandle>() const {
      return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
    }

    CaseIteratorImpl &operator+=(ptrdiff_t N) {
      // Check index correctness after addition.
      // Note: Index == getNumCases() means end().
      assert(Case.Index + N >= 0 &&
             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
             "Case.Index out of the number of cases.");
      Case.Index += N;
      return *this;
    }
    CaseIteratorImpl &operator-=(ptrdiff_t N) {
      // Check index correctness after subtraction.
      // Note: Case.Index == getNumCases() means end().
      assert(Case.Index - N >= 0 &&
             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
             "Case.Index out of the number of cases.");
      Case.Index -= N;
      return *this;
    }
    ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index - RHS.Case.Index;
    }
    bool operator==(const CaseIteratorImpl &RHS) const {
      return Case == RHS.Case;
    }
    bool operator<(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index < RHS.Case.Index;
    }
    const CaseHandleT &operator*() const { return Case; }
  };

  using CaseIt = CaseIteratorImpl<CaseHandle>;
  using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases,
                            Instruction *InsertBefore = nullptr) {
    return new SwitchInst(Value, Default, NumCases, InsertBefore);
  }

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases, BasicBlock *InsertAtEnd) {
    return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for Switch stmt
  Value *getCondition() const { return getOperand(0); }
  void setCondition(Value *V) { setOperand(0, V); }

  BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(getOperand(1));
  }

  void setDefaultDest(BasicBlock *DefaultCase) {
    setOperand(1, reinterpret_cast<Value*>(DefaultCase));
  }

  /// Return the number of 'cases' in this switch instruction, excluding the
  /// default case.
  unsigned getNumCases() const {
    return getNumOperands()/2 - 1;
  }

  /// Returns a read/write iterator that points to the first case in the
  /// SwitchInst.
  CaseIt case_begin() {
    return CaseIt(this, 0);
  }

  /// Returns a read-only iterator that points to the first case in the
  /// SwitchInst.
  ConstCaseIt case_begin() const {
    return ConstCaseIt(this, 0);
  }

  /// Returns a read/write iterator that points one past the last case in the
  /// SwitchInst.
  CaseIt case_end() {
    return CaseIt(this, getNumCases());
  }

  /// Returns a read-only iterator that points one past the last case in the
  /// SwitchInst.
  ConstCaseIt case_end() const {
    return ConstCaseIt(this, getNumCases());
  }

  /// Iteration adapter for range-for loops.
  iterator_range<CaseIt> cases() {
    return make_range(case_begin(), case_end());
  }

  /// Constant iteration adapter for range-for loops.
  iterator_range<ConstCaseIt> cases() const {
    return make_range(case_begin(), case_end());
  }

  /// Returns an iterator that points to the default case.
  /// Note: this iterator can only resolve the successor; attempting to
  /// resolve the case value triggers an assertion.
  /// Also note that incrementing or decrementing this iterator triggers an
  /// assertion and leaves the iterator invalid.
  CaseIt case_default() {
    return CaseIt(this, DefaultPseudoIndex);
  }
  ConstCaseIt case_default() const {
    return ConstCaseIt(this, DefaultPseudoIndex);
  }

  /// Search all of the case values for the specified constant. If it is
  /// explicitly handled, return the case iterator for it; otherwise return
  /// the default case iterator to indicate that the value is handled by the
  /// default handler.
  CaseIt findCaseValue(const ConstantInt *C) {
    return CaseIt(
        this,
        const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
  }
  ConstCaseIt findCaseValue(const ConstantInt *C) const {
    ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
      return Case.getCaseValue() == C;
    });
    if (I != case_end())
      return I;

    return case_default();
  }

  /// Finds the unique case value for a given successor. Returns null if the
  /// successor is not found, not unique, or is the default case.
  ConstantInt *findCaseDest(BasicBlock *BB) {
    if (BB == getDefaultDest())
      return nullptr;

    ConstantInt *CI = nullptr;
    for (auto Case : cases()) {
      if (Case.getCaseSuccessor() != BB)
        continue;

      if (CI)
        return nullptr; // Multiple cases lead to BB.

      CI = Case.getCaseValue();
    }

    return CI;
  }

  /// Add an entry to the switch instruction.
  /// Note:
  /// This action invalidates case_end(). The old case_end() iterator will
  /// point to the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest);

  /// This method removes the specified case and its successor from the switch
  /// instruction. Note that this operation may reorder the remaining cases at
  /// index idx and above.
  /// Note:
  /// This action invalidates iterators for all cases following the one
  /// removed, including the case_end() iterator. It returns an iterator for
  /// the next case.
  CaseIt removeCase(CaseIt I);

  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() && "Successor idx out of range for switch!");
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx * 2 + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
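// A minimal usage sketch (assumed, not from the original header): `Cond` is an
// i32 Value, `DefaultBB`/`CaseBB` are blocks, `BB` is the block being
// terminated, and `Ctx` is the LLVMContext, all hypothetical.
//
//   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, BB);
//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 42), CaseBB);
//   for (const auto &Case : SI->cases())
//     if (Case.getCaseValue()->equalsInt(42))
//       assert(Case.getCaseSuccessor() == CaseBB);
//   // findCaseValue returns case_default() for values with no explicit case.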

/// A wrapper class to simplify modification of SwitchInst cases along with
/// their prof branch_weights metadata.
class SwitchInstProfUpdateWrapper {
  SwitchInst &SI;
  std::optional<SmallVector<uint32_t, 8>> Weights;
  bool Changed = false;

protected:
  MDNode *buildProfBranchWeightsMD();

  void init();

public:
  using CaseWeightOpt = std::optional<uint32_t>;
  SwitchInst *operator->() { return &SI; }
  SwitchInst &operator*() { return SI; }
  operator SwitchInst *() { return &SI; }

  SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }

  ~SwitchInstProfUpdateWrapper() {
    if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
  }

  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// the corresponding branch weight.
  SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);

  /// Delegate the call to the underlying SwitchInst::addCase() and set the
  /// specified branch weight for the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);

  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and
  /// mark this object so that it does not touch the underlying SwitchInst in
  /// its destructor.
  SymbolTableList<Instruction>::iterator eraseFromParent();

  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
  CaseWeightOpt getSuccessorWeight(unsigned idx);

  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
};
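// A minimal usage sketch (assumed, not from the original header): `SI` is a
// SwitchInst carrying !prof branch_weights metadata; `OnVal` and `Dest` are a
// hypothetical case value and destination block.
//
//   {
//     SwitchInstProfUpdateWrapper SW(*SI);
//     SW.addCase(OnVal, Dest, /*W=*/10);  // weight tracked alongside the case
//     SW.removeCase(SW->case_begin());    // corresponding weight removed too
//   } // the destructor writes the updated branch_weights metadata back to SI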

template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)

//===----------------------------------------------------------------------===//
//                             IndirectBrInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Indirect Branch Instruction.
///
class IndirectBrInst : public Instruction {
  unsigned ReservedSpace;

  // Operand[0]   = Address to jump to
  // Operand[n+1] = n-th destination
  IndirectBrInst(const IndirectBrInst &IBI);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor can also
  /// autoinsert before another instruction.
  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor also
  /// autoinserts at the end of the specified BasicBlock.
  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Address, unsigned NumDests);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  IndirectBrInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for indirectbr instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                Instruction *InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                BasicBlock *InsertAtEnd) {
    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
  }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for IndirectBrInst instruction.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// Return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i + 1, NewSucc);
  }

  iterator_range<succ_op_iterator> successors() {
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
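// A minimal usage sketch (assumed, not from the original header): `F` is a
// Function, `BB1`/`BB2` are blocks whose addresses are taken, and `BB` is the
// block being terminated, all hypothetical.
//
//   Value *Addr = BlockAddress::get(F, BB1);
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/2, BB);
//   IBI->addDestination(BB1);
//   IBI->addDestination(BB2);
//   assert(IBI->getNumDestinations() == 2 && IBI->getAddress() == Addr);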

//===----------------------------------------------------------------------===//
//                               InvokeInst Class
//===----------------------------------------------------------------------===//

/// Invoke instruction. The SubclassData field is used to hold the
/// calling convention of the call.
///
class InvokeInst : public CallBase {
  /// The number of operands for this call beyond the called function,
  /// arguments, and operand bundles.
  static constexpr int NumExtraOperands = 2;

  /// The index from the end of the operand array to the normal destination.
  static constexpr int NormalDestOpEndIdx = -3;

  /// The index from the end of the operand array to the unwind destination.
  static constexpr int UnwindDestOpEndIdx = -2;

  InvokeInst(const InvokeInst &BI);

  /// Construct an InvokeInst given a range of arguments.
  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
            BasicBlock *IfException, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InvokeInst *cloneImpl() const;

public:
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
                   NumOperands, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, std::nullopt, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p II with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned invoke instruction is identical to \p II in every way except
  /// that the operand bundles for the new instruction are set to the operand
  /// bundles in \p Bundles.
  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
                            Instruction *InsertPt = nullptr);
3975 | ||||
3976 | // get*Dest - Return the destination basic blocks... | |||
3977 | BasicBlock *getNormalDest() const { | |||
3978 | return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); | |||
3979 | } | |||
3980 | BasicBlock *getUnwindDest() const { | |||
3981 | return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); | |||
3982 | } | |||
3983 | void setNormalDest(BasicBlock *B) { | |||
3984 | Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); | |||
3985 | } | |||
3986 | void setUnwindDest(BasicBlock *B) { | |||
3987 | Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); | |||
3988 | } | |||
3989 | ||||
3990 | /// Get the landingpad instruction from the landing pad | |||
3991 | /// block (the unwind destination). | |||
3992 | LandingPadInst *getLandingPadInst() const; | |||
3993 | ||||
3994 | BasicBlock *getSuccessor(unsigned i) const { | |||
3995 | assert(i < 2 && "Successor # out of range for invoke!"); | |||
3996 | return i == 0 ? getNormalDest() : getUnwindDest(); | |||
3997 | } | |||
3998 | ||||
3999 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { | |||
4000 | assert(i < 2 && "Successor # out of range for invoke!"); | |||
4001 | if (i == 0) | |||
4002 | setNormalDest(NewSucc); | |||
4003 | else | |||
4004 | setUnwindDest(NewSucc); | |||
4005 | } | |||
4006 | ||||
4007 | unsigned getNumSuccessors() const { return 2; } | |||
4008 | ||||
4009 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4010 | static bool classof(const Instruction *I) { | |||
4011 | return (I->getOpcode() == Instruction::Invoke); | |||
4012 | } | |||
4013 | static bool classof(const Value *V) { | |||
4014 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4015 | } | |||
4016 | ||||
4017 | private: | |||
4018 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
4019 | // method so that subclasses cannot accidentally use it. | |||
4020 | template <typename Bitfield> | |||
4021 | void setSubclassData(typename Bitfield::Type Value) { | |||
4022 | Instruction::setSubclassData<Bitfield>(Value); | |||
4023 | } | |||
4024 | }; | |||
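// [Editor's note] A usage sketch for the Create overloads above, not part
// of Instructions.h. `Callee` (a Function *), `Args`, and the blocks
// `PredBB`, `NormalBB`, and `UnwindBB` are hypothetical:
//
//   InvokeInst *II = InvokeInst::Create(Callee->getFunctionType(), Callee,
//                                       NormalBB, UnwindBB, Args, "inv",
//                                       /*InsertAtEnd=*/PredBB);
//   II->setCallingConv(Callee->getCallingConv());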
4025 | ||||
4026 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
4027 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
4028 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4029 | const Twine &NameStr, Instruction *InsertBefore) | |||
4030 | : CallBase(Ty->getReturnType(), Instruction::Invoke, | |||
4031 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, | |||
4032 | InsertBefore) { | |||
4033 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); | |||
4034 | } | |||
4035 | ||||
4036 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
4037 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
4038 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4039 | const Twine &NameStr, BasicBlock *InsertAtEnd) | |||
4040 | : CallBase(Ty->getReturnType(), Instruction::Invoke, | |||
4041 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, | |||
4042 | InsertAtEnd) { | |||
4043 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); | |||
4044 | } | |||
4045 | ||||
4046 | //===----------------------------------------------------------------------===// | |||
4047 | // CallBrInst Class | |||
4048 | //===----------------------------------------------------------------------===// | |||
4049 | ||||
4050 | /// CallBr instruction, tracking function calls that may not return control but | |||
4051 | /// instead transfer it to a third location. The SubclassData field is used to | |||
4052 | /// hold the calling convention of the call. | |||
4053 | /// | |||
4054 | class CallBrInst : public CallBase { | |||
4055 | ||||
4056 | unsigned NumIndirectDests; | |||
4057 | ||||
4058 | CallBrInst(const CallBrInst &BI); | |||
4059 | ||||
4060 | /// Construct a CallBrInst from a range of arguments. | |||
4063 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4064 | ArrayRef<BasicBlock *> IndirectDests, | |||
4065 | ArrayRef<Value *> Args, | |||
4066 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4067 | const Twine &NameStr, Instruction *InsertBefore); | |||
4068 | ||||
4069 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4070 | ArrayRef<BasicBlock *> IndirectDests, | |||
4071 | ArrayRef<Value *> Args, | |||
4072 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4073 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
4074 | ||||
4075 | void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, | |||
4076 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, | |||
4077 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); | |||
4078 | ||||
4079 | /// Compute the number of operands to allocate. | |||
4080 | static int ComputeNumOperands(int NumArgs, int NumIndirectDests, | |||
4081 | int NumBundleInputs = 0) { | |||
4082 | // We need one operand for the called function and one for the default | |||
4083 | // destination, plus one per indirect dest and the input counts provided. | |||
4084 | return 2 + NumIndirectDests + NumArgs + NumBundleInputs; | |||
4085 | } | |||
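// [Editor's note, not in the original header] Worked example: the constant
// 2 covers the callee and the default destination, so a callbr with 2
// arguments, 1 indirect destination, and no bundle inputs allocates
// 2 + 1 + 2 + 0 = 5 operands.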
4086 | ||||
4087 | protected: | |||
4088 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4089 | friend class Instruction; | |||
4090 | ||||
4091 | CallBrInst *cloneImpl() const; | |||
4092 | ||||
4093 | public: | |||
4094 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4095 | BasicBlock *DefaultDest, | |||
4096 | ArrayRef<BasicBlock *> IndirectDests, | |||
4097 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4098 | Instruction *InsertBefore = nullptr) { | |||
4099 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); | |||
4100 | return new (NumOperands) | |||
4101 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, | |||
4102 | NumOperands, NameStr, InsertBefore); | |||
4103 | } | |||
4104 | ||||
4105 | static CallBrInst * | |||
4106 | Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4107 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, | |||
4108 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
4109 | const Twine &NameStr = "", Instruction *InsertBefore = nullptr) { | |||
4110 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), | |||
4111 | CountBundleInputs(Bundles)); | |||
4112 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
4113 | ||||
4114 | return new (NumOperands, DescriptorBytes) | |||
4115 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, | |||
4116 | NumOperands, NameStr, InsertBefore); | |||
4117 | } | |||
4118 | ||||
4119 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4120 | BasicBlock *DefaultDest, | |||
4121 | ArrayRef<BasicBlock *> IndirectDests, | |||
4122 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4123 | BasicBlock *InsertAtEnd) { | |||
4124 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); | |||
4125 | return new (NumOperands) | |||
4126 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, | |||
4127 | NumOperands, NameStr, InsertAtEnd); | |||
4128 | } | |||
4129 | ||||
4130 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4131 | BasicBlock *DefaultDest, | |||
4132 | ArrayRef<BasicBlock *> IndirectDests, | |||
4133 | ArrayRef<Value *> Args, | |||
4134 | ArrayRef<OperandBundleDef> Bundles, | |||
4135 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4136 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), | |||
4137 | CountBundleInputs(Bundles)); | |||
4138 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
4139 | ||||
4140 | return new (NumOperands, DescriptorBytes) | |||
4141 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, | |||
4142 | NumOperands, NameStr, InsertAtEnd); | |||
4143 | } | |||
4144 | ||||
4145 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, | |||
4146 | ArrayRef<BasicBlock *> IndirectDests, | |||
4147 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4148 | Instruction *InsertBefore = nullptr) { | |||
4149 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4150 | IndirectDests, Args, NameStr, InsertBefore); | |||
4151 | } | |||
4152 | ||||
4153 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, | |||
4154 | ArrayRef<BasicBlock *> IndirectDests, | |||
4155 | ArrayRef<Value *> Args, | |||
4156 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
4157 | const Twine &NameStr = "", | |||
4158 | Instruction *InsertBefore = nullptr) { | |||
4159 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4160 | IndirectDests, Args, Bundles, NameStr, InsertBefore); | |||
4161 | } | |||
4162 | ||||
4163 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, | |||
4164 | ArrayRef<BasicBlock *> IndirectDests, | |||
4165 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4166 | BasicBlock *InsertAtEnd) { | |||
4167 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4168 | IndirectDests, Args, NameStr, InsertAtEnd); | |||
4169 | } | |||
4170 | ||||
4171 | static CallBrInst *Create(FunctionCallee Func, | |||
4172 | BasicBlock *DefaultDest, | |||
4173 | ArrayRef<BasicBlock *> IndirectDests, | |||
4174 | ArrayRef<Value *> Args, | |||
4175 | ArrayRef<OperandBundleDef> Bundles, | |||
4176 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4177 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4178 | IndirectDests, Args, Bundles, NameStr, InsertAtEnd); | |||
4179 | } | |||
4180 | ||||
4181 | /// Create a clone of \p CBI with a different set of operand bundles and | |||
4182 | /// insert it before \p InsertPt. | |||
4183 | /// | |||
4184 | /// The returned callbr instruction is identical to \p CBI in every way | |||
4185 | /// except that the operand bundles for the new instruction are set to the | |||
4186 | /// operand bundles in \p Bundles. | |||
4187 | static CallBrInst *Create(CallBrInst *CBI, | |||
4188 | ArrayRef<OperandBundleDef> Bundles, | |||
4189 | Instruction *InsertPt = nullptr); | |||
4190 | ||||
4191 | /// Return the number of callbr indirect dest labels. | |||
4192 | /// | |||
4193 | unsigned getNumIndirectDests() const { return NumIndirectDests; } | |||
4194 | ||||
4195 | /// getIndirectDestLabel - Return the i-th indirect dest label. | |||
4196 | /// | |||
4197 | Value *getIndirectDestLabel(unsigned i) const { | |||
4198 | assert(i < getNumIndirectDests() && "Out of bounds!"); | |||
4199 | return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1); | |||
4200 | } | |||
4201 | ||||
4202 | Value *getIndirectDestLabelUse(unsigned i) const { | |||
4203 | assert(i < getNumIndirectDests() && "Out of bounds!"); | |||
4204 | return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1); | |||
4205 | } | |||
4206 | ||||
4207 | // Return the destination basic blocks... | |||
4208 | BasicBlock *getDefaultDest() const { | |||
4209 | return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); | |||
4210 | } | |||
4211 | BasicBlock *getIndirectDest(unsigned i) const { | |||
4212 | return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); | |||
4213 | } | |||
4214 | SmallVector<BasicBlock *, 16> getIndirectDests() const { | |||
4215 | SmallVector<BasicBlock *, 16> IndirectDests; | |||
4216 | for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) | |||
4217 | IndirectDests.push_back(getIndirectDest(i)); | |||
4218 | return IndirectDests; | |||
4219 | } | |||
4220 | void setDefaultDest(BasicBlock *B) { | |||
4221 | *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); | |||
4222 | } | |||
4223 | void setIndirectDest(unsigned i, BasicBlock *B) { | |||
4224 | *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); | |||
4225 | } | |||
4226 | ||||
4227 | BasicBlock *getSuccessor(unsigned i) const { | |||
4228 | assert(i < getNumSuccessors() + 1 && | |||
4229 | "Successor # out of range for callbr!"); | |||
4230 | return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); | |||
4231 | } | |||
4232 | ||||
4233 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { | |||
4234 | assert(i < getNumIndirectDests() + 1 && | |||
4235 | "Successor # out of range for callbr!"); | |||
4236 | return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); | |||
4237 | } | |||
4238 | ||||
4239 | unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } | |||
4240 | ||||
4241 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4242 | static bool classof(const Instruction *I) { | |||
4243 | return (I->getOpcode() == Instruction::CallBr); | |||
4244 | } | |||
4245 | static bool classof(const Value *V) { | |||
4246 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4247 | } | |||
4248 | ||||
4249 | private: | |||
4250 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
4251 | // method so that subclasses cannot accidentally use it. | |||
4252 | template <typename Bitfield> | |||
4253 | void setSubclassData(typename Bitfield::Type Value) { | |||
4254 | Instruction::setSubclassData<Bitfield>(Value); | |||
4255 | } | |||
4256 | }; | |||
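// [Editor's note] Illustrative sketch only, mirroring IR such as
// `callbr ... to label %fallthrough [label %indirect]`; `Callee`, `BB`,
// `FallthroughBB`, and `IndirectBB` are hypothetical:
//
//   CallBrInst *CBI = CallBrInst::Create(Callee->getFunctionType(), Callee,
//                                        /*DefaultDest=*/FallthroughBB,
//                                        /*IndirectDests=*/{IndirectBB},
//                                        /*Args=*/{}, "", /*InsertAtEnd=*/BB);
//   assert(CBI->getNumSuccessors() == 2); // default + one indirect dest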
4257 | ||||
4258 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4259 | ArrayRef<BasicBlock *> IndirectDests, | |||
4260 | ArrayRef<Value *> Args, | |||
4261 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4262 | const Twine &NameStr, Instruction *InsertBefore) | |||
4263 | : CallBase(Ty->getReturnType(), Instruction::CallBr, | |||
4264 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, | |||
4265 | InsertBefore) { | |||
4266 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); | |||
4267 | } | |||
4268 | ||||
4269 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4270 | ArrayRef<BasicBlock *> IndirectDests, | |||
4271 | ArrayRef<Value *> Args, | |||
4272 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4273 | const Twine &NameStr, BasicBlock *InsertAtEnd) | |||
4274 | : CallBase(Ty->getReturnType(), Instruction::CallBr, | |||
4275 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, | |||
4276 | InsertAtEnd) { | |||
4277 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); | |||
4278 | } | |||
4279 | ||||
4280 | //===----------------------------------------------------------------------===// | |||
4281 | // ResumeInst Class | |||
4282 | //===----------------------------------------------------------------------===// | |||
4283 | ||||
4284 | //===--------------------------------------------------------------------------- | |||
4285 | /// Resume the propagation of an exception. | |||
4286 | /// | |||
4287 | class ResumeInst : public Instruction { | |||
4288 | ResumeInst(const ResumeInst &RI); | |||
4289 | ||||
4290 | explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); | |||
4291 | ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); | |||
4292 | ||||
4293 | protected: | |||
4294 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4295 | friend class Instruction; | |||
4296 | ||||
4297 | ResumeInst *cloneImpl() const; | |||
4298 | ||||
4299 | public: | |||
4300 | static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { | |||
4301 | return new(1) ResumeInst(Exn, InsertBefore); | |||
4302 | } | |||
4303 | ||||
4304 | static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { | |||
4305 | return new(1) ResumeInst(Exn, InsertAtEnd); | |||
4306 | } | |||
4307 | ||||
4308 | /// Provide fast operand accessors | |||
4309 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); | |||
4310 | ||||
4311 | /// Convenience accessor. | |||
4312 | Value *getValue() const { return Op<0>(); } | |||
4313 | ||||
4314 | unsigned getNumSuccessors() const { return 0; } | |||
4315 | ||||
4316 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4317 | static bool classof(const Instruction *I) { | |||
4318 | return I->getOpcode() == Instruction::Resume; | |||
4319 | } | |||
4320 | static bool classof(const Value *V) { | |||
4321 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4322 | } | |||
4323 | ||||
4324 | private: | |||
4325 | BasicBlock *getSuccessor(unsigned idx) const { | |||
4326 | llvm_unreachable("ResumeInst has no successors!"); | |||
4327 | } | |||
4328 | ||||
4329 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { | |||
4330 | llvm_unreachable("ResumeInst has no successors!"); | |||
4331 | } | |||
4332 | }; | |||
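// [Editor's note] Sketch, assuming a hypothetical LandingPadInst *LP whose
// exception aggregate is re-raised at the end of a hypothetical block
// `CleanupBB`:
//
//   ResumeInst::Create(LP, /*InsertAtEnd=*/CleanupBB);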
4333 | ||||
4334 | template <> | |||
4335 | struct OperandTraits<ResumeInst> : | |||
4336 | public FixedNumOperandTraits<ResumeInst, 1> { | |||
4337 | }; | |||
4338 | ||||
4339 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value) | |||
4340 | ||||
4341 | //===----------------------------------------------------------------------===// | |||
4342 | // CatchSwitchInst Class | |||
4343 | //===----------------------------------------------------------------------===// | |||
4344 | class CatchSwitchInst : public Instruction { | |||
4345 | using UnwindDestField = BoolBitfieldElementT<0>; | |||
4346 | ||||
4347 | /// The number of operands actually allocated. NumOperands is | |||
4348 | /// the number actually in use. | |||
4349 | unsigned ReservedSpace; | |||
4350 | ||||
4351 | // Operand[0] = Outer scope | |||
4352 | // Operand[1] = Unwind block destination | |||
4353 | // Operand[n] = BasicBlock to go to on match | |||
4354 | CatchSwitchInst(const CatchSwitchInst &CSI); | |||
4355 | ||||
4356 | /// Create a new catchswitch instruction, specifying the parent pad and | |||
4357 | /// an optional unwind destination. The number of handlers can be | |||
4358 | /// specified here to make memory allocation more efficient. | |||
4359 | /// This constructor can also autoinsert before another instruction. | |||
4360 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, | |||
4361 | unsigned NumHandlers, const Twine &NameStr, | |||
4362 | Instruction *InsertBefore); | |||
4363 | ||||
4364 | /// Create a new catchswitch instruction, specifying the parent pad and | |||
4365 | /// an optional unwind destination. The number of handlers can be | |||
4366 | /// specified here to make memory allocation more efficient. | |||
4367 | /// This constructor also autoinserts at the end of the specified BasicBlock. | |||
4368 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, | |||
4369 | unsigned NumHandlers, const Twine &NameStr, | |||
4370 | BasicBlock *InsertAtEnd); | |||
4371 | ||||
4372 | // allocate space for exactly zero operands | |||
4373 | void *operator new(size_t S) { return User::operator new(S); } | |||
4374 | ||||
4375 | void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); | |||
4376 | void growOperands(unsigned Size); | |||
4377 | ||||
4378 | protected: | |||
4379 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4380 | friend class Instruction; | |||
4381 | ||||
4382 | CatchSwitchInst *cloneImpl() const; | |||
4383 | ||||
4384 | public: | |||
4385 | void operator delete(void *Ptr) { return User::operator delete(Ptr); } | |||
4386 | ||||
4387 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, | |||
4388 | unsigned NumHandlers, | |||
4389 | const Twine &NameStr = "", | |||
4390 | Instruction *InsertBefore = nullptr) { | |||
4391 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, | |||
4392 | InsertBefore); | |||
4393 | } | |||
4394 | ||||
4395 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, | |||
4396 | unsigned NumHandlers, const Twine &NameStr, | |||
4397 | BasicBlock *InsertAtEnd) { | |||
4398 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, | |||
4399 | InsertAtEnd); | |||
4400 | } | |||
4401 | ||||
4402 | /// Provide fast operand accessors | |||
4403 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); | |||
4404 | ||||
4405 | // Accessor Methods for CatchSwitch stmt | |||
4406 | Value *getParentPad() const { return getOperand(0); } | |||
4407 | void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } | |||
4408 | ||||
4409 | // Accessors for the unwind destination. | |||
4410 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } | |||
4411 | bool unwindsToCaller() const { return !hasUnwindDest(); } | |||
4412 | BasicBlock *getUnwindDest() const { | |||
4413 | if (hasUnwindDest()) | |||
4414 | return cast<BasicBlock>(getOperand(1)); | |||
4415 | return nullptr; | |||
4416 | } | |||
4417 | void setUnwindDest(BasicBlock *UnwindDest) { | |||
4418 | assert(UnwindDest); | |||
4419 | assert(hasUnwindDest()); | |||
4420 | setOperand(1, UnwindDest); | |||
4421 | } | |||
4422 | ||||
4423 | /// return the number of 'handlers' in this catchswitch | |||
4424 | /// instruction, except the default handler | |||
4425 | unsigned getNumHandlers() const { | |||
4426 | if (hasUnwindDest()) | |||
4427 | return getNumOperands() - 2; | |||
4428 | return getNumOperands() - 1; | |||
4429 | } | |||
4430 | ||||
4431 | private: | |||
4432 | static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } | |||
4433 | static const BasicBlock *handler_helper(const Value *V) { | |||
4434 | return cast<BasicBlock>(V); | |||
4435 | } | |||
4436 | ||||
4437 | public: | |||
4438 | using DerefFnTy = BasicBlock *(*)(Value *); | |||
4439 | using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; | |||
4440 | using handler_range = iterator_range<handler_iterator>; | |||
4441 | using ConstDerefFnTy = const BasicBlock *(*)(const Value *); | |||
4442 | using const_handler_iterator = | |||
4443 | mapped_iterator<const_op_iterator, ConstDerefFnTy>; | |||
4444 | using const_handler_range = iterator_range<const_handler_iterator>; | |||
4445 | ||||
4446 | /// Returns an iterator that points to the first handler in CatchSwitchInst. | |||
4447 | handler_iterator handler_begin() { | |||
4448 | op_iterator It = op_begin() + 1; | |||
4449 | if (hasUnwindDest()) | |||
4450 | ++It; | |||
4451 | return handler_iterator(It, DerefFnTy(handler_helper)); | |||
4452 | } | |||
4453 | ||||
4454 | /// Returns an iterator that points to the first handler in the | |||
4455 | /// CatchSwitchInst. | |||
4456 | const_handler_iterator handler_begin() const { | |||
4457 | const_op_iterator It = op_begin() + 1; | |||
4458 | if (hasUnwindDest()) | |||
4459 | ++It; | |||
4460 | return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); | |||
4461 | } | |||
4462 | ||||
4463 | /// Returns a read-only iterator that points one past the last | |||
4464 | /// handler in the CatchSwitchInst. | |||
4465 | handler_iterator handler_end() { | |||
4466 | return handler_iterator(op_end(), DerefFnTy(handler_helper)); | |||
4467 | } | |||
4468 | ||||
4469 | /// Returns an iterator that points one past the last handler in the | |||
4470 | /// CatchSwitchInst. | |||
4471 | const_handler_iterator handler_end() const { | |||
4472 | return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); | |||
4473 | } | |||
4474 | ||||
4475 | /// iteration adapter for range-for loops. | |||
4476 | handler_range handlers() { | |||
4477 | return make_range(handler_begin(), handler_end()); | |||
4478 | } | |||
4479 | ||||
4480 | /// iteration adapter for range-for loops. | |||
4481 | const_handler_range handlers() const { | |||
4482 | return make_range(handler_begin(), handler_end()); | |||
4483 | } | |||
4484 | ||||
4485 | /// Add an entry to the catchswitch instruction. | |||
4486 | /// Note: | |||
4487 | /// This action invalidates handler_end(). The old handler_end() iterator | |||
4488 | /// will point to the added handler. | |||
4489 | void addHandler(BasicBlock *Dest); | |||
4490 | ||||
4491 | void removeHandler(handler_iterator HI); | |||
4492 | ||||
4493 | unsigned getNumSuccessors() const { return getNumOperands() - 1; } | |||
4494 | BasicBlock *getSuccessor(unsigned Idx) const { | |||
4495 | assert(Idx < getNumSuccessors() && | |||
4496 | "Successor # out of range for catchswitch!"); | |||
4497 | return cast<BasicBlock>(getOperand(Idx + 1)); | |||
4498 | } | |||
4499 | void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { | |||
4500 | assert(Idx < getNumSuccessors() && | |||
4501 | "Successor # out of range for catchswitch!"); | |||
4502 | setOperand(Idx + 1, NewSucc); | |||
4503 | } | |||
4504 | ||||
4505 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4506 | static bool classof(const Instruction *I) { | |||
4507 | return I->getOpcode() == Instruction::CatchSwitch; | |||
4508 | } | |||
4509 | static bool classof(const Value *V) { | |||
4510 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4511 | } | |||
4512 | }; | |||
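// [Editor's note] Sketch of the API above; `ParentPad`, `DispatchBB`,
// `UnwindBB`, and `HandlerBB` are hypothetical:
//
//   auto *CSI = CatchSwitchInst::Create(ParentPad, UnwindBB,
//                                       /*NumHandlers=*/1, "cs",
//                                       /*InsertAtEnd=*/DispatchBB);
//   CSI->addHandler(HandlerBB);
//   for (BasicBlock *H : CSI->handlers())
//     (void)H; // each handler block begins with a catchpad naming CSI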
4513 | ||||
4514 | template <> | |||
4515 | struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; | |||
4516 | ||||
4517 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value) | |||
4518 | ||||
4519 | //===----------------------------------------------------------------------===// | |||
4520 | // CleanupPadInst Class | |||
4521 | //===----------------------------------------------------------------------===// | |||
4522 | class CleanupPadInst : public FuncletPadInst { | |||
4523 | private: | |||
4524 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, | |||
4525 | unsigned Values, const Twine &NameStr, | |||
4526 | Instruction *InsertBefore) | |||
4527 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, | |||
4528 | NameStr, InsertBefore) {} | |||
4529 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, | |||
4530 | unsigned Values, const Twine &NameStr, | |||
4531 | BasicBlock *InsertAtEnd) | |||
4532 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, | |||
4533 | NameStr, InsertAtEnd) {} | |||
4534 | ||||
4535 | public: | |||
4536 | static CleanupPadInst *Create(Value *ParentPad, | |||
4537 | ArrayRef<Value *> Args = std::nullopt, | |||
4538 | const Twine &NameStr = "", | |||
4539 | Instruction *InsertBefore = nullptr) { | |||
4540 | unsigned Values = 1 + Args.size(); | |||
4541 | return new (Values) | |||
4542 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); | |||
4543 | } | |||
4544 | ||||
4545 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, | |||
4546 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4547 | unsigned Values = 1 + Args.size(); | |||
4548 | return new (Values) | |||
4549 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); | |||
4550 | } | |||
4551 | ||||
4552 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4553 | static bool classof(const Instruction *I) { | |||
4554 | return I->getOpcode() == Instruction::CleanupPad; | |||
4555 | } | |||
4556 | static bool classof(const Value *V) { | |||
4557 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4558 | } | |||
4559 | }; | |||
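// [Editor's note] Sketch; `Ctx` and the new, empty block `PadBB` are
// hypothetical, and ConstantTokenNone models a `within none` parent:
//
//   auto *CP = CleanupPadInst::Create(ConstantTokenNone::get(Ctx),
//                                     /*Args=*/{}, "cleanup",
//                                     /*InsertAtEnd=*/PadBB);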
4560 | ||||
4561 | //===----------------------------------------------------------------------===// | |||
4562 | // CatchPadInst Class | |||
4563 | //===----------------------------------------------------------------------===// | |||
4564 | class CatchPadInst : public FuncletPadInst { | |||
4565 | private: | |||
4566 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4567 | unsigned Values, const Twine &NameStr, | |||
4568 | Instruction *InsertBefore) | |||
4569 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, | |||
4570 | NameStr, InsertBefore) {} | |||
4571 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4572 | unsigned Values, const Twine &NameStr, | |||
4573 | BasicBlock *InsertAtEnd) | |||
4574 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, | |||
4575 | NameStr, InsertAtEnd) {} | |||
4576 | ||||
4577 | public: | |||
4578 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4579 | const Twine &NameStr = "", | |||
4580 | Instruction *InsertBefore = nullptr) { | |||
4581 | unsigned Values = 1 + Args.size(); | |||
4582 | return new (Values) | |||
4583 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); | |||
4584 | } | |||
4585 | ||||
4586 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4587 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4588 | unsigned Values = 1 + Args.size(); | |||
4589 | return new (Values) | |||
4590 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); | |||
4591 | } | |||
4592 | ||||
4593 | /// Convenience accessors | |||
4594 | CatchSwitchInst *getCatchSwitch() const { | |||
4595 | return cast<CatchSwitchInst>(Op<-1>()); | |||
4596 | } | |||
4597 | void setCatchSwitch(Value *CatchSwitch) { | |||
4598 | assert(CatchSwitch); | |||
4599 | Op<-1>() = CatchSwitch; | |||
4600 | } | |||
4601 | ||||
4602 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4603 | static bool classof(const Instruction *I) { | |||
4604 | return I->getOpcode() == Instruction::CatchPad; | |||
4605 | } | |||
4606 | static bool classof(const Value *V) { | |||
4607 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4608 | } | |||
4609 | }; | |||
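// [Editor's note] Sketch; `CSI` is a hypothetical CatchSwitchInst * and
// `HandlerBB` a new, empty handler block (the Args are personality-specific
// and left empty here):
//
//   auto *CPI = CatchPadInst::Create(CSI, /*Args=*/{}, "catch",
//                                    /*InsertAtEnd=*/HandlerBB);
//   assert(CPI->getCatchSwitch() == CSI);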
4610 | ||||
4611 | //===----------------------------------------------------------------------===// | |||
4612 | // CatchReturnInst Class | |||
4613 | //===----------------------------------------------------------------------===// | |||
4614 | ||||
4615 | class CatchReturnInst : public Instruction { | |||
4616 | CatchReturnInst(const CatchReturnInst &RI); | |||
4617 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); | |||
4618 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); | |||
4619 | ||||
4620 | void init(Value *CatchPad, BasicBlock *BB); | |||
4621 | ||||
4622 | protected: | |||
4623 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4624 | friend class Instruction; | |||
4625 | ||||
4626 | CatchReturnInst *cloneImpl() const; | |||
4627 | ||||
4628 | public: | |||
4629 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, | |||
4630 | Instruction *InsertBefore = nullptr) { | |||
4631 | assert(CatchPad); | |||
4632 | assert(BB); | |||
4633 | return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); | |||
4634 | } | |||
4635 | ||||
4636 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, | |||
4637 | BasicBlock *InsertAtEnd) { | |||
4638 | assert(CatchPad); | |||
4639 | assert(BB); | |||
4640 | return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); | |||
4641 | } | |||
4642 | ||||
4643 | /// Provide fast operand accessors | |||
4644 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); | |||
4645 | ||||
4646 | /// Convenience accessors. | |||
4647 | CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } | |||
4648 | void setCatchPad(CatchPadInst *CatchPad) { | |||
4649 | assert(CatchPad); | |||
4650 | Op<0>() = CatchPad; | |||
4651 | } | |||
4652 | ||||
4653 | BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } | |||
4654 | void setSuccessor(BasicBlock *NewSucc) { | |||
4655 | assert(NewSucc); | |||
4656 | Op<1>() = NewSucc; | |||
4657 | } | |||
4658 | unsigned getNumSuccessors() const { return 1; } | |||
4659 | ||||
4660 | /// Get the parentPad of this catchret's catchpad's catchswitch. | |||
4661 | /// The successor block is implicitly a member of this funclet. | |||
4662 | Value *getCatchSwitchParentPad() const { | |||
4663 | return getCatchPad()->getCatchSwitch()->getParentPad(); | |||
4664 | } | |||
4665 | ||||
4666 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4667 | static bool classof(const Instruction *I) { | |||
4668 | return (I->getOpcode() == Instruction::CatchRet); | |||
4669 | } | |||
4670 | static bool classof(const Value *V) { | |||
4671 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4672 | } | |||
4673 | ||||
4674 | private: | |||
4675 | BasicBlock *getSuccessor(unsigned Idx) const { | |||
4676 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); | |||
4677 | return getSuccessor(); | |||
4678 | } | |||
4679 | ||||
4680 | void setSuccessor(unsigned Idx, BasicBlock *B) { | |||
4681 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); | |||
4682 | setSuccessor(B); | |||
4683 | } | |||
4684 | }; | |||
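// [Editor's note] Sketch; `CPI` (a CatchPadInst *), `ContBB`, and
// `HandlerBB` are hypothetical:
//
//   CatchReturnInst::Create(CPI, /*BB=*/ContBB, /*InsertAtEnd=*/HandlerBB);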
4685 | ||||
4686 | template <> | |||
4687 | struct OperandTraits<CatchReturnInst> | |||
4688 | : public FixedNumOperandTraits<CatchReturnInst, 2> {}; | |||
4689 | ||||
4690 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value) | |||
4691 | ||||
4692 | //===----------------------------------------------------------------------===// | |||
4693 | // CleanupReturnInst Class | |||
4694 | //===----------------------------------------------------------------------===// | |||
4695 | ||||
4696 | class CleanupReturnInst : public Instruction { | |||
4697 | using UnwindDestField = BoolBitfieldElementT<0>; | |||
4698 | ||||
4699 | private: | |||
4700 | CleanupReturnInst(const CleanupReturnInst &RI); | |||
4701 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, | |||
4702 | Instruction *InsertBefore = nullptr); | |||
4703 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, | |||
4704 | BasicBlock *InsertAtEnd); | |||
4705 | ||||
4706 | void init(Value *CleanupPad, BasicBlock *UnwindBB); | |||
4707 | ||||
4708 | protected: | |||
4709 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4710 | friend class Instruction; | |||
4711 | ||||
4712 | CleanupReturnInst *cloneImpl() const; | |||
4713 | ||||
4714 | public: | |||
4715 | static CleanupReturnInst *Create(Value *CleanupPad, | |||
4716 | BasicBlock *UnwindBB = nullptr, | |||
4717 | Instruction *InsertBefore = nullptr) { | |||
4718 | assert(CleanupPad); | |||
4719 | unsigned Values = 1; | |||
4720 | if (UnwindBB) | |||
4721 | ++Values; | |||
4722 | return new (Values) | |||
4723 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); | |||
4724 | } | |||
4725 | ||||
4726 | static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, | |||
4727 | BasicBlock *InsertAtEnd) { | |||
4728 | assert(CleanupPad); | |||
4729 | unsigned Values = 1; | |||
4730 | if (UnwindBB) | |||
4731 | ++Values; | |||
4732 | return new (Values) | |||
4733 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); | |||
4734 | } | |||
4735 | ||||
4736 | /// Provide fast operand accessors | |||
4737 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); | |||
4738 | ||||
4739 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } | |||
4740 | bool unwindsToCaller() const { return !hasUnwindDest(); } | |||
4741 | ||||
4742 | /// Convenience accessor. | |||
4743 | CleanupPadInst *getCleanupPad() const { | |||
4744 | return cast<CleanupPadInst>(Op<0>()); | |||
4745 | } | |||
4746 | void setCleanupPad(CleanupPadInst *CleanupPad) { | |||
4747 | assert(CleanupPad); | |||
4748 | Op<0>() = CleanupPad; | |||
4749 | } | |||
4750 | ||||
4751 | unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } | |||
4752 | ||||
4753 | BasicBlock *getUnwindDest() const { | |||
4754 | return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; | |||
4755 | } | |||
4756 | void setUnwindDest(BasicBlock *NewDest) { | |||
4757 | assert(NewDest); | |||
4758 | assert(hasUnwindDest()); | |||
4759 | Op<1>() = NewDest; | |||
4760 | } | |||
4761 | ||||
4762 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4763 | static bool classof(const Instruction *I) { | |||
4764 | return (I->getOpcode() == Instruction::CleanupRet); | |||
4765 | } | |||
4766 | static bool classof(const Value *V) { | |||
4767 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4768 | } | |||
4769 | ||||
4770 | private: | |||
4771 | BasicBlock *getSuccessor(unsigned Idx) const { | |||
4772 | assert(Idx == 0); | |||
4773 | return getUnwindDest(); | |||
4774 | } | |||
4775 | ||||
4776 | void setSuccessor(unsigned Idx, BasicBlock *B) { | |||
4777 | assert(Idx == 0); | |||
4778 | setUnwindDest(B); | |||
4779 | } | |||
4780 | ||||
4781 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
4782 | // method so that subclasses cannot accidentally use it. | |||
4783 | template <typename Bitfield> | |||
4784 | void setSubclassData(typename Bitfield::Type Value) { | |||
4785 | Instruction::setSubclassData<Bitfield>(Value); | |||
4786 | } | |||
4787 | }; | |||
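// [Editor's note] Sketch; `CP` (a CleanupPadInst *) and `PadBB` are
// hypothetical. A null unwind destination means the cleanupret unwinds to
// the caller:
//
//   CleanupReturnInst::Create(CP, /*UnwindBB=*/nullptr,
//                             /*InsertAtEnd=*/PadBB);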
4788 | ||||
4789 | template <> | |||
4790 | struct OperandTraits<CleanupReturnInst> | |||
4791 | : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; | |||
4792 | ||||
4793 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value) | |||
4794 | ||||
4795 | //===----------------------------------------------------------------------===// | |||
4796 | // UnreachableInst Class | |||
4797 | //===----------------------------------------------------------------------===// | |||
4798 | ||||
4799 | //===--------------------------------------------------------------------------- | |||
4800 | /// This instruction has undefined behavior. In particular, its presence | |||
4801 | /// indicates some higher-level knowledge that the end of the block can | |||
4802 | /// never be reached. | |||
4803 | /// | |||
4804 | class UnreachableInst : public Instruction { | |||
4805 | protected: | |||
4806 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4807 | friend class Instruction; | |||
4808 | ||||
4809 | UnreachableInst *cloneImpl() const; | |||
4810 | ||||
4811 | public: | |||
4812 | explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); | |||
4813 | explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); | |||
4814 | ||||
4815 | // allocate space for exactly zero operands | |||
4816 | void *operator new(size_t S) { return User::operator new(S, 0); } | |||
4817 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
4818 | ||||
4819 | unsigned getNumSuccessors() const { return 0; } | |||
4820 | ||||
4821 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4822 | static bool classof(const Instruction *I) { | |||
4823 | return I->getOpcode() == Instruction::Unreachable; | |||
4824 | } | |||
4825 | static bool classof(const Value *V) { | |||
4826 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4827 | } | |||
4828 | ||||
4829 | private: | |||
4830 | BasicBlock *getSuccessor(unsigned idx) const { | |||
4831 | llvm_unreachable("UnreachableInst has no successors!"); | |||
4832 | } | |||
4833 | ||||
4834 | void setSuccessor(unsigned idx, BasicBlock *B) { | |||
4835 | llvm_unreachable("UnreachableInst has no successors!"); | |||
4836 | } | |||
4837 | }; | |||
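// [Editor's note] Sketch; `BB` is a hypothetical block whose end was proven
// unreachable:
//
//   new UnreachableInst(BB->getContext(), /*InsertAtEnd=*/BB);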
4838 | ||||
4839 | //===----------------------------------------------------------------------===// | |||
4840 | // TruncInst Class | |||
4841 | //===----------------------------------------------------------------------===// | |||
4842 | ||||
4843 | /// This class represents a truncation of integer types. | |||
4844 | class TruncInst : public CastInst { | |||
4845 | protected: | |||
4846 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4847 | friend class Instruction; | |||
4848 | ||||
4849 | /// Clone an identical TruncInst | |||
4850 | TruncInst *cloneImpl() const; | |||
4851 | ||||
4852 | public: | |||
4853 | /// Constructor with insert-before-instruction semantics | |||
4854 | TruncInst( | |||
4855 | Value *S, ///< The value to be truncated | |||
4856 | Type *Ty, ///< The (smaller) type to truncate to | |||
4857 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4858 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4859 | ); | |||
4860 | ||||
4861 | /// Constructor with insert-at-end-of-block semantics | |||
4862 | TruncInst( | |||
4863 | Value *S, ///< The value to be truncated | |||
4864 | Type *Ty, ///< The (smaller) type to truncate to | |||
4865 | const Twine &NameStr, ///< A name for the new instruction | |||
4866 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4867 | ); | |||
4868 | ||||
4869 | /// Methods to support type inquiry through isa, cast, and dyn_cast: | |||
4870 | static bool classof(const Instruction *I) { | |||
4871 | return I->getOpcode() == Trunc; | |||
4872 | } | |||
4873 | static bool classof(const Value *V) { | |||
4874 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4875 | } | |||
4876 | }; | |||
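
// Example (an illustrative sketch; `Wide` is an assumed i64 Value* and
// `Before` an assumed insertion point): creating a trunc and recognizing it
// through the classof machinery above.
//
//   LLVMContext &Ctx = Before->getContext();
//   Value *Small = new TruncInst(Wide, Type::getInt8Ty(Ctx), "small", Before);
//   if (auto *TI = dyn_cast<TruncInst>(Small))
//     assert(TI->getOpcode() == Instruction::Trunc);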
4877 | ||||
4878 | //===----------------------------------------------------------------------===// | |||
4879 | // ZExtInst Class | |||
4880 | //===----------------------------------------------------------------------===// | |||
4881 | ||||
4882 | /// This class represents zero extension of integer types. | |||
4883 | class ZExtInst : public CastInst { | |||
4884 | protected: | |||
4885 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4886 | friend class Instruction; | |||
4887 | ||||
4888 | /// Clone an identical ZExtInst | |||
4889 | ZExtInst *cloneImpl() const; | |||
4890 | ||||
4891 | public: | |||
4892 | /// Constructor with insert-before-instruction semantics | |||
4893 | ZExtInst( | |||
4894 | Value *S, ///< The value to be zero extended | |||
4895 | Type *Ty, ///< The type to zero extend to | |||
4896 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4897 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4898 | ); | |||
4899 | ||||
4900 | /// Constructor with insert-at-end-of-block semantics | |||
4901 | ZExtInst( | |||
4902 | Value *S, ///< The value to be zero extended | |||
4903 | Type *Ty, ///< The type to zero extend to | |||
4904 | const Twine &NameStr, ///< A name for the new instruction | |||
4905 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4906 | ); | |||
4907 | ||||
4908 | /// Methods to support type inquiry through isa, cast, and dyn_cast: | |||
4909 | static bool classof(const Instruction *I) { | |||
4910 | return I->getOpcode() == ZExt; | |||
4911 | } | |||
4912 | static bool classof(const Value *V) { | |||
4913 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4914 | } | |||
4915 | }; | |||
4916 | ||||
4917 | //===----------------------------------------------------------------------===// | |||
4918 | // SExtInst Class | |||
4919 | //===----------------------------------------------------------------------===// | |||
4920 | ||||
4921 | /// This class represents a sign extension of integer types. | |||
4922 | class SExtInst : public CastInst { | |||
4923 | protected: | |||
4924 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4925 | friend class Instruction; | |||
4926 | ||||
4927 | /// Clone an identical SExtInst | |||
4928 | SExtInst *cloneImpl() const; | |||
4929 | ||||
4930 | public: | |||
4931 | /// Constructor with insert-before-instruction semantics | |||
4932 | SExtInst( | |||
4933 | Value *S, ///< The value to be sign extended | |||
4934 | Type *Ty, ///< The type to sign extend to | |||
4935 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4936 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4937 | ); | |||
4938 | ||||
4939 | /// Constructor with insert-at-end-of-block semantics | |||
4940 | SExtInst( | |||
4941 | Value *S, ///< The value to be sign extended | |||
4942 | Type *Ty, ///< The type to sign extend to | |||
4943 | const Twine &NameStr, ///< A name for the new instruction | |||
4944 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4945 | ); | |||
4946 | ||||
4947 | /// Methods to support type inquiry through isa, cast, and dyn_cast: | |||
4948 | static bool classof(const Instruction *I) { | |||
4949 | return I->getOpcode() == SExt; | |||
4950 | } | |||
4951 | static bool classof(const Value *V) { | |||
4952 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4953 | } | |||
4954 | }; | |||
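
// Example (an illustrative sketch; `V` is an assumed i8 Value* and `Before`
// an assumed insertion point): zext fills the new high bits with zeros,
// while sext replicates the sign bit.
//
//   Type *I32 = Type::getInt32Ty(V->getContext());
//   Value *U = new ZExtInst(V, I32, "u", Before); // unsigned widening
//   Value *S = new SExtInst(V, I32, "s", Before); // signed widening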
4955 | ||||
4956 | //===----------------------------------------------------------------------===// | |||
4957 | // FPTruncInst Class | |||
4958 | //===----------------------------------------------------------------------===// | |||
4959 | ||||
4960 | /// This class represents a truncation of floating point types. | |||
4961 | class FPTruncInst : public CastInst { | |||
4962 | protected: | |||
4963 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4964 | friend class Instruction; | |||
4965 | ||||
4966 | /// Clone an identical FPTruncInst | |||
4967 | FPTruncInst *cloneImpl() const; | |||
4968 | ||||
4969 | public: | |||
4970 | /// Constructor with insert-before-instruction semantics | |||
4971 | FPTruncInst( | |||
4972 | Value *S, ///< The value to be truncated | |||
4973 | Type *Ty, ///< The type to truncate to | |||
4974 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4975 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4976 | ); | |||
4977 | ||||
4978 | /// Constructor with insert-at-end-of-block semantics | |||
4979 | FPTruncInst( | |||
4980 | Value *S, ///< The value to be truncated | |||
4981 | Type *Ty, ///< The type to truncate to | |||
4982 | const Twine &NameStr, ///< A name for the new instruction | |||
4983 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4984 | ); | |||
4985 | ||||
4986 | /// Methods to support type inquiry through isa, cast, and dyn_cast: | |||
4987 | static bool classof(const Instruction *I) { | |||
4988 | return I->getOpcode() == FPTrunc; | |||
4989 | } | |||
4990 | static bool classof(const Value *V) { | |||
4991 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4992 | } | |||
4993 | }; | |||
4994 | ||||
4995 | //===----------------------------------------------------------------------===// | |||
4996 | // FPExtInst Class | |||
4997 | //===----------------------------------------------------------------------===// | |||
4998 | ||||
4999 | /// This class represents an extension of floating point types. | |||
5000 | class FPExtInst : public CastInst { | |||
5001 | protected: | |||
5002 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5003 | friend class Instruction; | |||
5004 | ||||
5005 | /// Clone an identical FPExtInst | |||
5006 | FPExtInst *cloneImpl() const; | |||
5007 | ||||
5008 | public: | |||
5009 | /// Constructor with insert-before-instruction semantics | |||
5010 | FPExtInst( | |||
5011 | Value *S, ///< The value to be extended | |||
5012 | Type *Ty, ///< The type to extend to | |||
5013 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5014 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5015 | ); | |||
5016 | ||||
5017 | /// Constructor with insert-at-end-of-block semantics | |||
5018 | FPExtInst( | |||
5019 | Value *S, ///< The value to be extended | |||
5020 | Type *Ty, ///< The type to extend to | |||
5021 | const Twine &NameStr, ///< A name for the new instruction | |||
5022 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5023 | ); | |||
5024 | ||||
5025 | /// Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5026 | static bool classof(const Instruction *I) { | |||
5027 | return I->getOpcode() == FPExt; | |||
5028 | } | |||
5029 | static bool classof(const Value *V) { | |||
5030 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5031 | } | |||
5032 | }; | |||
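
// Example (an illustrative sketch; `D` is an assumed double Value*, `F` an
// assumed float Value*, and `Before` an assumed insertion point): FPTrunc
// narrows and FPExt widens, mirroring the integer casts above.
//
//   LLVMContext &Ctx = Before->getContext();
//   Value *Narrow = new FPTruncInst(D, Type::getFloatTy(Ctx), "n", Before);
//   Value *Widened = new FPExtInst(F, Type::getDoubleTy(Ctx), "w", Before);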
5033 | ||||
5034 | //===----------------------------------------------------------------------===// | |||
5035 | // UIToFPInst Class | |||
5036 | //===----------------------------------------------------------------------===// | |||
5037 | ||||
5038 | /// This class represents a cast from unsigned integer to floating point. | |||
5039 | class UIToFPInst : public CastInst { | |||
5040 | protected: | |||
5041 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5042 | friend class Instruction; | |||
5043 | ||||
5044 | /// Clone an identical UIToFPInst | |||
5045 | UIToFPInst *cloneImpl() const; | |||
5046 | ||||
5047 | public: | |||
5048 | /// Constructor with insert-before-instruction semantics | |||
5049 | UIToFPInst( | |||
5050 | Value *S, ///< The value to be converted | |||
5051 | Type *Ty, ///< The type to convert to | |||
5052 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5053 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5054 | ); | |||
5055 | ||||
5056 | /// Constructor with insert-at-end-of-block semantics | |||
5057 | UIToFPInst( | |||
5058 | Value *S, ///< The value to be converted | |||
5059 | Type *Ty, ///< The type to convert to | |||
5060 | const Twine &NameStr, ///< A name for the new instruction | |||
5061 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5062 | ); | |||
5063 | ||||
5064 | /// Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5065 | static bool classof(const Instruction *I) { | |||
5066 | return I->getOpcode() == UIToFP; | |||
5067 | } | |||
5068 | static bool classof(const Value *V) { | |||
5069 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5070 | } | |||
5071 | }; | |||
5072 | ||||
5073 | //===----------------------------------------------------------------------===// | |||
5074 | // SIToFPInst Class | |||
5075 | //===----------------------------------------------------------------------===// | |||
5076 | ||||
5077 | /// This class represents a cast from signed integer to floating point. | |||
5078 | class SIToFPInst : public CastInst { | |||
5079 | protected: | |||
5080 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5081 | friend class Instruction; | |||
5082 | ||||
5083 | /// Clone an identical SIToFPInst | |||
5084 | SIToFPInst *cloneImpl() const; | |||
5085 | ||||
5086 | public: | |||
5087 | /// Constructor with insert-before-instruction semantics | |||
5088 | SIToFPInst( | |||
5089 | Value *S, ///< The value to be converted | |||
5090 | Type *Ty, ///< The type to convert to | |||
5091 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5092 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5093 | ); | |||
5094 | ||||
5095 | /// Constructor with insert-at-end-of-block semantics | |||
5096 | SIToFPInst( | |||
5097 | Value *S, ///< The value to be converted | |||
5098 | Type *Ty, ///< The type to convert to | |||
5099 | const Twine &NameStr, ///< A name for the new instruction | |||
5100 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5101 | ); | |||
5102 | ||||
5103 | /// Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5104 | static bool classof(const Instruction *I) { | |||
5105 | return I->getOpcode() == SIToFP; | |||
5106 | } | |||
5107 | static bool classof(const Value *V) { | |||
5108 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5109 | } | |||
5110 | }; | |||
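
// Example (an illustrative sketch; `N` is an assumed i32 Value* and `Before`
// an assumed insertion point): the two int-to-FP casts differ only in
// whether N's bits are interpreted as unsigned or as two's-complement signed.
//
//   Type *DblTy = Type::getDoubleTy(N->getContext());
//   Value *FromU = new UIToFPInst(N, DblTy, "fu", Before);
//   Value *FromS = new SIToFPInst(N, DblTy, "fs", Before);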
5111 | ||||
5112 | //===----------------------------------------------------------------------===// | |||
5113 | // FPToUIInst Class | |||
5114 | //===----------------------------------------------------------------------===// | |||
5115 | ||||
5116 | /// This class represents a cast from floating point to unsigned integer. | |||
5117 | class FPToUIInst : public CastInst { | |||
5118 | protected: | |||
5119 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5120 | friend class Instruction; | |||
5121 | ||||
5122 | /// Clone an identical FPToUIInst | |||
5123 | FPToUIInst *cloneImpl() const; | |||
5124 | ||||
5125 | public: | |||
5126 | /// Constructor with insert-before-instruction semantics | |||
5127 | FPToUIInst( | |||
5128 | Value *S, ///< The value to be converted | |||
5129 | Type *Ty, ///< The type to convert to | |||
5130 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5131 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5132 | ); | |||
5133 | ||||
5134 | /// Constructor with insert-at-end-of-block semantics | |||
5135 | FPToUIInst( | |||
5136 | Value *S, ///< The value to be converted | |||
5137 | Type *Ty, ///< The type to convert to | |||
5138 | const Twine &NameStr, ///< A name for the new instruction | |||
5139 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5140 | ); | |||
5141 | ||||
5142 | /// Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5143 | static bool classof(const Instruction *I) { | |||
5144 | return I->getOpcode() == FPToUI; | |||
5145 | } | |||
5146 | static bool classof(const Value *V) { | |||
5147 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5148 | } | |||
5149 | }; | |||
5150 | ||||
5151 | //===----------------------------------------------------------------------===// | |||
5152 | // FPToSIInst Class | |||
5153 | //===----------------------------------------------------------------------===// | |||
5154 | ||||
5155 | /// This class represents a cast from floating point to signed integer. | |||
5156 | class FPToSIInst : public CastInst { | |||
5157 | protected: | |||
5158 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5159 | friend class Instruction; | |||
5160 | ||||
5161 | /// Clone an identical FPToSIInst | |||
5162 | FPToSIInst *cloneImpl() const; | |||
5163 | ||||
5164 | public: | |||
5165 | /// Constructor with insert-before-instruction semantics | |||
5166 | FPToSIInst( | |||
5167 | Value *S, ///< The value to be converted | |||
5168 | Type *Ty, ///< The type to convert to | |||
5169 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5170 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5171 | ); | |||
5172 | ||||
5173 | /// Constructor with insert-at-end-of-block semantics | |||
5174 | FPToSIInst( | |||
5175 | Value *S, ///< The value to be converted | |||
5176 | Type *Ty, ///< The type to convert to | |||
5177 | const Twine &NameStr, ///< A name for the new instruction | |||
5178 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5179 | ); | |||
5180 | ||||
5181 | /// Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5182 | static bool classof(const Instruction *I) { | |||
5183 | return I->getOpcode() == FPToSI; | |||
5184 | } | |||
5185 | static bool classof(const Value *V) { | |||
5186 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5187 | } | |||
5188 | }; | |||
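
// Example (an illustrative sketch; `D` is an assumed double Value* and
// `Before` an assumed insertion point): the FP-to-int casts round toward
// zero, and a result that cannot fit in the destination type is poison.
//
//   Type *I32 = Type::getInt32Ty(D->getContext());
//   Value *ToU = new FPToUIInst(D, I32, "tu", Before);
//   Value *ToS = new FPToSIInst(D, I32, "ts", Before);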
5189 | ||||
5190 | //===----------------------------------------------------------------------===// | |||
5191 | // IntToPtrInst Class | |||
5192 | //===----------------------------------------------------------------------===// | |||
5193 | ||||
5194 | /// This class represents a cast from an integer to a pointer. | |||
5195 | class IntToPtrInst : public CastInst { | |||
5196 | public: | |||
5197 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5198 | friend class Instruction; | |||
5199 | ||||
5200 | /// Constructor with insert-before-instruction semantics | |||
5201 | IntToPtrInst( | |||
5202 | Value *S, ///< The value to be converted | |||
5203 | Type *Ty, ///< The type to convert to | |||
5204 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5205 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5206 | ); | |||
5207 | ||||
5208 | /// Constructor with insert-at-end-of-block semantics | |||
5209 | IntToPtrInst( | |||
5210 | Value *S, ///< The value to be converted | |||
5211 | Type *Ty, ///< The type to convert to | |||
5212 | const Twine &NameStr, ///< A name for the new instruction | |||
5213 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5214 | ); | |||
5215 | ||||
5216 | /// Clone an identical IntToPtrInst. | |||
5217 | IntToPtrInst *cloneImpl() const; | |||
5218 | ||||
5219 | /// Returns the address space of this instruction's pointer type. | |||
5220 | unsigned getAddressSpace() const { | |||
5221 | return getType()->getPointerAddressSpace(); | |||
5222 | } | |||
5223 | ||||
5224 | // Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5225 | static bool classof(const Instruction *I) { | |||
5226 | return I->getOpcode() == IntToPtr; | |||
5227 | } | |||
5228 | static bool classof(const Value *V) { | |||
5229 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5230 | } | |||
5231 | }; | |||
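
// Example (an illustrative sketch; `Addr` is an assumed i64 Value*, `PtrTy`
// an assumed pointer type, and `Before` an assumed insertion point):
// materializing a pointer from an integer and querying its address space.
//
//   auto *ITP = new IntToPtrInst(Addr, PtrTy, "p", Before);
//   unsigned AS = ITP->getAddressSpace(); // PtrTy's address space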
5232 | ||||
5233 | //===----------------------------------------------------------------------===// | |||
5234 | // PtrToIntInst Class | |||
5235 | //===----------------------------------------------------------------------===// | |||
5236 | ||||
5237 | /// This class represents a cast from a pointer to an integer. | |||
5238 | class PtrToIntInst : public CastInst { | |||
5239 | protected: | |||
5240 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5241 | friend class Instruction; | |||
5242 | ||||
5243 | /// Clone an identical PtrToIntInst. | |||
5244 | PtrToIntInst *cloneImpl() const; | |||
5245 | ||||
5246 | public: | |||
5247 | /// Constructor with insert-before-instruction semantics | |||
5248 | PtrToIntInst( | |||
5249 | Value *S, ///< The value to be converted | |||
5250 | Type *Ty, ///< The type to convert to | |||
5251 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5252 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5253 | ); | |||
5254 | ||||
5255 | /// Constructor with insert-at-end-of-block semantics | |||
5256 | PtrToIntInst( | |||
5257 | Value *S, ///< The value to be converted | |||
5258 | Type *Ty, ///< The type to convert to | |||
5259 | const Twine &NameStr, ///< A name for the new instruction | |||
5260 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5261 | ); | |||
5262 | ||||
5263 | /// Gets the pointer operand. | |||
5264 | Value *getPointerOperand() { return getOperand(0); } | |||
5265 | /// Gets the pointer operand. | |||
5266 | const Value *getPointerOperand() const { return getOperand(0); } | |||
5267 | /// Gets the operand index of the pointer operand. | |||
5268 | static unsigned getPointerOperandIndex() { return 0U; } | |||
5269 | ||||
5270 | /// Returns the address space of the pointer operand. | |||
5271 | unsigned getPointerAddressSpace() const { | |||
5272 | return getPointerOperand()->getType()->getPointerAddressSpace(); | |||
5273 | } | |||
5274 | ||||
5275 | // Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5276 | static bool classof(const Instruction *I) { | |||
5277 | return I->getOpcode() == PtrToInt; | |||
5278 | } | |||
5279 | static bool classof(const Value *V) { | |||
5280 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5281 | } | |||
5282 | }; | |||
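
// Example (an illustrative sketch; `P` is an assumed pointer Value* and
// `Before` an assumed insertion point): the inverse cast; the original
// pointer and its address space stay queryable.
//
//   auto *PTI = new PtrToIntInst(P, Type::getInt64Ty(P->getContext()), "i",
//                                Before);
//   Value *Src = PTI->getPointerOperand();       // == P
//   unsigned AS = PTI->getPointerAddressSpace(); // P's address space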
5283 | ||||
5284 | //===----------------------------------------------------------------------===// | |||
5285 | // BitCastInst Class | |||
5286 | //===----------------------------------------------------------------------===// | |||
5287 | ||||
5288 | /// This class represents a no-op cast from one type to another. | |||
5289 | class BitCastInst : public CastInst { | |||
5290 | protected: | |||
5291 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5292 | friend class Instruction; | |||
5293 | ||||
5294 | /// Clone an identical BitCastInst. | |||
5295 | BitCastInst *cloneImpl() const; | |||
5296 | ||||
5297 | public: | |||
5298 | /// Constructor with insert-before-instruction semantics | |||
5299 | BitCastInst( | |||
5300 | Value *S, ///< The value to be cast | |||
5301 | Type *Ty, ///< The type to cast to | |||
5302 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5303 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5304 | ); | |||
5305 | ||||
5306 | /// Constructor with insert-at-end-of-block semantics | |||
5307 | BitCastInst( | |||
5308 | Value *S, ///< The value to be cast | |||
5309 | Type *Ty, ///< The type to cast to | |||
5310 | const Twine &NameStr, ///< A name for the new instruction | |||
5311 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5312 | ); | |||
5313 | ||||
5314 | // Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5315 | static bool classof(const Instruction *I) { | |||
5316 | return I->getOpcode() == BitCast; | |||
5317 | } | |||
5318 | static bool classof(const Value *V) { | |||
5319 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5320 | } | |||
5321 | }; | |||
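
// Example (an illustrative sketch; `V` is an assumed i32 Value* and `Before`
// an assumed insertion point): a bitcast reinterprets the bits in a type of
// the same width without changing them.
//
//   Type *FloatTy = Type::getFloatTy(V->getContext());
//   Value *AsFloat = new BitCastInst(V, FloatTy, "bc", Before);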
5322 | ||||
5323 | //===----------------------------------------------------------------------===// | |||
5324 | // AddrSpaceCastInst Class | |||
5325 | //===----------------------------------------------------------------------===// | |||
5326 | ||||
5327 | /// This class represents a conversion between pointers from one address space | |||
5328 | /// to another. | |||
5329 | class AddrSpaceCastInst : public CastInst { | |||
5330 | protected: | |||
5331 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5332 | friend class Instruction; | |||
5333 | ||||
5334 | /// Clone an identical AddrSpaceCastInst. | |||
5335 | AddrSpaceCastInst *cloneImpl() const; | |||
5336 | ||||
5337 | public: | |||
5338 | /// Constructor with insert-before-instruction semantics | |||
5339 | AddrSpaceCastInst( | |||
5340 | Value *S, ///< The value to be cast | |||
5341 | Type *Ty, ///< The type to cast to | |||
5342 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5343 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5344 | ); | |||
5345 | ||||
5346 | /// Constructor with insert-at-end-of-block semantics | |||
5347 | AddrSpaceCastInst( | |||
5348 | Value *S, ///< The value to be cast | |||
5349 | Type *Ty, ///< The type to cast to | |||
5350 | const Twine &NameStr, ///< A name for the new instruction | |||
5351 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5352 | ); | |||
5353 | ||||
5354 | // Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5355 | static bool classof(const Instruction *I) { | |||
5356 | return I->getOpcode() == AddrSpaceCast; | |||
5357 | } | |||
5358 | static bool classof(const Value *V) { | |||
5359 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5360 | } | |||
5361 | ||||
5362 | /// Gets the pointer operand. | |||
5363 | Value *getPointerOperand() { | |||
5364 | return getOperand(0); | |||
5365 | } | |||
5366 | ||||
5367 | /// Gets the pointer operand. | |||
5368 | const Value *getPointerOperand() const { | |||
5369 | return getOperand(0); | |||
5370 | } | |||
5371 | ||||
5372 | /// Gets the operand index of the pointer operand. | |||
5373 | static unsigned getPointerOperandIndex() { | |||
5374 | return 0U; | |||
5375 | } | |||
5376 | ||||
5377 | /// Returns the address space of the pointer operand. | |||
5378 | unsigned getSrcAddressSpace() const { | |||
5379 | return getPointerOperand()->getType()->getPointerAddressSpace(); | |||
5380 | } | |||
5381 | ||||
5382 | /// Returns the address space of the result. | |||
5383 | unsigned getDestAddressSpace() const { | |||
5384 | return getType()->getPointerAddressSpace(); | |||
5385 | } | |||
5386 | }; | |||
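
// Example (an illustrative sketch; `P` is an assumed pointer Value* in
// address space 0, `AS1Ty` an assumed pointer type in address space 1, and
// `Before` an assumed insertion point): unlike bitcast, this cast may change
// the pointer's representation between address spaces.
//
//   auto *ASC = new AddrSpaceCastInst(P, AS1Ty, "asc", Before);
//   unsigned From = ASC->getSrcAddressSpace();  // 0
//   unsigned To   = ASC->getDestAddressSpace(); // 1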
5387 | ||||
5388 | //===----------------------------------------------------------------------===// | |||
5389 | // Helper functions | |||
5390 | //===----------------------------------------------------------------------===// | |||
5391 | ||||
5392 | /// A helper function that returns the pointer operand of a load or store | |||
5393 | /// instruction. Returns nullptr if not load or store. | |||
5394 | inline const Value *getLoadStorePointerOperand(const Value *V) { | |||
5395 | if (auto *Load = dyn_cast<LoadInst>(V)) | |||
5396 | return Load->getPointerOperand(); | |||
5397 | if (auto *Store = dyn_cast<StoreInst>(V)) | |||
5398 | return Store->getPointerOperand(); | |||
5399 | return nullptr; | |||
5400 | } | |||
5401 | inline Value *getLoadStorePointerOperand(Value *V) { | |||
5402 | return const_cast<Value *>( | |||
5403 | getLoadStorePointerOperand(static_cast<const Value *>(V))); | |||
5404 | } | |||
5405 | ||||
5406 | /// A helper function that returns the pointer operand of a load, store | |||
5407 | /// or GEP instruction. Returns nullptr if not load, store, or GEP. | |||
5408 | inline const Value *getPointerOperand(const Value *V) { | |||
5409 | if (auto *Ptr = getLoadStorePointerOperand(V)) | |||
5410 | return Ptr; | |||
5411 | if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) | |||
5412 | return Gep->getPointerOperand(); | |||
5413 | return nullptr; | |||
5414 | } | |||
5415 | inline Value *getPointerOperand(Value *V) { | |||
5416 | return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); | |||
5417 | } | |||
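
// Example (an illustrative sketch; `V` is an assumed Value*): both helpers
// are safe to call on arbitrary values because they return nullptr on a
// mismatch.
//
//   if (const Value *Ptr = getLoadStorePointerOperand(V))
//     (void)Ptr; // V is a load or store accessing Ptr
//   if (const Value *Ptr = getPointerOperand(V))
//     (void)Ptr; // as above, but getelementptr also matches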
5418 | ||||
5419 | /// A helper function that returns the alignment of a load or store instruction. | |||
5420 | inline Align getLoadStoreAlignment(Value *I) { | |||
5421 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && | |||
5422 |        "Expected Load or Store instruction"); | |||
5423 | if (auto *LI = dyn_cast<LoadInst>(I)) | |||
5424 | return LI->getAlign(); | |||
5425 | return cast<StoreInst>(I)->getAlign(); | |||
5426 | } | |||
5427 | ||||
5428 | /// A helper function that returns the address space of the pointer operand of | |||
5429 | /// a load or store instruction. | |||
5430 | inline unsigned getLoadStoreAddressSpace(Value *I) { | |||
5431 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && | |||
5432 |        "Expected Load or Store instruction"); | |||
5433 | if (auto *LI = dyn_cast<LoadInst>(I)) | |||
5434 | return LI->getPointerAddressSpace(); | |||
5435 | return cast<StoreInst>(I)->getPointerAddressSpace(); | |||
5436 | } | |||
5437 | ||||
5438 | /// A helper function that returns the type of a load or store instruction. | |||
5439 | inline Type *getLoadStoreType(Value *I) { | |||
5440 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && | |||
5441 |        "Expected Load or Store instruction"); | |||
5442 | if (auto *LI = dyn_cast<LoadInst>(I)) | |||
5443 | return LI->getType(); | |||
5444 | return cast<StoreInst>(I)->getValueOperand()->getType(); | |||
5445 | } | |||
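
// Example (an illustrative sketch; `I` is an assumed Value* known to be a
// load or store, since these helpers assert on anything else):
//
//   Align A = getLoadStoreAlignment(I);        // alignment of the access
//   unsigned AS = getLoadStoreAddressSpace(I); // pointer address space
//   Type *AccessTy = getLoadStoreType(I);      // loaded/stored value type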
5446 | ||||
5447 | /// A helper function that returns an atomic operation's sync scope; returns | |||
5448 | /// std::nullopt if it is not an atomic operation. | |||
5449 | inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) { | |||
5450 | if (!I->isAtomic()) | |||
5451 | return std::nullopt; | |||
5452 | if (auto *AI = dyn_cast<LoadInst>(I)) | |||
5453 | return AI->getSyncScopeID(); | |||
5454 | if (auto *AI = dyn_cast<StoreInst>(I)) | |||
5455 | return AI->getSyncScopeID(); | |||
5456 | if (auto *AI = dyn_cast<FenceInst>(I)) | |||
5457 | return AI->getSyncScopeID(); | |||
5458 | if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) | |||
5459 | return AI->getSyncScopeID(); | |||
5460 | if (auto *AI = dyn_cast<AtomicRMWInst>(I)) | |||
5461 | return AI->getSyncScopeID(); | |||
5462 | llvm_unreachable("unhandled atomic operation"); | |||
5463 | } | |||
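
// Example (an illustrative sketch; `I` is an assumed Instruction*): the
// returned optional distinguishes "not atomic" from every real sync scope.
//
//   if (std::optional<SyncScope::ID> SSID = getAtomicSyncScopeID(I)) {
//     bool SingleThread = (*SSID == SyncScope::SingleThread);
//     (void)SingleThread;
//   }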
5464 | ||||
5465 | //===----------------------------------------------------------------------===// | |||
5466 | // FreezeInst Class | |||
5467 | //===----------------------------------------------------------------------===// | |||
5468 | ||||
5469 | /// This class represents a freeze instruction, which returns an arbitrary but | |||
5470 | /// fixed concrete value if its operand is either a poison value or an undef value. | |||
5471 | class FreezeInst : public UnaryInstruction { | |||
5472 | protected: | |||
5473 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5474 | friend class Instruction; | |||
5475 | ||||
5476 | /// Clone an identical FreezeInst | |||
5477 | FreezeInst *cloneImpl() const; | |||
5478 | ||||
5479 | public: | |||
5480 | explicit FreezeInst(Value *S, | |||
5481 | const Twine &NameStr = "", | |||
5482 | Instruction *InsertBefore = nullptr); | |||
5483 | FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
5484 | ||||
5485 | // Methods to support type inquiry through isa, cast, and dyn_cast: | |||
5486 | static inline bool classof(const Instruction *I) { | |||
5487 | return I->getOpcode() == Freeze; | |||
5488 | } | |||
5489 | static inline bool classof(const Value *V) { | |||
5490 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5491 | } | |||
5492 | }; | |||
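
// Example (an illustrative sketch; `V` is an assumed Value* that may be
// poison or undef, and `Before` an assumed insertion point): once frozen,
// the result can be used multiple times with consistent results, e.g. as a
// branch condition.
//
//   Value *Frozen = new FreezeInst(V, "fr", Before);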
5493 | ||||
5494 | } // end namespace llvm | |||
5495 | ||||
5496 | #endif // LLVM_IR_INSTRUCTIONS_H |