File: build/source/llvm/lib/Analysis/ScalarEvolution.cpp
Warning: line 11512, column 32: Called C++ object pointer is null
1 | //===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the implementation of the scalar evolution analysis |
10 | // engine, which is used primarily to analyze expressions involving induction |
11 | // variables in loops. |
12 | // |
13 | // There are several aspects to this library. First is the representation of |
14 | // scalar expressions, which are represented as subclasses of the SCEV class. |
15 | // These classes are used to represent certain types of subexpressions that we |
16 | // can handle. We only create one SCEV of a particular shape, so |
17 | // pointer-comparisons for equality are legal. |
18 | // |
19 | // One important aspect of the SCEV objects is that they are never cyclic, even |
20 | // if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
21 | // the PHI node is one of the idioms that we can represent (e.g., a polynomial |
22 | // recurrence) then we represent it directly as a recurrence node, otherwise we |
23 | // represent it as a SCEVUnknown node. |
24 | // |
25 | // In addition to being able to represent expressions of various types, we also |
26 | // have folders that are used to build the *canonical* representation for a |
27 | // particular expression. These folders are capable of using a variety of |
28 | // rewrite rules to simplify the expressions. |
29 | // |
30 | // Once the folders are defined, we can implement the more interesting |
31 | // higher-level code, such as the code that recognizes PHI nodes of various |
32 | // types, computes the execution count of a loop, etc. |
33 | // |
34 | // TODO: We should use these routines and value representations to implement |
35 | // dependence analysis! |
36 | // |
37 | //===----------------------------------------------------------------------===// |
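//
// Editor's note -- a minimal sketch of the uniquing property described above
// (not part of the original file; it assumes a ScalarEvolution instance SE and
// two Values V1 and V2 with structurally identical evolutions):
//
//   const SCEV *A = SE.getSCEV(V1);
//   const SCEV *B = SE.getSCEV(V2);
//   if (A == B) {
//     // Pointer equality implies structural equality, because only one SCEV
//     // of each shape is ever created.
//   }
//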
38 | // |
39 | // There are several good references for the techniques used in this analysis. |
40 | // |
41 | // Chains of recurrences -- a method to expedite the evaluation |
42 | // of closed-form functions |
43 | // Olaf Bachmann, Paul S. Wang, Eugene V. Zima |
44 | // |
45 | // On computational properties of chains of recurrences |
46 | // Eugene V. Zima |
47 | // |
48 | // Symbolic Evaluation of Chains of Recurrences for Loop Optimization |
49 | // Robert A. van Engelen |
50 | // |
51 | // Efficient Symbolic Analysis for Optimizing Compilers |
52 | // Robert A. van Engelen |
53 | // |
54 | // Using the chains of recurrences algebra for data dependence testing and |
55 | // induction variable substitution |
56 | // MS Thesis, Johnie Birch |
57 | // |
58 | //===----------------------------------------------------------------------===// |
59 | |
60 | #include "llvm/Analysis/ScalarEvolution.h" |
61 | #include "llvm/ADT/APInt.h" |
62 | #include "llvm/ADT/ArrayRef.h" |
63 | #include "llvm/ADT/DenseMap.h" |
64 | #include "llvm/ADT/DepthFirstIterator.h" |
65 | #include "llvm/ADT/EquivalenceClasses.h" |
66 | #include "llvm/ADT/FoldingSet.h" |
67 | #include "llvm/ADT/STLExtras.h" |
68 | #include "llvm/ADT/ScopeExit.h" |
69 | #include "llvm/ADT/Sequence.h" |
70 | #include "llvm/ADT/SmallPtrSet.h" |
71 | #include "llvm/ADT/SmallSet.h" |
72 | #include "llvm/ADT/SmallVector.h" |
73 | #include "llvm/ADT/Statistic.h" |
74 | #include "llvm/ADT/StringRef.h" |
75 | #include "llvm/Analysis/AssumptionCache.h" |
76 | #include "llvm/Analysis/ConstantFolding.h" |
77 | #include "llvm/Analysis/InstructionSimplify.h" |
78 | #include "llvm/Analysis/LoopInfo.h" |
79 | #include "llvm/Analysis/MemoryBuiltins.h" |
80 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" |
81 | #include "llvm/Analysis/TargetLibraryInfo.h" |
82 | #include "llvm/Analysis/ValueTracking.h" |
83 | #include "llvm/Config/llvm-config.h" |
84 | #include "llvm/IR/Argument.h" |
85 | #include "llvm/IR/BasicBlock.h" |
86 | #include "llvm/IR/CFG.h" |
87 | #include "llvm/IR/Constant.h" |
88 | #include "llvm/IR/ConstantRange.h" |
89 | #include "llvm/IR/Constants.h" |
90 | #include "llvm/IR/DataLayout.h" |
91 | #include "llvm/IR/DerivedTypes.h" |
92 | #include "llvm/IR/Dominators.h" |
93 | #include "llvm/IR/Function.h" |
94 | #include "llvm/IR/GlobalAlias.h" |
95 | #include "llvm/IR/GlobalValue.h" |
96 | #include "llvm/IR/InstIterator.h" |
97 | #include "llvm/IR/InstrTypes.h" |
98 | #include "llvm/IR/Instruction.h" |
99 | #include "llvm/IR/Instructions.h" |
100 | #include "llvm/IR/IntrinsicInst.h" |
101 | #include "llvm/IR/Intrinsics.h" |
102 | #include "llvm/IR/LLVMContext.h" |
103 | #include "llvm/IR/Operator.h" |
104 | #include "llvm/IR/PatternMatch.h" |
105 | #include "llvm/IR/Type.h" |
106 | #include "llvm/IR/Use.h" |
107 | #include "llvm/IR/User.h" |
108 | #include "llvm/IR/Value.h" |
109 | #include "llvm/IR/Verifier.h" |
110 | #include "llvm/InitializePasses.h" |
111 | #include "llvm/Pass.h" |
112 | #include "llvm/Support/Casting.h" |
113 | #include "llvm/Support/CommandLine.h" |
114 | #include "llvm/Support/Compiler.h" |
115 | #include "llvm/Support/Debug.h" |
116 | #include "llvm/Support/ErrorHandling.h" |
117 | #include "llvm/Support/KnownBits.h" |
118 | #include "llvm/Support/SaveAndRestore.h" |
119 | #include "llvm/Support/raw_ostream.h" |
120 | #include <algorithm> |
121 | #include <cassert> |
122 | #include <climits> |
123 | #include <cstdint> |
124 | #include <cstdlib> |
125 | #include <map> |
126 | #include <memory> |
127 | #include <numeric> |
128 | #include <optional> |
129 | #include <tuple> |
130 | #include <utility> |
131 | #include <vector> |
132 | |
133 | using namespace llvm; |
134 | using namespace PatternMatch; |
135 | |
136 | #define DEBUG_TYPE "scalar-evolution"
137 | |
138 | STATISTIC(NumTripCountsComputed,
139 |           "Number of loops with predictable loop counts");
140 | STATISTIC(NumTripCountsNotComputed,
141 |           "Number of loops without predictable loop counts");
142 | STATISTIC(NumBruteForceTripCountsComputed,
143 |           "Number of loops with trip counts computed by force");
144 | |
145 | #ifdef EXPENSIVE_CHECKS |
146 | bool llvm::VerifySCEV = true; |
147 | #else |
148 | bool llvm::VerifySCEV = false; |
149 | #endif |
150 | |
151 | static cl::opt<unsigned> |
152 | MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, |
153 | cl::desc("Maximum number of iterations SCEV will " |
154 | "symbolically execute a constant " |
155 | "derived loop"), |
156 | cl::init(100)); |
157 | |
158 | static cl::opt<bool, true> VerifySCEVOpt( |
159 | "verify-scev", cl::Hidden, cl::location(VerifySCEV), |
160 | cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); |
161 | static cl::opt<bool> VerifySCEVStrict( |
162 | "verify-scev-strict", cl::Hidden, |
163 | cl::desc("Enable stricter verification when -verify-scev is passed"));
164 | static cl::opt<bool> |
165 | VerifySCEVMap("verify-scev-maps", cl::Hidden, |
166 | cl::desc("Verify no dangling value in ScalarEvolution's " |
167 | "ExprValueMap (slow)")); |
168 | |
169 | static cl::opt<bool> VerifyIR( |
170 | "scev-verify-ir", cl::Hidden, |
171 | cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), |
172 | cl::init(false)); |
173 | |
174 | static cl::opt<unsigned> MulOpsInlineThreshold( |
175 | "scev-mulops-inline-threshold", cl::Hidden, |
176 | cl::desc("Threshold for inlining multiplication operands into a SCEV"), |
177 | cl::init(32)); |
178 | |
179 | static cl::opt<unsigned> AddOpsInlineThreshold( |
180 | "scev-addops-inline-threshold", cl::Hidden, |
181 | cl::desc("Threshold for inlining addition operands into a SCEV"), |
182 | cl::init(500)); |
183 | |
184 | static cl::opt<unsigned> MaxSCEVCompareDepth( |
185 | "scalar-evolution-max-scev-compare-depth", cl::Hidden, |
186 | cl::desc("Maximum depth of recursive SCEV complexity comparisons"), |
187 | cl::init(32)); |
188 | |
189 | static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( |
190 | "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, |
191 | cl::desc("Maximum depth of recursive SCEV operations implication analysis"), |
192 | cl::init(2)); |
193 | |
194 | static cl::opt<unsigned> MaxValueCompareDepth( |
195 | "scalar-evolution-max-value-compare-depth", cl::Hidden, |
196 | cl::desc("Maximum depth of recursive value complexity comparisons"), |
197 | cl::init(2)); |
198 | |
199 | static cl::opt<unsigned> |
200 | MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, |
201 | cl::desc("Maximum depth of recursive arithmetics"), |
202 | cl::init(32)); |
203 | |
204 | static cl::opt<unsigned> MaxConstantEvolvingDepth( |
205 | "scalar-evolution-max-constant-evolving-depth", cl::Hidden, |
206 | cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); |
207 | |
208 | static cl::opt<unsigned> |
209 | MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, |
210 | cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), |
211 | cl::init(8)); |
212 | |
213 | static cl::opt<unsigned> |
214 | MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, |
215 | cl::desc("Max coefficients in AddRec during evolving"), |
216 | cl::init(8)); |
217 | |
218 | static cl::opt<unsigned> |
219 | HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, |
220 | cl::desc("Size of the expression which is considered huge"), |
221 | cl::init(4096)); |
222 | |
223 | static cl::opt<unsigned> RangeIterThreshold( |
224 | "scev-range-iter-threshold", cl::Hidden, |
225 | cl::desc("Threshold for switching to iteratively computing SCEV ranges"), |
226 | cl::init(32)); |
227 | |
228 | static cl::opt<bool> |
229 | ClassifyExpressions("scalar-evolution-classify-expressions", |
230 | cl::Hidden, cl::init(true), |
231 | cl::desc("When printing analysis, include information on every instruction")); |
232 | |
233 | static cl::opt<bool> UseExpensiveRangeSharpening( |
234 | "scalar-evolution-use-expensive-range-sharpening", cl::Hidden, |
235 | cl::init(false), |
236 | cl::desc("Use more powerful methods of sharpening expression ranges. May " |
237 | "be costly in terms of compile time")); |
238 | |
239 | static cl::opt<unsigned> MaxPhiSCCAnalysisSize( |
240 | "scalar-evolution-max-scc-analysis-depth", cl::Hidden, |
241 | cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " |
242 | "Phi strongly connected components"), |
243 | cl::init(8)); |
244 | |
245 | static cl::opt<bool> |
246 | EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, |
247 | cl::desc("Handle <= and >= in finite loops"), |
248 | cl::init(true)); |
249 | |
250 | static cl::opt<bool> UseContextForNoWrapFlagInference( |
251 | "scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, |
252 | cl::desc("Infer nuw/nsw flags using context where suitable"), |
253 | cl::init(true)); |
254 | |
255 | //===----------------------------------------------------------------------===// |
256 | // SCEV class definitions |
257 | //===----------------------------------------------------------------------===// |
258 | |
259 | //===----------------------------------------------------------------------===// |
260 | // Implementation of the SCEV class. |
261 | // |
262 | |
263 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) |
264 | LLVM_DUMP_METHOD void SCEV::dump() const {
265 | print(dbgs()); |
266 | dbgs() << '\n'; |
267 | } |
268 | #endif |
269 | |
270 | void SCEV::print(raw_ostream &OS) const { |
271 | switch (getSCEVType()) { |
272 | case scConstant: |
273 | cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false); |
274 | return; |
275 | case scVScale: |
276 | OS << "vscale"; |
277 | return; |
278 | case scPtrToInt: { |
279 | const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this); |
280 | const SCEV *Op = PtrToInt->getOperand(); |
281 | OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to " |
282 | << *PtrToInt->getType() << ")"; |
283 | return; |
284 | } |
285 | case scTruncate: { |
286 | const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); |
287 | const SCEV *Op = Trunc->getOperand(); |
288 | OS << "(trunc " << *Op->getType() << " " << *Op << " to " |
289 | << *Trunc->getType() << ")"; |
290 | return; |
291 | } |
292 | case scZeroExtend: { |
293 | const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); |
294 | const SCEV *Op = ZExt->getOperand(); |
295 | OS << "(zext " << *Op->getType() << " " << *Op << " to " |
296 | << *ZExt->getType() << ")"; |
297 | return; |
298 | } |
299 | case scSignExtend: { |
300 | const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); |
301 | const SCEV *Op = SExt->getOperand(); |
302 | OS << "(sext " << *Op->getType() << " " << *Op << " to " |
303 | << *SExt->getType() << ")"; |
304 | return; |
305 | } |
306 | case scAddRecExpr: { |
307 | const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); |
308 | OS << "{" << *AR->getOperand(0); |
309 | for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) |
310 | OS << ",+," << *AR->getOperand(i); |
311 | OS << "}<"; |
312 | if (AR->hasNoUnsignedWrap()) |
313 | OS << "nuw><"; |
314 | if (AR->hasNoSignedWrap()) |
315 | OS << "nsw><"; |
316 | if (AR->hasNoSelfWrap() && |
317 | !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) |
318 | OS << "nw><"; |
319 | AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); |
320 | OS << ">"; |
321 | return; |
322 | } |
323 | case scAddExpr: |
324 | case scMulExpr: |
325 | case scUMaxExpr: |
326 | case scSMaxExpr: |
327 | case scUMinExpr: |
328 | case scSMinExpr: |
329 | case scSequentialUMinExpr: { |
330 | const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); |
331 | const char *OpStr = nullptr; |
332 | switch (NAry->getSCEVType()) { |
333 | case scAddExpr: OpStr = " + "; break; |
334 | case scMulExpr: OpStr = " * "; break; |
335 | case scUMaxExpr: OpStr = " umax "; break; |
336 | case scSMaxExpr: OpStr = " smax "; break; |
337 | case scUMinExpr: |
338 | OpStr = " umin "; |
339 | break; |
340 | case scSMinExpr: |
341 | OpStr = " smin "; |
342 | break; |
343 | case scSequentialUMinExpr: |
344 | OpStr = " umin_seq "; |
345 | break; |
346 | default: |
347 | llvm_unreachable("There are no other nary expression types.");
348 | } |
349 | OS << "("; |
350 | ListSeparator LS(OpStr); |
351 | for (const SCEV *Op : NAry->operands()) |
352 | OS << LS << *Op; |
353 | OS << ")"; |
354 | switch (NAry->getSCEVType()) { |
355 | case scAddExpr: |
356 | case scMulExpr: |
357 | if (NAry->hasNoUnsignedWrap()) |
358 | OS << "<nuw>"; |
359 | if (NAry->hasNoSignedWrap()) |
360 | OS << "<nsw>"; |
361 | break; |
362 | default: |
363 | // Nothing to print for other nary expressions. |
364 | break; |
365 | } |
366 | return; |
367 | } |
368 | case scUDivExpr: { |
369 | const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this); |
370 | OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")"; |
371 | return; |
372 | } |
373 | case scUnknown: |
374 | cast<SCEVUnknown>(this)->getValue()->printAsOperand(OS, false); |
375 | return; |
376 | case scCouldNotCompute: |
377 | OS << "***COULDNOTCOMPUTE***"; |
378 | return; |
379 | } |
380 | llvm_unreachable("Unknown SCEV kind!");
381 | } |
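// Editor's note: for concreteness (not in the original source), the printed
// forms produced above look like "42" for a constant, "(42 + %a + %b)" for an
// add expression, and "{0,+,4}<nuw><%loop>" for an add recurrence that starts
// at 0 and steps by 4 in the loop whose header is %loop.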
382 | |
383 | Type *SCEV::getType() const { |
384 | switch (getSCEVType()) { |
385 | case scConstant: |
386 | return cast<SCEVConstant>(this)->getType(); |
387 | case scVScale: |
388 | return cast<SCEVVScale>(this)->getType(); |
389 | case scPtrToInt: |
390 | case scTruncate: |
391 | case scZeroExtend: |
392 | case scSignExtend: |
393 | return cast<SCEVCastExpr>(this)->getType(); |
394 | case scAddRecExpr: |
395 | return cast<SCEVAddRecExpr>(this)->getType(); |
396 | case scMulExpr: |
397 | return cast<SCEVMulExpr>(this)->getType(); |
398 | case scUMaxExpr: |
399 | case scSMaxExpr: |
400 | case scUMinExpr: |
401 | case scSMinExpr: |
402 | return cast<SCEVMinMaxExpr>(this)->getType(); |
403 | case scSequentialUMinExpr: |
404 | return cast<SCEVSequentialMinMaxExpr>(this)->getType(); |
405 | case scAddExpr: |
406 | return cast<SCEVAddExpr>(this)->getType(); |
407 | case scUDivExpr: |
408 | return cast<SCEVUDivExpr>(this)->getType(); |
409 | case scUnknown: |
410 | return cast<SCEVUnknown>(this)->getType(); |
411 | case scCouldNotCompute: |
412 | llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
413 | } |
414 | llvm_unreachable("Unknown SCEV kind!");
415 | } |
416 | |
417 | ArrayRef<const SCEV *> SCEV::operands() const { |
418 | switch (getSCEVType()) { |
419 | case scConstant: |
420 | case scVScale: |
421 | case scUnknown: |
422 | return {}; |
423 | case scPtrToInt: |
424 | case scTruncate: |
425 | case scZeroExtend: |
426 | case scSignExtend: |
427 | return cast<SCEVCastExpr>(this)->operands(); |
428 | case scAddRecExpr: |
429 | case scAddExpr: |
430 | case scMulExpr: |
431 | case scUMaxExpr: |
432 | case scSMaxExpr: |
433 | case scUMinExpr: |
434 | case scSMinExpr: |
435 | case scSequentialUMinExpr: |
436 | return cast<SCEVNAryExpr>(this)->operands(); |
437 | case scUDivExpr: |
438 | return cast<SCEVUDivExpr>(this)->operands(); |
439 | case scCouldNotCompute: |
440 | llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
441 | } |
442 | llvm_unreachable("Unknown SCEV kind!");
443 | } |
444 | |
445 | bool SCEV::isZero() const { |
446 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) |
447 | return SC->getValue()->isZero(); |
448 | return false; |
449 | } |
450 | |
451 | bool SCEV::isOne() const { |
452 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) |
453 | return SC->getValue()->isOne(); |
454 | return false; |
455 | } |
456 | |
457 | bool SCEV::isAllOnesValue() const { |
458 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) |
459 | return SC->getValue()->isMinusOne(); |
460 | return false; |
461 | } |
462 | |
463 | bool SCEV::isNonConstantNegative() const { |
464 | const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this); |
465 | if (!Mul) return false; |
466 | |
467 | // If there is a constant factor, it will be first. |
468 | const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0)); |
469 | if (!SC) return false; |
470 | |
471 | // Return true if the value is negative, this matches things like (-42 * V). |
472 | return SC->getAPInt().isNegative(); |
473 | } |
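// Editor's note: as an example (not in the original source), (-42 * %v) is a
// SCEVMulExpr whose leading operand is the constant -42, so
// isNonConstantNegative() returns true for it, whereas the plain constant -42
// (a SCEVConstant) and (42 * %v) both return false.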
474 | |
475 | SCEVCouldNotCompute::SCEVCouldNotCompute() : |
476 | SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {} |
477 | |
478 | bool SCEVCouldNotCompute::classof(const SCEV *S) { |
479 | return S->getSCEVType() == scCouldNotCompute; |
480 | } |
481 | |
482 | const SCEV *ScalarEvolution::getConstant(ConstantInt *V) { |
483 | FoldingSetNodeID ID; |
484 | ID.AddInteger(scConstant); |
485 | ID.AddPointer(V); |
486 | void *IP = nullptr; |
487 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
488 | SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V); |
489 | UniqueSCEVs.InsertNode(S, IP); |
490 | return S; |
491 | } |
492 | |
493 | const SCEV *ScalarEvolution::getConstant(const APInt &Val) { |
494 | return getConstant(ConstantInt::get(getContext(), Val)); |
495 | } |
496 | |
497 | const SCEV * |
498 | ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) { |
499 | IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); |
500 | return getConstant(ConstantInt::get(ITy, V, isSigned)); |
501 | } |
502 | |
503 | const SCEV *ScalarEvolution::getVScale(Type *Ty) { |
504 | FoldingSetNodeID ID; |
505 | ID.AddInteger(scVScale); |
506 | ID.AddPointer(Ty); |
507 | void *IP = nullptr; |
508 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) |
509 | return S; |
510 | SCEV *S = new (SCEVAllocator) SCEVVScale(ID.Intern(SCEVAllocator), Ty); |
511 | UniqueSCEVs.InsertNode(S, IP); |
512 | return S; |
513 | } |
514 | |
515 | SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, |
516 | const SCEV *op, Type *ty) |
517 | : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {} |
518 | |
519 | SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, |
520 | Type *ITy) |
521 | : SCEVCastExpr(ID, scPtrToInt, Op, ITy) { |
522 | assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
523 |        "Must be a non-bit-width-changing pointer-to-integer cast!");
524 | } |
525 | |
526 | SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, |
527 | SCEVTypes SCEVTy, const SCEV *op, |
528 | Type *ty) |
529 | : SCEVCastExpr(ID, SCEVTy, op, ty) {} |
530 | |
531 | SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op, |
532 | Type *ty) |
533 | : SCEVIntegralCastExpr(ID, scTruncate, op, ty) { |
534 | assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
535 |        "Cannot truncate non-integer value!");
536 | } |
537 | |
538 | SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, |
539 | const SCEV *op, Type *ty) |
540 | : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) { |
541 | assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
542 |        "Cannot zero extend non-integer value!");
543 | } |
544 | |
545 | SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, |
546 | const SCEV *op, Type *ty) |
547 | : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) { |
548 | assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
549 |        "Cannot sign extend non-integer value!");
550 | } |
551 | |
552 | void SCEVUnknown::deleted() { |
553 | // Clear this SCEVUnknown from various maps. |
554 | SE->forgetMemoizedResults(this); |
555 | |
556 | // Remove this SCEVUnknown from the uniquing map. |
557 | SE->UniqueSCEVs.RemoveNode(this); |
558 | |
559 | // Release the value. |
560 | setValPtr(nullptr); |
561 | } |
562 | |
563 | void SCEVUnknown::allUsesReplacedWith(Value *New) { |
564 | // Clear this SCEVUnknown from various maps. |
565 | SE->forgetMemoizedResults(this); |
566 | |
567 | // Remove this SCEVUnknown from the uniquing map. |
568 | SE->UniqueSCEVs.RemoveNode(this); |
569 | |
570 | // Replace the value pointer in case someone is still using this SCEVUnknown. |
571 | setValPtr(New); |
572 | } |
573 | |
574 | //===----------------------------------------------------------------------===// |
575 | // SCEV Utilities |
576 | //===----------------------------------------------------------------------===// |
577 | |
578 | /// Compare the two values \p LV and \p RV in terms of their "complexity" where |
579 | /// "complexity" is a partial (and somewhat ad-hoc) relation used to order |
580 | /// operands in SCEV expressions. \p EqCache is a set of pairs of values that |
581 | /// have been previously deemed to be "equally complex" by this routine. It is |
582 | /// intended to avoid exponential time complexity in cases like: |
583 | /// |
584 | /// %a = f(%x, %y) |
585 | /// %b = f(%a, %a) |
586 | /// %c = f(%b, %b) |
587 | /// |
588 | /// %d = f(%x, %y) |
589 | /// %e = f(%d, %d) |
590 | /// %f = f(%e, %e) |
591 | /// |
592 | /// CompareValueComplexity(%f, %c) |
593 | /// |
594 | /// Since we do not continue running this routine on expression trees once we |
595 | /// have seen unequal values, there is no need to track them in the cache. |
596 | static int |
597 | CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue, |
598 | const LoopInfo *const LI, Value *LV, Value *RV, |
599 | unsigned Depth) { |
600 | if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV)) |
601 | return 0; |
602 | |
603 | // Order pointer values after integer values. This helps SCEVExpander form |
604 | // GEPs. |
605 | bool LIsPointer = LV->getType()->isPointerTy(), |
606 | RIsPointer = RV->getType()->isPointerTy(); |
607 | if (LIsPointer != RIsPointer) |
608 | return (int)LIsPointer - (int)RIsPointer; |
609 | |
610 | // Compare getValueID values. |
611 | unsigned LID = LV->getValueID(), RID = RV->getValueID(); |
612 | if (LID != RID) |
613 | return (int)LID - (int)RID; |
614 | |
615 | // Sort arguments by their position. |
616 | if (const auto *LA = dyn_cast<Argument>(LV)) { |
617 | const auto *RA = cast<Argument>(RV); |
618 | unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo(); |
619 | return (int)LArgNo - (int)RArgNo; |
620 | } |
621 | |
622 | if (const auto *LGV = dyn_cast<GlobalValue>(LV)) { |
623 | const auto *RGV = cast<GlobalValue>(RV); |
624 | |
625 | const auto IsGVNameSemantic = [&](const GlobalValue *GV) { |
626 | auto LT = GV->getLinkage(); |
627 | return !(GlobalValue::isPrivateLinkage(LT) || |
628 | GlobalValue::isInternalLinkage(LT)); |
629 | }; |
630 | |
631 | // Use the names to distinguish the two values, but only if the |
632 | // names are semantically important. |
633 | if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV)) |
634 | return LGV->getName().compare(RGV->getName()); |
635 | } |
636 | |
637 | // For instructions, compare their loop depth, and their operand count. This |
638 | // is pretty loose. |
639 | if (const auto *LInst = dyn_cast<Instruction>(LV)) { |
640 | const auto *RInst = cast<Instruction>(RV); |
641 | |
642 | // Compare loop depths. |
643 | const BasicBlock *LParent = LInst->getParent(), |
644 | *RParent = RInst->getParent(); |
645 | if (LParent != RParent) { |
646 | unsigned LDepth = LI->getLoopDepth(LParent), |
647 | RDepth = LI->getLoopDepth(RParent); |
648 | if (LDepth != RDepth) |
649 | return (int)LDepth - (int)RDepth; |
650 | } |
651 | |
652 | // Compare the number of operands. |
653 | unsigned LNumOps = LInst->getNumOperands(), |
654 | RNumOps = RInst->getNumOperands(); |
655 | if (LNumOps != RNumOps) |
656 | return (int)LNumOps - (int)RNumOps; |
657 | |
658 | for (unsigned Idx : seq(0u, LNumOps)) { |
659 | int Result = |
660 | CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx), |
661 | RInst->getOperand(Idx), Depth + 1); |
662 | if (Result != 0) |
663 | return Result; |
664 | } |
665 | } |
666 | |
667 | EqCacheValue.unionSets(LV, RV); |
668 | return 0; |
669 | } |
670 | |
671 | // Return negative, zero, or positive, if LHS is less than, equal to, or greater |
672 | // than RHS, respectively. A three-way result allows recursive comparisons to be |
673 | // more efficient. |
674 | // If the max analysis depth was reached, return std::nullopt, since we do not
675 | // know for sure whether they are equivalent.
676 | static std::optional<int> |
677 | CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV, |
678 | EquivalenceClasses<const Value *> &EqCacheValue, |
679 | const LoopInfo *const LI, const SCEV *LHS, |
680 | const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) { |
681 | // Fast-path: SCEVs are uniqued so we can do a quick equality check. |
682 | if (LHS == RHS) |
683 | return 0; |
684 | |
685 | // Primarily, sort the SCEVs by their getSCEVType(). |
686 | SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType(); |
687 | if (LType != RType) |
688 | return (int)LType - (int)RType; |
689 | |
690 | if (EqCacheSCEV.isEquivalent(LHS, RHS)) |
691 | return 0; |
692 | |
693 | if (Depth > MaxSCEVCompareDepth) |
694 | return std::nullopt; |
695 | |
696 | // Aside from the getSCEVType() ordering, the particular ordering |
697 | // isn't very important except that it's beneficial to be consistent, |
698 | // so that (a + b) and (b + a) don't end up as different expressions. |
699 | switch (LType) { |
700 | case scUnknown: { |
701 | const SCEVUnknown *LU = cast<SCEVUnknown>(LHS); |
702 | const SCEVUnknown *RU = cast<SCEVUnknown>(RHS); |
703 | |
704 | int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(), |
705 | RU->getValue(), Depth + 1); |
706 | if (X == 0) |
707 | EqCacheSCEV.unionSets(LHS, RHS); |
708 | return X; |
709 | } |
710 | |
711 | case scConstant: { |
712 | const SCEVConstant *LC = cast<SCEVConstant>(LHS); |
713 | const SCEVConstant *RC = cast<SCEVConstant>(RHS); |
714 | |
715 | // Compare constant values. |
716 | const APInt &LA = LC->getAPInt(); |
717 | const APInt &RA = RC->getAPInt(); |
718 | unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth(); |
719 | if (LBitWidth != RBitWidth) |
720 | return (int)LBitWidth - (int)RBitWidth; |
721 | return LA.ult(RA) ? -1 : 1; |
722 | } |
723 | |
724 | case scVScale: { |
725 | const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(LHS)->getType()); |
726 | const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(RHS)->getType()); |
727 | return LTy->getBitWidth() - RTy->getBitWidth(); |
728 | } |
729 | |
730 | case scAddRecExpr: { |
731 | const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS); |
732 | const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS); |
733 | |
734 | // There is always a dominance between two recs that are used by one SCEV, |
735 | // so we can safely sort recs by loop header dominance. We require such |
736 | // order in getAddExpr. |
737 | const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); |
738 | if (LLoop != RLoop) { |
739 | const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader(); |
740 | assert(LHead != RHead && "Two loops share the same header?");
741 | if (DT.dominates(LHead, RHead)) |
742 | return 1; |
743 | assert(DT.dominates(RHead, LHead) &&
744 |        "No dominance between recurrences used by one SCEV?");
745 | return -1; |
746 | } |
747 | |
748 | [[fallthrough]]; |
749 | } |
750 | |
751 | case scTruncate: |
752 | case scZeroExtend: |
753 | case scSignExtend: |
754 | case scPtrToInt: |
755 | case scAddExpr: |
756 | case scMulExpr: |
757 | case scUDivExpr: |
758 | case scSMaxExpr: |
759 | case scUMaxExpr: |
760 | case scSMinExpr: |
761 | case scUMinExpr: |
762 | case scSequentialUMinExpr: { |
763 | ArrayRef<const SCEV *> LOps = LHS->operands(); |
764 | ArrayRef<const SCEV *> ROps = RHS->operands(); |
765 | |
766 | // Lexicographically compare n-ary-like expressions. |
767 | unsigned LNumOps = LOps.size(), RNumOps = ROps.size(); |
768 | if (LNumOps != RNumOps) |
769 | return (int)LNumOps - (int)RNumOps; |
770 | |
771 | for (unsigned i = 0; i != LNumOps; ++i) { |
772 | auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LOps[i], |
773 | ROps[i], DT, Depth + 1); |
774 | if (X != 0) |
775 | return X; |
776 | } |
777 | EqCacheSCEV.unionSets(LHS, RHS); |
778 | return 0; |
779 | } |
780 | |
781 | case scCouldNotCompute: |
782 | llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
783 | } |
784 | llvm_unreachable("Unknown SCEV kind!");
785 | } |
786 | |
787 | /// Given a list of SCEV objects, order them by their complexity, and group |
788 | /// objects of the same complexity together by value. When this routine is |
789 | /// finished, we know that any duplicates in the vector are consecutive and that |
790 | /// complexity is monotonically increasing. |
791 | /// |
792 | /// Note that we take special precautions to ensure that we get deterministic
793 | /// results from this routine. In other words, we don't want the results of |
794 | /// this to depend on where the addresses of various SCEV objects happened to |
795 | /// land in memory. |
796 | static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops, |
797 | LoopInfo *LI, DominatorTree &DT) { |
798 | if (Ops.size() < 2) return; // Noop |
799 | |
800 | EquivalenceClasses<const SCEV *> EqCacheSCEV; |
801 | EquivalenceClasses<const Value *> EqCacheValue; |
802 | |
803 | // Whether LHS has provably less complexity than RHS. |
804 | auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) { |
805 | auto Complexity = |
806 | CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT); |
807 | return Complexity && *Complexity < 0; |
808 | }; |
809 | if (Ops.size() == 2) { |
810 | // This is the common case, which also happens to be trivially simple. |
811 | // Special case it. |
812 | const SCEV *&LHS = Ops[0], *&RHS = Ops[1]; |
813 | if (IsLessComplex(RHS, LHS)) |
814 | std::swap(LHS, RHS); |
815 | return; |
816 | } |
817 | |
818 | // Do the rough sort by complexity. |
819 | llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) { |
820 | return IsLessComplex(LHS, RHS); |
821 | }); |
822 | |
823 | // Now that we are sorted by complexity, group elements of the same |
824 | // complexity. Note that this is, at worst, N^2, but the vector is likely to |
825 | // be extremely short in practice. Note that we take this approach because we |
826 | // do not want to depend on the addresses of the objects we are grouping. |
827 | for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) { |
828 | const SCEV *S = Ops[i]; |
829 | unsigned Complexity = S->getSCEVType(); |
830 | |
831 | // If there are any objects of the same complexity and same value as this |
832 | // one, group them. |
833 | for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) { |
834 | if (Ops[j] == S) { // Found a duplicate. |
835 | // Move it to immediately after i'th element. |
836 | std::swap(Ops[i+1], Ops[j]); |
837 | ++i; // no need to rescan it. |
838 | if (i == e-2) return; // Done! |
839 | } |
840 | } |
841 | } |
842 | } |
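// Editor's note: a worked example (not in the original source). Given
// Ops = [%a, 2, %b, %a], the complexity sort moves the constant to the front,
// and the grouping pass then makes the duplicates adjacent, yielding
// [2, %a, %a, %b]; a caller such as getAddExpr can then fold the neighboring
// duplicates into (2 * %a) in a single left-to-right scan.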
843 | |
844 | /// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at |
845 | /// least HugeExprThreshold nodes). |
846 | static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) { |
847 | return any_of(Ops, [](const SCEV *S) { |
848 | return S->getExpressionSize() >= HugeExprThreshold; |
849 | }); |
850 | } |
851 | |
852 | //===----------------------------------------------------------------------===// |
853 | // Simple SCEV method implementations |
854 | //===----------------------------------------------------------------------===// |
855 | |
856 | /// Compute BC(It, K). The result has width W. Assume K > 0.
857 | static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K, |
858 | ScalarEvolution &SE, |
859 | Type *ResultTy) { |
860 | // Handle the simplest case efficiently. |
861 | if (K == 1) |
862 | return SE.getTruncateOrZeroExtend(It, ResultTy); |
863 | |
864 | // We are using the following formula for BC(It, K): |
865 | // |
866 | // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K! |
867 | // |
868 | // Suppose, W is the bitwidth of the return value. We must be prepared for |
869 | // overflow. Hence, we must assure that the result of our computation is |
870 | // equal to the accurate one modulo 2^W. Unfortunately, division isn't |
871 | // safe in modular arithmetic. |
872 | // |
873 | // However, this code doesn't use exactly that formula; the formula it uses |
874 | // is something like the following, where T is the number of factors of 2 in |
875 | // K! (i.e. trailing zeros in the binary representation of K!), and ^ is |
876 | // exponentiation: |
877 | // |
878 | // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T) |
879 | // |
880 | // This formula is trivially equivalent to the previous formula. However, |
881 | // this formula can be implemented much more efficiently. The trick is that |
882 | // K! / 2^T is odd, and exact division by an odd number *is* safe in modular |
883 | // arithmetic. To do exact division in modular arithmetic, all we have |
884 | // to do is multiply by the inverse. Therefore, this step can be done at |
885 | // width W. |
886 | // |
887 | // The next issue is how to safely do the division by 2^T. The way this |
888 | // is done is by doing the multiplication step at a width of at least W + T |
889 | // bits. This way, the bottom W+T bits of the product are accurate. Then, |
890 | // when we perform the division by 2^T (which is equivalent to a right shift |
891 | // by T), the bottom W bits are accurate. Extra bits are okay; they'll get |
892 | // truncated out after the division by 2^T. |
893 | // |
894 | // In comparison to just directly using the first formula, this technique |
895 | // is much more efficient; using the first formula requires W * K bits, |
896 | // but this formula requires less than W + K bits. Also, the first formula requires
897 | // a division step, whereas this formula only requires multiplies and shifts. |
898 | // |
899 | // It doesn't matter whether the subtraction step is done in the calculation |
900 | // width or the input iteration count's width; if the subtraction overflows, |
901 | // the result must be zero anyway. We prefer here to do it in the width of |
902 | // the induction variable because it helps a lot for certain cases; CodeGen |
903 | // isn't smart enough to ignore the overflow, which leads to much less |
904 | // efficient code if the width of the subtraction is wider than the native |
905 | // register width. |
906 | // |
907 | // (It's possible to not widen at all by pulling out factors of 2 before |
908 | // the multiplication; for example, K=2 can be calculated as |
909 | // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires |
910 | // extra arithmetic, so it's not an obvious win, and it gets |
911 | // much more complicated for K > 3.) |
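// Worked example (editor's addition, not in the original source): take
// K = 3 and W = 8. Then K! = 6 = 2^1 * 3, so T = 1 and the odd factor is 3.
// For It = 7 the product is 7 * 6 * 5 = 210, computed at width W + T = 9.
// Dividing by 2^T gives 105. The multiplicative inverse of 3 mod 2^8 is 171
// (3 * 171 = 513 = 2 * 256 + 1), and 105 * 171 = 17955 = 70 * 256 + 35, so
// the truncated result is 35 = C(7, 3), as expected.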
912 | |
913 | // Protection from insane SCEVs; this bound is conservative, |
914 | // but it probably doesn't matter. |
915 | if (K > 1000) |
916 | return SE.getCouldNotCompute(); |
917 | |
918 | unsigned W = SE.getTypeSizeInBits(ResultTy); |
919 | |
920 | // Calculate K! / 2^T and T; we divide out the factors of two before |
921 | // multiplying for calculating K! / 2^T to avoid overflow. |
922 | // Other overflow doesn't matter because we only care about the bottom |
923 | // W bits of the result. |
924 | APInt OddFactorial(W, 1); |
925 | unsigned T = 1; |
926 | for (unsigned i = 3; i <= K; ++i) { |
927 | APInt Mult(W, i); |
928 | unsigned TwoFactors = Mult.countr_zero(); |
929 | T += TwoFactors; |
930 | Mult.lshrInPlace(TwoFactors); |
931 | OddFactorial *= Mult; |
932 | } |
933 | |
934 | // We need at least W + T bits for the multiplication step |
935 | unsigned CalculationBits = W + T; |
936 | |
937 | // Calculate 2^T, at width T+W. |
938 | APInt DivFactor = APInt::getOneBitSet(CalculationBits, T); |
939 | |
940 | // Calculate the multiplicative inverse of K! / 2^T; |
941 | // this multiplication factor will perform the exact division by |
942 | // K! / 2^T. |
943 | APInt Mod = APInt::getSignedMinValue(W+1); |
944 | APInt MultiplyFactor = OddFactorial.zext(W+1); |
945 | MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod); |
946 | MultiplyFactor = MultiplyFactor.trunc(W); |
947 | |
948 | // Calculate the product, at width T+W |
949 | IntegerType *CalculationTy = IntegerType::get(SE.getContext(), |
950 | CalculationBits); |
951 | const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); |
952 | for (unsigned i = 1; i != K; ++i) { |
953 | const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i)); |
954 | Dividend = SE.getMulExpr(Dividend, |
955 | SE.getTruncateOrZeroExtend(S, CalculationTy)); |
956 | } |
957 | |
958 | // Divide by 2^T |
959 | const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); |
960 | |
961 | // Truncate the result, and divide by K! / 2^T. |
962 | |
963 | return SE.getMulExpr(SE.getConstant(MultiplyFactor), |
964 | SE.getTruncateOrZeroExtend(DivResult, ResultTy)); |
965 | } |
966 | |
967 | /// Return the value of this chain of recurrences at the specified iteration |
968 | /// number. We can evaluate this recurrence by multiplying each element in the |
969 | /// chain by the binomial coefficient corresponding to it. In other words, we |
970 | /// can evaluate {A,+,B,+,C,+,D} as: |
971 | /// |
972 | /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3) |
973 | /// |
974 | /// where BC(It, k) stands for binomial coefficient. |
975 | const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It, |
976 | ScalarEvolution &SE) const { |
977 | return evaluateAtIteration(operands(), It, SE); |
978 | } |
979 | |
980 | const SCEV * |
981 | SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands, |
982 | const SCEV *It, ScalarEvolution &SE) { |
983 | assert(Operands.size() > 0);
984 | const SCEV *Result = Operands[0]; |
985 | for (unsigned i = 1, e = Operands.size(); i != e; ++i) { |
986 | // The computation is correct in the face of overflow provided that the |
987 | // multiplication is performed _after_ the evaluation of the binomial |
988 | // coefficient. |
989 | const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType()); |
990 | if (isa<SCEVCouldNotCompute>(Coeff)) |
991 | return Coeff; |
992 | |
993 | Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff)); |
994 | } |
995 | return Result; |
996 | } |
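// Editor's note: a small worked example (not in the original source).
// Evaluating {3,+,5,+,2} at It = 4 gives
//   3*BC(4,0) + 5*BC(4,1) + 2*BC(4,2) = 3 + 5*4 + 2*6 = 35,
// the value of the recurrence after the fourth iteration.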
997 | |
998 | //===----------------------------------------------------------------------===// |
999 | // SCEV Expression folder implementations |
1000 | //===----------------------------------------------------------------------===// |
1001 | |
1002 | const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op, |
1003 | unsigned Depth) { |
1004 | assert(Depth <= 1 &&
1005 |        "getLosslessPtrToIntExpr() should self-recurse at most once.");
1006 | |
1007 | // We could be called with an integer-typed operands during SCEV rewrites. |
1008 | // Since the operand is an integer already, just perform zext/trunc/self cast. |
1009 | if (!Op->getType()->isPointerTy()) |
1010 | return Op; |
1011 | |
1012 | // What would be an ID for such a SCEV cast expression? |
1013 | FoldingSetNodeID ID; |
1014 | ID.AddInteger(scPtrToInt); |
1015 | ID.AddPointer(Op); |
1016 | |
1017 | void *IP = nullptr; |
1018 | |
1019 | // Is there already an expression for such a cast? |
1020 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) |
1021 | return S; |
1022 | |
1023 | // It isn't legal for optimizations to construct new ptrtoint expressions |
1024 | // for non-integral pointers. |
1025 | if (getDataLayout().isNonIntegralPointerType(Op->getType())) |
1026 | return getCouldNotCompute(); |
1027 | |
1028 | Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType()); |
1029 | |
1030 | // We can only trivially model ptrtoint if SCEV's effective (integer) type |
1031 | // is sufficiently wide to represent all possible pointer values. |
1032 | // We could theoretically teach SCEV to truncate wider pointers, but |
1033 | // that isn't implemented for now. |
1034 | if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) != |
1035 | getDataLayout().getTypeSizeInBits(IntPtrTy)) |
1036 | return getCouldNotCompute(); |
1037 | |
1038 | // If not, is this expression something we can't reduce any further? |
1039 | if (auto *U = dyn_cast<SCEVUnknown>(Op)) { |
1040 | // Perform some basic constant folding. If the operand of the ptr2int cast |
1041 | // is a null pointer, don't create a ptr2int SCEV expression (that will be |
1042 | // left as-is), but produce a zero constant. |
1043 | // NOTE: We could handle a more general case, but lack motivational cases. |
1044 | if (isa<ConstantPointerNull>(U->getValue())) |
1045 | return getZero(IntPtrTy); |
1046 | |
1047 | // Create an explicit cast node. |
1048 | // We can reuse the existing insert position since if we get here, |
1049 | // we won't have made any changes which would invalidate it. |
1050 | SCEV *S = new (SCEVAllocator) |
1051 | SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy); |
1052 | UniqueSCEVs.InsertNode(S, IP); |
1053 | registerUser(S, Op); |
1054 | return S; |
1055 | } |
1056 | |
1057 | assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
1058 |                      "non-SCEVUnknown's.");
1059 | |
1060 | // Otherwise, we've got some expression that is more complex than just a |
1061 | // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an |
1062 | // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown |
1063 | // only, and the expressions must otherwise be integer-typed. |
1064 | // So sink the cast down to the SCEVUnknown's. |
1065 | |
1066 | /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression, |
1067 | /// which computes a pointer-typed value, and rewrites the whole expression |
1068 | /// tree so that *all* the computations are done on integers, and the only |
1069 | /// pointer-typed operands in the expression are SCEVUnknown. |
1070 | class SCEVPtrToIntSinkingRewriter |
1071 | : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> { |
1072 | using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>; |
1073 | |
1074 | public: |
1075 | SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {} |
1076 | |
1077 | static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) { |
1078 | SCEVPtrToIntSinkingRewriter Rewriter(SE); |
1079 | return Rewriter.visit(Scev); |
1080 | } |
1081 | |
1082 | const SCEV *visit(const SCEV *S) { |
1083 | Type *STy = S->getType(); |
1084 | // If the expression is not pointer-typed, just keep it as-is. |
1085 | if (!STy->isPointerTy()) |
1086 | return S; |
1087 | // Else, recursively sink the cast down into it. |
1088 | return Base::visit(S); |
1089 | } |
1090 | |
1091 | const SCEV *visitAddExpr(const SCEVAddExpr *Expr) { |
1092 | SmallVector<const SCEV *, 2> Operands; |
1093 | bool Changed = false; |
1094 | for (const auto *Op : Expr->operands()) { |
1095 | Operands.push_back(visit(Op)); |
1096 | Changed |= Op != Operands.back(); |
1097 | } |
1098 | return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags()); |
1099 | } |
1100 | |
1101 | const SCEV *visitMulExpr(const SCEVMulExpr *Expr) { |
1102 | SmallVector<const SCEV *, 2> Operands; |
1103 | bool Changed = false; |
1104 | for (const auto *Op : Expr->operands()) { |
1105 | Operands.push_back(visit(Op)); |
1106 | Changed |= Op != Operands.back(); |
1107 | } |
1108 | return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags()); |
1109 | } |
1110 | |
1111 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { |
1112 | assert(Expr->getType()->isPointerTy() &&
1113 |        "Should only reach pointer-typed SCEVUnknown's.");
1114 | return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1); |
1115 | } |
1116 | }; |
1117 | |
1118 | // And actually perform the cast sinking. |
1119 | const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this); |
1120 | assert(IntOp->getType()->isIntegerTy() &&
1121 |        "We must have succeeded in sinking the cast, "
1122 |        "and ending up with an integer-typed expression!");
1123 | return IntOp; |
1124 | } |
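// Editor's note: an illustrative sketch of the sinking rewrite above (not in
// the original source; assumes 64-bit pointers). For a pointer-typed SCEV such
// as (%p + 16), the rewriter visits the add, leaves the integer constant
// alone, and replaces the pointer-typed SCEVUnknown %p with
// (ptrtoint ptr %p to i64), producing the fully integer-typed expression
// ((ptrtoint ptr %p to i64) + 16).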
1125 | |
1126 | const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) { |
1127 | assert(Ty->isIntegerTy() && "Target type must be an integer type!");
1128 | |
1129 | const SCEV *IntOp = getLosslessPtrToIntExpr(Op); |
1130 | if (isa<SCEVCouldNotCompute>(IntOp)) |
1131 | return IntOp; |
1132 | |
1133 | return getTruncateOrZeroExtend(IntOp, Ty); |
1134 | } |
1135 | |
1136 | const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty, |
1137 | unsigned Depth) { |
1138 | assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
1139 |        "This is not a truncating conversion!");
1140 | assert(isSCEVable(Ty) &&
1141 |        "This is not a conversion to a SCEVable type!");
1142 | assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
1143 | Ty = getEffectiveSCEVType(Ty); |
1144 | |
1145 | FoldingSetNodeID ID; |
1146 | ID.AddInteger(scTruncate); |
1147 | ID.AddPointer(Op); |
1148 | ID.AddPointer(Ty); |
1149 | void *IP = nullptr; |
1150 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
1151 | |
1152 | // Fold if the operand is constant. |
1153 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) |
1154 | return getConstant( |
1155 | cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty))); |
1156 | |
1157 | // trunc(trunc(x)) --> trunc(x) |
1158 | if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) |
1159 | return getTruncateExpr(ST->getOperand(), Ty, Depth + 1); |
1160 | |
1161 | // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing |
1162 | if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) |
1163 | return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1); |
1164 | |
1165 | // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing |
1166 | if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) |
1167 | return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1); |
1168 | |
1169 | if (Depth > MaxCastDepth) { |
1170 | SCEV *S = |
1171 | new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty); |
1172 | UniqueSCEVs.InsertNode(S, IP); |
1173 | registerUser(S, Op); |
1174 | return S; |
1175 | } |
1176 | |
1177 | // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and |
1178 | // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN), |
1179 | // if after transforming we have at most one truncate, not counting truncates |
1180 | // that replace other casts. |
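// For instance (an illustrative case, not taken from this file): truncating
// (zext(%a:i8 to i32) + %y:i32) down to i8 gives (%a + trunc(%y)); the zext
// operand folds away entirely, so only one new truncate survives, numTruncs
// stays below 2, and the transform is considered profitable.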
1181 | if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) { |
1182 | auto *CommOp = cast<SCEVCommutativeExpr>(Op); |
1183 | SmallVector<const SCEV *, 4> Operands; |
1184 | unsigned numTruncs = 0; |
1185 | for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2; |
1186 | ++i) { |
1187 | const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1); |
1188 | if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) && |
1189 | isa<SCEVTruncateExpr>(S)) |
1190 | numTruncs++; |
1191 | Operands.push_back(S); |
1192 | } |
1193 | if (numTruncs < 2) { |
1194 | if (isa<SCEVAddExpr>(Op)) |
1195 | return getAddExpr(Operands); |
1196 | if (isa<SCEVMulExpr>(Op)) |
1197 | return getMulExpr(Operands); |
1198 | llvm_unreachable("Unexpected SCEV type for Op.");
1199 | } |
1200 | // Although we checked at the beginning that ID was not in the cache, it is
1201 | // possible that it was inserted into the cache during the recursive calls
1202 | // above. So if we find it now, just return it.
1203 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) |
1204 | return S; |
1205 | } |
1206 | |
1207 | // If the input value is a chrec scev, truncate the chrec's operands. |
1208 | if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { |
1209 | SmallVector<const SCEV *, 4> Operands; |
1210 | for (const SCEV *Op : AddRec->operands()) |
1211 | Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1)); |
1212 | return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap); |
1213 | } |
1214 | |
1215 | // Return zero if truncating to known zeros. |
1216 | uint32_t MinTrailingZeros = getMinTrailingZeros(Op); |
1217 | if (MinTrailingZeros >= getTypeSizeInBits(Ty)) |
1218 | return getZero(Ty); |
1219 | |
1220 | // The cast wasn't folded; create an explicit cast node. We can reuse |
1221 | // the existing insert position since if we get here, we won't have |
1222 | // made any changes which would invalidate it. |
1223 | SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), |
1224 | Op, Ty); |
1225 | UniqueSCEVs.InsertNode(S, IP); |
1226 | registerUser(S, Op); |
1227 | return S; |
1228 | } |
1229 | |
1230 | // Get the limit of a recurrence such that incrementing by Step cannot cause |
1231 | // signed overflow as long as the value of the recurrence within the |
1232 | // loop does not exceed this limit before incrementing. |
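// For example (a sketch with assumed i8 types and Step known to lie in
// [1, 3]): the limit below is INT8_MIN - 3, which wraps to 125, with
// Pred = ICMP_SLT; any V <s 125 satisfies V + Step <= 127, so the
// increment cannot sign-overflow.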
1233 | static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step, |
1234 | ICmpInst::Predicate *Pred, |
1235 | ScalarEvolution *SE) { |
1236 | unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); |
1237 | if (SE->isKnownPositive(Step)) { |
1238 | *Pred = ICmpInst::ICMP_SLT; |
1239 | return SE->getConstant(APInt::getSignedMinValue(BitWidth) - |
1240 | SE->getSignedRangeMax(Step)); |
1241 | } |
1242 | if (SE->isKnownNegative(Step)) { |
1243 | *Pred = ICmpInst::ICMP_SGT; |
1244 | return SE->getConstant(APInt::getSignedMaxValue(BitWidth) - |
1245 | SE->getSignedRangeMin(Step)); |
1246 | } |
1247 | return nullptr; |
1248 | } |
1249 | |
1250 | // Get the limit of a recurrence such that incrementing by Step cannot cause |
1251 | // unsigned overflow as long as the value of the recurrence within the loop does |
1252 | // not exceed this limit before incrementing. |
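// For example (same assumed i8 setup as above, Step in [1, 3]): the limit
// is 0 - 3, which wraps to 253; any V <u 253 satisfies V + Step <= 255,
// so the increment cannot unsigned-overflow.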
1253 | static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step, |
1254 | ICmpInst::Predicate *Pred, |
1255 | ScalarEvolution *SE) { |
1256 | unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); |
1257 | *Pred = ICmpInst::ICMP_ULT; |
1258 | |
1259 | return SE->getConstant(APInt::getMinValue(BitWidth) - |
1260 | SE->getUnsignedRangeMax(Step)); |
1261 | } |
1262 | |
1263 | namespace { |
1264 | |
1265 | struct ExtendOpTraitsBase { |
1266 | typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *, |
1267 | unsigned); |
1268 | }; |
1269 | |
1270 | // Used to make code generic over signed and unsigned overflow. |
1271 | template <typename ExtendOp> struct ExtendOpTraits { |
1272 | // Members present: |
1273 | // |
1274 | // static const SCEV::NoWrapFlags WrapType; |
1275 | // |
1276 | // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr; |
1277 | // |
1278 | // static const SCEV *getOverflowLimitForStep(const SCEV *Step, |
1279 | // ICmpInst::Predicate *Pred, |
1280 | // ScalarEvolution *SE); |
1281 | }; |
1282 | |
1283 | template <> |
1284 | struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase { |
1285 | static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW; |
1286 | |
1287 | static const GetExtendExprTy GetExtendExpr; |
1288 | |
1289 | static const SCEV *getOverflowLimitForStep(const SCEV *Step, |
1290 | ICmpInst::Predicate *Pred, |
1291 | ScalarEvolution *SE) { |
1292 | return getSignedOverflowLimitForStep(Step, Pred, SE); |
1293 | } |
1294 | }; |
1295 | |
1296 | const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< |
1297 | SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr; |
1298 | |
1299 | template <> |
1300 | struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase { |
1301 | static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW; |
1302 | |
1303 | static const GetExtendExprTy GetExtendExpr; |
1304 | |
1305 | static const SCEV *getOverflowLimitForStep(const SCEV *Step, |
1306 | ICmpInst::Predicate *Pred, |
1307 | ScalarEvolution *SE) { |
1308 | return getUnsignedOverflowLimitForStep(Step, Pred, SE); |
1309 | } |
1310 | }; |
1311 | |
1312 | const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< |
1313 | SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr; |
1314 | |
1315 | } // end anonymous namespace |
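// For illustration, a caller that is generic over both extension kinds
// dispatches through the traits roughly like this (a hypothetical helper,
// not part of this file):
//
//   template <typename ExtendOpTy>
//   static const SCEV *extendVia(ScalarEvolution *SE, const SCEV *S,
//                                Type *Ty, unsigned Depth) {
//     return (SE->*ExtendOpTraits<ExtendOpTy>::GetExtendExpr)(S, Ty, Depth);
//   }
//
// getPreStartForExtend and getExtendAddRecStart below use this
// member-function-pointer call pattern directly.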
1316 | |
1317 | // The recurrence AR has been shown to have no signed/unsigned wrap or something |
1318 | // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as |
1319 | // easily prove NSW/NUW for its preincrement or postincrement sibling. This |
1320 | // allows normalizing a sign/zero extended AddRec as such:
1321 | //   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
1322 | // As a result, the expression "Step + sext/zext(PreIncAR)" is congruent
1323 | // with "sext/zext(PostIncAR)".
1324 | template <typename ExtendOpTy> |
1325 | static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, |
1326 | ScalarEvolution *SE, unsigned Depth) { |
1327 | auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; |
1328 | auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; |
1329 | |
1330 | const Loop *L = AR->getLoop(); |
1331 | const SCEV *Start = AR->getStart(); |
1332 | const SCEV *Step = AR->getStepRecurrence(*SE); |
1333 | |
1334 | // Check for a simple looking step prior to loop entry. |
1335 | const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start); |
1336 | if (!SA) |
1337 | return nullptr; |
1338 | |
1339 | // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV |
1340 | // subtraction is expensive. For this purpose, perform a quick and dirty |
1341 | // difference, by checking for Step in the operand list. |
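// For example, if Start is (8 + %x + Step), dropping the single occurrence
// of Step leaves DiffOps = {8, %x}, i.e. PreStart = (8 + %x), without
// building a full (Start - Step) subtraction.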
1342 | SmallVector<const SCEV *, 4> DiffOps; |
1343 | for (const SCEV *Op : SA->operands()) |
1344 | if (Op != Step) |
1345 | DiffOps.push_back(Op); |
1346 | |
1347 | if (DiffOps.size() == SA->getNumOperands()) |
1348 | return nullptr; |
1349 | |
1350 | // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` + |
1351 | // `Step`: |
1352 | |
1353 | // 1. NSW/NUW flags on the step increment. |
1354 | auto PreStartFlags = |
1355 | ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW); |
1356 | const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags); |
1357 | const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>( |
1358 | SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap)); |
1359 | |
1360 | // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies |
1361 | // "S+X does not sign/unsign-overflow". |
1362 | // |
1363 | |
1364 | const SCEV *BECount = SE->getBackedgeTakenCount(L); |
1365 | if (PreAR && PreAR->getNoWrapFlags(WrapType) && |
1366 | !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount)) |
1367 | return PreStart; |
1368 | |
1369 | // 2. Direct overflow check on the step operation's expression. |
1370 | unsigned BitWidth = SE->getTypeSizeInBits(AR->getType()); |
1371 | Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2); |
1372 | const SCEV *OperandExtendedStart = |
1373 | SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth), |
1374 | (SE->*GetExtendExpr)(Step, WideTy, Depth)); |
1375 | if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) { |
1376 | if (PreAR && AR->getNoWrapFlags(WrapType)) { |
1377 | // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW |
1378 | // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then |
1379 | // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact. |
1380 | SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType); |
1381 | } |
1382 | return PreStart; |
1383 | } |
1384 | |
1385 | // 3. Loop precondition. |
1386 | ICmpInst::Predicate Pred; |
1387 | const SCEV *OverflowLimit = |
1388 | ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE); |
1389 | |
1390 | if (OverflowLimit && |
1391 | SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) |
1392 | return PreStart; |
1393 | |
1394 | return nullptr; |
1395 | } |
1396 | |
1397 | // Get the normalized zero or sign extended expression for this AddRec's Start. |
1398 | template <typename ExtendOpTy> |
1399 | static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, |
1400 | ScalarEvolution *SE, |
1401 | unsigned Depth) { |
1402 | auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; |
1403 | |
1404 | const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth); |
1405 | if (!PreStart) |
1406 | return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth); |
1407 | |
1408 | return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, |
1409 | Depth), |
1410 | (SE->*GetExtendExpr)(PreStart, Ty, Depth)); |
1411 | } |
1412 | |
1413 | // Try to prove away overflow by looking at "nearby" add recurrences. A |
1414 | // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it |
1415 | // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`. |
1416 | // |
1417 | // Formally: |
1418 | // |
1419 | // {S,+,X} == {S-T,+,X} + T |
1420 | // => Ext({S,+,X}) == Ext({S-T,+,X} + T) |
1421 | // |
1422 | // If ({S-T,+,X} + T) does not overflow ... (1) |
1423 | // |
1424 | // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T) |
1425 | // |
1426 | // If {S-T,+,X} does not overflow ... (2) |
1427 | // |
1428 | // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T) |
1429 | // == {Ext(S-T)+Ext(T),+,Ext(X)} |
1430 | // |
1431 | // If (S-T)+T does not overflow ... (3) |
1432 | // |
1433 | // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)} |
1434 | // == {Ext(S),+,Ext(X)} == LHS |
1435 | // |
1436 | // Thus, if (1), (2) and (3) are true for some T, then |
1437 | // Ext({S,+,X}) == {Ext(S),+,Ext(X)} |
1438 | // |
1439 | // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T) |
1440 | // does not overflow" restricted to the 0th iteration. Therefore we only need |
1441 | // to check for (1) and (2). |
1442 | // |
1443 | // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T |
1444 | // is `Delta` (defined below). |
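// Worked instance of the motivating example: with S = 1, X = 4 and T = 1,
// {S-T,+,X} is {0,+,4}. If {0,+,4} is already known <nuw> -- condition
// (2) -- and is known ult -1, i.e. never reaches UINT_MAX -- condition
// (1), via getOverflowLimitForStep on T -- then adding 1 to every value of
// {0,+,4} cannot wrap, so {1,+,4} is <nuw> as well.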
1445 | template <typename ExtendOpTy> |
1446 | bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, |
1447 | const SCEV *Step, |
1448 | const Loop *L) { |
1449 | auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; |
1450 | |
1451 | // We restrict `Start` to a constant to prevent SCEV from spending too much |
1452 | // time here. It is correct (but more expensive) to continue with a |
1453 | // non-constant `Start` and do a general SCEV subtraction to compute |
1454 | // `PreStart` below. |
1455 | const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start); |
1456 | if (!StartC) |
1457 | return false; |
1458 | |
1459 | APInt StartAI = StartC->getAPInt(); |
1460 | |
1461 | for (unsigned Delta : {-2, -1, 1, 2}) { |
1462 | const SCEV *PreStart = getConstant(StartAI - Delta); |
1463 | |
1464 | FoldingSetNodeID ID; |
1465 | ID.AddInteger(scAddRecExpr); |
1466 | ID.AddPointer(PreStart); |
1467 | ID.AddPointer(Step); |
1468 | ID.AddPointer(L); |
1469 | void *IP = nullptr; |
1470 | const auto *PreAR = |
1471 | static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); |
1472 | |
1473 | // Give up if we don't already have the add recurrence we need because |
1474 | // actually constructing an add recurrence is relatively expensive. |
1475 | if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) |
1476 | const SCEV *DeltaS = getConstant(StartC->getType(), Delta); |
1477 | ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; |
1478 | const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep( |
1479 | DeltaS, &Pred, this); |
1480 | if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) |
1481 | return true; |
1482 | } |
1483 | } |
1484 | |
1485 | return false; |
1486 | } |
1487 | |
1488 | // Finds an integer D for an expression (C + x + y + ...) such that the top |
1489 | // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or |
1490 | // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is |
1491 | // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and |
1492 | // the (C + x + y + ...) expression is \p WholeAddExpr. |
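// For example (a sketch at bit width 8): if C = 86 = 0b01010110 and
// (x + y + ...) is known to have at least 4 trailing zeros, then
// D = C.trunc(4).zext(8) = 6 and C - D = 80 = 0b01010000. The sum
// (C - D + x + y + ...) keeps 4 trailing zero bits, and adding D < 2^4
// merely fills those zero bits -- no carry is generated, hence no wrap.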
1493 | static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, |
1494 | const SCEVConstant *ConstantTerm, |
1495 | const SCEVAddExpr *WholeAddExpr) { |
1496 | const APInt &C = ConstantTerm->getAPInt(); |
1497 | const unsigned BitWidth = C.getBitWidth(); |
1498 | // Find number of trailing zeros of (x + y + ...) w/o the C first: |
1499 | uint32_t TZ = BitWidth; |
1500 | for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I) |
1501 | TZ = std::min(TZ, SE.getMinTrailingZeros(WholeAddExpr->getOperand(I))); |
1502 | if (TZ) { |
1503 | // Set D to be as many least significant bits of C as possible while still |
1504 | // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap: |
1505 | return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C; |
1506 | } |
1507 | return APInt(BitWidth, 0); |
1508 | } |
1509 | |
1510 | // Finds an integer D for an affine AddRec expression {C,+,x} such that the top |
1511 | // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the |
1512 | // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p |
1513 | // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. |
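// For example (bit width 8): for {7,+,4}, Step has 2 trailing zeros, so
// D = 7.trunc(2).zext(8) = 3 and C - D = 4; every value (4 + 4 * n) keeps
// 2 trailing zero bits, and adding D = 3 < 2^2 cannot carry.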
1514 | static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, |
1515 | const APInt &ConstantStart, |
1516 | const SCEV *Step) { |
1517 | const unsigned BitWidth = ConstantStart.getBitWidth(); |
1518 | const uint32_t TZ = SE.getMinTrailingZeros(Step); |
1519 | if (TZ) |
1520 | return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) |
1521 | : ConstantStart; |
1522 | return APInt(BitWidth, 0); |
1523 | } |
1524 | |
1525 | static void insertFoldCacheEntry( |
1526 | const ScalarEvolution::FoldID &ID, const SCEV *S, |
1527 | DenseMap<ScalarEvolution::FoldID, const SCEV *> &FoldCache, |
1528 | DenseMap<const SCEV *, SmallVector<ScalarEvolution::FoldID, 2>> |
1529 | &FoldCacheUser) { |
1530 | auto I = FoldCache.insert({ID, S}); |
1531 | if (!I.second) { |
1532 | // Remove FoldCacheUser entry for ID when replacing an existing FoldCache |
1533 | // entry. |
1534 | auto &UserIDs = FoldCacheUser[I.first->second]; |
1535 | assert(count(UserIDs, ID) == 1 && "unexpected duplicates in UserIDs");
1536 | for (unsigned I = 0; I != UserIDs.size(); ++I) |
1537 | if (UserIDs[I] == ID) { |
1538 | std::swap(UserIDs[I], UserIDs.back()); |
1539 | break; |
1540 | } |
1541 | UserIDs.pop_back(); |
1542 | I.first->second = S; |
1543 | } |
1544 | auto R = FoldCacheUser.insert({S, {}}); |
1545 | R.first->second.push_back(ID); |
1546 | } |
1547 | |
1548 | const SCEV * |
1549 | ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { |
1550 | assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1551 | "This is not an extending conversion!");
1552 | assert(isSCEVable(Ty) &&
1553 | "This is not a conversion to a SCEVable type!");
1554 | assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1555 | Ty = getEffectiveSCEVType(Ty); |
1556 | |
1557 | FoldID ID; |
1558 | ID.addInteger(scZeroExtend); |
1559 | ID.addPointer(Op); |
1560 | ID.addPointer(Ty); |
1561 | auto Iter = FoldCache.find(ID); |
1562 | if (Iter != FoldCache.end()) |
1563 | return Iter->second; |
1564 | |
1565 | const SCEV *S = getZeroExtendExprImpl(Op, Ty, Depth); |
1566 | if (!isa<SCEVZeroExtendExpr>(S)) |
1567 | insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser); |
1568 | return S; |
1569 | } |
1570 | |
1571 | const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty, |
1572 | unsigned Depth) { |
1573 | assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1574 | "This is not an extending conversion!");
1575 | assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!");
1576 | assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1577 | |
1578 | // Fold if the operand is constant. |
1579 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) |
1580 | return getConstant( |
1581 | cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); |
1582 | |
1583 | // zext(zext(x)) --> zext(x) |
1584 | if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) |
1585 | return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); |
1586 | |
1587 | // Before doing any expensive analysis, check to see if we've already |
1588 | // computed a SCEV for this Op and Ty. |
1589 | FoldingSetNodeID ID; |
1590 | ID.AddInteger(scZeroExtend); |
1591 | ID.AddPointer(Op); |
1592 | ID.AddPointer(Ty); |
1593 | void *IP = nullptr; |
1594 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
1595 | if (Depth > MaxCastDepth) { |
1596 | SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), |
1597 | Op, Ty); |
1598 | UniqueSCEVs.InsertNode(S, IP); |
1599 | registerUser(S, Op); |
1600 | return S; |
1601 | } |
1602 | |
1603 | // zext(trunc(x)) --> zext(x) or x or trunc(x) |
1604 | if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { |
1605 | // It's possible the bits taken off by the truncate were all zero bits. If |
1606 | // so, we should be able to simplify this further. |
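// For example, if X:i32 has unsigned range [0, 100], truncating to i8 and
// zero-extending back to i32 still yields [0, 100], so the containment
// check below succeeds and zext(trunc(X)) folds to X.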
1607 | const SCEV *X = ST->getOperand(); |
1608 | ConstantRange CR = getUnsignedRange(X); |
1609 | unsigned TruncBits = getTypeSizeInBits(ST->getType()); |
1610 | unsigned NewBits = getTypeSizeInBits(Ty); |
1611 | if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( |
1612 | CR.zextOrTrunc(NewBits))) |
1613 | return getTruncateOrZeroExtend(X, Ty, Depth); |
1614 | } |
1615 | |
1616 | // If the input value is a chrec scev, and we can prove that the value |
1617 | // did not overflow the old, smaller, value, we can zero extend all of the |
1618 | // operands (often constants). This allows analysis of something like |
1619 | // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } |
1620 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) |
1621 | if (AR->isAffine()) { |
1622 | const SCEV *Start = AR->getStart(); |
1623 | const SCEV *Step = AR->getStepRecurrence(*this); |
1624 | unsigned BitWidth = getTypeSizeInBits(AR->getType()); |
1625 | const Loop *L = AR->getLoop(); |
1626 | |
1627 | // If we have special knowledge that this addrec won't overflow, |
1628 | // we don't need to do any further analysis. |
1629 | if (AR->hasNoUnsignedWrap()) { |
1630 | Start = |
1631 | getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1); |
1632 | Step = getZeroExtendExpr(Step, Ty, Depth + 1); |
1633 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
1634 | } |
1635 | |
1636 | // Check whether the backedge-taken count is SCEVCouldNotCompute. |
1637 | // Note that this serves two purposes: It filters out loops that are |
1638 | // simply not analyzable, and it covers the case where this code is |
1639 | // being called from within backedge-taken count analysis, such that |
1640 | // attempting to ask for the backedge-taken count would likely result |
1641 | // in infinite recursion. In the latter case, the analysis code will
1642 | // cope with a conservative value, and it will take care to purge |
1643 | // that value once it has finished. |
1644 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); |
1645 | if (!isa<SCEVCouldNotCompute>(MaxBECount)) { |
1646 | // Manually compute the final value for AR, checking for overflow. |
1647 | |
1648 | // Check whether the backedge-taken count can be losslessly cast to
1649 | // the addrec's type. The count is always unsigned. |
1650 | const SCEV *CastedMaxBECount = |
1651 | getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); |
1652 | const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( |
1653 | CastedMaxBECount, MaxBECount->getType(), Depth); |
1654 | if (MaxBECount == RecastedMaxBECount) { |
1655 | Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); |
1656 | // Check whether Start+Step*MaxBECount has no unsigned overflow. |
1657 | const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, |
1658 | SCEV::FlagAnyWrap, Depth + 1); |
1659 | const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, |
1660 | SCEV::FlagAnyWrap, |
1661 | Depth + 1), |
1662 | WideTy, Depth + 1); |
1663 | const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); |
1664 | const SCEV *WideMaxBECount = |
1665 | getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); |
1666 | const SCEV *OperandExtendedAdd = |
1667 | getAddExpr(WideStart, |
1668 | getMulExpr(WideMaxBECount, |
1669 | getZeroExtendExpr(Step, WideTy, Depth + 1), |
1670 | SCEV::FlagAnyWrap, Depth + 1), |
1671 | SCEV::FlagAnyWrap, Depth + 1); |
1672 | if (ZAdd == OperandExtendedAdd) { |
1673 | // Cache knowledge of AR NUW, which is propagated to this AddRec. |
1674 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); |
1675 | // Return the expression with the addrec on the outside. |
1676 | Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, |
1677 | Depth + 1); |
1678 | Step = getZeroExtendExpr(Step, Ty, Depth + 1); |
1679 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
1680 | } |
1681 | // Similar to above, only this time treat the step value as signed. |
1682 | // This covers loops that count down. |
1683 | OperandExtendedAdd = |
1684 | getAddExpr(WideStart, |
1685 | getMulExpr(WideMaxBECount, |
1686 | getSignExtendExpr(Step, WideTy, Depth + 1), |
1687 | SCEV::FlagAnyWrap, Depth + 1), |
1688 | SCEV::FlagAnyWrap, Depth + 1); |
1689 | if (ZAdd == OperandExtendedAdd) { |
1690 | // Cache knowledge of AR NW, which is propagated to this AddRec. |
1691 | // Negative step causes unsigned wrap, but it still can't self-wrap. |
1692 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); |
1693 | // Return the expression with the addrec on the outside. |
1694 | Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, |
1695 | Depth + 1); |
1696 | Step = getSignExtendExpr(Step, Ty, Depth + 1); |
1697 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
1698 | } |
1699 | } |
1700 | } |
1701 | |
1702 | // Normally, in the cases we can prove no-overflow via a |
1703 | // backedge guarding condition, we can also compute a backedge |
1704 | // taken count for the loop. The exceptions are assumptions and |
1705 | // guards present in the loop -- SCEV is not great at exploiting |
1706 | // these to compute max backedge taken counts, but can still use |
1707 | // these to prove lack of overflow. Use this fact to avoid |
1708 | // doing extra work that may not pay off. |
1709 | if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || |
1710 | !AC.assumptions().empty()) { |
1711 | |
1712 | auto NewFlags = proveNoUnsignedWrapViaInduction(AR); |
1713 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); |
1714 | if (AR->hasNoUnsignedWrap()) { |
1715 | // Same as the nuw case above - duplicated here to avoid a compile time
1716 | // issue. It's not clear that the order of checks matters, but it's one
1717 | // of two possible causes of a change which was reverted. Be
1718 | // conservative for the moment.
1719 | Start = |
1720 | getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1); |
1721 | Step = getZeroExtendExpr(Step, Ty, Depth + 1); |
1722 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
1723 | } |
1724 | |
1725 | // For a negative step, we can extend the operands iff doing so only |
1726 | // traverses values in the range zext([0,UINT_MAX]). |
1727 | if (isKnownNegative(Step)) { |
1728 | const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - |
1729 | getSignedRangeMin(Step)); |
1730 | if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || |
1731 | isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { |
1732 | // Cache knowledge of AR NW, which is propagated to this |
1733 | // AddRec. Negative step causes unsigned wrap, but it |
1734 | // still can't self-wrap. |
1735 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); |
1736 | // Return the expression with the addrec on the outside. |
1737 | Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, |
1738 | Depth + 1); |
1739 | Step = getSignExtendExpr(Step, Ty, Depth + 1); |
1740 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
1741 | } |
1742 | } |
1743 | } |
1744 | |
1745 | // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw> |
1746 | // if D + (C - D + Step * n) could be proven to not unsigned wrap |
1747 | // where D maximizes the number of trailing zeros of (C - D + Step * n) |
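// (E.g., with the bit-width-8 numbers worked above for
// extractConstantWithoutWrapping: zext({7,+,4}) becomes
// (zext(3) + zext({4,+,4}))<nuw><nsw>.)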
1748 | if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { |
1749 | const APInt &C = SC->getAPInt(); |
1750 | const APInt &D = extractConstantWithoutWrapping(*this, C, Step); |
1751 | if (D != 0) { |
1752 | const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); |
1753 | const SCEV *SResidual = |
1754 | getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); |
1755 | const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); |
1756 | return getAddExpr(SZExtD, SZExtR, |
1757 | (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), |
1758 | Depth + 1); |
1759 | } |
1760 | } |
1761 | |
1762 | if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { |
1763 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); |
1764 | Start = |
1765 | getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1); |
1766 | Step = getZeroExtendExpr(Step, Ty, Depth + 1); |
1767 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
1768 | } |
1769 | } |
1770 | |
1771 | // zext(A % B) --> zext(A) % zext(B) |
1772 | { |
1773 | const SCEV *LHS; |
1774 | const SCEV *RHS; |
1775 | if (matchURem(Op, LHS, RHS)) |
1776 | return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), |
1777 | getZeroExtendExpr(RHS, Ty, Depth + 1)); |
1778 | } |
1779 | |
1780 | // zext(A / B) --> zext(A) / zext(B). |
1781 | if (auto *Div = dyn_cast<SCEVUDivExpr>(Op)) |
1782 | return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), |
1783 | getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); |
1784 | |
1785 | if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { |
1786 | // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> |
1787 | if (SA->hasNoUnsignedWrap()) { |
1788 | // If the addition does not overflow in the unsigned sense then we can,
1789 | // by definition, commute the zero extension with the addition operation.
1790 | SmallVector<const SCEV *, 4> Ops; |
1791 | for (const auto *Op : SA->operands()) |
1792 | Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); |
1793 | return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); |
1794 | } |
1795 | |
1796 | // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) |
1797 | // if D + (C - D + x + y + ...) could be proven to not unsigned wrap |
1798 | // where D maximizes the number of trailing zeros of (C - D + x + y + ...) |
1799 | // |
1800 | // Address arithmetic often contains expressions like
1801 | // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). |
1802 | // This transformation is useful when proving that such expressions are
1803 | // equal or differ by a small constant amount; see the LoadStoreVectorizer pass.
1804 | if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { |
1805 | const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); |
1806 | if (D != 0) { |
1807 | const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); |
1808 | const SCEV *SResidual = |
1809 | getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); |
1810 | const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); |
1811 | return getAddExpr(SZExtD, SZExtR, |
1812 | (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), |
1813 | Depth + 1); |
1814 | } |
1815 | } |
1816 | } |
1817 | |
1818 | if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { |
1819 | // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> |
1820 | if (SM->hasNoUnsignedWrap()) { |
1821 | // If the multiply does not overflow in the unsigned sense then we can,
1822 | // by definition, commute the zero extension with the multiply operation.
1823 | SmallVector<const SCEV *, 4> Ops; |
1824 | for (const auto *Op : SM->operands()) |
1825 | Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); |
1826 | return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); |
1827 | } |
1828 | |
1829 | // zext(2^K * (trunc X to iN)) to iM -> |
1830 | // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> |
1831 | // |
1832 | // Proof: |
1833 | // |
1834 | // zext(2^K * (trunc X to iN)) to iM |
1835 | // = zext((trunc X to iN) << K) to iM |
1836 | // = zext((trunc X to i{N-K}) << K)<nuw> to iM |
1837 | // (because shl removes the top K bits) |
1838 | // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM |
1839 | // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. |
1840 | // |
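// For example (K = 2, N = 8, M = 32):
//   zext(4 * (trunc X to i8)) to i32
//     --> (4 * (zext(trunc X to i6) to i32))<nuw>,
// since NewTruncBits = 8 - log2(4) = 6 in the code below.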
1841 | if (SM->getNumOperands() == 2) |
1842 | if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) |
1843 | if (MulLHS->getAPInt().isPowerOf2()) |
1844 | if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { |
1845 | int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - |
1846 | MulLHS->getAPInt().logBase2(); |
1847 | Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); |
1848 | return getMulExpr( |
1849 | getZeroExtendExpr(MulLHS, Ty), |
1850 | getZeroExtendExpr( |
1851 | getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), |
1852 | SCEV::FlagNUW, Depth + 1); |
1853 | } |
1854 | } |
1855 | |
1856 | // zext(umin(x, y)) -> umin(zext(x), zext(y)) |
1857 | // zext(umax(x, y)) -> umax(zext(x), zext(y)) |
1858 | if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) { |
1859 | auto *MinMax = cast<SCEVMinMaxExpr>(Op); |
1860 | SmallVector<const SCEV *, 4> Operands; |
1861 | for (auto *Operand : MinMax->operands()) |
1862 | Operands.push_back(getZeroExtendExpr(Operand, Ty)); |
1863 | if (isa<SCEVUMinExpr>(MinMax)) |
1864 | return getUMinExpr(Operands); |
1865 | return getUMaxExpr(Operands); |
1866 | } |
1867 | |
1868 | // zext(umin_seq(x, y)) -> umin_seq(zext(x), zext(y)) |
1869 | if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) { |
1870 | assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!");
1871 | SmallVector<const SCEV *, 4> Operands; |
1872 | for (auto *Operand : MinMax->operands()) |
1873 | Operands.push_back(getZeroExtendExpr(Operand, Ty)); |
1874 | return getUMinExpr(Operands, /*Sequential*/ true); |
1875 | } |
1876 | |
1877 | // The cast wasn't folded; create an explicit cast node. |
1878 | // Recompute the insert position, as it may have been invalidated. |
1879 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
1880 | SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), |
1881 | Op, Ty); |
1882 | UniqueSCEVs.InsertNode(S, IP); |
1883 | registerUser(S, Op); |
1884 | return S; |
1885 | } |
1886 | |
1887 | const SCEV * |
1888 | ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { |
1889 | assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1890 | "This is not an extending conversion!");
1891 | assert(isSCEVable(Ty) &&
1892 | "This is not a conversion to a SCEVable type!");
1893 | assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1894 | Ty = getEffectiveSCEVType(Ty); |
1895 | |
1896 | FoldID ID; |
1897 | ID.addInteger(scSignExtend); |
1898 | ID.addPointer(Op); |
1899 | ID.addPointer(Ty); |
1900 | auto Iter = FoldCache.find(ID); |
1901 | if (Iter != FoldCache.end()) |
1902 | return Iter->second; |
1903 | |
1904 | const SCEV *S = getSignExtendExprImpl(Op, Ty, Depth); |
1905 | if (!isa<SCEVSignExtendExpr>(S)) |
1906 | insertFoldCacheEntry(ID, S, FoldCache, FoldCacheUser); |
1907 | return S; |
1908 | } |
1909 | |
1910 | const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty, |
1911 | unsigned Depth) { |
1912 | assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1913 | "This is not an extending conversion!");
1914 | assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!");
1915 | assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1916 | Ty = getEffectiveSCEVType(Ty); |
1917 | |
1918 | // Fold if the operand is constant. |
1919 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) |
1920 | return getConstant( |
1921 | cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); |
1922 | |
1923 | // sext(sext(x)) --> sext(x) |
1924 | if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) |
1925 | return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); |
1926 | |
1927 | // sext(zext(x)) --> zext(x) |
1928 | if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) |
1929 | return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); |
1930 | |
1931 | // Before doing any expensive analysis, check to see if we've already |
1932 | // computed a SCEV for this Op and Ty. |
1933 | FoldingSetNodeID ID; |
1934 | ID.AddInteger(scSignExtend); |
1935 | ID.AddPointer(Op); |
1936 | ID.AddPointer(Ty); |
1937 | void *IP = nullptr; |
1938 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
1939 | // Limit recursion depth. |
1940 | if (Depth > MaxCastDepth) { |
1941 | SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), |
1942 | Op, Ty); |
1943 | UniqueSCEVs.InsertNode(S, IP); |
1944 | registerUser(S, Op); |
1945 | return S; |
1946 | } |
1947 | |
1948 | // sext(trunc(x)) --> sext(x) or x or trunc(x) |
1949 | if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { |
1950 | // It's possible the bits taken off by the truncate were all sign bits. If |
1951 | // so, we should be able to simplify this further. |
1952 | const SCEV *X = ST->getOperand(); |
1953 | ConstantRange CR = getSignedRange(X); |
1954 | unsigned TruncBits = getTypeSizeInBits(ST->getType()); |
1955 | unsigned NewBits = getTypeSizeInBits(Ty); |
1956 | if (CR.truncate(TruncBits).signExtend(NewBits).contains( |
1957 | CR.sextOrTrunc(NewBits))) |
1958 | return getTruncateOrSignExtend(X, Ty, Depth); |
1959 | } |
1960 | |
1961 | if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { |
1962 | // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> |
1963 | if (SA->hasNoSignedWrap()) { |
1964 | // If the addition does not overflow in the signed sense then we can,
1965 | // by definition, commute the sign extension with the addition operation.
1966 | SmallVector<const SCEV *, 4> Ops; |
1967 | for (const auto *Op : SA->operands()) |
1968 | Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); |
1969 | return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); |
1970 | } |
1971 | |
1972 | // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) |
1973 | // if D + (C - D + x + y + ...) could be proven to not signed wrap |
1974 | // where D maximizes the number of trailing zeros of (C - D + x + y + ...) |
1975 | // |
1976 | // For instance, this will bring two seemingly different expressions: |
1977 | // 1 + sext(5 + 20 * %x + 24 * %y) and |
1978 | // sext(6 + 20 * %x + 24 * %y) |
1979 | // to the same form: |
1980 | // 2 + sext(4 + 20 * %x + 24 * %y) |
1981 | if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { |
1982 | const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); |
1983 | if (D != 0) { |
1984 | const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); |
1985 | const SCEV *SResidual = |
1986 | getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); |
1987 | const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); |
1988 | return getAddExpr(SSExtD, SSExtR, |
1989 | (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), |
1990 | Depth + 1); |
1991 | } |
1992 | } |
1993 | } |
1994 | // If the input value is a chrec scev, and we can prove that the value |
1995 | // did not overflow the old, smaller, value, we can sign extend all of the |
1996 | // operands (often constants). This allows analysis of something like |
1997 | // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } |
1998 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) |
1999 | if (AR->isAffine()) { |
2000 | const SCEV *Start = AR->getStart(); |
2001 | const SCEV *Step = AR->getStepRecurrence(*this); |
2002 | unsigned BitWidth = getTypeSizeInBits(AR->getType()); |
2003 | const Loop *L = AR->getLoop(); |
2004 | |
2005 | // If we have special knowledge that this addrec won't overflow, |
2006 | // we don't need to do any further analysis. |
2007 | if (AR->hasNoSignedWrap()) { |
2008 | Start = |
2009 | getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1); |
2010 | Step = getSignExtendExpr(Step, Ty, Depth + 1); |
2011 | return getAddRecExpr(Start, Step, L, SCEV::FlagNSW); |
2012 | } |
2013 | |
2014 | // Check whether the backedge-taken count is SCEVCouldNotCompute. |
2015 | // Note that this serves two purposes: It filters out loops that are |
2016 | // simply not analyzable, and it covers the case where this code is |
2017 | // being called from within backedge-taken count analysis, such that |
2018 | // attempting to ask for the backedge-taken count would likely result |
2019 | // in infinite recursion. In the latter case, the analysis code will
2020 | // cope with a conservative value, and it will take care to purge |
2021 | // that value once it has finished. |
2022 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); |
2023 | if (!isa<SCEVCouldNotCompute>(MaxBECount)) { |
2024 | // Manually compute the final value for AR, checking for |
2025 | // overflow. |
2026 | |
2027 | // Check whether the backedge-taken count can be losslessly cast to
2028 | // the addrec's type. The count is always unsigned. |
2029 | const SCEV *CastedMaxBECount = |
2030 | getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); |
2031 | const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( |
2032 | CastedMaxBECount, MaxBECount->getType(), Depth); |
2033 | if (MaxBECount == RecastedMaxBECount) { |
2034 | Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); |
2035 | // Check whether Start+Step*MaxBECount has no signed overflow. |
2036 | const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, |
2037 | SCEV::FlagAnyWrap, Depth + 1); |
2038 | const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, |
2039 | SCEV::FlagAnyWrap, |
2040 | Depth + 1), |
2041 | WideTy, Depth + 1); |
2042 | const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); |
2043 | const SCEV *WideMaxBECount = |
2044 | getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); |
2045 | const SCEV *OperandExtendedAdd = |
2046 | getAddExpr(WideStart, |
2047 | getMulExpr(WideMaxBECount, |
2048 | getSignExtendExpr(Step, WideTy, Depth + 1), |
2049 | SCEV::FlagAnyWrap, Depth + 1), |
2050 | SCEV::FlagAnyWrap, Depth + 1); |
2051 | if (SAdd == OperandExtendedAdd) { |
2052 | // Cache knowledge of AR NSW, which is propagated to this AddRec. |
2053 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); |
2054 | // Return the expression with the addrec on the outside. |
2055 | Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, |
2056 | Depth + 1); |
2057 | Step = getSignExtendExpr(Step, Ty, Depth + 1); |
2058 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
2059 | } |
2060 | // Similar to above, only this time treat the step value as unsigned. |
2061 | // This covers loops that count up with an unsigned step. |
2062 | OperandExtendedAdd = |
2063 | getAddExpr(WideStart, |
2064 | getMulExpr(WideMaxBECount, |
2065 | getZeroExtendExpr(Step, WideTy, Depth + 1), |
2066 | SCEV::FlagAnyWrap, Depth + 1), |
2067 | SCEV::FlagAnyWrap, Depth + 1); |
2068 | if (SAdd == OperandExtendedAdd) { |
2069 | // If AR wraps around then |
2070 | // |
2071 | // abs(Step) * MaxBECount > unsigned-max(AR->getType()) |
2072 | // => SAdd != OperandExtendedAdd |
2073 | // |
2074 | // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> |
2075 | // (SAdd == OperandExtendedAdd => AR is NW) |
2076 | |
2077 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); |
2078 | |
2079 | // Return the expression with the addrec on the outside. |
2080 | Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, |
2081 | Depth + 1); |
2082 | Step = getZeroExtendExpr(Step, Ty, Depth + 1); |
2083 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
2084 | } |
2085 | } |
2086 | } |
2087 | |
2088 | auto NewFlags = proveNoSignedWrapViaInduction(AR); |
2089 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); |
2090 | if (AR->hasNoSignedWrap()) { |
2091 | // Same as the nsw case above - duplicated here to avoid a compile time
2092 | // issue. It's not clear that the order of checks matters, but it's one
2093 | // of two possible causes of a change which was reverted. Be
2094 | // conservative for the moment.
2095 | Start = |
2096 | getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1); |
2097 | Step = getSignExtendExpr(Step, Ty, Depth + 1); |
2098 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
2099 | } |
2100 | |
2101 | // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> |
2102 | // if D + (C - D + Step * n) could be proven to not signed wrap |
2103 | // where D maximizes the number of trailing zeros of (C - D + Step * n) |
2104 | if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { |
2105 | const APInt &C = SC->getAPInt(); |
2106 | const APInt &D = extractConstantWithoutWrapping(*this, C, Step); |
2107 | if (D != 0) { |
2108 | const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); |
2109 | const SCEV *SResidual = |
2110 | getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); |
2111 | const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); |
2112 | return getAddExpr(SSExtD, SSExtR, |
2113 | (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), |
2114 | Depth + 1); |
2115 | } |
2116 | } |
2117 | |
2118 | if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { |
2119 | setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); |
2120 | Start = |
2121 | getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1); |
2122 | Step = getSignExtendExpr(Step, Ty, Depth + 1); |
2123 | return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); |
2124 | } |
2125 | } |
2126 | |
2127 | // If the input value is provably positive and we could not simplify |
2128 | // away the sext, build a zext instead.
2129 | if (isKnownNonNegative(Op)) |
2130 | return getZeroExtendExpr(Op, Ty, Depth + 1); |
2131 | |
2132 | // sext(smin(x, y)) -> smin(sext(x), sext(y)) |
2133 | // sext(smax(x, y)) -> smax(sext(x), sext(y)) |
2134 | if (isa<SCEVSMinExpr>(Op) || isa<SCEVSMaxExpr>(Op)) { |
2135 | auto *MinMax = cast<SCEVMinMaxExpr>(Op); |
2136 | SmallVector<const SCEV *, 4> Operands; |
2137 | for (auto *Operand : MinMax->operands()) |
2138 | Operands.push_back(getSignExtendExpr(Operand, Ty)); |
2139 | if (isa<SCEVSMinExpr>(MinMax)) |
2140 | return getSMinExpr(Operands); |
2141 | return getSMaxExpr(Operands); |
2142 | } |
2143 | |
2144 | // The cast wasn't folded; create an explicit cast node. |
2145 | // Recompute the insert position, as it may have been invalidated. |
2146 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
2147 | SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), |
2148 | Op, Ty); |
2149 | UniqueSCEVs.InsertNode(S, IP); |
2150 | registerUser(S, { Op }); |
2151 | return S; |
2152 | } |
2153 | |
2154 | const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op, |
2155 | Type *Ty) { |
2156 | switch (Kind) { |
2157 | case scTruncate: |
2158 | return getTruncateExpr(Op, Ty); |
2159 | case scZeroExtend: |
2160 | return getZeroExtendExpr(Op, Ty); |
2161 | case scSignExtend: |
2162 | return getSignExtendExpr(Op, Ty); |
2163 | case scPtrToInt: |
2164 | return getPtrToIntExpr(Op, Ty); |
2165 | default: |
2166 | llvm_unreachable("Not a SCEV cast expression!");
2167 | } |
2168 | } |
2169 | |
2170 | /// getAnyExtendExpr - Return a SCEV for the given operand extended with |
2171 | /// unspecified bits out to the given type. |
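/// For example, any-extending the i8 constant -1 to i32 takes the
/// sign-extension path below (negative constants keep their value), while
/// an operand whose zext already folds simply takes that folded form.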
2172 | const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, |
2173 | Type *Ty) { |
2174 | assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2175 | "This is not an extending conversion!");
2176 | assert(isSCEVable(Ty) &&
2177 | "This is not a conversion to a SCEVable type!");
2178 | Ty = getEffectiveSCEVType(Ty); |
2179 | |
2180 | // Sign-extend negative constants. |
2181 | if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) |
2182 | if (SC->getAPInt().isNegative()) |
2183 | return getSignExtendExpr(Op, Ty); |
2184 | |
2185 | // Peel off a truncate cast. |
2186 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { |
2187 | const SCEV *NewOp = T->getOperand(); |
2188 | if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) |
2189 | return getAnyExtendExpr(NewOp, Ty); |
2190 | return getTruncateOrNoop(NewOp, Ty); |
2191 | } |
2192 | |
2193 | // Next try a zext cast. If the cast is folded, use it. |
2194 | const SCEV *ZExt = getZeroExtendExpr(Op, Ty); |
2195 | if (!isa<SCEVZeroExtendExpr>(ZExt)) |
2196 | return ZExt; |
2197 | |
2198 | // Next try a sext cast. If the cast is folded, use it. |
2199 | const SCEV *SExt = getSignExtendExpr(Op, Ty); |
2200 | if (!isa<SCEVSignExtendExpr>(SExt)) |
2201 | return SExt; |
2202 | |
2203 | // Force the cast to be folded into the operands of an addrec. |
2204 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { |
2205 | SmallVector<const SCEV *, 4> Ops; |
2206 | for (const SCEV *Op : AR->operands()) |
2207 | Ops.push_back(getAnyExtendExpr(Op, Ty)); |
2208 | return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); |
2209 | } |
2210 | |
2211 | // If the expression is obviously signed, use the sext cast value. |
2212 | if (isa<SCEVSMaxExpr>(Op)) |
2213 | return SExt; |
2214 | |
2215 | // Absent any other information, use the zext cast value. |
2216 | return ZExt; |
2217 | } |
2218 | |
2219 | /// Process the given Ops list, which is a list of operands to be added under |
2220 | /// the given scale, and update the given map. This is a helper function for
2221 | /// getAddExpr. As an example of what it does, given a sequence of operands
2222 | /// that would form an add expression like this: |
2223 | /// |
2224 | /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) |
2225 | /// |
2226 | /// where A and B are constants, update the map with these values: |
2227 | /// |
2228 | /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) |
2229 | /// |
2230 | /// and add 13 + A*B*29 to AccumulatedConstant. |
2231 | /// This will allow getAddExpr to produce this:
2232 | /// |
2233 | /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) |
2234 | /// |
2235 | /// This form often exposes folding opportunities that are hidden in |
2236 | /// the original operand list. |
2237 | /// |
2238 | /// Return true iff it appears that any interesting folding opportunities |
2239 | /// may be exposed. This helps getAddExpr short-circuit extra work in
2240 | /// the common case where no interesting opportunities are present, and |
2241 | /// is also used as a check to avoid infinite recursion. |
2242 | static bool |
2243 | CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, |
2244 | SmallVectorImpl<const SCEV *> &NewOps, |
2245 | APInt &AccumulatedConstant, |
2246 | ArrayRef<const SCEV *> Ops, const APInt &Scale, |
2247 | ScalarEvolution &SE) { |
2248 | bool Interesting = false; |
2249 | |
2250 | // Iterate over the add operands. They are sorted, with constants first. |
2251 | unsigned i = 0; |
2252 | while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { |
2253 | ++i; |
2254 | // Pull a buried constant out to the outside. |
2255 | if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) |
2256 | Interesting = true; |
2257 | AccumulatedConstant += Scale * C->getAPInt(); |
2258 | } |
2259 | |
2260 | // Next comes everything else. We're especially interested in multiplies |
2261 | // here, but they're in the middle, so just visit the rest with one loop. |
2262 | for (; i != Ops.size(); ++i) { |
2263 | const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); |
2264 | if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { |
2265 | APInt NewScale = |
2266 | Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); |
2267 | if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { |
2268 | // A multiplication of a constant with another add; recurse. |
2269 | const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); |
2270 | Interesting |= |
2271 | CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, |
2272 | Add->operands(), NewScale, SE); |
2273 | } else { |
2274 | // A multiplication of a constant with some other value. Update |
2275 | // the map. |
2276 | SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); |
2277 | const SCEV *Key = SE.getMulExpr(MulOps); |
2278 | auto Pair = M.insert({Key, NewScale}); |
2279 | if (Pair.second) { |
2280 | NewOps.push_back(Pair.first->first); |
2281 | } else { |
2282 | Pair.first->second += NewScale; |
2283 | // The map already had an entry for this value, which may indicate |
2284 | // a folding opportunity. |
2285 | Interesting = true; |
2286 | } |
2287 | } |
2288 | } else { |
2289 | // An ordinary operand. Update the map. |
2290 | std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = |
2291 | M.insert({Ops[i], Scale}); |
2292 | if (Pair.second) { |
2293 | NewOps.push_back(Pair.first->first); |
2294 | } else { |
2295 | Pair.first->second += Scale; |
2296 | // The map already had an entry for this value, which may indicate |
2297 | // a folding opportunity. |
2298 | Interesting = true; |
2299 | } |
2300 | } |
2301 | } |
2302 | |
2303 | return Interesting; |
2304 | } |
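// Editor's sketch (not part of the original source; hypothetical names): the
// coefficient gathering above, reduced to plain integers. A scale distributes
// through a nested product, and repeated terms merge into one map entry:
//
//   #include <map>
//   #include <vector>
//
//   // Add Scale * (sum of Terms) into Coeffs, merging repeated terms.
//   void collect(std::map<char, long> &Coeffs, long Scale,
//                const std::vector<char> &Terms) {
//     for (char T : Terms)
//       Coeffs[T] += Scale; // a repeated term folds into the existing entry
//   }
//
// For m + n + 2*(m + q): collect(C, 1, {'m','n'}) followed by
// collect(C, 2, {'m','q'}) leaves C = {(m,3), (n,1), (q,2)}, the same shape of
// result the function above produces (minus the APInt arithmetic and the
// "Interesting" bookkeeping).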
2305 | |
2306 | bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, |
2307 | const SCEV *LHS, const SCEV *RHS, |
2308 | const Instruction *CtxI) { |
2309 | const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, |
2310 | SCEV::NoWrapFlags, unsigned); |
2311 | switch (BinOp) { |
2312 | default: |
2313 | llvm_unreachable("Unsupported binary op");
2314 | case Instruction::Add: |
2315 | Operation = &ScalarEvolution::getAddExpr; |
2316 | break; |
2317 | case Instruction::Sub: |
2318 | Operation = &ScalarEvolution::getMinusSCEV; |
2319 | break; |
2320 | case Instruction::Mul: |
2321 | Operation = &ScalarEvolution::getMulExpr; |
2322 | break; |
2323 | } |
2324 | |
2325 | const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = |
2326 | Signed ? &ScalarEvolution::getSignExtendExpr |
2327 | : &ScalarEvolution::getZeroExtendExpr; |
2328 | |
2329 | // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) |
2330 | auto *NarrowTy = cast<IntegerType>(LHS->getType()); |
2331 | auto *WideTy = |
2332 | IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); |
2333 | |
2334 | const SCEV *A = (this->*Extension)( |
2335 | (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); |
2336 | const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0); |
2337 | const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0); |
2338 | const SCEV *B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0); |
2339 | if (A == B) |
2340 | return true; |
2341 | // Can we use context to prove the fact we need? |
2342 | if (!CtxI) |
2343 | return false; |
2344 | // TODO: Support mul. |
2345 | if (BinOp == Instruction::Mul) |
2346 | return false; |
2347 | auto *RHSC = dyn_cast<SCEVConstant>(RHS); |
2348 | // TODO: Lift this limitation. |
2349 | if (!RHSC) |
2350 | return false; |
2351 | APInt C = RHSC->getAPInt(); |
2352 | unsigned NumBits = C.getBitWidth(); |
2353 | bool IsSub = (BinOp == Instruction::Sub); |
2354 | bool IsNegativeConst = (Signed && C.isNegative()); |
2355 | // Compute the direction and magnitude by which we need to check overflow. |
2356 | bool OverflowDown = IsSub ^ IsNegativeConst; |
2357 | APInt Magnitude = C; |
2358 | if (IsNegativeConst) { |
2359 | if (C == APInt::getSignedMinValue(NumBits)) |
2360 | // TODO: Negating SINT_MIN yields the same negative value, so we don't
2361 | // want to deal with that.
2362 | return false; |
2363 | Magnitude = -C; |
2364 | } |
2365 | |
2366 | ICmpInst::Predicate Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; |
2367 | if (OverflowDown) { |
2368 | // To avoid overflow down, we need to make sure that MIN + Magnitude <= LHS. |
2369 | APInt Min = Signed ? APInt::getSignedMinValue(NumBits) |
2370 | : APInt::getMinValue(NumBits); |
2371 | APInt Limit = Min + Magnitude; |
2372 | return isKnownPredicateAt(Pred, getConstant(Limit), LHS, CtxI); |
2373 | } else { |
2374 | // To avoid overflow up, we need to make sure that LHS <= MAX - Magnitude. |
2375 | APInt Max = Signed ? APInt::getSignedMaxValue(NumBits) |
2376 | : APInt::getMaxValue(NumBits); |
2377 | APInt Limit = Max - Magnitude; |
2378 | return isKnownPredicateAt(Pred, LHS, getConstant(Limit), CtxI); |
2379 | } |
2380 | } |
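// Editor's example (assumed i8 values, not from the source): for an unsigned
// add `LHS + 200`, the constant path above computes
//   Max = 255, Magnitude = 200, Limit = 255 - 200 = 55
// and the add is proven non-wrapping exactly when LHS <= 55 can be shown at
// CtxI. A minimal sketch of the same test on plain integers:
//
//   bool u8AddWontWrap(uint8_t LHS, uint8_t C) {
//     return LHS <= (uint8_t)(255 - C); // LHS <= Max - Magnitude
//   }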
2381 | |
2382 | std::optional<SCEV::NoWrapFlags> |
2383 | ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp( |
2384 | const OverflowingBinaryOperator *OBO) { |
2385 | // Nothing to strengthen: both flags are already set.
2386 | if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap()) |
2387 | return std::nullopt; |
2388 | |
2389 | SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap; |
2390 | |
2391 | if (OBO->hasNoUnsignedWrap()) |
2392 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); |
2393 | if (OBO->hasNoSignedWrap()) |
2394 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); |
2395 | |
2396 | bool Deduced = false; |
2397 | |
2398 | if (OBO->getOpcode() != Instruction::Add && |
2399 | OBO->getOpcode() != Instruction::Sub && |
2400 | OBO->getOpcode() != Instruction::Mul) |
2401 | return std::nullopt; |
2402 | |
2403 | const SCEV *LHS = getSCEV(OBO->getOperand(0)); |
2404 | const SCEV *RHS = getSCEV(OBO->getOperand(1)); |
2405 | |
2406 | const Instruction *CtxI = |
2407 | UseContextForNoWrapFlagInference ? dyn_cast<Instruction>(OBO) : nullptr; |
2408 | if (!OBO->hasNoUnsignedWrap() && |
2409 | willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), |
2410 | /* Signed */ false, LHS, RHS, CtxI)) { |
2411 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); |
2412 | Deduced = true; |
2413 | } |
2414 | |
2415 | if (!OBO->hasNoSignedWrap() && |
2416 | willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), |
2417 | /* Signed */ true, LHS, RHS, CtxI)) { |
2418 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); |
2419 | Deduced = true; |
2420 | } |
2421 | |
2422 | if (Deduced) |
2423 | return Flags; |
2424 | return std::nullopt; |
2425 | } |
2426 | |
2427 | // We're trying to construct a SCEV of type `Type' with `Ops' as operands and |
2428 | // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2429 | // can't-overflow flags for the operation if possible. |
2430 | static SCEV::NoWrapFlags |
2431 | StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, |
2432 | const ArrayRef<const SCEV *> Ops, |
2433 | SCEV::NoWrapFlags Flags) { |
2434 | using namespace std::placeholders; |
2435 | |
2436 | using OBO = OverflowingBinaryOperator; |
2437 | |
2438 | bool CanAnalyze = |
2439 | Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; |
2440 | (void)CanAnalyze; |
2441 | assert(CanAnalyze && "don't call from other places!");
2442 | |
2443 | int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; |
2444 | SCEV::NoWrapFlags SignOrUnsignWrap = |
2445 | ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); |
2446 | |
2447 | // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. |
2448 | auto IsKnownNonNegative = [&](const SCEV *S) { |
2449 | return SE->isKnownNonNegative(S); |
2450 | }; |
2451 | |
2452 | if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) |
2453 | Flags = |
2454 | ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); |
2455 | |
2456 | SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); |
2457 | |
2458 | if (SignOrUnsignWrap != SignOrUnsignMask && |
2459 | (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && |
2460 | isa<SCEVConstant>(Ops[0])) { |
2461 | |
2462 | auto Opcode = [&] { |
2463 | switch (Type) { |
2464 | case scAddExpr: |
2465 | return Instruction::Add; |
2466 | case scMulExpr: |
2467 | return Instruction::Mul; |
2468 | default: |
2469 | llvm_unreachable("Unexpected SCEV op.");
2470 | } |
2471 | }(); |
2472 | |
2473 | const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); |
2474 | |
2475 | // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. |
2476 | if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { |
2477 | auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( |
2478 | Opcode, C, OBO::NoSignedWrap); |
2479 | if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) |
2480 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); |
2481 | } |
2482 | |
2483 | // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't overflow in the unsigned sense.
2484 | if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { |
2485 | auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( |
2486 | Opcode, C, OBO::NoUnsignedWrap); |
2487 | if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) |
2488 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); |
2489 | } |
2490 | } |
2491 | |
2492 | // <0,+,nonnegative><nw> is also nuw |
2493 | // TODO: Add corresponding nsw case |
2494 | if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) && |
2495 | !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 && |
2496 | Ops[0]->isZero() && IsKnownNonNegative(Ops[1])) |
2497 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); |
2498 | |
2499 | // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW |
2500 | if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && |
2501 | Ops.size() == 2) { |
2502 | if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0])) |
2503 | if (UDiv->getOperand(1) == Ops[1]) |
2504 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); |
2505 | if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1])) |
2506 | if (UDiv->getOperand(1) == Ops[0]) |
2507 | Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); |
2508 | } |
2509 | |
2510 | return Flags; |
2511 | } |
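// Editor's example (assumed i8 range, not from the source): for an add with
// C = 1, makeGuaranteedNoWrapRegion(Add, 1, NoSignedWrap) is the set of x for
// which x + 1 is free of signed wrap, i.e. every i8 value except 127; if
// getSignedRange(Ops[1]) fits inside it, NSW is safe to set. The udiv rule at
// the end rests on (X udiv Y) * Y <= X always holding in the unsigned domain,
// so the product can never wrap.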
2512 | |
2513 | bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { |
2514 | return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); |
2515 | } |
2516 | |
2517 | /// Get a canonical add expression, or something simpler if possible. |
2518 | const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, |
2519 | SCEV::NoWrapFlags OrigFlags, |
2520 | unsigned Depth) { |
2521 | assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2522 | "only nuw or nsw allowed");
2523 | assert(!Ops.empty() && "Cannot get empty add!");
2524 | if (Ops.size() == 1) return Ops[0]; |
2525 | #ifndef NDEBUG |
2526 | Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); |
2527 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) |
2528 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2529 | "SCEVAddExpr operand types don't match!");
2530 | unsigned NumPtrs = count_if( |
2531 | Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); }); |
2532 | assert(NumPtrs <= 1 && "add has at most one pointer operand");
2533 | #endif |
2534 | |
2535 | // Sort by complexity, this groups all similar expression types together. |
2536 | GroupByComplexity(Ops, &LI, DT); |
2537 | |
2538 | // If there are any constants, fold them together. |
2539 | unsigned Idx = 0; |
2540 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { |
2541 | ++Idx; |
2542 | assert(Idx < Ops.size());
2543 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { |
2544 | // We found two constants, fold them together! |
2545 | Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); |
2546 | if (Ops.size() == 2) return Ops[0]; |
2547 | Ops.erase(Ops.begin()+1); // Erase the folded element |
2548 | LHSC = cast<SCEVConstant>(Ops[0]); |
2549 | } |
2550 | |
2551 | // If we are left with a constant zero being added, strip it off. |
2552 | if (LHSC->getValue()->isZero()) { |
2553 | Ops.erase(Ops.begin()); |
2554 | --Idx; |
2555 | } |
2556 | |
2557 | if (Ops.size() == 1) return Ops[0]; |
2558 | } |
2559 | |
2560 | // Delay expensive flag strengthening until necessary. |
2561 | auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { |
2562 | return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags); |
2563 | }; |
2564 | |
2565 | // Limit recursion calls depth. |
2566 | if (Depth > MaxArithDepth || hasHugeExpression(Ops)) |
2567 | return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); |
2568 | |
2569 | if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) { |
2570 | // Don't strengthen flags if we have no new information. |
2571 | SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S); |
2572 | if (Add->getNoWrapFlags(OrigFlags) != OrigFlags) |
2573 | Add->setNoWrapFlags(ComputeFlags(Ops)); |
2574 | return S; |
2575 | } |
2576 | |
2577 | // Okay, check to see if the same value occurs in the operand list more than |
2578 | // once. If so, merge them together into a multiply expression. Since we
2579 | // sorted the list, these values are required to be adjacent. |
2580 | Type *Ty = Ops[0]->getType(); |
2581 | bool FoundMatch = false; |
2582 | for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) |
2583 | if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 |
2584 | // Scan ahead to count how many equal operands there are. |
2585 | unsigned Count = 2; |
2586 | while (i+Count != e && Ops[i+Count] == Ops[i]) |
2587 | ++Count; |
2588 | // Merge the values into a multiply. |
2589 | const SCEV *Scale = getConstant(Ty, Count); |
2590 | const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); |
2591 | if (Ops.size() == Count) |
2592 | return Mul; |
2593 | Ops[i] = Mul; |
2594 | Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); |
2595 | --i; e -= Count - 1; |
2596 | FoundMatch = true; |
2597 | } |
2598 | if (FoundMatch) |
2599 | return getAddExpr(Ops, OrigFlags, Depth + 1); |
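// Editor's example: with sorted operands {A, A, A, B}, the scan above finds
// Count == 3 at i == 0, rewrites the list to {A*3, B}, and recurses, so
// A + A + A + B re-enters getAddExpr as (3*A) + B.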
2600 | |
2601 | // Check for truncates. If all the operands are truncated from the same |
2602 | // type, see if factoring out the truncate would permit the result to be |
2603 | // folded. E.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2604 | // if the contents of the resulting outer trunc fold to something simple. |
2605 | auto FindTruncSrcType = [&]() -> Type * { |
2606 | // We're ultimately looking to fold an addrec of truncs and muls of only |
2607 | // constants and truncs, so if we find any other types of SCEV |
2608 | // as operands of the addrec then we bail and return nullptr here. |
2609 | // Otherwise, we return the type of the operand of a trunc that we find. |
2610 | if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) |
2611 | return T->getOperand()->getType(); |
2612 | if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { |
2613 | const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); |
2614 | if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) |
2615 | return T->getOperand()->getType(); |
2616 | } |
2617 | return nullptr; |
2618 | }; |
2619 | if (auto *SrcType = FindTruncSrcType()) { |
2620 | SmallVector<const SCEV *, 8> LargeOps; |
2621 | bool Ok = true; |
2622 | // Check all the operands to see if they can be represented in the |
2623 | // source type of the truncate. |
2624 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) { |
2625 | if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { |
2626 | if (T->getOperand()->getType() != SrcType) { |
2627 | Ok = false; |
2628 | break; |
2629 | } |
2630 | LargeOps.push_back(T->getOperand()); |
2631 | } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { |
2632 | LargeOps.push_back(getAnyExtendExpr(C, SrcType)); |
2633 | } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { |
2634 | SmallVector<const SCEV *, 8> LargeMulOps; |
2635 | for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { |
2636 | if (const SCEVTruncateExpr *T = |
2637 | dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { |
2638 | if (T->getOperand()->getType() != SrcType) { |
2639 | Ok = false; |
2640 | break; |
2641 | } |
2642 | LargeMulOps.push_back(T->getOperand()); |
2643 | } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { |
2644 | LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); |
2645 | } else { |
2646 | Ok = false; |
2647 | break; |
2648 | } |
2649 | } |
2650 | if (Ok) |
2651 | LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); |
2652 | } else { |
2653 | Ok = false; |
2654 | break; |
2655 | } |
2656 | } |
2657 | if (Ok) { |
2658 | // Evaluate the expression in the larger type. |
2659 | const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); |
2660 | // If it folds to something simple, use it. Otherwise, don't. |
2661 | if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) |
2662 | return getTruncateExpr(Fold, Ty); |
2663 | } |
2664 | } |
2665 | |
2666 | if (Ops.size() == 2) { |
2667 | // Check if we have an expression of the form ((X + C1) - C2), where C1 and |
2668 | // C2 can be folded in a way that allows retaining wrapping flags of (X + |
2669 | // C1). |
2670 | const SCEV *A = Ops[0]; |
2671 | const SCEV *B = Ops[1]; |
2672 | auto *AddExpr = dyn_cast<SCEVAddExpr>(B); |
2673 | auto *C = dyn_cast<SCEVConstant>(A); |
2674 | if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) { |
2675 | auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt(); |
2676 | auto C2 = C->getAPInt(); |
2677 | SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap; |
2678 | |
2679 | APInt ConstAdd = C1 + C2; |
2680 | auto AddFlags = AddExpr->getNoWrapFlags(); |
2681 | // Adding a smaller constant is NUW if the original AddExpr was NUW. |
2682 | if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) && |
2683 | ConstAdd.ule(C1)) { |
2684 | PreservedFlags = |
2685 | ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW); |
2686 | } |
2687 | |
2688 | // Adding a constant with the same sign and no greater magnitude is NSW, if
2689 | // the original AddExpr was NSW.
2690 | if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) && |
2691 | C1.isSignBitSet() == ConstAdd.isSignBitSet() && |
2692 | ConstAdd.abs().ule(C1.abs())) { |
2693 | PreservedFlags = |
2694 | ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW); |
2695 | } |
2696 | |
2697 | if (PreservedFlags != SCEV::FlagAnyWrap) { |
2698 | SmallVector<const SCEV *, 4> NewOps(AddExpr->operands()); |
2699 | NewOps[0] = getConstant(ConstAdd); |
2700 | return getAddExpr(NewOps, PreservedFlags); |
2701 | } |
2702 | } |
2703 | } |
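// Editor's example (assumed constants): for ((5 + X)<nuw>) + (-3), ConstAdd
// is 5 + (-3) = 2 and 2 ule 5, so the sum is rebuilt as (2 + X)<nuw>. With -7
// instead, ConstAdd is -2 (a huge value when read unsigned), the ule test
// fails, and no flags carry over.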
2704 | |
2705 | // Canonicalize (-1 * (X urem Y)) + X --> Y * (X udiv Y)
2706 | if (Ops.size() == 2) { |
2707 | const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]); |
2708 | if (Mul && Mul->getNumOperands() == 2 && |
2709 | Mul->getOperand(0)->isAllOnesValue()) { |
2710 | const SCEV *X; |
2711 | const SCEV *Y; |
2712 | if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) { |
2713 | return getMulExpr(Y, getUDivExpr(X, Y)); |
2714 | } |
2715 | } |
2716 | } |
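// Editor's example: X - (X urem Y) reaches this point as
// (-1 * (X urem Y)) + X and folds to Y * (X udiv Y), the largest multiple of
// Y not exceeding X; e.g. 13 - (13 urem 4) == 4 * (13 udiv 4) == 12.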
2717 | |
2718 | // Skip past any other cast SCEVs. |
2719 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) |
2720 | ++Idx; |
2721 | |
2722 | // If there are add operands they would be next. |
2723 | if (Idx < Ops.size()) { |
2724 | bool DeletedAdd = false; |
2725 | // If the original flags and all inlined SCEVAddExprs are NUW, use the |
2726 | // common NUW flag for expression after inlining. Other flags cannot be |
2727 | // preserved, because they may depend on the original order of operations. |
2728 | SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW); |
2729 | while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { |
2730 | if (Ops.size() > AddOpsInlineThreshold || |
2731 | Add->getNumOperands() > AddOpsInlineThreshold) |
2732 | break; |
2733 | // If we have an add, expand the add operands onto the end of the operands |
2734 | // list. |
2735 | Ops.erase(Ops.begin()+Idx); |
2736 | append_range(Ops, Add->operands()); |
2737 | DeletedAdd = true; |
2738 | CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags()); |
2739 | } |
2740 | |
2741 | // If we deleted at least one add, we added operands to the end of the list, |
2742 | // and they are not necessarily sorted. Recurse to resort and resimplify |
2743 | // any operands we just acquired. |
2744 | if (DeletedAdd) |
2745 | return getAddExpr(Ops, CommonFlags, Depth + 1); |
2746 | } |
2747 | |
2748 | // Skip over the add expression until we get to a multiply. |
2749 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) |
2750 | ++Idx; |
2751 | |
2752 | // Check to see if there are any folding opportunities present with |
2753 | // operands multiplied by constant values. |
2754 | if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { |
2755 | uint64_t BitWidth = getTypeSizeInBits(Ty); |
2756 | DenseMap<const SCEV *, APInt> M; |
2757 | SmallVector<const SCEV *, 8> NewOps; |
2758 | APInt AccumulatedConstant(BitWidth, 0); |
2759 | if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, |
2760 | Ops, APInt(BitWidth, 1), *this)) { |
2761 | struct APIntCompare { |
2762 | bool operator()(const APInt &LHS, const APInt &RHS) const { |
2763 | return LHS.ult(RHS); |
2764 | } |
2765 | }; |
2766 | |
2767 | // Some interesting folding opportunity is present, so it's worthwhile to
2768 | // re-generate the operands list. Group the operands by constant scale, |
2769 | // to avoid multiplying by the same constant scale multiple times. |
2770 | std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; |
2771 | for (const SCEV *NewOp : NewOps) |
2772 | MulOpLists[M.find(NewOp)->second].push_back(NewOp); |
2773 | // Re-generate the operands list. |
2774 | Ops.clear(); |
2775 | if (AccumulatedConstant != 0) |
2776 | Ops.push_back(getConstant(AccumulatedConstant)); |
2777 | for (auto &MulOp : MulOpLists) { |
2778 | if (MulOp.first == 1) { |
2779 | Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)); |
2780 | } else if (MulOp.first != 0) { |
2781 | Ops.push_back(getMulExpr( |
2782 | getConstant(MulOp.first), |
2783 | getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), |
2784 | SCEV::FlagAnyWrap, Depth + 1)); |
2785 | } |
2786 | } |
2787 | if (Ops.empty()) |
2788 | return getZero(Ty); |
2789 | if (Ops.size() == 1) |
2790 | return Ops[0]; |
2791 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); |
2792 | } |
2793 | } |
2794 | |
2795 | // If we are adding something to a multiply expression, make sure the |
2796 | // something is not already an operand of the multiply. If so, merge it into |
2797 | // the multiply. |
2798 | for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { |
2799 | const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); |
2800 | for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { |
2801 | const SCEV *MulOpSCEV = Mul->getOperand(MulOp); |
2802 | if (isa<SCEVConstant>(MulOpSCEV)) |
2803 | continue; |
2804 | for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) |
2805 | if (MulOpSCEV == Ops[AddOp]) { |
2806 | // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) |
2807 | const SCEV *InnerMul = Mul->getOperand(MulOp == 0); // index 1 if MulOp is 0, else 0
2808 | if (Mul->getNumOperands() != 2) { |
2809 | // If the multiply has more than two operands, we must get the |
2810 | // Y*Z term. |
2811 | SmallVector<const SCEV *, 4> MulOps( |
2812 | Mul->operands().take_front(MulOp)); |
2813 | append_range(MulOps, Mul->operands().drop_front(MulOp + 1)); |
2814 | InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); |
2815 | } |
2816 | SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; |
2817 | const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); |
2818 | const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, |
2819 | SCEV::FlagAnyWrap, Depth + 1); |
2820 | if (Ops.size() == 2) return OuterMul; |
2821 | if (AddOp < Idx) { |
2822 | Ops.erase(Ops.begin()+AddOp); |
2823 | Ops.erase(Ops.begin()+Idx-1); |
2824 | } else { |
2825 | Ops.erase(Ops.begin()+Idx); |
2826 | Ops.erase(Ops.begin()+AddOp-1); |
2827 | } |
2828 | Ops.push_back(OuterMul); |
2829 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); |
2830 | } |
2831 | |
2832 | // Check this multiply against other multiplies being added together. |
2833 | for (unsigned OtherMulIdx = Idx+1; |
2834 | OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); |
2835 | ++OtherMulIdx) { |
2836 | const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); |
2837 | // If MulOp occurs in OtherMul, we can fold the two multiplies |
2838 | // together. |
2839 | for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); |
2840 | OMulOp != e; ++OMulOp) |
2841 | if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { |
2842 | // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) |
2843 | const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); |
2844 | if (Mul->getNumOperands() != 2) { |
2845 | SmallVector<const SCEV *, 4> MulOps( |
2846 | Mul->operands().take_front(MulOp)); |
2847 | append_range(MulOps, Mul->operands().drop_front(MulOp+1)); |
2848 | InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); |
2849 | } |
2850 | const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); |
2851 | if (OtherMul->getNumOperands() != 2) { |
2852 | SmallVector<const SCEV *, 4> MulOps( |
2853 | OtherMul->operands().take_front(OMulOp)); |
2854 | append_range(MulOps, OtherMul->operands().drop_front(OMulOp+1)); |
2855 | InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); |
2856 | } |
2857 | SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; |
2858 | const SCEV *InnerMulSum = |
2859 | getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); |
2860 | const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, |
2861 | SCEV::FlagAnyWrap, Depth + 1); |
2862 | if (Ops.size() == 2) return OuterMul; |
2863 | Ops.erase(Ops.begin()+Idx); |
2864 | Ops.erase(Ops.begin()+OtherMulIdx-1); |
2865 | Ops.push_back(OuterMul); |
2866 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); |
2867 | } |
2868 | } |
2869 | } |
2870 | } |
2871 | |
2872 | // If there are any add recurrences in the operands list, see if any other |
2873 | // added values are loop invariant. If so, we can fold them into the |
2874 | // recurrence. |
2875 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) |
2876 | ++Idx; |
2877 | |
2878 | // Scan over all recurrences, trying to fold loop invariants into them. |
2879 | for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { |
2880 | // Scan all of the other operands to this add and add them to the vector if |
2881 | // they are loop invariant w.r.t. the recurrence. |
2882 | SmallVector<const SCEV *, 8> LIOps; |
2883 | const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); |
2884 | const Loop *AddRecLoop = AddRec->getLoop(); |
2885 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
2886 | if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { |
2887 | LIOps.push_back(Ops[i]); |
2888 | Ops.erase(Ops.begin()+i); |
2889 | --i; --e; |
2890 | } |
2891 | |
2892 | // If we found some loop invariants, fold them into the recurrence. |
2893 | if (!LIOps.empty()) { |
2894 | // Compute nowrap flags for the addition of the loop-invariant ops and |
2895 | // the addrec. Temporarily push it as an operand for that purpose. These |
2896 | // flags are valid in the scope of the addrec only. |
2897 | LIOps.push_back(AddRec); |
2898 | SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); |
2899 | LIOps.pop_back(); |
2900 | |
2901 | // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} |
2902 | LIOps.push_back(AddRec->getStart()); |
2903 | |
2904 | SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); |
2905 | |
2906 | // It is not in general safe to propagate flags valid on an add within |
2907 | // the addrec scope to one outside it. We must prove that the inner |
2908 | // scope is guaranteed to execute if the outer one does to be able to |
2909 | // safely propagate. We know the program is undefined if poison is |
2910 | // produced on the inner scoped addrec. We also know that *for this use* |
2911 | // the outer scoped add can't overflow (because of the flags we just |
2912 | // computed for the inner scoped add) without the program being undefined. |
2913 | // Proving that entry to the outer scope necessitates entry to the inner
2914 | // scope, thus proves the program undefined if the flags would be violated |
2915 | // in the outer scope. |
2916 | SCEV::NoWrapFlags AddFlags = Flags; |
2917 | if (AddFlags != SCEV::FlagAnyWrap) { |
2918 | auto *DefI = getDefiningScopeBound(LIOps); |
2919 | auto *ReachI = &*AddRecLoop->getHeader()->begin(); |
2920 | if (!isGuaranteedToTransferExecutionTo(DefI, ReachI)) |
2921 | AddFlags = SCEV::FlagAnyWrap; |
2922 | } |
2923 | AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1); |
2924 | |
2925 | // Build the new addrec. Propagate the NUW and NSW flags if both the |
2926 | // outer add and the inner addrec are guaranteed to have no overflow. |
2927 | // Always propagate NW. |
2928 | Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); |
2929 | const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); |
2930 | |
2931 | // If all of the other operands were loop invariant, we are done. |
2932 | if (Ops.size() == 1) return NewRec; |
2933 | |
2934 | // Otherwise, add the folded AddRec by the non-invariant parts. |
2935 | for (unsigned i = 0;; ++i) |
2936 | if (Ops[i] == AddRec) { |
2937 | Ops[i] = NewRec; |
2938 | break; |
2939 | } |
2940 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); |
2941 | } |
2942 | |
2943 | // Okay, if there weren't any loop invariants to be folded, check to see if |
2944 | // there are multiple AddRec's with the same loop induction variable being |
2945 | // added together. If so, we can fold them. |
2946 | for (unsigned OtherIdx = Idx+1; |
2947 | OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); |
2948 | ++OtherIdx) { |
2949 | // We expect the AddRecExpr's to be sorted in reverse dominance order, |
2950 | // so that the 1st found AddRecExpr is dominated by all others. |
2951 | assert(DT.dominates(
2952 | cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2953 | AddRec->getLoop()->getHeader()) &&
2954 | "AddRecExprs are not sorted in reverse dominance order?");
2955 | if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { |
2956 | // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> |
2957 | SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); |
2958 | for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); |
2959 | ++OtherIdx) { |
2960 | const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); |
2961 | if (OtherAddRec->getLoop() == AddRecLoop) { |
2962 | for (unsigned i = 0, e = OtherAddRec->getNumOperands(); |
2963 | i != e; ++i) { |
2964 | if (i >= AddRecOps.size()) { |
2965 | append_range(AddRecOps, OtherAddRec->operands().drop_front(i)); |
2966 | break; |
2967 | } |
2968 | SmallVector<const SCEV *, 2> TwoOps = { |
2969 | AddRecOps[i], OtherAddRec->getOperand(i)}; |
2970 | AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); |
2971 | } |
2972 | Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; |
2973 | } |
2974 | } |
2975 | // Step size has changed, so we cannot guarantee no self-wraparound. |
2976 | Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); |
2977 | return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); |
2978 | } |
2979 | } |
2980 | |
2981 | // Otherwise couldn't fold anything into this recurrence. Move onto the |
2982 | // next one. |
2983 | } |
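// Editor's examples for the two folds above (loop L left implicit):
//   3 + {5,+,1}        -->  {8,+,1}   (loop invariant folded into the start)
//   {1,+,2} + {3,+,4}  -->  {4,+,6}   (same-loop addrecs added elementwise)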
2984 | |
2985 | // Okay, it looks like we really DO need an add expr. Check to see if we |
2986 | // already have one, otherwise create a new one. |
2987 | return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); |
2988 | } |
2989 | |
2990 | const SCEV * |
2991 | ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops, |
2992 | SCEV::NoWrapFlags Flags) { |
2993 | FoldingSetNodeID ID; |
2994 | ID.AddInteger(scAddExpr); |
2995 | for (const SCEV *Op : Ops) |
2996 | ID.AddPointer(Op); |
2997 | void *IP = nullptr; |
2998 | SCEVAddExpr *S = |
2999 | static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); |
3000 | if (!S) { |
3001 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); |
3002 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); |
3003 | S = new (SCEVAllocator) |
3004 | SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); |
3005 | UniqueSCEVs.InsertNode(S, IP); |
3006 | registerUser(S, Ops); |
3007 | } |
3008 | S->setNoWrapFlags(Flags); |
3009 | return S; |
3010 | } |
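// Editor's sketch (hypothetical types, not the LLVM API): the uniquing
// pattern above, with a plain std::map standing in for FoldingSet. One
// canonical node exists per operand list, so pointer equality means
// structural equality:
//
//   #include <map>
//   #include <memory>
//   #include <vector>
//
//   struct Expr { std::vector<const Expr *> Ops; };
//   using Cache = std::map<std::vector<const Expr *>, std::unique_ptr<Expr>>;
//
//   const Expr *getOrCreate(Cache &C, std::vector<const Expr *> Ops) {
//     auto &Slot = C[Ops];          // lookup, like FindNodeOrInsertPos
//     if (!Slot)                    // miss: allocate and remember the node
//       Slot = std::make_unique<Expr>(Expr{std::move(Ops)});
//     return Slot.get();
//   }
//
// The FoldingSetNodeID built above plays the role of the map key.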
3011 | |
3012 | const SCEV * |
3013 | ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops, |
3014 | const Loop *L, SCEV::NoWrapFlags Flags) { |
3015 | FoldingSetNodeID ID; |
3016 | ID.AddInteger(scAddRecExpr); |
3017 | for (const SCEV *Op : Ops) |
3018 | ID.AddPointer(Op); |
3019 | ID.AddPointer(L); |
3020 | void *IP = nullptr; |
3021 | SCEVAddRecExpr *S = |
3022 | static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); |
3023 | if (!S) { |
3024 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); |
3025 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); |
3026 | S = new (SCEVAllocator) |
3027 | SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); |
3028 | UniqueSCEVs.InsertNode(S, IP); |
3029 | LoopUsers[L].push_back(S); |
3030 | registerUser(S, Ops); |
3031 | } |
3032 | setNoWrapFlags(S, Flags); |
3033 | return S; |
3034 | } |
3035 | |
3036 | const SCEV * |
3037 | ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops, |
3038 | SCEV::NoWrapFlags Flags) { |
3039 | FoldingSetNodeID ID; |
3040 | ID.AddInteger(scMulExpr); |
3041 | for (const SCEV *Op : Ops) |
3042 | ID.AddPointer(Op); |
3043 | void *IP = nullptr; |
3044 | SCEVMulExpr *S = |
3045 | static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); |
3046 | if (!S) { |
3047 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); |
3048 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); |
3049 | S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), |
3050 | O, Ops.size()); |
3051 | UniqueSCEVs.InsertNode(S, IP); |
3052 | registerUser(S, Ops); |
3053 | } |
3054 | S->setNoWrapFlags(Flags); |
3055 | return S; |
3056 | } |
3057 | |
3058 | static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { |
3059 | uint64_t k = i*j; |
3060 | if (j > 1 && k / j != i) Overflow = true; |
3061 | return k; |
3062 | } |
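// Editor's note: the division check above is a portable overflow test; on
// GCC/Clang the same thing can be written with a builtin (sketch, assumed
// toolchain support):
//
//   static uint64_t umul_ov_alt(uint64_t i, uint64_t j, bool &Overflow) {
//     uint64_t k;
//     if (__builtin_mul_overflow(i, j, &k))
//       Overflow = true; // sticky, matching the hand-rolled version
//     return k;
//   }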
3063 | |
3064 | /// Compute the result of "n choose k", the binomial coefficient. If an |
3065 | /// intermediate computation overflows, Overflow will be set and the return will |
3066 | /// be garbage. Overflow is not cleared on absence of overflow. |
3067 | static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { |
3068 | // We use the multiplicative formula: |
3069 | // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . |
3070 | // At each iteration i, we multiply by the i-th term of the numerator and
3071 | // divide by the i-th term of the denominator. This division will always produce an
3072 | // integral result, and helps reduce the chance of overflow in the |
3073 | // intermediate computations. However, we can still overflow even when the |
3074 | // final result would fit. |
3075 | |
3076 | if (n == 0 || n == k) return 1; |
3077 | if (k > n) return 0; |
3078 | |
3079 | if (k > n/2) |
3080 | k = n-k; |
3081 | |
3082 | uint64_t r = 1; |
3083 | for (uint64_t i = 1; i <= k; ++i) { |
3084 | r = umul_ov(r, n-(i-1), Overflow); |
3085 | r /= i; |
3086 | } |
3087 | return r; |
3088 | } |
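// Editor's trace: Choose(6, 2) keeps k == 2 (since 2 <= 6/2), then r = 6/1 = 6
// at i = 1 and r = (6 * 5) / 2 = 15 at i = 2, which is C(6,2). Each division
// is exact because the product of i consecutive integers is divisible by i!.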
3089 | |
3090 | /// Determine if any of the operands in this SCEV are a constant or if |
3091 | /// any of the add or multiply expressions in this SCEV contain a constant. |
3092 | static bool containsConstantInAddMulChain(const SCEV *StartExpr) { |
3093 | struct FindConstantInAddMulChain { |
3094 | bool FoundConstant = false; |
3095 | |
3096 | bool follow(const SCEV *S) { |
3097 | FoundConstant |= isa<SCEVConstant>(S); |
3098 | return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); |
3099 | } |
3100 | |
3101 | bool isDone() const { |
3102 | return FoundConstant; |
3103 | } |
3104 | }; |
3105 | |
3106 | FindConstantInAddMulChain F; |
3107 | SCEVTraversal<FindConstantInAddMulChain> ST(F); |
3108 | ST.visitAll(StartExpr); |
3109 | return F.FoundConstant; |
3110 | } |
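// Editor's sketch of the traversal contract used above (simplified; the real
// SCEVTraversal also de-duplicates visited nodes):
//
//   template <typename Visitor>
//   void visitAllSketch(const SCEV *Root, Visitor &V) {
//     SmallVector<const SCEV *, 8> Work = {Root};
//     while (!Work.empty() && !V.isDone()) {
//       const SCEV *S = Work.pop_back_val();
//       if (V.follow(S))                     // descend only where allowed
//         append_range(Work, S->operands());
//     }
//   }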
3111 | |
3112 | /// Get a canonical multiply expression, or something simpler if possible. |
3113 | const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, |
3114 | SCEV::NoWrapFlags OrigFlags, |
3115 | unsigned Depth) { |
3116 | assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
3117 | "only nuw or nsw allowed");
3118 | assert(!Ops.empty() && "Cannot get empty mul!");
3119 | if (Ops.size() == 1) return Ops[0]; |
3120 | #ifndef NDEBUG |
3121 | Type *ETy = Ops[0]->getType(); |
3122 | assert(!ETy->isPointerTy());
3123 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) |
3124 | assert(Ops[i]->getType() == ETy &&
3125 | "SCEVMulExpr operand types don't match!");
3126 | #endif |
3127 | |
3128 | // Sort by complexity, this groups all similar expression types together. |
3129 | GroupByComplexity(Ops, &LI, DT); |
3130 | |
3131 | // If there are any constants, fold them together. |
3132 | unsigned Idx = 0; |
3133 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { |
3134 | ++Idx; |
3135 | assert(Idx < Ops.size());
3136 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { |
3137 | // We found two constants, fold them together! |
3138 | Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt()); |
3139 | if (Ops.size() == 2) return Ops[0]; |
3140 | Ops.erase(Ops.begin()+1); // Erase the folded element |
3141 | LHSC = cast<SCEVConstant>(Ops[0]); |
3142 | } |
3143 | |
3144 | // If we have a multiply of zero, it will always be zero. |
3145 | if (LHSC->getValue()->isZero()) |
3146 | return LHSC; |
3147 | |
3148 | // If we are left with a constant one being multiplied, strip it off. |
3149 | if (LHSC->getValue()->isOne()) { |
3150 | Ops.erase(Ops.begin()); |
3151 | --Idx; |
3152 | } |
3153 | |
3154 | if (Ops.size() == 1) |
3155 | return Ops[0]; |
3156 | } |
3157 | |
3158 | // Delay expensive flag strengthening until necessary. |
3159 | auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { |
3160 | return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags); |
3161 | }; |
3162 | |
3163 | // Limit recursion calls depth. |
3164 | if (Depth > MaxArithDepth || hasHugeExpression(Ops)) |
3165 | return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); |
3166 | |
3167 | if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) { |
3168 | // Don't strengthen flags if we have no new information. |
3169 | SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S); |
3170 | if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags) |
3171 | Mul->setNoWrapFlags(ComputeFlags(Ops)); |
3172 | return S; |
3173 | } |
3174 | |
3175 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { |
3176 | if (Ops.size() == 2) { |
3177 | // C1*(C2+V) -> C1*C2 + C1*V |
3178 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) |
3179 | // If any of Add's ops are Adds or Muls with a constant, apply this |
3180 | // transformation as well. |
3181 | // |
3182 | // TODO: There are some cases where this transformation is not |
3183 | // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of |
3184 | // this transformation should be narrowed down. |
3185 | if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) { |
3186 | const SCEV *LHS = getMulExpr(LHSC, Add->getOperand(0), |
3187 | SCEV::FlagAnyWrap, Depth + 1); |
3188 | const SCEV *RHS = getMulExpr(LHSC, Add->getOperand(1), |
3189 | SCEV::FlagAnyWrap, Depth + 1); |
3190 | return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1); |
3191 | } |
3192 | |
3193 | if (Ops[0]->isAllOnesValue()) { |
3194 | // If we have a mul by -1 of an add, try distributing the -1 among the |
3195 | // add operands. |
3196 | if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { |
3197 | SmallVector<const SCEV *, 4> NewOps; |
3198 | bool AnyFolded = false; |
3199 | for (const SCEV *AddOp : Add->operands()) { |
3200 | const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, |
3201 | Depth + 1); |
3202 | if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; |
3203 | NewOps.push_back(Mul); |
3204 | } |
3205 | if (AnyFolded) |
3206 | return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); |
3207 | } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { |
3208 | // Negation preserves a recurrence's no self-wrap property. |
3209 | SmallVector<const SCEV *, 4> Operands; |
3210 | for (const SCEV *AddRecOp : AddRec->operands()) |
3211 | Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, |
3212 | Depth + 1)); |
3213 | // Let M be the minimum representable signed value. AddRec with nsw |
3214 | // multiplied by -1 can have signed overflow if and only if it takes a |
3215 | // value of M: M * (-1) would stay M and (M + 1) * (-1) would be the |
3216 | // maximum signed value. In all other cases signed overflow is |
3217 | // impossible. |
3218 | auto FlagsMask = SCEV::FlagNW; |
3219 | if (hasFlags(AddRec->getNoWrapFlags(), SCEV::FlagNSW)) { |
3220 | auto MinInt = |
3221 | APInt::getSignedMinValue(getTypeSizeInBits(AddRec->getType())); |
3222 | if (getSignedRangeMin(AddRec) != MinInt) |
3223 | FlagsMask = setFlags(FlagsMask, SCEV::FlagNSW); |
3224 | } |
3225 | return getAddRecExpr(Operands, AddRec->getLoop(), |
3226 | AddRec->getNoWrapFlags(FlagsMask)); |
3227 | } |
3228 | } |
3229 | } |
3230 | } |
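// Editor's example (i8): negating {-128,+,1}<nsw> gives {-128,+,-1}; the
// start value -128 * -1 wraps back to -128, so NSW cannot be kept. For any
// addrec whose signed minimum is not -128, the getSignedRangeMin check above
// lets negation preserve NSW.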
3231 | |
3232 | // Skip over the add expression until we get to a multiply. |
3233 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) |
3234 | ++Idx; |
3235 | |
3236 | // If there are mul operands inline them all into this expression. |
3237 | if (Idx < Ops.size()) { |
3238 | bool DeletedMul = false; |
3239 | while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { |
3240 | if (Ops.size() > MulOpsInlineThreshold) |
3241 | break; |
3242 | // If we have a mul, expand the mul operands onto the end of the
3243 | // operands list. |
3244 | Ops.erase(Ops.begin()+Idx); |
3245 | append_range(Ops, Mul->operands()); |
3246 | DeletedMul = true; |
3247 | } |
3248 | |
3249 | // If we deleted at least one mul, we added operands to the end of the |
3250 | // list, and they are not necessarily sorted. Recurse to resort and |
3251 | // resimplify any operands we just acquired. |
3252 | if (DeletedMul) |
3253 | return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); |
3254 | } |
3255 | |
3256 | // If there are any add recurrences in the operands list, see if any other |
3257 | // multiplied values are loop invariant. If so, we can fold them into the
3258 | // recurrence. |
3259 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) |
3260 | ++Idx; |
3261 | |
3262 | // Scan over all recurrences, trying to fold loop invariants into them. |
3263 | for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { |
3264 | // Scan all of the other operands to this mul and add them to the vector |
3265 | // if they are loop invariant w.r.t. the recurrence. |
3266 | SmallVector<const SCEV *, 8> LIOps; |
3267 | const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); |
3268 | const Loop *AddRecLoop = AddRec->getLoop(); |
3269 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
3270 | if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { |
3271 | LIOps.push_back(Ops[i]); |
3272 | Ops.erase(Ops.begin()+i); |
3273 | --i; --e; |
3274 | } |
3275 | |
3276 | // If we found some loop invariants, fold them into the recurrence. |
3277 | if (!LIOps.empty()) { |
3278 | // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} |
3279 | SmallVector<const SCEV *, 4> NewOps; |
3280 | NewOps.reserve(AddRec->getNumOperands()); |
3281 | const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); |
3282 | for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) |
3283 | NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), |
3284 | SCEV::FlagAnyWrap, Depth + 1)); |
3285 | |
3286 | // Build the new addrec. Propagate the NUW and NSW flags if both the |
3287 | // outer mul and the inner addrec are guaranteed to have no overflow. |
3288 | // |
3289 | // No self-wrap cannot be guaranteed after changing the step size, but |
3290 | // will be inferred if either NUW or NSW is true. |
3291 | SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); |
3292 | const SCEV *NewRec = getAddRecExpr( |
3293 | NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); |
3294 | |
3295 | // If all of the other operands were loop invariant, we are done. |
3296 | if (Ops.size() == 1) return NewRec; |
3297 | |
3298 | // Otherwise, multiply the folded AddRec by the non-invariant parts. |
3299 | for (unsigned i = 0;; ++i) |
3300 | if (Ops[i] == AddRec) { |
3301 | Ops[i] = NewRec; |
3302 | break; |
3303 | } |
3304 | return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); |
3305 | } |
3306 | |
3307 | // Okay, if there weren't any loop invariants to be folded, check to see |
3308 | // if there are multiple AddRec's with the same loop induction variable |
3309 | // being multiplied together. If so, we can fold them. |
3310 | |
3311 | // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> |
3312 | // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ |
3313 | // choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
3314 | // ]]],+,...up to x=2n}. |
3315 | // Note that the arguments to choose() are always integers with values |
3316 | // known at compile time, never SCEV objects. |
3317 | // |
3318 | // The implementation avoids pointless extra computations when the two |
3319 | // addrec's are of different length (mathematically, it's equivalent to |
3320 | // an infinite stream of zeros on the right). |
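// Editor's example: {1,+,1} * {1,+,1}, i.e. (n+1) * (n+1), folds to
// {1,+,3,+,2}: value 1 at n = 0, first difference 2n+3, constant second
// difference 2, matching (n+1)^2 = n^2 + 2n + 1.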
3321 | bool OpsModified = false; |
3322 | for (unsigned OtherIdx = Idx+1; |
3323 | OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); |
3324 | ++OtherIdx) { |
3325 | const SCEVAddRecExpr *OtherAddRec = |
3326 | dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); |
3327 | if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) |
3328 | continue; |
3329 | |
3330 | // Limit max number of arguments to avoid creation of unreasonably big |
3331 | // SCEVAddRecs with very complex operands. |
3332 | if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > |
3333 | MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec})) |
3334 | continue; |
3335 | |
3336 | bool Overflow = false; |
3337 | Type *Ty = AddRec->getType(); |
3338 | bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; |
3339 | SmallVector<const SCEV*, 7> AddRecOps; |
3340 | for (int x = 0, xe = AddRec->getNumOperands() + |
3341 | OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { |
3342 | SmallVector <const SCEV *, 7> SumOps; |
3343 | for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { |
3344 | uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); |
3345 | for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), |
3346 | ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); |
3347 | z < ze && !Overflow; ++z) { |
3348 | uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); |
3349 | uint64_t Coeff; |
3350 | if (LargerThan64Bits) |
3351 | Coeff = umul_ov(Coeff1, Coeff2, Overflow); |
3352 | else |
3353 | Coeff = Coeff1*Coeff2; |
3354 | const SCEV *CoeffTerm = getConstant(Ty, Coeff); |
3355 | const SCEV *Term1 = AddRec->getOperand(y-z); |
3356 | const SCEV *Term2 = OtherAddRec->getOperand(z); |
3357 | SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2, |
3358 | SCEV::FlagAnyWrap, Depth + 1)); |
3359 | } |
3360 | } |
3361 | if (SumOps.empty()) |
3362 | SumOps.push_back(getZero(Ty)); |
3363 | AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1)); |
3364 | } |
3365 | if (!Overflow) { |
3366 | const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop, |
3367 | SCEV::FlagAnyWrap); |
3368 | if (Ops.size() == 2) return NewAddRec; |
3369 | Ops[Idx] = NewAddRec; |
3370 | Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; |
3371 | OpsModified = true; |
3372 | AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); |
3373 | if (!AddRec) |
3374 | break; |
3375 | } |
3376 | } |
3377 | if (OpsModified) |
3378 | return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); |
3379 | |
3380 | // Otherwise couldn't fold anything into this recurrence. Move onto the |
3381 | // next one. |
3382 | } |
3383 | |
3384 | // Okay, it looks like we really DO need an mul expr. Check to see if we |
3385 | // already have one, otherwise create a new one. |
3386 | return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); |
3387 | } |
3388 | |
3389 | /// Represents an unsigned remainder expression based on unsigned division. |
3390 | const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS, |
3391 | const SCEV *RHS) { |
3392 | assert(getEffectiveSCEVType(LHS->getType()) ==
3393 |            getEffectiveSCEVType(RHS->getType()) &&
3394 |        "SCEVURemExpr operand types don't match!");
3395 | |
3396 | // Short-circuit easy cases |
3397 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { |
3398 | // If constant is one, the result is trivial |
3399 | if (RHSC->getValue()->isOne()) |
3400 | return getZero(LHS->getType()); // X urem 1 --> 0 |
3401 | |
3402 | // If constant is a power of two, fold into a zext(trunc(LHS)). |
3403 | if (RHSC->getAPInt().isPowerOf2()) { |
3404 | Type *FullTy = LHS->getType(); |
3405 | Type *TruncTy = |
3406 | IntegerType::get(getContext(), RHSC->getAPInt().logBase2()); |
3407 | return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy); |
3408 | } |
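// Worked example (editorial illustration): for X urem 8 on i32,
// logBase2(8) == 3, so the fold yields zext(trunc(X to i3) to i32),
// i.e. the low three bits of X.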
3409 | } |
3410 | |
3411 | // Fall back to the identity %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
3412 | const SCEV *UDiv = getUDivExpr(LHS, RHS); |
3413 | const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW); |
3414 | return getMinusSCEV(LHS, Mult, SCEV::FlagNUW); |
3415 | } |
3416 | |
3417 | /// Get a canonical unsigned division expression, or something simpler if |
3418 | /// possible. |
3419 | const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, |
3420 | const SCEV *RHS) { |
3421 | assert(!LHS->getType()->isPointerTy() &&
3422 |        "SCEVUDivExpr operand can't be pointer!");
3423 | assert(LHS->getType() == RHS->getType() &&
3424 |        "SCEVUDivExpr operand types don't match!");
3425 | |
3426 | FoldingSetNodeID ID; |
3427 | ID.AddInteger(scUDivExpr); |
3428 | ID.AddPointer(LHS); |
3429 | ID.AddPointer(RHS); |
3430 | void *IP = nullptr; |
3431 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) |
3432 | return S; |
3433 | |
3434 | // 0 udiv Y == 0 |
3435 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) |
3436 | if (LHSC->getValue()->isZero()) |
3437 | return LHS; |
3438 | |
3439 | if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { |
3440 | if (RHSC->getValue()->isOne()) |
3441 | return LHS; // X udiv 1 --> x |
3442 | // If the denominator is zero, the result of the udiv is undefined. Don't |
3443 | // try to analyze it, because the resolution chosen here may differ from |
3444 | // the resolution chosen in other parts of the compiler. |
3445 | if (!RHSC->getValue()->isZero()) { |
3446 | // Determine if the division can be folded into the operands of
3447 | // the dividend.
3448 | // TODO: Generalize this to non-constants by using known-bits information. |
3449 | Type *Ty = LHS->getType(); |
3450 | unsigned LZ = RHSC->getAPInt().countl_zero(); |
3451 | unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; |
3452 | // For non-power-of-two values, effectively round the value up to the |
3453 | // nearest power of two. |
3454 | if (!RHSC->getAPInt().isPowerOf2()) |
3455 | ++MaxShiftAmt; |
3456 | IntegerType *ExtTy = |
3457 | IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); |
3458 | if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) |
3459 | if (const SCEVConstant *Step = |
3460 | dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { |
3461 | // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. |
3462 | const APInt &StepInt = Step->getAPInt(); |
3463 | const APInt &DivInt = RHSC->getAPInt(); |
3464 | if (!StepInt.urem(DivInt) && |
3465 | getZeroExtendExpr(AR, ExtTy) == |
3466 | getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), |
3467 | getZeroExtendExpr(Step, ExtTy), |
3468 | AR->getLoop(), SCEV::FlagAnyWrap)) { |
3469 | SmallVector<const SCEV *, 4> Operands; |
3470 | for (const SCEV *Op : AR->operands()) |
3471 | Operands.push_back(getUDivExpr(Op, RHS)); |
3472 | return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); |
3473 | } |
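// Worked example (editorial illustration, assuming the zext equality
// check above succeeds): {0,+,4}<L> /u 2 folds to {0,+,2}<L>, since
// 4 urem 2 == 0.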
3474 | // Get a canonical UDivExpr for a recurrence.
3475 | // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3476 | // We can currently only fold X%N if X is constant.
3477 | const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); |
3478 | if (StartC && !DivInt.urem(StepInt) && |
3479 | getZeroExtendExpr(AR, ExtTy) == |
3480 | getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), |
3481 | getZeroExtendExpr(Step, ExtTy), |
3482 | AR->getLoop(), SCEV::FlagAnyWrap)) { |
3483 | const APInt &StartInt = StartC->getAPInt(); |
3484 | const APInt &StartRem = StartInt.urem(StepInt); |
3485 | if (StartRem != 0) { |
3486 | const SCEV *NewLHS = |
3487 | getAddRecExpr(getConstant(StartInt - StartRem), Step, |
3488 | AR->getLoop(), SCEV::FlagNW); |
3489 | if (LHS != NewLHS) { |
3490 | LHS = NewLHS; |
3491 | |
3492 | // Reset the ID to include the new LHS, and check if it is |
3493 | // already cached. |
3494 | ID.clear(); |
3495 | ID.AddInteger(scUDivExpr); |
3496 | ID.AddPointer(LHS); |
3497 | ID.AddPointer(RHS); |
3498 | IP = nullptr; |
3499 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) |
3500 | return S; |
3501 | } |
3502 | } |
3503 | } |
3504 | } |
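// Worked example (editorial illustration): for {5,+,4}<L> /u 4,
// StartRem == 5 urem 4 == 1, so the start is lowered by 1 and the query
// becomes {4,+,4}<L> /u 4, which the step fold above turns into
// {1,+,1}<L>; both divide to the same sequence 1, 2, 3, ...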
3505 | // (A*B)/C --> A*(B/C) if safe and B/C can be folded. |
3506 | if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { |
3507 | SmallVector<const SCEV *, 4> Operands; |
3508 | for (const SCEV *Op : M->operands()) |
3509 | Operands.push_back(getZeroExtendExpr(Op, ExtTy)); |
3510 | if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) |
3511 | // Find an operand that's safely divisible. |
3512 | for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { |
3513 | const SCEV *Op = M->getOperand(i); |
3514 | const SCEV *Div = getUDivExpr(Op, RHSC); |
3515 | if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { |
3516 | Operands = SmallVector<const SCEV *, 4>(M->operands()); |
3517 | Operands[i] = Div; |
3518 | return getMulExpr(Operands); |
3519 | } |
3520 | } |
3521 | } |
3522 | |
3523 | // (A/B)/C --> A/(B*C) if safe and B*C can be folded. |
3524 | if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { |
3525 | if (auto *DivisorConstant = |
3526 | dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { |
3527 | bool Overflow = false; |
3528 | APInt NewRHS = |
3529 | DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); |
3530 | if (Overflow) { |
3531 | return getConstant(RHSC->getType(), 0, false); |
3532 | } |
3533 | return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); |
3534 | } |
3535 | } |
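// Worked example (editorial illustration): (X /u 6) /u 8 folds to
// X /u 48. If the product of the two divisors overflows the bit width,
// any dividend divided by it is zero, which is what the Overflow branch
// above returns.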
3536 | |
3537 | // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. |
3538 | if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { |
3539 | SmallVector<const SCEV *, 4> Operands; |
3540 | for (const SCEV *Op : A->operands()) |
3541 | Operands.push_back(getZeroExtendExpr(Op, ExtTy)); |
3542 | if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { |
3543 | Operands.clear(); |
3544 | for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { |
3545 | const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); |
3546 | if (isa<SCEVUDivExpr>(Op) || |
3547 | getMulExpr(Op, RHS) != A->getOperand(i)) |
3548 | break; |
3549 | Operands.push_back(Op); |
3550 | } |
3551 | if (Operands.size() == A->getNumOperands()) |
3552 | return getAddExpr(Operands); |
3553 | } |
3554 | } |
3555 | |
3556 | // Fold if both operands are constant. |
3557 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) |
3558 | return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt())); |
3559 | } |
3560 | } |
3561 | |
3562 | // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs |
3563 | // changes). Make sure we get a new one. |
3564 | IP = nullptr; |
3565 | if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; |
3566 | SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), |
3567 | LHS, RHS); |
3568 | UniqueSCEVs.InsertNode(S, IP); |
3569 | registerUser(S, {LHS, RHS}); |
3570 | return S; |
3571 | } |
3572 | |
3573 | APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { |
3574 | APInt A = C1->getAPInt().abs(); |
3575 | APInt B = C2->getAPInt().abs(); |
3576 | uint32_t ABW = A.getBitWidth(); |
3577 | uint32_t BBW = B.getBitWidth(); |
3578 | |
3579 | if (ABW > BBW) |
3580 | B = B.zext(ABW); |
3581 | else if (ABW < BBW) |
3582 | A = A.zext(BBW); |
3583 | |
3584 | return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); |
3585 | } |
3586 | |
3587 | /// Get a canonical unsigned division expression, or something simpler if |
3588 | /// possible. There is no representation for an exact udiv in SCEV IR, but we |
3589 | /// can attempt to remove factors from the LHS and RHS. We can't do this when |
3590 | /// it's not exact because the udiv may be clearing bits. |
3591 | const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, |
3592 | const SCEV *RHS) { |
3593 | // TODO: we could try to find factors in all sorts of things, but for now we |
3594 | // just deal with u/exact (multiply, constant). See SCEVDivision towards the |
3595 | // end of this file for inspiration. |
3596 | |
3597 | const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); |
3598 | if (!Mul || !Mul->hasNoUnsignedWrap()) |
3599 | return getUDivExpr(LHS, RHS); |
3600 | |
3601 | if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { |
3602 | // If the mulexpr multiplies by a constant, then that constant must be the |
3603 | // first element of the mulexpr. |
3604 | if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { |
3605 | if (LHSCst == RHSCst) { |
3606 | SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands())); |
3607 | return getMulExpr(Operands); |
3608 | } |
3609 | |
3610 | // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3611 | // that there's a factor provided by one of the other terms. We need to
3612 | // check.
3613 | APInt Factor = gcd(LHSCst, RHSCst); |
3614 | if (!Factor.isIntN(1)) { |
3615 | LHSCst = |
3616 | cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); |
3617 | RHSCst = |
3618 | cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); |
3619 | SmallVector<const SCEV *, 2> Operands; |
3620 | Operands.push_back(LHSCst); |
3621 | append_range(Operands, Mul->operands().drop_front()); |
3622 | LHS = getMulExpr(Operands); |
3623 | RHS = RHSCst; |
3624 | Mul = dyn_cast<SCEVMulExpr>(LHS); |
3625 | if (!Mul) |
3626 | return getUDivExactExpr(LHS, RHS); |
3627 | } |
3628 | } |
3629 | } |
3630 | |
3631 | for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { |
3632 | if (Mul->getOperand(i) == RHS) { |
3633 | SmallVector<const SCEV *, 2> Operands; |
3634 | append_range(Operands, Mul->operands().take_front(i)); |
3635 | append_range(Operands, Mul->operands().drop_front(i + 1)); |
3636 | return getMulExpr(Operands); |
3637 | } |
3638 | } |
3639 | |
3640 | return getUDivExpr(LHS, RHS); |
3641 | } |
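// Worked examples (editorial illustration): dividing (4 * X)<nuw> exactly
// by 4 matches the equal-constant case and yields X; for (6 * X)<nuw> /u 4,
// gcd(6, 4) == 2 reduces the query to (3 * X) /u 2 before retrying.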
3642 | |
3643 | /// Get an add recurrence expression for the specified loop. Simplify the |
3644 | /// expression as much as possible. |
3645 | const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, |
3646 | const Loop *L, |
3647 | SCEV::NoWrapFlags Flags) { |
3648 | SmallVector<const SCEV *, 4> Operands; |
3649 | Operands.push_back(Start); |
3650 | if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) |
3651 | if (StepChrec->getLoop() == L) { |
3652 | append_range(Operands, StepChrec->operands()); |
3653 | return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); |
3654 | } |
3655 | |
3656 | Operands.push_back(Step); |
3657 | return getAddRecExpr(Operands, L, Flags); |
3658 | } |
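// Worked example (editorial illustration): a request for
// {X,+,{Y,+,Z}<L>}<L> is flattened by the code above into the single
// recurrence {X,+,Y,+,Z}<L>, keeping only the no-self-wrap flag.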
3659 | |
3660 | /// Get an add recurrence expression for the specified loop. Simplify the |
3661 | /// expression as much as possible. |
3662 | const SCEV * |
3663 | ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, |
3664 | const Loop *L, SCEV::NoWrapFlags Flags) { |
3665 | if (Operands.size() == 1) return Operands[0]; |
3666 | #ifndef NDEBUG |
3667 | Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); |
3668 | for (unsigned i = 1, e = Operands.size(); i != e; ++i) { |
3669 | assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3670 |        "SCEVAddRecExpr operand types don't match!");
3671 | assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
3672 | } |
3673 | for (unsigned i = 0, e = Operands.size(); i != e; ++i) |
3674 | assert(isLoopInvariant(Operands[i], L) &&
3675 |        "SCEVAddRecExpr operand is not loop-invariant!");
3676 | #endif |
3677 | |
3678 | if (Operands.back()->isZero()) { |
3679 | Operands.pop_back(); |
3680 | return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X |
3681 | } |
3682 | |
3683 | // It's tempting to call getConstantMaxBackedgeTakenCount here and
3684 | // use that information to infer NUW and NSW flags. However, computing a |
3685 | // BE count requires calling getAddRecExpr, so we may not yet have a |
3686 | // meaningful BE count at this point (and if we don't, we'd be stuck |
3687 | // with a SCEVCouldNotCompute as the cached BE count). |
3688 | |
3689 | Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); |
3690 | |
3691 | // Canonicalize nested AddRecs by nesting them in order of loop depth.
3692 | if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { |
3693 | const Loop *NestedLoop = NestedAR->getLoop(); |
3694 | if (L->contains(NestedLoop) |
3695 | ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) |
3696 | : (!NestedLoop->contains(L) && |
3697 | DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { |
3698 | SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands()); |
3699 | Operands[0] = NestedAR->getStart(); |
3700 | // AddRecs require their operands be loop-invariant with respect to their |
3701 | // loops. Don't perform this transformation if it would break this |
3702 | // requirement. |
3703 | bool AllInvariant = all_of( |
3704 | Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); |
3705 | |
3706 | if (AllInvariant) { |
3707 | // Create a recurrence for the outer loop with the same step size. |
3708 | // |
3709 | // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the |
3710 | // inner recurrence has the same property. |
3711 | SCEV::NoWrapFlags OuterFlags = |
3712 | maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); |
3713 | |
3714 | NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); |
3715 | AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { |
3716 | return isLoopInvariant(Op, NestedLoop); |
3717 | }); |
3718 | |
3719 | if (AllInvariant) { |
3720 | // Ok, both add recurrences are valid after the transformation. |
3721 | // |
3722 | // The inner recurrence keeps its NW flag but only keeps NUW/NSW if |
3723 | // the outer recurrence has the same property. |
3724 | SCEV::NoWrapFlags InnerFlags = |
3725 | maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); |
3726 | return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); |
3727 | } |
3728 | } |
3729 | // Reset Operands to its original state. |
3730 | Operands[0] = NestedAR; |
3731 | } |
3732 | } |
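// Worked example (editorial illustration): {{A,+,B}<Inner>,+,C}<Outer>,
// with Outer containing Inner, is rewritten above to
// {{A,+,C}<Outer>,+,B}<Inner>, provided every operand stays loop-invariant
// with respect to its new loop.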
3733 | |
3734 | // Okay, it looks like we really DO need an addrec expr. Check to see if we |
3735 | // already have one, otherwise create a new one. |
3736 | return getOrCreateAddRecExpr(Operands, L, Flags); |
3737 | } |
3738 | |
3739 | const SCEV * |
3740 | ScalarEvolution::getGEPExpr(GEPOperator *GEP, |
3741 | const SmallVectorImpl<const SCEV *> &IndexExprs) { |
3742 | const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); |
3743 | // getSCEV(Base)->getType() has the same address space as Base->getType() |
3744 | // because SCEV::getType() preserves the address space. |
3745 | Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); |
3746 | const bool AssumeInBoundsFlags = [&]() { |
3747 | if (!GEP->isInBounds()) |
3748 | return false; |
3749 | |
3750 | // We'd like to propagate flags from the IR to the corresponding SCEV nodes, |
3751 | // but to do that, we have to ensure that said flag is valid in the entire |
3752 | // defined scope of the SCEV. |
3753 | auto *GEPI = dyn_cast<Instruction>(GEP); |
3754 | // TODO: non-instructions have global scope. We might be able to prove
3755 | // some global scope cases.
3756 | return GEPI && isSCEVExprNeverPoison(GEPI); |
3757 | }(); |
3758 | |
3759 | SCEV::NoWrapFlags OffsetWrap = |
3760 | AssumeInBoundsFlags ? SCEV::FlagNSW : SCEV::FlagAnyWrap; |
3761 | |
3762 | Type *CurTy = GEP->getType(); |
3763 | bool FirstIter = true; |
3764 | SmallVector<const SCEV *, 4> Offsets; |
3765 | for (const SCEV *IndexExpr : IndexExprs) { |
3766 | // Compute the (potentially symbolic) offset in bytes for this index. |
3767 | if (StructType *STy = dyn_cast<StructType>(CurTy)) { |
3768 | // For a struct, add the member offset. |
3769 | ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); |
3770 | unsigned FieldNo = Index->getZExtValue(); |
3771 | const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); |
3772 | Offsets.push_back(FieldOffset); |
3773 | |
3774 | // Update CurTy to the type of the field at Index. |
3775 | CurTy = STy->getTypeAtIndex(Index); |
3776 | } else { |
3777 | // Update CurTy to its element type. |
3778 | if (FirstIter) { |
3779 | assert(isa<PointerType>(CurTy) &&
3780 |        "The first index of a GEP indexes a pointer");
3781 | CurTy = GEP->getSourceElementType(); |
3782 | FirstIter = false; |
3783 | } else { |
3784 | CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); |
3785 | } |
3786 | // For an array, add the element offset, explicitly scaled. |
3787 | const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); |
3788 | // Getelementptr indices are signed. |
3789 | IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); |
3790 | |
3791 | // Multiply the index by the element size to compute the element offset. |
3792 | const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); |
3793 | Offsets.push_back(LocalOffset); |
3794 | } |
3795 | } |
3796 | |
3797 | // Handle degenerate case of GEP without offsets. |
3798 | if (Offsets.empty()) |
3799 | return BaseExpr; |
3800 | |
3801 | // Add the offsets together, assuming nsw if inbounds. |
3802 | const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); |
3803 | // Add the base address and the offset. We cannot use the nsw flag, as the |
3804 | // base address is unsigned. However, if we know that the offset is |
3805 | // non-negative, we can use nuw. |
3806 | SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset) |
3807 | ? SCEV::FlagNUW : SCEV::FlagAnyWrap; |
3808 | auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap); |
3809 | assert(BaseExpr->getType() == GEPExpr->getType() &&
3810 |        "GEP should not change type mid-flight.");
3811 | return GEPExpr; |
3812 | } |
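// Rough worked example (editorial illustration): for
//   %p = getelementptr inbounds i32, ptr %base, i64 %i
// the offset is (4 * %i), marked <nsw> when the inbounds flag can be
// transferred, and the final expression is (%base + 4 * %i), gaining
// <nuw> only if the offset is known non-negative.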
3813 | |
3814 | SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, |
3815 | ArrayRef<const SCEV *> Ops) { |
3816 | FoldingSetNodeID ID; |
3817 | ID.AddInteger(SCEVType); |
3818 | for (const SCEV *Op : Ops) |
3819 | ID.AddPointer(Op); |
3820 | void *IP = nullptr; |
3821 | return UniqueSCEVs.FindNodeOrInsertPos(ID, IP); |
3822 | } |
3823 | |
3824 | const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { |
3825 | SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap; |
3826 | return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); |
3827 | } |
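// Editorial note: abs is modeled as smax(X, -X); IsNSW marks the negation
// nsw, which is valid when X is known not to be the minimum signed value.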
3828 | |
3829 | const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, |
3830 | SmallVectorImpl<const SCEV *> &Ops) { |
3831 | assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
3832 | assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3833 | if (Ops.size() == 1) return Ops[0]; |
3834 | #ifndef NDEBUG |
3835 | Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); |
3836 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) { |
3837 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3838 |        "Operand types don't match!");
3839 | assert(Ops[0]->getType()->isPointerTy() ==
3840 |            Ops[i]->getType()->isPointerTy() &&
3841 |        "min/max should be consistently pointerish");
3842 | } |
3843 | #endif |
3844 | |
3845 | bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; |
3846 | bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; |
3847 | |
3848 | // Sort by complexity, this groups all similar expression types together. |
3849 | GroupByComplexity(Ops, &LI, DT); |
3850 | |
3851 | // Check if we have created the same expression before. |
3852 | if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) { |
3853 | return S; |
3854 | } |
3855 | |
3856 | // If there are any constants, fold them together. |
3857 | unsigned Idx = 0; |
3858 | if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { |
3859 | ++Idx; |
3860 | assert(Idx < Ops.size());
3861 | auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { |
3862 | switch (Kind) { |
3863 | case scSMaxExpr: |
3864 | return APIntOps::smax(LHS, RHS); |
3865 | case scSMinExpr: |
3866 | return APIntOps::smin(LHS, RHS); |
3867 | case scUMaxExpr: |
3868 | return APIntOps::umax(LHS, RHS); |
3869 | case scUMinExpr: |
3870 | return APIntOps::umin(LHS, RHS); |
3871 | default: |
3872 | llvm_unreachable("Unknown SCEV min/max opcode")::llvm::llvm_unreachable_internal("Unknown SCEV min/max opcode" , "llvm/lib/Analysis/ScalarEvolution.cpp", 3872); |
3873 | } |
3874 | }; |
3875 | |
3876 | while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { |
3877 | // We found two constants, fold them together! |
3878 | ConstantInt *Fold = ConstantInt::get( |
3879 | getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); |
3880 | Ops[0] = getConstant(Fold); |
3881 | Ops.erase(Ops.begin()+1); // Erase the folded element |
3882 | if (Ops.size() == 1) return Ops[0]; |
3883 | LHSC = cast<SCEVConstant>(Ops[0]); |
3884 | } |
3885 | |
3886 | bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); |
3887 | bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); |
3888 | |
3889 | if (IsMax ? IsMinV : IsMaxV) { |
3890 | // If we are left with a constant minimum(/maximum)-int, strip it off. |
3891 | Ops.erase(Ops.begin()); |
3892 | --Idx; |
3893 | } else if (IsMax ? IsMaxV : IsMinV) { |
3894 | // If we have a max(/min) with a constant maximum(/minimum)-int, |
3895 | // it will always be the extremum. |
3896 | return LHSC; |
3897 | } |
3898 | |
3899 | if (Ops.size() == 1) return Ops[0]; |
3900 | } |
3901 | |
3902 | // Find the first operation of the same kind |
3903 | while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) |
3904 | ++Idx; |
3905 | |
3906 | // Check to see if one of the operands is of the same kind. If so, expand its |
3907 | // operands onto our operand list, and recurse to simplify. |
3908 | if (Idx < Ops.size()) { |
3909 | bool DeletedAny = false; |
3910 | while (Ops[Idx]->getSCEVType() == Kind) { |
3911 | const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); |
3912 | Ops.erase(Ops.begin()+Idx); |
3913 | append_range(Ops, SMME->operands()); |
3914 | DeletedAny = true; |
3915 | } |
3916 | |
3917 | if (DeletedAny) |
3918 | return getMinMaxExpr(Kind, Ops); |
3919 | } |
3920 | |
3921 | // Okay, check to see if the same value occurs in the operand list twice. If |
3922 | // so, delete one. Since we sorted the list, these values are required to |
3923 | // be adjacent. |
3924 | llvm::CmpInst::Predicate GEPred = |
3925 | IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; |
3926 | llvm::CmpInst::Predicate LEPred = |
3927 | IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; |
3928 | llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; |
3929 | llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; |
3930 | for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { |
3931 | if (Ops[i] == Ops[i + 1] || |
3932 | isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { |
3933 | // X op Y op Y --> X op Y |
3934 | // X op Y --> X, if we know X, Y are ordered appropriately |
3935 | Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); |
3936 | --i; |
3937 | --e; |
3938 | } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], |
3939 | Ops[i + 1])) { |
3940 | // X op Y --> Y, if we know X, Y are ordered appropriately |
3941 | Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); |
3942 | --i; |
3943 | --e; |
3944 | } |
3945 | } |
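// Worked examples of the loop above (editorial illustration):
// smax(X, X, Y) collapses to smax(X, Y); and when X >=s Y is provable,
// smax(X, Y) collapses to X.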
3946 | |
3947 | if (Ops.size() == 1) return Ops[0]; |
3948 | |
3949 | assert(!Ops.empty() && "Reduced smax down to nothing!");
3950 | |
3951 | // Okay, it looks like we really DO need an expr. Check to see if we |
3952 | // already have one, otherwise create a new one. |
3953 | FoldingSetNodeID ID; |
3954 | ID.AddInteger(Kind); |
3955 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
3956 | ID.AddPointer(Ops[i]); |
3957 | void *IP = nullptr; |
3958 | const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP); |
3959 | if (ExistingSCEV) |
3960 | return ExistingSCEV; |
3961 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); |
3962 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); |
3963 | SCEV *S = new (SCEVAllocator) |
3964 | SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); |
3965 | |
3966 | UniqueSCEVs.InsertNode(S, IP); |
3967 | registerUser(S, Ops); |
3968 | return S; |
3969 | } |
3970 | |
3971 | namespace { |
3972 | |
3973 | class SCEVSequentialMinMaxDeduplicatingVisitor final |
3974 | : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, |
3975 | std::optional<const SCEV *>> { |
3976 | using RetVal = std::optional<const SCEV *>; |
3977 | using Base = SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, RetVal>; |
3978 | |
3979 | ScalarEvolution &SE; |
3980 | const SCEVTypes RootKind; // Must be a sequential min/max expression. |
3981 | const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind. |
3982 | SmallPtrSet<const SCEV *, 16> SeenOps; |
3983 | |
3984 | bool canRecurseInto(SCEVTypes Kind) const { |
3985 | // We can only recurse into the SCEV expression of the same effective type |
3986 | // as the type of our root SCEV expression. |
3987 | return RootKind == Kind || NonSequentialRootKind == Kind; |
3988 | }
3989 | |
3990 | RetVal visitAnyMinMaxExpr(const SCEV *S) { |
3991 | assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
3992 |        "Only for min/max expressions.");
3993 | SCEVTypes Kind = S->getSCEVType(); |
3994 | |
3995 | if (!canRecurseInto(Kind)) |
3996 | return S; |
3997 | |
3998 | auto *NAry = cast<SCEVNAryExpr>(S); |
3999 | SmallVector<const SCEV *> NewOps; |
4000 | bool Changed = visit(Kind, NAry->operands(), NewOps); |
4001 | |
4002 | if (!Changed) |
4003 | return S; |
4004 | if (NewOps.empty()) |
4005 | return std::nullopt; |
4006 | |
4007 | return isa<SCEVSequentialMinMaxExpr>(S) |
4008 | ? SE.getSequentialMinMaxExpr(Kind, NewOps) |
4009 | : SE.getMinMaxExpr(Kind, NewOps); |
4010 | } |
4011 | |
4012 | RetVal visit(const SCEV *S) { |
4013 | // Has the whole operand been seen already? |
4014 | if (!SeenOps.insert(S).second) |
4015 | return std::nullopt; |
4016 | return Base::visit(S); |
4017 | } |
4018 | |
4019 | public: |
4020 | SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE, |
4021 | SCEVTypes RootKind) |
4022 | : SE(SE), RootKind(RootKind), |
4023 | NonSequentialRootKind( |
4024 | SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( |
4025 | RootKind)) {} |
4026 | |
4027 | bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps, |
4028 | SmallVectorImpl<const SCEV *> &NewOps) { |
4029 | bool Changed = false; |
4030 | SmallVector<const SCEV *> Ops; |
4031 | Ops.reserve(OrigOps.size()); |
4032 | |
4033 | for (const SCEV *Op : OrigOps) { |
4034 | RetVal NewOp = visit(Op); |
4035 | if (NewOp != Op) |
4036 | Changed = true; |
4037 | if (NewOp) |
4038 | Ops.emplace_back(*NewOp); |
4039 | } |
4040 | |
4041 | if (Changed) |
4042 | NewOps = std::move(Ops); |
4043 | return Changed; |
4044 | } |
4045 | |
4046 | RetVal visitConstant(const SCEVConstant *Constant) { return Constant; } |
4047 | |
4048 | RetVal visitVScale(const SCEVVScale *VScale) { return VScale; } |
4049 | |
4050 | RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; } |
4051 | |
4052 | RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; } |
4053 | |
4054 | RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; } |
4055 | |
4056 | RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; } |
4057 | |
4058 | RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; } |
4059 | |
4060 | RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; } |
4061 | |
4062 | RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; } |
4063 | |
4064 | RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } |
4065 | |
4066 | RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) { |
4067 | return visitAnyMinMaxExpr(Expr); |
4068 | } |
4069 | |
4070 | RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) { |
4071 | return visitAnyMinMaxExpr(Expr); |
4072 | } |
4073 | |
4074 | RetVal visitSMinExpr(const SCEVSMinExpr *Expr) { |
4075 | return visitAnyMinMaxExpr(Expr); |
4076 | } |
4077 | |
4078 | RetVal visitUMinExpr(const SCEVUMinExpr *Expr) { |
4079 | return visitAnyMinMaxExpr(Expr); |
4080 | } |
4081 | |
4082 | RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) { |
4083 | return visitAnyMinMaxExpr(Expr); |
4084 | } |
4085 | |
4086 | RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; } |
4087 | |
4088 | RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; } |
4089 | }; |
4090 | |
4091 | } // namespace |
4092 | |
4093 | static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind) { |
4094 | switch (Kind) { |
4095 | case scConstant: |
4096 | case scVScale: |
4097 | case scTruncate: |
4098 | case scZeroExtend: |
4099 | case scSignExtend: |
4100 | case scPtrToInt: |
4101 | case scAddExpr: |
4102 | case scMulExpr: |
4103 | case scUDivExpr: |
4104 | case scAddRecExpr: |
4105 | case scUMaxExpr: |
4106 | case scSMaxExpr: |
4107 | case scUMinExpr: |
4108 | case scSMinExpr: |
4109 | case scUnknown: |
4110 | // If any operand is poison, the whole expression is poison. |
4111 | return true; |
4112 | case scSequentialUMinExpr: |
4113 | // FIXME: if the *first* operand is poison, the whole expression is poison. |
4114 | return false; // Pessimistically, say that it does not propagate poison. |
4115 | case scCouldNotCompute: |
4116 | llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!")::llvm::llvm_unreachable_internal("Attempt to use a SCEVCouldNotCompute object!" , "llvm/lib/Analysis/ScalarEvolution.cpp", 4116); |
4117 | } |
4118 | llvm_unreachable("Unknown SCEV kind!")::llvm::llvm_unreachable_internal("Unknown SCEV kind!", "llvm/lib/Analysis/ScalarEvolution.cpp" , 4118); |
4119 | } |
4120 | |
4121 | /// Return true if V is poison given that AssumedPoison is already poison. |
4122 | static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) { |
4123 | // The only way poison may be introduced in a SCEV expression is from a |
4124 | // poison SCEVUnknown (ConstantExprs are also represented as SCEVUnknown, |
4125 | // not SCEVConstant). Notably, nowrap flags in SCEV nodes can *not* |
4126 | // introduce poison -- they encode guaranteed, non-speculated knowledge. |
4127 | // |
4128 | // Additionally, all SCEV nodes propagate poison from inputs to outputs, |
4129 | // with the notable exception of umin_seq, where only poison from the first |
4130 | // operand is (unconditionally) propagated. |
4131 | struct SCEVPoisonCollector { |
4132 | bool LookThroughMaybePoisonBlocking; |
4133 | SmallPtrSet<const SCEV *, 4> MaybePoison; |
4134 | SCEVPoisonCollector(bool LookThroughMaybePoisonBlocking) |
4135 | : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {} |
4136 | |
4137 | bool follow(const SCEV *S) { |
4138 | if (!LookThroughMaybePoisonBlocking && |
4139 | !scevUnconditionallyPropagatesPoisonFromOperands(S->getSCEVType())) |
4140 | return false; |
4141 | |
4142 | if (auto *SU = dyn_cast<SCEVUnknown>(S)) { |
4143 | if (!isGuaranteedNotToBePoison(SU->getValue())) |
4144 | MaybePoison.insert(S); |
4145 | } |
4146 | return true; |
4147 | } |
4148 | bool isDone() const { return false; } |
4149 | }; |
4150 | |
4151 | // First collect all SCEVs that might result in AssumedPoison to be poison. |
4152 | // We need to look through potentially poison-blocking operations here, |
4153 | // because we want to find all SCEVs that *might* result in poison, not only |
4154 | // those that are *required* to. |
4155 | SCEVPoisonCollector PC1(/* LookThroughMaybePoisonBlocking */ true); |
4156 | visitAll(AssumedPoison, PC1); |
4157 | |
4158 | // AssumedPoison is never poison. As the assumption is false, the implication |
4159 | // is true. Don't bother walking the other SCEV in this case. |
4160 | if (PC1.MaybePoison.empty()) |
4161 | return true; |
4162 | |
4163 | // Collect all SCEVs in S that, if poison, *will* result in S being poison |
4164 | // as well. We cannot look through potentially poison-blocking operations |
4165 | // here, as their arguments only *may* make the result poison. |
4166 | SCEVPoisonCollector PC2(/* LookThroughMaybePoisonBlocking */ false); |
4167 | visitAll(S, PC2); |
4168 | |
4169 | // Make sure that no matter which SCEV in PC1.MaybePoison is actually poison, |
4170 | // it will also make S poison by being part of PC2.MaybePoison. |
4171 | return all_of(PC1.MaybePoison, |
4172 | [&](const SCEV *S) { return PC2.MaybePoison.contains(S); }); |
4173 | } |
4174 | |
4175 | const SCEV * |
4176 | ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind, |
4177 | SmallVectorImpl<const SCEV *> &Ops) { |
4178 | assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4179 |        "Not a SCEVSequentialMinMaxExpr!");
4180 | assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
4181 | if (Ops.size() == 1) |
4182 | return Ops[0]; |
4183 | #ifndef NDEBUG |
4184 | Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); |
4185 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) { |
4186 | assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
4187 |        "Operand types don't match!");
4188 | assert(Ops[0]->getType()->isPointerTy() ==
4189 |            Ops[i]->getType()->isPointerTy() &&
4190 |        "min/max should be consistently pointerish");
4191 | } |
4192 | #endif |
4193 | |
4194 | // Note that SCEVSequentialMinMaxExpr is *NOT* commutative, |
4195 | // so we can *NOT* do any kind of sorting of the expressions! |
4196 | |
4197 | // Check if we have created the same expression before. |
4198 | if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) |
4199 | return S; |
4200 | |
4201 | // FIXME: there are *some* simplifications that we can do here. |
4202 | |
4203 | // Keep only the first instance of an operand. |
4204 | { |
4205 | SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind); |
4206 | bool Changed = Deduplicator.visit(Kind, Ops, Ops); |
4207 | if (Changed) |
4208 | return getSequentialMinMaxExpr(Kind, Ops); |
4209 | } |
4210 | |
4211 | // Check to see if one of the operands is of the same kind. If so, expand its |
4212 | // operands onto our operand list, and recurse to simplify. |
4213 | { |
4214 | unsigned Idx = 0; |
4215 | bool DeletedAny = false; |
4216 | while (Idx < Ops.size()) { |
4217 | if (Ops[Idx]->getSCEVType() != Kind) { |
4218 | ++Idx; |
4219 | continue; |
4220 | } |
4221 | const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]); |
4222 | Ops.erase(Ops.begin() + Idx); |
4223 | Ops.insert(Ops.begin() + Idx, SMME->operands().begin(), |
4224 | SMME->operands().end()); |
4225 | DeletedAny = true; |
4226 | } |
4227 | |
4228 | if (DeletedAny) |
4229 | return getSequentialMinMaxExpr(Kind, Ops); |
4230 | } |
4231 | |
4232 | const SCEV *SaturationPoint; |
4233 | ICmpInst::Predicate Pred; |
4234 | switch (Kind) { |
4235 | case scSequentialUMinExpr: |
4236 | SaturationPoint = getZero(Ops[0]->getType()); |
4237 | Pred = ICmpInst::ICMP_ULE; |
4238 | break; |
4239 | default: |
4240 | llvm_unreachable("Not a sequential min/max type.")::llvm::llvm_unreachable_internal("Not a sequential min/max type." , "llvm/lib/Analysis/ScalarEvolution.cpp", 4240); |
4241 | } |
4242 | |
4243 | for (unsigned i = 1, e = Ops.size(); i != e; ++i) { |
4244 | // We can replace %x umin_seq %y with %x umin %y if either: |
4245 | // * %y being poison implies %x is also poison. |
4246 | // * %x cannot be the saturating value (e.g. zero for umin). |
4247 | if (::impliesPoison(Ops[i], Ops[i - 1]) || |
4248 | isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1], |
4249 | SaturationPoint)) { |
4250 | SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]}; |
4251 | Ops[i - 1] = getMinMaxExpr( |
4252 | SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind), |
4253 | SeqOps); |
4254 | Ops.erase(Ops.begin() + i); |
4255 | return getSequentialMinMaxExpr(Kind, Ops); |
4256 | } |
4257 | // Fold %x umin_seq %y to %x if %x ule %y. |
4258 | // TODO: We might be able to prove the predicate for a later operand. |
4259 | if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) { |
4260 | Ops.erase(Ops.begin() + i); |
4261 | return getSequentialMinMaxExpr(Kind, Ops); |
4262 | } |
4263 | } |
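// Worked example (editorial illustration): (X umin_seq Y) is relaxed to
// (X umin Y) above when X is provably non-zero, because the sequential
// form only differs when an earlier operand equals the saturation point
// (zero for umin).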
4264 | |
4265 | // Okay, it looks like we really DO need an expr. Check to see if we |
4266 | // already have one, otherwise create a new one. |
4267 | FoldingSetNodeID ID; |
4268 | ID.AddInteger(Kind); |
4269 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
4270 | ID.AddPointer(Ops[i]); |
4271 | void *IP = nullptr; |
4272 | const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP); |
4273 | if (ExistingSCEV) |
4274 | return ExistingSCEV; |
4275 | |
4276 | const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); |
4277 | std::uninitialized_copy(Ops.begin(), Ops.end(), O); |
4278 | SCEV *S = new (SCEVAllocator) |
4279 | SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); |
4280 | |
4281 | UniqueSCEVs.InsertNode(S, IP); |
4282 | registerUser(S, Ops); |
4283 | return S; |
4284 | } |
4285 | |
4286 | const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { |
4287 | SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; |
4288 | return getSMaxExpr(Ops); |
4289 | } |
4290 | |
4291 | const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { |
4292 | return getMinMaxExpr(scSMaxExpr, Ops); |
4293 | } |
4294 | |
4295 | const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { |
4296 | SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; |
4297 | return getUMaxExpr(Ops); |
4298 | } |
4299 | |
4300 | const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { |
4301 | return getMinMaxExpr(scUMaxExpr, Ops); |
4302 | } |
4303 | |
4304 | const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, |
4305 | const SCEV *RHS) { |
4306 | SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; |
4307 | return getSMinExpr(Ops); |
4308 | } |
4309 | |
4310 | const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { |
4311 | return getMinMaxExpr(scSMinExpr, Ops); |
4312 | } |
4313 | |
4314 | const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS, |
4315 | bool Sequential) { |
4316 | SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; |
4317 | return getUMinExpr(Ops, Sequential); |
4318 | } |
4319 | |
4320 | const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops, |
4321 | bool Sequential) { |
4322 | return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops) |
4323 | : getMinMaxExpr(scUMinExpr, Ops); |
4324 | } |
4325 | |
4326 | const SCEV * |
4327 | ScalarEvolution::getSizeOfExpr(Type *IntTy, TypeSize Size) { |
4328 | const SCEV *Res = getConstant(IntTy, Size.getKnownMinValue()); |
4329 | if (Size.isScalable()) |
4330 | Res = getMulExpr(Res, getVScale(IntTy)); |
4331 | return Res; |
4332 | } |
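// Worked example (editorial illustration): a scalable TypeSize of
// "16 x vscale" bytes produces the SCEV (16 * vscale), while a fixed
// 16-byte size is just the constant 16.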
4333 | |
4334 | const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { |
4335 | return getSizeOfExpr(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); |
4336 | } |
4337 | |
4338 | const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) { |
4339 | return getSizeOfExpr(IntTy, getDataLayout().getTypeStoreSize(StoreTy)); |
4340 | } |
4341 | |
4342 | const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, |
4343 | StructType *STy, |
4344 | unsigned FieldNo) { |
4345 | // We can bypass creating a target-independent constant expression and then |
4346 | // folding it back into a ConstantInt. This is just a compile-time |
4347 | // optimization. |
4348 | return getConstant( |
4349 | IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); |
4350 | } |
4351 | |
4352 | const SCEV *ScalarEvolution::getUnknown(Value *V) { |
4353 | // Don't attempt to do anything other than create a SCEVUnknown object |
4354 | // here. createSCEV only calls getUnknown after checking for all other |
4355 | // interesting possibilities, and any other code that calls getUnknown |
4356 | // is doing so in order to hide a value from SCEV canonicalization. |
4357 | |
4358 | FoldingSetNodeID ID; |
4359 | ID.AddInteger(scUnknown); |
4360 | ID.AddPointer(V); |
4361 | void *IP = nullptr; |
4362 | if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { |
4363 | assert(cast<SCEVUnknown>(S)->getValue() == V &&
4364 |        "Stale SCEVUnknown in uniquing map!");
4365 | return S; |
4366 | } |
4367 | SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, |
4368 | FirstUnknown); |
4369 | FirstUnknown = cast<SCEVUnknown>(S); |
4370 | UniqueSCEVs.InsertNode(S, IP); |
4371 | return S; |
4372 | } |
4373 | |
4374 | //===----------------------------------------------------------------------===// |
4375 | // Basic SCEV Analysis and PHI Idiom Recognition Code |
4376 | // |
4377 | |
4378 | /// Test if values of the given type are analyzable within the SCEV |
4379 | /// framework. This primarily includes integer types, and it can optionally |
4380 | /// include pointer types if the ScalarEvolution class has access to |
4381 | /// target-specific information. |
4382 | bool ScalarEvolution::isSCEVable(Type *Ty) const { |
4383 | // Integers and pointers are always SCEVable. |
4384 | return Ty->isIntOrPtrTy(); |
4385 | } |
4386 | |
4387 | /// Return the size in bits of the specified type, for which isSCEVable must |
4388 | /// return true. |
4389 | uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { |
4390 | assert(isSCEVable(Ty) && "Type is not SCEVable!");
4391 | if (Ty->isPointerTy()) |
4392 | return getDataLayout().getIndexTypeSizeInBits(Ty); |
4393 | return getDataLayout().getTypeSizeInBits(Ty); |
4394 | } |
4395 | |
4396 | /// Return a type with the same bitwidth as the given type and which represents |
4397 | /// how SCEV will treat the given type, for which isSCEVable must return |
4398 | /// true. For pointer types, this is the pointer index sized integer type. |
4399 | Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { |
4400 | assert(isSCEVable(Ty) && "Type is not SCEVable!");
4401 | |
4402 | if (Ty->isIntegerTy()) |
4403 | return Ty; |
4404 | |
4405 | // The only other supported type is pointer.
4406 | assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
4407 | return getDataLayout().getIndexType(Ty); |
4408 | } |
4409 | |
4410 | Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { |
4411 | return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2; |
4412 | } |
4413 | |
4414 | bool ScalarEvolution::instructionCouldExistWithOperands(const SCEV *A,
4415 | const SCEV *B) {
4416 | // For a valid use point to exist, the defining scope of one operand
4417 | // must dominate the other.
4418 | bool PreciseA, PreciseB; |
4419 | auto *ScopeA = getDefiningScopeBound({A}, PreciseA); |
4420 | auto *ScopeB = getDefiningScopeBound({B}, PreciseB); |
4421 | if (!PreciseA || !PreciseB) |
4422 | // Can't tell. |
4423 | return false; |
4424 | return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) || |
4425 | DT.dominates(ScopeB, ScopeA); |
4426 | } |
4427 | |
4428 | |
4429 | const SCEV *ScalarEvolution::getCouldNotCompute() { |
4430 | return CouldNotCompute.get(); |
4431 | } |
4432 | |
4433 | bool ScalarEvolution::checkValidity(const SCEV *S) const { |
4434 | bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { |
4435 | auto *SU = dyn_cast<SCEVUnknown>(S); |
4436 | return SU && SU->getValue() == nullptr; |
4437 | }); |
4438 | |
4439 | return !ContainsNulls; |
4440 | } |
4441 | |
4442 | bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { |
4443 | HasRecMapType::iterator I = HasRecMap.find(S); |
4444 | if (I != HasRecMap.end()) |
4445 | return I->second; |
4446 | |
4447 | bool FoundAddRec = |
4448 | SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); |
4449 | HasRecMap.insert({S, FoundAddRec}); |
4450 | return FoundAddRec; |
4451 | } |
4452 | |
4453 | /// Return the set of Values known to be equal to \p S. \p S can be
4454 | /// represented by any one of the values in the returned set.
4455 | ArrayRef<Value *> ScalarEvolution::getSCEVValues(const SCEV *S) { |
4456 | ExprValueMapType::iterator SI = ExprValueMap.find_as(S); |
4457 | if (SI == ExprValueMap.end()) |
4458 | return std::nullopt; |
4459 | #ifndef NDEBUG |
4460 | if (VerifySCEVMap) { |
4461 | // Check there is no dangling Value in the set returned. |
4462 | for (Value *V : SI->second) |
4463 | assert(ValueExprMap.count(V));
4464 | } |
4465 | #endif |
4466 | return SI->second.getArrayRef(); |
4467 | } |
4468 | |
4469 | /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
4470 | /// must not be used on its own: eraseValueFromMap removes V from both
4471 | /// ValueExprMap and ExprValueMap at the same time, keeping them consistent.
4472 | void ScalarEvolution::eraseValueFromMap(Value *V) { |
4473 | ValueExprMapType::iterator I = ValueExprMap.find_as(V); |
4474 | if (I != ValueExprMap.end()) { |
4475 | auto EVIt = ExprValueMap.find(I->second); |
4476 | bool Removed = EVIt->second.remove(V); |
4477 | (void) Removed; |
4478 | assert(Removed && "Value not in ExprValueMap?");
4479 | ValueExprMap.erase(I); |
4480 | } |
4481 | } |
4482 | |
4483 | void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) { |
4484 | // A recursive query may have already computed the SCEV. It should be |
4485 | // equivalent, but may not necessarily be exactly the same, e.g. due to lazily |
4486 | // inferred nowrap flags. |
4487 | auto It = ValueExprMap.find_as(V); |
4488 | if (It == ValueExprMap.end()) { |
4489 | ValueExprMap.insert({SCEVCallbackVH(V, this), S}); |
4490 | ExprValueMap[S].insert(V); |
4491 | } |
4492 | } |
4493 | |
4494 | /// Determine whether this instruction is either not SCEVable or will always |
4495 | /// produce a SCEVUnknown. We do not have to walk past such instructions when |
4496 | /// invalidating. |
4497 | static bool isAlwaysUnknown(const Instruction *I) { |
4498 | switch (I->getOpcode()) { |
4499 | case Instruction::Load: |
4500 | return true; |
4501 | default: |
4502 | return false; |
4503 | } |
4504 | } |
4505 | |
4506 | /// Return an existing SCEV if it exists, otherwise analyze the expression and |
4507 | /// create a new one. |
4508 | const SCEV *ScalarEvolution::getSCEV(Value *V) { |
4509 | assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4510 | |
4511 | if (const SCEV *S = getExistingSCEV(V)) |
4512 | return S; |
4513 | const SCEV *S = createSCEVIter(V); |
4514 | assert((!isa<Instruction>(V) || !isAlwaysUnknown(cast<Instruction>(V)) ||
4515 |         isa<SCEVUnknown>(S)) &&
4516 |        "isAlwaysUnknown() instruction is not SCEVUnknown");
4517 | return S; |
4518 | } |
4519 | |
4520 | const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { |
4521 | assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4522 | |
4523 | ValueExprMapType::iterator I = ValueExprMap.find_as(V); |
4524 | if (I != ValueExprMap.end()) { |
4525 | const SCEV *S = I->second; |
4526 | assert(checkValidity(S) &&
4527 |        "existing SCEV has not been properly invalidated");
4528 | return S; |
4529 | } |
4530 | return nullptr; |
4531 | } |
4532 | |
4533 | /// Return a SCEV corresponding to -V = -1*V |
4534 | const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, |
4535 | SCEV::NoWrapFlags Flags) { |
4536 | if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) |
4537 | return getConstant( |
4538 | cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); |
4539 | |
4540 | Type *Ty = V->getType(); |
4541 | Ty = getEffectiveSCEVType(Ty); |
4542 | return getMulExpr(V, getMinusOne(Ty), Flags); |
4543 | } |
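// Illustrative results (operand names assumed):
//
//   getNegativeSCEV(5)   ==> -5          (constant folds directly)
//   getNegativeSCEV(%n)  ==> (-1 * %n)   (canonical mul by minus one)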
4544 | |
4545 | /// If Expr computes ~A, return A else return nullptr |
4546 | static const SCEV *MatchNotExpr(const SCEV *Expr) { |
4547 | const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); |
4548 | if (!Add || Add->getNumOperands() != 2 || |
4549 | !Add->getOperand(0)->isAllOnesValue()) |
4550 | return nullptr; |
4551 | |
4552 | const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); |
4553 | if (!AddRHS || AddRHS->getNumOperands() != 2 || |
4554 | !AddRHS->getOperand(0)->isAllOnesValue()) |
4555 | return nullptr; |
4556 | |
4557 | return AddRHS->getOperand(1); |
4558 | } |
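// In SCEV's canonical form, ~A is written as (-1 + (-1 * A)), so the matcher
// above looks for a two-operand add whose first operand is all-ones and whose
// second operand is a two-operand mul by all-ones. Illustrative cases (SCEVs
// assumed):
//
//   (-1 + (-1 * %a))  ==> returns the SCEV of %a
//   ( 2 + (-1 * %a))  ==> nullptr (add operand 0 is not all-ones)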
4559 | |
4560 | /// Return a SCEV corresponding to ~V = -1-V |
4561 | const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { |
4562 | assert(!V->getType()->isPointerTy() && "Can't negate pointer");
4563 | |
4564 | if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) |
4565 | return getConstant( |
4566 | cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); |
4567 | |
4568 | // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) |
4569 | if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { |
4570 | auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { |
4571 | SmallVector<const SCEV *, 2> MatchedOperands; |
4572 | for (const SCEV *Operand : MME->operands()) { |
4573 | const SCEV *Matched = MatchNotExpr(Operand); |
4574 | if (!Matched) |
4575 | return (const SCEV *)nullptr; |
4576 | MatchedOperands.push_back(Matched); |
4577 | } |
4578 | return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), |
4579 | MatchedOperands); |
4580 | }; |
4581 | if (const SCEV *Replaced = MatchMinMaxNegation(MME)) |
4582 | return Replaced; |
4583 | } |
4584 | |
4585 | Type *Ty = V->getType(); |
4586 | Ty = getEffectiveSCEVType(Ty); |
4587 | return getMinusSCEV(getMinusOne(Ty), V); |
4588 | } |
4589 | |
4590 | const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) { |
4591 | assert(P->getType()->isPointerTy());
4592 | |
4593 | if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) { |
4594 | // The base of an AddRec is the first operand. |
4595 | SmallVector<const SCEV *> Ops{AddRec->operands()}; |
4596 | Ops[0] = removePointerBase(Ops[0]); |
4597 | // Don't try to transfer nowrap flags for now. We could in some cases |
4598 | // (for example, if pointer operand of the AddRec is a SCEVUnknown). |
4599 | return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap); |
4600 | } |
4601 | if (auto *Add = dyn_cast<SCEVAddExpr>(P)) { |
4602 | // The base of an Add is the pointer operand. |
4603 | SmallVector<const SCEV *> Ops{Add->operands()}; |
4604 | const SCEV **PtrOp = nullptr; |
4605 | for (const SCEV *&AddOp : Ops) { |
4606 | if (AddOp->getType()->isPointerTy()) { |
4607 | assert(!PtrOp && "Cannot have multiple pointer ops");
4608 | PtrOp = &AddOp; |
4609 | } |
4610 | } |
4611 | assert(PtrOp && "Must have pointer op"); *PtrOp = removePointerBase(*PtrOp);
4612 | // Don't try to transfer nowrap flags for now. We could in some cases |
4613 | // (for example, if the pointer operand of the Add is a SCEVUnknown). |
4614 | return getAddExpr(Ops); |
4615 | } |
4616 | // Any other expression must be a pointer base. |
4617 | return getZero(P->getType()); |
4618 | } |
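// Illustrative example (expression assumed): for P = (4 + %base + %i) with
// pointer operand %base, the Add branch rewrites %base to 0 and returns
// (4 + %i); for a bare pointer P = %base, the fallthrough returns 0.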
4619 | |
4620 | const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, |
4621 | SCEV::NoWrapFlags Flags, |
4622 | unsigned Depth) { |
4623 | // Fast path: X - X --> 0. |
4624 | if (LHS == RHS) |
4625 | return getZero(LHS->getType()); |
4626 | |
4627 | // If we subtract two pointers with different pointer bases, bail. |
4628 | // Eventually, we're going to add an assertion to getMulExpr that we |
4629 | // can't multiply by a pointer. |
4630 | if (RHS->getType()->isPointerTy()) { |
4631 | if (!LHS->getType()->isPointerTy() || |
4632 | getPointerBase(LHS) != getPointerBase(RHS)) |
4633 | return getCouldNotCompute(); |
4634 | LHS = removePointerBase(LHS); |
4635 | RHS = removePointerBase(RHS); |
4636 | } |
4637 | |
4638 | // We represent LHS - RHS as LHS + (-1)*RHS. This transformation |
4639 | // makes it so that we cannot make much use of NUW. |
4640 | auto AddFlags = SCEV::FlagAnyWrap; |
4641 | const bool RHSIsNotMinSigned = |
4642 | !getSignedRangeMin(RHS).isMinSignedValue(); |
4643 | if (hasFlags(Flags, SCEV::FlagNSW)) { |
4644 | // Let M be the minimum representable signed value. Then (-1)*RHS |
4645 | // signed-wraps if and only if RHS is M. That can happen even for |
4646 | // a NSW subtraction because e.g. (-1)*M signed-wraps even though |
4647 | // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + |
4648 | // (-1)*RHS, we need to prove that RHS != M. |
4649 | // |
4650 | // If LHS is non-negative and we know that LHS - RHS does not |
4651 | // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap |
4652 | // either by proving that RHS > M or that LHS >= 0. |
4653 | if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { |
4654 | AddFlags = SCEV::FlagNSW; |
4655 | } |
4656 | } |
4657 | |
4658 | // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - |
4659 | // RHS is NSW and LHS >= 0. |
4660 | // |
4661 | // The difficulty here is that the NSW flag may have been proven |
4662 | // relative to a loop that is to be found in a recurrence in LHS and |
4663 | // not in RHS. Applying NSW to (-1)*M may then let the NSW have a |
4664 | // larger scope than intended. |
4665 | auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap; |
4666 | |
4667 | return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth); |
4668 | } |
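// Worked instance of the NSW reasoning above (i8 used for brevity): with
// M = -128, the product (-1) * (-128) wraps back to -128 in i8, so FlagNSW
// cannot be transferred blindly. If getSignedRangeMin(RHS) is known to be
// greater than -128, or LHS is known non-negative, then LHS + (-1)*RHS may
// keep FlagNSW.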
4669 | |
4670 | const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty, |
4671 | unsigned Depth) { |
4672 | Type *SrcTy = V->getType(); |
4673 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4674 |        "Cannot truncate or zero extend with non-integer arguments!");
4675 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
4676 | return V; // No conversion |
4677 | if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) |
4678 | return getTruncateExpr(V, Ty, Depth); |
4679 | return getZeroExtendExpr(V, Ty, Depth); |
4680 | } |
4681 | |
4682 | const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty, |
4683 | unsigned Depth) { |
4684 | Type *SrcTy = V->getType(); |
4685 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4686 |        "Cannot truncate or sign extend with non-integer arguments!");
4687 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
4688 | return V; // No conversion |
4689 | if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) |
4690 | return getTruncateExpr(V, Ty, Depth); |
4691 | return getSignExtendExpr(V, Ty, Depth); |
4692 | } |
4693 | |
4694 | const SCEV * |
4695 | ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { |
4696 | Type *SrcTy = V->getType(); |
4697 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4698 |        "Cannot noop or zero extend with non-integer arguments!");
4699 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4700 |        "getNoopOrZeroExtend cannot truncate!");
4701 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
4702 | return V; // No conversion |
4703 | return getZeroExtendExpr(V, Ty); |
4704 | } |
4705 | |
4706 | const SCEV * |
4707 | ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { |
4708 | Type *SrcTy = V->getType(); |
4709 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4710 |        "Cannot noop or sign extend with non-integer arguments!");
4711 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4712 |        "getNoopOrSignExtend cannot truncate!");
4713 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
4714 | return V; // No conversion |
4715 | return getSignExtendExpr(V, Ty); |
4716 | } |
4717 | |
4718 | const SCEV * |
4719 | ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { |
4720 | Type *SrcTy = V->getType(); |
4721 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4722 |        "Cannot noop or any extend with non-integer arguments!");
4723 | assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4724 |        "getNoopOrAnyExtend cannot truncate!");
4725 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
4726 | return V; // No conversion |
4727 | return getAnyExtendExpr(V, Ty); |
4728 | } |
4729 | |
4730 | const SCEV * |
4731 | ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { |
4732 | Type *SrcTy = V->getType(); |
4733 | assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4734 |        "Cannot truncate or noop with non-integer arguments!");
4735 | assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4736 |        "getTruncateOrNoop cannot extend!");
4737 | if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) |
4738 | return V; // No conversion |
4739 | return getTruncateExpr(V, Ty); |
4740 | } |
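// Quick reference for the six conversion helpers above (same behavior,
// summarized editorially), with SW/DW the source/destination bit widths:
//
//   getTruncateOr{Zero,Sign}Extend   any SW, DW   trunc, ext, or no-op
//   getNoopOr{Zero,Sign,Any}Extend   SW <= DW     ext or no-op
//   getTruncateOrNoop                SW >= DW     trunc or no-op
//
// All of them return V unchanged when SW == DW.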
4741 | |
4742 | const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, |
4743 | const SCEV *RHS) { |
4744 | const SCEV *PromotedLHS = LHS; |
4745 | const SCEV *PromotedRHS = RHS; |
4746 | |
4747 | if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) |
4748 | PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); |
4749 | else |
4750 | PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); |
4751 | |
4752 | return getUMaxExpr(PromotedLHS, PromotedRHS); |
4753 | } |
4754 | |
4755 | const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, |
4756 | const SCEV *RHS, |
4757 | bool Sequential) { |
4758 | SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; |
4759 | return getUMinFromMismatchedTypes(Ops, Sequential); |
4760 | } |
4761 | |
4762 | const SCEV * |
4763 | ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops, |
4764 | bool Sequential) { |
4765 | assert(!Ops.empty() && "At least one operand must be!");
4766 | // Trivial case. |
4767 | if (Ops.size() == 1) |
4768 | return Ops[0]; |
4769 | |
4770 | // Find the max type first. |
4771 | Type *MaxType = nullptr; |
4772 | for (const auto *S : Ops) |
4773 | if (MaxType) |
4774 | MaxType = getWiderType(MaxType, S->getType()); |
4775 | else |
4776 | MaxType = S->getType(); |
4777 | assert(MaxType && "Failed to find maximum type!");
4778 | |
4779 | // Extend all ops to max type. |
4780 | SmallVector<const SCEV *, 2> PromotedOps; |
4781 | for (const auto *S : Ops) |
4782 | PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); |
4783 | |
4784 | // Generate umin. |
4785 | return getUMinExpr(PromotedOps, Sequential); |
4786 | } |
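// Illustrative example (operands assumed): for Ops = { %a : i32, %b : i64 },
// MaxType is i64, %a is promoted with a zext, and the result is
// (umin (zext i32 %a to i64), %b).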
4787 | |
4788 | const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { |
4789 | // A pointer operand may evaluate to a nonpointer expression, such as null. |
4790 | if (!V->getType()->isPointerTy()) |
4791 | return V; |
4792 | |
4793 | while (true) { |
4794 | if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { |
4795 | V = AddRec->getStart(); |
4796 | } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) { |
4797 | const SCEV *PtrOp = nullptr; |
4798 | for (const SCEV *AddOp : Add->operands()) { |
4799 | if (AddOp->getType()->isPointerTy()) { |
4800 | assert(!PtrOp && "Cannot have multiple pointer ops");
4801 | PtrOp = AddOp; |
4802 | } |
4803 | } |
4804 | assert(PtrOp && "Must have pointer op");
4805 | V = PtrOp; |
4806 | } else // Not something we can look further into. |
4807 | return V; |
4808 | } |
4809 | } |
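// Illustrative walk (expression assumed): for V = {(%base + 8),+,4}<L>, the
// loop first strips the AddRec to its start (%base + 8), then picks the
// pointer operand of the Add, returning %base.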
4810 | |
4811 | /// Push users of the given Instruction onto the given Worklist. |
4812 | static void PushDefUseChildren(Instruction *I, |
4813 | SmallVectorImpl<Instruction *> &Worklist, |
4814 | SmallPtrSetImpl<Instruction *> &Visited) { |
4815 | // Push the def-use children onto the Worklist stack. |
4816 | for (User *U : I->users()) { |
4817 | auto *UserInsn = cast<Instruction>(U); |
4818 | if (isAlwaysUnknown(UserInsn)) |
4819 | continue; |
4820 | if (Visited.insert(UserInsn).second) |
4821 | Worklist.push_back(UserInsn); |
4822 | } |
4823 | } |
4824 | |
4825 | namespace { |
4826 | |
4827 | /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is
4828 | /// L, use its start expression. For an AddRec on another loop, use the
4829 | /// AddRec itself if IgnoreOtherLoops is true; otherwise the rewrite
4830 | /// cannot be done.
4831 | /// If S contains a SCEVUnknown not invariant in L, the rewrite cannot be done.
4832 | class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { |
4833 | public: |
4834 | static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, |
4835 | bool IgnoreOtherLoops = true) { |
4836 | SCEVInitRewriter Rewriter(L, SE); |
4837 | const SCEV *Result = Rewriter.visit(S); |
4838 | if (Rewriter.hasSeenLoopVariantSCEVUnknown()) |
4839 | return SE.getCouldNotCompute(); |
4840 | return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops |
4841 | ? SE.getCouldNotCompute() |
4842 | : Result; |
4843 | } |
4844 | |
4845 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { |
4846 | if (!SE.isLoopInvariant(Expr, L)) |
4847 | SeenLoopVariantSCEVUnknown = true; |
4848 | return Expr; |
4849 | } |
4850 | |
4851 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { |
4852 | // Only re-write AddRecExprs for this loop. |
4853 | if (Expr->getLoop() == L) |
4854 | return Expr->getStart(); |
4855 | SeenOtherLoops = true; |
4856 | return Expr; |
4857 | } |
4858 | |
4859 | bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } |
4860 | |
4861 | bool hasSeenOtherLoops() { return SeenOtherLoops; } |
4862 | |
4863 | private: |
4864 | explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) |
4865 | : SCEVRewriteVisitor(SE), L(L) {} |
4866 | |
4867 | const Loop *L; |
4868 | bool SeenLoopVariantSCEVUnknown = false; |
4869 | bool SeenOtherLoops = false; |
4870 | }; |
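// Illustrative rewrite (SCEVs assumed): for S = (%n + {0,+,1}<L>) with %n
// invariant in L, rewrite(S, L, SE) replaces the AddRec by its start and
// yields %n. If %n were defined inside L, the result would instead be
// SCEVCouldNotCompute.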
4871 | |
4872 | /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is
4873 | /// L, use its post-increment expression. For an AddRec on another loop,
4874 | /// use the AddRec itself.
4875 | /// If S contains a SCEVUnknown not invariant in L, the rewrite cannot be done.
4876 | class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { |
4877 | public: |
4878 | static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { |
4879 | SCEVPostIncRewriter Rewriter(L, SE); |
4880 | const SCEV *Result = Rewriter.visit(S); |
4881 | return Rewriter.hasSeenLoopVariantSCEVUnknown() |
4882 | ? SE.getCouldNotCompute() |
4883 | : Result; |
4884 | } |
4885 | |
4886 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { |
4887 | if (!SE.isLoopInvariant(Expr, L)) |
4888 | SeenLoopVariantSCEVUnknown = true; |
4889 | return Expr; |
4890 | } |
4891 | |
4892 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { |
4893 | // Only re-write AddRecExprs for this loop. |
4894 | if (Expr->getLoop() == L) |
4895 | return Expr->getPostIncExpr(SE); |
4896 | SeenOtherLoops = true; |
4897 | return Expr; |
4898 | } |
4899 | |
4900 | bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } |
4901 | |
4902 | bool hasSeenOtherLoops() { return SeenOtherLoops; } |
4903 | |
4904 | private: |
4905 | explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) |
4906 | : SCEVRewriteVisitor(SE), L(L) {} |
4907 | |
4908 | const Loop *L; |
4909 | bool SeenLoopVariantSCEVUnknown = false; |
4910 | bool SeenOtherLoops = false; |
4911 | }; |
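// Illustrative rewrite (SCEV assumed): for S = {0,+,2}<L>, the post-increment
// form is {2,+,2}<L> -- the value of the recurrence after the current
// iteration's increment has been applied.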
4912 | |
4913 | /// This class evaluates the compare condition by matching it against the |
4914 | /// condition of loop latch. If there is a match we assume a true value |
4915 | /// for the condition while building SCEV nodes. |
4916 | class SCEVBackedgeConditionFolder |
4917 | : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { |
4918 | public: |
4919 | static const SCEV *rewrite(const SCEV *S, const Loop *L, |
4920 | ScalarEvolution &SE) { |
4921 | bool IsPosBECond = false; |
4922 | Value *BECond = nullptr; |
4923 | if (BasicBlock *Latch = L->getLoopLatch()) { |
4924 | BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); |
4925 | if (BI && BI->isConditional()) { |
4926 | assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
4927 |        "Both outgoing branches should not target same header!");
4928 | BECond = BI->getCondition(); |
4929 | IsPosBECond = BI->getSuccessor(0) == L->getHeader(); |
4930 | } else { |
4931 | return S; |
4932 | } |
4933 | } |
4934 | SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); |
4935 | return Rewriter.visit(S); |
4936 | } |
4937 | |
4938 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { |
4939 | const SCEV *Result = Expr; |
4940 | bool InvariantF = SE.isLoopInvariant(Expr, L); |
4941 | |
4942 | if (!InvariantF) { |
4943 | Instruction *I = cast<Instruction>(Expr->getValue()); |
4944 | switch (I->getOpcode()) { |
4945 | case Instruction::Select: { |
4946 | SelectInst *SI = cast<SelectInst>(I); |
4947 | std::optional<const SCEV *> Res = |
4948 | compareWithBackedgeCondition(SI->getCondition()); |
4949 | if (Res) { |
4950 | bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne(); |
4951 | Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); |
4952 | } |
4953 | break; |
4954 | } |
4955 | default: { |
4956 | std::optional<const SCEV *> Res = compareWithBackedgeCondition(I); |
4957 | if (Res) |
4958 | Result = *Res; |
4959 | break; |
4960 | } |
4961 | } |
4962 | } |
4963 | return Result; |
4964 | } |
4965 | |
4966 | private: |
4967 | explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, |
4968 | bool IsPosBECond, ScalarEvolution &SE) |
4969 | : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), |
4970 | IsPositiveBECond(IsPosBECond) {} |
4971 | |
4972 | std::optional<const SCEV *> compareWithBackedgeCondition(Value *IC); |
4973 | |
4974 | const Loop *L; |
4975 | /// Loop back condition. |
4976 | Value *BackedgeCond = nullptr; |
4977 | /// Set to true if loop back is on positive branch condition. |
4978 | bool IsPositiveBECond; |
4979 | }; |
4980 | |
4981 | std::optional<const SCEV *> |
4982 | SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { |
4983 | |
4984 | // If value matches the backedge condition for loop latch, |
4985 | // then return a constant evolution node based on loopback |
4986 | // branch taken. |
4987 | if (BackedgeCond == IC) |
4988 | return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) |
4989 | : SE.getZero(Type::getInt1Ty(SE.getContext())); |
4990 | return std::nullopt; |
4991 | } |
4992 | |
4993 | class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { |
4994 | public: |
4995 | static const SCEV *rewrite(const SCEV *S, const Loop *L, |
4996 | ScalarEvolution &SE) { |
4997 | SCEVShiftRewriter Rewriter(L, SE); |
4998 | const SCEV *Result = Rewriter.visit(S); |
4999 | return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); |
5000 | } |
5001 | |
5002 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { |
5003 | // Only loop-invariant unknowns are allowed; anything else invalidates the rewrite.
5004 | if (!SE.isLoopInvariant(Expr, L)) |
5005 | Valid = false; |
5006 | return Expr; |
5007 | } |
5008 | |
5009 | const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { |
5010 | if (Expr->getLoop() == L && Expr->isAffine()) |
5011 | return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); |
5012 | Valid = false; |
5013 | return Expr; |
5014 | } |
5015 | |
5016 | bool isValid() { return Valid; } |
5017 | |
5018 | private: |
5019 | explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) |
5020 | : SCEVRewriteVisitor(SE), L(L) {} |
5021 | |
5022 | const Loop *L; |
5023 | bool Valid = true; |
5024 | }; |
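// Illustrative rewrite (SCEV assumed): for an affine S = {5,+,3}<L>,
// visitAddRecExpr returns S - 3 = {2,+,3}<L>, i.e. the recurrence shifted
// back by one iteration; any loop-variant SCEVUnknown marks the rewrite
// invalid.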
5025 | |
5026 | } // end anonymous namespace |
5027 | |
5028 | SCEV::NoWrapFlags |
5029 | ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { |
5030 | if (!AR->isAffine()) |
5031 | return SCEV::FlagAnyWrap; |
5032 | |
5033 | using OBO = OverflowingBinaryOperator; |
5034 | |
5035 | SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; |
5036 | |
5037 | if (!AR->hasNoSelfWrap()) { |
5038 | const SCEV *BECount = getConstantMaxBackedgeTakenCount(AR->getLoop()); |
5039 | if (const SCEVConstant *BECountMax = dyn_cast<SCEVConstant>(BECount)) { |
5040 | ConstantRange StepCR = getSignedRange(AR->getStepRecurrence(*this)); |
5041 | const APInt &BECountAP = BECountMax->getAPInt(); |
5042 | unsigned NoOverflowBitWidth = |
5043 | BECountAP.getActiveBits() + StepCR.getMinSignedBits(); |
5044 | if (NoOverflowBitWidth <= getTypeSizeInBits(AR->getType())) |
5045 | Result = ScalarEvolution::setFlags(Result, SCEV::FlagNW); |
5046 | } |
5047 | } |
5048 | |
5049 | if (!AR->hasNoSignedWrap()) { |
5050 | ConstantRange AddRecRange = getSignedRange(AR); |
5051 | ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); |
5052 | |
5053 | auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( |
5054 | Instruction::Add, IncRange, OBO::NoSignedWrap); |
5055 | if (NSWRegion.contains(AddRecRange)) |
5056 | Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); |
5057 | } |
5058 | |
5059 | if (!AR->hasNoUnsignedWrap()) { |
5060 | ConstantRange AddRecRange = getUnsignedRange(AR); |
5061 | ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); |
5062 | |
5063 | auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( |
5064 | Instruction::Add, IncRange, OBO::NoUnsignedWrap); |
5065 | if (NUWRegion.contains(AddRecRange)) |
5066 | Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); |
5067 | } |
5068 | |
5069 | return Result; |
5070 | } |
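// Worked instance of the self-wrap check above (numbers assumed): for a
// 64-bit AddRec with constant max backedge-taken count 1000
// (getActiveBits() == 10) and a step range of [1, 3]
// (getMinSignedBits() == 3), NoOverflowBitWidth is 13 <= 64, so the total
// accumulated change cannot cross the full 64-bit range and FlagNW is set.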
5071 | |
5072 | SCEV::NoWrapFlags |
5073 | ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) { |
5074 | SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); |
5075 | |
5076 | if (AR->hasNoSignedWrap()) |
5077 | return Result; |
5078 | |
5079 | if (!AR->isAffine()) |
5080 | return Result; |
5081 | |
5082 | // This function can be expensive, only try to prove NSW once per AddRec. |
5083 | if (!SignedWrapViaInductionTried.insert(AR).second) |
5084 | return Result; |
5085 | |
5086 | const SCEV *Step = AR->getStepRecurrence(*this); |
5087 | const Loop *L = AR->getLoop(); |
5088 | |
5089 | // Check whether the backedge-taken count is SCEVCouldNotCompute. |
5090 | // Note that this serves two purposes: It filters out loops that are |
5091 | // simply not analyzable, and it covers the case where this code is |
5092 | // being called from within backedge-taken count analysis, such that |
5093 | // attempting to ask for the backedge-taken count would likely result |
5094 | // in infinite recursion. In the latter case, the analysis code will
5095 | // cope with a conservative value, and it will take care to purge |
5096 | // that value once it has finished. |
5097 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); |
5098 | |
5099 | // Normally, in the cases we can prove no-overflow via a |
5100 | // backedge guarding condition, we can also compute a backedge |
5101 | // taken count for the loop. The exceptions are assumptions and |
5102 | // guards present in the loop -- SCEV is not great at exploiting |
5103 | // these to compute max backedge taken counts, but can still use |
5104 | // these to prove lack of overflow. Use this fact to avoid |
5105 | // doing extra work that may not pay off. |
5106 | |
5107 | if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && |
5108 | AC.assumptions().empty()) |
5109 | return Result; |
5110 | |
5111 | // If the backedge is guarded by a comparison with the pre-inc value the |
5112 | // addrec is safe. Also, if the entry is guarded by a comparison with the |
5113 | // start value and the backedge is guarded by a comparison with the post-inc |
5114 | // value, the addrec is safe. |
5115 | ICmpInst::Predicate Pred; |
5116 | const SCEV *OverflowLimit = |
5117 | getSignedOverflowLimitForStep(Step, &Pred, this); |
5118 | if (OverflowLimit && |
5119 | (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || |
5120 | isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { |
5121 | Result = setFlags(Result, SCEV::FlagNSW); |
5122 | } |
5123 | return Result; |
5124 | } |
5125 | SCEV::NoWrapFlags |
5126 | ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) { |
5127 | SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); |
5128 | |
5129 | if (AR->hasNoUnsignedWrap()) |
5130 | return Result; |
5131 | |
5132 | if (!AR->isAffine()) |
5133 | return Result; |
5134 | |
5135 | // This function can be expensive, only try to prove NUW once per AddRec. |
5136 | if (!UnsignedWrapViaInductionTried.insert(AR).second) |
5137 | return Result; |
5138 | |
5139 | const SCEV *Step = AR->getStepRecurrence(*this); |
5140 | unsigned BitWidth = getTypeSizeInBits(AR->getType()); |
5141 | const Loop *L = AR->getLoop(); |
5142 | |
5143 | // Check whether the backedge-taken count is SCEVCouldNotCompute. |
5144 | // Note that this serves two purposes: It filters out loops that are |
5145 | // simply not analyzable, and it covers the case where this code is |
5146 | // being called from within backedge-taken count analysis, such that |
5147 | // attempting to ask for the backedge-taken count would likely result |
5148 | // in infinite recursion. In the latter case, the analysis code will
5149 | // cope with a conservative value, and it will take care to purge |
5150 | // that value once it has finished. |
5151 | const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); |
5152 | |
5153 | // Normally, in the cases we can prove no-overflow via a |
5154 | // backedge guarding condition, we can also compute a backedge |
5155 | // taken count for the loop. The exceptions are assumptions and |
5156 | // guards present in the loop -- SCEV is not great at exploiting |
5157 | // these to compute max backedge taken counts, but can still use |
5158 | // these to prove lack of overflow. Use this fact to avoid |
5159 | // doing extra work that may not pay off. |
5160 | |
5161 | if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && |
5162 | AC.assumptions().empty()) |
5163 | return Result; |
5164 | |
5165 | // If the backedge is guarded by a comparison with the pre-inc value the |
5166 | // addrec is safe. Also, if the entry is guarded by a comparison with the |
5167 | // start value and the backedge is guarded by a comparison with the post-inc |
5168 | // value, the addrec is safe. |
5169 | if (isKnownPositive(Step)) { |
5170 | const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - |
5171 | getUnsignedRangeMax(Step)); |
5172 | if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || |
5173 | isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { |
5174 | Result = setFlags(Result, SCEV::FlagNUW); |
5175 | } |
5176 | } |
5177 | |
5178 | return Result; |
5179 | } |
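// Illustrative bound (i8 and a step range assumed): for Step in [1, 4],
// N = 0 - 4 == 252 in unsigned i8 arithmetic; proving AR u< 252 on every
// backedge guarantees AR + Step stays below 256, so FlagNUW is set.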
5180 | |
5181 | namespace { |
5182 | |
5183 | /// Represents an abstract binary operation. This may exist as a |
5184 | /// normal instruction or constant expression, or may have been |
5185 | /// derived from an expression tree. |
5186 | struct BinaryOp { |
5187 | unsigned Opcode; |
5188 | Value *LHS; |
5189 | Value *RHS; |
5190 | bool IsNSW = false; |
5191 | bool IsNUW = false; |
5192 | |
5193 | /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or |
5194 | /// constant expression. |
5195 | Operator *Op = nullptr; |
5196 | |
5197 | explicit BinaryOp(Operator *Op) |
5198 | : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), |
5199 | Op(Op) { |
5200 | if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { |
5201 | IsNSW = OBO->hasNoSignedWrap(); |
5202 | IsNUW = OBO->hasNoUnsignedWrap(); |
5203 | } |
5204 | } |
5205 | |
5206 | explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, |
5207 | bool IsNUW = false) |
5208 | : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {} |
5209 | }; |
5210 | |
5211 | } // end anonymous namespace |
5212 | |
5213 | /// Try to map \p V into a BinaryOp, and return \c std::nullopt on failure. |
5214 | static std::optional<BinaryOp> MatchBinaryOp(Value *V, const DataLayout &DL, |
5215 | AssumptionCache &AC, |
5216 | const DominatorTree &DT, |
5217 | const Instruction *CxtI) { |
5218 | auto *Op = dyn_cast<Operator>(V); |
5219 | if (!Op) |
5220 | return std::nullopt; |
5221 | |
5222 | // Implementation detail: all the cleverness here should happen without |
5223 | // creating new SCEV expressions -- our caller knows tricks to avoid creating
5224 | // SCEV expressions when possible, and we should not break that. |
5225 | |
5226 | switch (Op->getOpcode()) { |
5227 | case Instruction::Add: |
5228 | case Instruction::Sub: |
5229 | case Instruction::Mul: |
5230 | case Instruction::UDiv: |
5231 | case Instruction::URem: |
5232 | case Instruction::And: |
5233 | case Instruction::AShr: |
5234 | case Instruction::Shl: |
5235 | return BinaryOp(Op); |
5236 | |
5237 | case Instruction::Or: { |
5238 | // LLVM loves to convert `add` of operands with no common bits |
5239 | // into an `or`. But SCEV really doesn't deal with `or` that well, |
5240 | // so try extra hard to recognize this `or` as an `add`. |
5241 | if (haveNoCommonBitsSet(Op->getOperand(0), Op->getOperand(1), DL, &AC, CxtI, |
5242 | &DT, /*UseInstrInfo=*/true)) |
5243 | return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1), |
5244 | /*IsNSW=*/true, /*IsNUW=*/true); |
5245 | return BinaryOp(Op); |
5246 | } |
5247 | |
5248 | case Instruction::Xor: |
5249 | if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) |
5250 | // If the RHS of the xor is a signmask, then this is just an add. |
5251 | // Instcombine turns add of signmask into xor as a strength reduction step. |
5252 | if (RHSC->getValue().isSignMask()) |
5253 | return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); |
5254 | // For i1 operands, binary `xor` is the same as `add` (addition mod 2).
5255 | if (V->getType()->isIntegerTy(1)) |
5256 | return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); |
5257 | return BinaryOp(Op); |
5258 | |
5259 | case Instruction::LShr: |
5260 | // Turn a logical shift right by a constant into an unsigned divide.
5261 | if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { |
5262 | uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); |
5263 | |
5264 | // If the shift count is not less than the bitwidth, the result of |
5265 | // the shift is undefined. Don't try to analyze it, because the |
5266 | // resolution chosen here may differ from the resolution chosen in |
5267 | // other parts of the compiler. |
5268 | if (SA->getValue().ult(BitWidth)) { |
5269 | Constant *X = |
5270 | ConstantInt::get(SA->getContext(), |
5271 | APInt::getOneBitSet(BitWidth, SA->getZExtValue())); |
5272 | return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); |
5273 | } |
5274 | } |
5275 | return BinaryOp(Op); |
5276 | |
5277 | case Instruction::ExtractValue: { |
5278 | auto *EVI = cast<ExtractValueInst>(Op); |
5279 | if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) |
5280 | break; |
5281 | |
5282 | auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); |
5283 | if (!WO) |
5284 | break; |
5285 | |
5286 | Instruction::BinaryOps BinOp = WO->getBinaryOp(); |
5287 | bool Signed = WO->isSigned(); |
5288 | // TODO: Should add nuw/nsw flags for mul as well. |
5289 | if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) |
5290 | return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); |
5291 | |
5292 | // Now that we know that all uses of the arithmetic-result component of |
5293 | // CI are guarded by the overflow check, we can go ahead and pretend |
5294 | // that the arithmetic is non-overflowing. |
5295 | return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), |
5296 | /* IsNSW = */ Signed, /* IsNUW = */ !Signed); |
5297 | } |
5298 | |
5299 | default: |
5300 | break; |
5301 | } |
5302 | |
5303 | // Recognize the loop.decrement.reg intrinsic: as it has exactly the same
5304 | // semantics as a Sub, return a binary sub expression.
5305 | if (auto *II = dyn_cast<IntrinsicInst>(V)) |
5306 | if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) |
5307 | return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); |
5308 | |
5309 | return std::nullopt; |
5310 | } |
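// Illustrative normalizations (IR values assumed):
//
//   %o = or i32 %a, 4             ; bit 2 of %a known clear: matched as
//                                 ; add nuw nsw %a, 4
//   %d = lshr i32 %x, 3           ; matched as udiv %x, 8
//   %s = xor i32 %y, -2147483648  ; sign-mask xor: matched as add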
5311 | |
5312 | /// Helper function to createAddRecFromPHIWithCasts. We have a phi |
5313 | /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via |
5314 | /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the |
5315 | /// way. This function checks if \p Op, an operand of this SCEVAddExpr, |
5316 | /// follows one of the following patterns: |
5317 | /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) |
5318 | /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) |
5319 | /// If the SCEV expression of \p Op conforms with one of the expected patterns |
5320 | /// we return the type of the truncation operation, and indicate whether the |
5321 | /// truncated type should be treated as signed/unsigned by setting |
5322 | /// \p Signed to true/false, respectively. |
5323 | static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, |
5324 | bool &Signed, ScalarEvolution &SE) { |
5325 | // The case where Op == SymbolicPHI (that is, with no type conversions on |
5326 | // the way) is handled by the regular add recurrence creating logic and |
5327 | // would have already been triggered in createAddRecForPHI. Reaching it here |
5328 | // means that createAddRecFromPHI had failed for this PHI before (e.g., |
5329 | // because one of the other operands of the SCEVAddExpr updating this PHI is |
5330 | // not invariant). |
5331 | // |
5332 | // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in |
5333 | // this case predicates that allow us to prove that Op == SymbolicPHI will |
5334 | // be added. |
5335 | if (Op == SymbolicPHI) |
5336 | return nullptr; |
5337 | |
5338 | unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); |
5339 | unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); |
5340 | if (SourceBits != NewBits) |
5341 | return nullptr; |
5342 | |
5343 | const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); |
5344 | const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); |
5345 | if (!SExt && !ZExt) |
5346 | return nullptr; |
5347 | const SCEVTruncateExpr *Trunc = |
5348 | SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) |
5349 | : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); |
5350 | if (!Trunc) |
5351 | return nullptr; |
5352 | const SCEV *X = Trunc->getOperand(); |
5353 | if (X != SymbolicPHI) |
5354 | return nullptr; |
5355 | Signed = SExt != nullptr; |
5356 | return Trunc->getType(); |
5357 | } |
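// Illustrative match (types assumed): with %X an i64 phi,
//   Op = (sext i32 (trunc i64 %X to i32) to i64)
// returns TruncTy == i32 with Signed == true, while Op == %X itself returns
// nullptr because that case is handled by the plain addrec logic.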
5358 | |
5359 | static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { |
5360 | if (!PN->getType()->isIntegerTy()) |
5361 | return nullptr; |
5362 | const Loop *L = LI.getLoopFor(PN->getParent()); |
5363 | if (!L || L->getHeader() != PN->getParent()) |
5364 | return nullptr; |
5365 | return L; |
5366 | } |
5367 | |
5368 | // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the |
5369 | // computation that updates the phi follows the following pattern: |
5370 | // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum |
5371 | // which corresponds to a phi->trunc->sext/zext->add->phi update chain.
5372 | // If so, try to see if it can be rewritten as an AddRecExpr under some |
5373 | // Predicates. If successful, return them as a pair. Also cache the results |
5374 | // of the analysis. |
5375 | // |
5376 | // Example usage scenario: |
5377 | // Say the Rewriter is called for the following SCEV: |
5378 | // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) |
5379 | // where: |
5380 | // %X = phi i64 (%Start, %BEValue) |
5381 | // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), |
5382 | // and call this function with %SymbolicPHI = %X. |
5383 | // |
5384 | // The analysis will find that the value coming around the backedge has |
5385 | // the following SCEV: |
5386 | // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) |
5387 | // Upon concluding that this matches the desired pattern, the function |
5388 | // will return the pair {NewAddRec, SmallPredsVec} where: |
5389 | // NewAddRec = {%Start,+,%Step} |
5390 | // SmallPredsVec = {P1, P2, P3} as follows: |
5391 | // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> |
5392 | // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) |
5393 | // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) |
5394 | // The returned pair means that SymbolicPHI can be rewritten into NewAddRec |
5395 | // under the predicates {P1,P2,P3}. |
5396 | // This predicated rewrite will be cached in PredicatedSCEVRewrites: |
5397 | // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
5398 | // |
5399 | // TODO's: |
5400 | // |
5401 | // 1) Extend the Induction descriptor to also support inductions that involve |
5402 | // casts: When needed (namely, when we are called in the context of the |
5403 | // vectorizer induction analysis), a Set of cast instructions will be |
5404 | // populated by this method, and provided back to isInductionPHI. This is |
5405 | // needed to allow the vectorizer to properly record them to be ignored by |
5406 | // the cost model and to avoid vectorizing them (otherwise these casts, |
5407 | // which are redundant under the runtime overflow checks, will be |
5408 | // vectorized, which can be costly). |
5409 | // |
5410 | // 2) Support additional induction/PHISCEV patterns: We also want to support |
5411 | // inductions where the sext-trunc / zext-trunc operations (partly) occur |
5412 | // after the induction update operation (the induction increment): |
5413 | // |
5414 | // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) |
5415 | // which corresponds to a phi->add->trunc->sext/zext->phi update chain.
5416 | // |
5417 | // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) |
5418 | // which corresponds to a phi->trunc->add->sext/zext->phi update chain.
5419 | // |
5420 | // 3) Outline common code with createAddRecFromPHI to avoid duplication. |
5421 | std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> |
5422 | ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { |
5423 | SmallVector<const SCEVPredicate *, 3> Predicates; |
5424 | |
5425 | // *** Part1: Analyze if we have a phi-with-cast pattern for which we can |
5426 | // return an AddRec expression under some predicate. |
5427 | |
5428 | auto *PN = cast<PHINode>(SymbolicPHI->getValue()); |
5429 | const Loop *L = isIntegerLoopHeaderPHI(PN, LI); |
5430 | assert(L && "Expecting an integer loop header phi");
5431 | |
5432 | // The loop may have multiple entrances or multiple exits; we can analyze |
5433 | // this phi as an addrec if it has a unique entry value and a unique |
5434 | // backedge value. |
5435 | Value *BEValueV = nullptr, *StartValueV = nullptr; |
5436 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { |
5437 | Value *V = PN->getIncomingValue(i); |
5438 | if (L->contains(PN->getIncomingBlock(i))) { |
5439 | if (!BEValueV) { |
5440 | BEValueV = V; |
5441 | } else if (BEValueV != V) { |
5442 | BEValueV = nullptr; |
5443 | break; |
5444 | } |
5445 | } else if (!StartValueV) { |
5446 | StartValueV = V; |
5447 | } else if (StartValueV != V) { |
5448 | StartValueV = nullptr; |
5449 | break; |
5450 | } |
5451 | } |
5452 | if (!BEValueV || !StartValueV) |
5453 | return std::nullopt; |
5454 | |
5455 | const SCEV *BEValue = getSCEV(BEValueV); |
5456 | |
5457 | // If the value coming around the backedge is an add with the symbolic |
5458 | // value we just inserted, possibly with casts that we can ignore under |
5459 | // an appropriate runtime guard, then we found a simple induction variable! |
5460 | const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); |
5461 | if (!Add) |
5462 | return std::nullopt; |
5463 | |
5464 | // If there is a single occurrence of the symbolic value, possibly |
5465 | // casted, replace it with a recurrence. |
5466 | unsigned FoundIndex = Add->getNumOperands(); |
5467 | Type *TruncTy = nullptr; |
5468 | bool Signed; |
5469 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) |
5470 | if ((TruncTy = |
5471 | isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) |
5472 | if (FoundIndex == e) { |
5473 | FoundIndex = i; |
5474 | break; |
5475 | } |
5476 | |
5477 | if (FoundIndex == Add->getNumOperands()) |
5478 | return std::nullopt; |
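// Editorial illustration (hypothetical IR): a backedge value this scan
// accepts is an add with one casted occurrence of the phi, e.g.
//   %t = trunc i64 %x to i32
//   %e = sext i32 %t to i64
//   %x.next = add i64 %e, %step
// Here FoundIndex names the (sext (trunc %x)) operand of the add, TruncTy
// is i32, and Signed records that the extension was a sext.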
5479 | |
5480 | // Create an add with everything but the specified operand. |
5481 | SmallVector<const SCEV *, 8> Ops; |
5482 | for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) |
5483 | if (i != FoundIndex) |
5484 | Ops.push_back(Add->getOperand(i)); |
5485 | const SCEV *Accum = getAddExpr(Ops); |
5486 | |
5487 | // The runtime checks will not be valid if the step amount varies
5488 | // inside the loop.
5489 | if (!isLoopInvariant(Accum, L)) |
5490 | return std::nullopt; |
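// Editorial note: if, say, %step above were reloaded from memory on every
// iteration, Accum would not be loop-invariant and no single runtime
// check could cover all iterations, hence the bail-out.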
5491 | |
5492 | // *** Part2: Create the predicates |
5493 | |
5494 | // Analysis was successful: we have a phi-with-cast pattern for which we |
5495 | // can return an AddRec expression under the following predicates: |
5496 | // |
5497 | // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) |
5498 | // fits within the truncated type (does not overflow) for i = 0 to n-1. |
5499 | // P2: An Equal predicate that guarantees that |
5500 | // Start = (Ext ix (Trunc iy (Start) to ix) to iy) |
5501 | // P3: An Equal predicate that guarantees that |
5502 | // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) |
5503 | // |
5504 | // As we next prove, the above predicates guarantee that: |
5505 | // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) |
5506 | // |
5507 | // |
5508 | // More formally, we want to prove that: |
5509 | // Expr(i+1) = Start + (i+1) * Accum |
5510 | // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum |
5511 | // |
5512 | // Given that: |
5513 | // 1) Expr(0) = Start |
5514 | // 2) Expr(1) = Start + Accum |
5515 | // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 |
5516 | // 3) Induction hypothesis (step i): |
5517 | // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum |
5518 | // |
5519 | // Proof: |
5520 | // Expr(i+1) = |
5521 | // = Start + (i+1)*Accum |
5522 | // = (Start + i*Accum) + Accum |
5523 | // = Expr(i) + Accum |
5524 | // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum |
5525 | // :: from step i |
5526 | // |
5527 | // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum |
5528 | // |
5529 | // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) |
5530 | // + (Ext ix (Trunc iy (Accum) to ix) to iy) |
5531 | // + Accum :: from P3 |
5532 | // |
5533 | // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) |
5534 | // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) |
5535 | // |
5536 | // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum |
5537 | // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum |
5538 | // |
5539 | // By induction, the same applies to all iterations 1 <= i < n.
5540 | // |
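// Editorial sanity check of the proof (numbers chosen for illustration):
// take iy = i64, ix = i8, Start = 100, Accum = 3. P2 and P3 hold because
// 100 and 3 survive the i8 round-trip, and P1 requires 100 + i*3 to stay
// within i8 for i = 0 to n-1 (i.e. n <= 10 for signed i8). Then, e.g.,
// Expr(2) = 106 and indeed (sext i8 (trunc i64 106 to i8) to i64) = 106,
// as the proof guarantees.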
5541 | |
5542 | // Create a truncated addrec for which we will add a no overflow check (P1). |
5543 | const SCEV *StartVal = getSCEV(StartValueV); |
5544 | const SCEV *PHISCEV = |
5545 | getAddRecExpr(getTruncateExpr(StartVal, TruncTy), |
5546 | getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); |
5547 | |
5548 | // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. |
5549 | // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV |
5550 | // will be constant. |
5551 | // |
5552 | // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't |
5553 | // add P1. |
5554 | if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { |
5555 | SCEVWrapPredicate::IncrementWrapFlags AddedFlags = |
5556 | Signed ? SCEVWrapPredicate::IncrementNSSW |
5557 | : SCEVWrapPredicate::IncrementNUSW; |
5558 | const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); |
5559 | Predicates.push_back(AddRecPred); |
5560 | } |
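// Editorial note: the flag mirrors the cast kind: a sign-extended phi gets
// the no-signed-wrap check (IncrementNSSW), a zero-extended one the
// no-unsigned-wrap check (IncrementNUSW); either way this is P1's "does
// not overflow in the truncated type" stated as a runtime predicate.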
5561 | |
5562 | // Create the Equal Predicates P2,P3: |
5563 | |
5564 | // It is possible that the predicates P2 and/or P3 are computable at
5565 | // compile time because StartVal and/or Accum are constants.
5566 | // If so, we can check them now and bail out early if either P2 or P3
5567 | // is known to be false.
5568 | |
5569 | // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) |
5570 | // for each of StartVal and Accum |
5571 | auto getExtendedExpr = [&](const SCEV *Expr, |
5572 | bool CreateSignExtend) -> const SCEV * { |
5573 | assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
5574 | const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); |
5575 | const SCEV *ExtendedExpr = |
5576 | CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType()) |
5577 | : getZeroExtendExpr(TruncatedExpr, Expr->getType()); |
5578 | return ExtendedExpr; |
5579 | }; |
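// Editorial example (hypothetical values): with TruncTy = i32 and an i64
// StartVal, getExtendedExpr(StartVal, /*CreateSignExtend=*/true) builds
//   (sext i32 (trunc i64 %Start to i32) to i64),
// i.e. the right-hand side of predicate P2 above.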
5580 | |
5581 | // Given: |
5582 | // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
5583 | // = getExtendedExpr(Expr) |
5584 | // Determine whether the predicate P: Expr == ExtendedExpr |
5585 | // is known to be false at compile time |
5586 | auto PredIsKnownFalse = [&](const SCEV *Expr, |
5587 | const SCEV *ExtendedExpr) -> bool { |
5588 | return Expr != ExtendedExpr && |
5589 | isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr); |
5590 | }; |
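// Editorial example (hypothetical constant): for StartVal = 4294967296
// (2^32, as i64) and TruncTy = i32, the extended expression folds to 0,
// the ICMP_NE is then known to hold, and PredIsKnownFalse lets us bail
// out instead of emitting a runtime check that must always fail.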
5591 | |
5592 | const SCEV *StartExtended = getExtendedExpr(StartVal, Signed); |